diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7777554b2..9ca4ef6c7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,6 +32,8 @@ why we ask this as well as instructions for how to proceed, see the ./configure make make install + # Optionally, you might instead want to use `make install-all` + # since `multi_extension` regression test would fail due to missing downgrade scripts. cd src/test/regress make check ``` @@ -51,7 +53,7 @@ why we ask this as well as instructions for how to proceed, see the autoconf flex git libcurl4-gnutls-dev libicu-dev \ libkrb5-dev liblz4-dev libpam0g-dev libreadline-dev \ libselinux1-dev libssl-dev libxslt1-dev libzstd-dev \ - make uuid-dev + make uuid-dev mitmproxy ``` 2. Get, build, and test the code @@ -62,6 +64,8 @@ why we ask this as well as instructions for how to proceed, see the ./configure make sudo make install + # Optionally, you might instead want to use `sudo make install-all` + # since `multi_extension` regression test would fail due to missing downgrade scripts. cd src/test/regress make check ``` @@ -104,6 +108,8 @@ why we ask this as well as instructions for how to proceed, see the PG_CONFIG=/usr/pgsql-14/bin/pg_config ./configure make sudo make install + # Optionally, you might instead want to use `sudo make install-all` + # since `multi_extension` regression test would fail due to missing downgrade scripts. cd src/test/regress make check ``` diff --git a/ci/README.md b/ci/README.md index eef0fd4de..31fdb7e0e 100644 --- a/ci/README.md +++ b/ci/README.md @@ -156,9 +156,9 @@ git merge "community/$PR_BRANCH" familiar with the change. 5. You should rerun the `check-merge-to-enterprise` check on `community/$PR_BRANCH`. You can use re-run from failed option in circle CI. -6. You can now merge the PR on enterprise. Be sure to NOT use "squash and merge", +6. You can now merge the PR on community. Be sure to NOT use "squash and merge", but instead use the regular "merge commit" mode. -7. 
You can now merge the PR on community. Be sure to NOT use "squash and merge", +7. You can now merge the PR on enterprise. Be sure to NOT use "squash and merge", but instead use the regular "merge commit" mode. The subsequent PRs on community will be able to pass the diff --git a/ci/check_enterprise_merge.sh b/ci/check_enterprise_merge.sh index 040a5c224..d29ffcad8 100755 --- a/ci/check_enterprise_merge.sh +++ b/ci/check_enterprise_merge.sh @@ -66,10 +66,10 @@ fi git merge --abort # If we have a conflict on enterprise merge on the master branch, we have a problem. -# Provide an error message to indicate that enterprise merge is needed. +# Provide an error message to indicate that enterprise merge is needed to fix this check. if [[ $PR_BRANCH = master ]]; then - echo "ERROR: Master branch has merge conlicts with enterprise-master." - echo "Try re-running this job if you merged community PR before enterprise PR. Otherwise conflicts need to be resolved as a separate PR on enterprise." + echo "ERROR: Master branch has merge conflicts with enterprise-master." + echo "Try re-running this CI job after merging your changes into enterprise-master." 
exit 1 fi diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 8183feddc..916962b4a 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -24,10 +24,12 @@ #include "catalog/pg_am.h" #include "catalog/pg_publication.h" #include "catalog/pg_trigger.h" +#include "catalog/pg_extension.h" #include "catalog/storage.h" #include "catalog/storage_xlog.h" #include "commands/progress.h" #include "commands/vacuum.h" +#include "commands/extension.h" #include "executor/executor.h" #include "nodes/makefuncs.h" #include "optimizer/plancat.h" @@ -154,6 +156,20 @@ static void ColumnarReadMissingRowsIntoIndex(TableScanDesc scan, Relation indexR static ItemPointerData TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetItemPointer); +/* functions for CheckCitusColumnarVersion */ +static bool CheckAvailableVersionColumnar(int elevel); +static bool CheckInstalledVersionColumnar(int elevel); +static char * AvailableExtensionVersionColumnar(void); +static char * InstalledExtensionVersionColumnar(void); +static bool CitusColumnarHasBeenLoadedInternal(void); +static bool CitusColumnarHasBeenLoaded(void); +static bool CheckCitusColumnarVersion(int elevel); +static bool MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion); + +/* global variables for CheckCitusColumnarVersion */ +static bool extensionLoadedColumnar = false; +static bool EnableVersionChecksColumnar = true; +static bool citusVersionKnownCompatibleColumnar = false; /* Custom tuple slot ops used for columnar. Initialized in columnar_tableam_init(). 
*/ static TupleTableSlotOps TTSOpsColumnar; @@ -171,7 +187,7 @@ columnar_beginscan(Relation relation, Snapshot snapshot, ParallelTableScanDesc parallel_scan, uint32 flags) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); int natts = relation->rd_att->natts; @@ -194,6 +210,7 @@ columnar_beginscan_extended(Relation relation, Snapshot snapshot, ParallelTableScanDesc parallel_scan, uint32 flags, Bitmapset *attr_needed, List *scanQual) { + CheckCitusColumnarVersion(ERROR); Oid relfilenode = relation->rd_node.relNode; /* @@ -418,7 +435,7 @@ columnar_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan) static IndexFetchTableData * columnar_index_fetch_begin(Relation rel) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); Oid relfilenode = rel->rd_node.relNode; if (PendingWritesInUpperTransactions(relfilenode, GetCurrentSubTransactionId())) @@ -643,7 +660,7 @@ static bool columnar_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, Snapshot snapshot) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); uint64 rowNumber = tid_to_row_number(slot->tts_tid); StripeMetadata *stripeMetadata = FindStripeByRowNumber(rel, rowNumber, snapshot); @@ -656,7 +673,7 @@ static TransactionId columnar_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); /* * XXX: We didn't bother implementing index_delete_tuple for neither of @@ -717,7 +734,7 @@ static void columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); /* * columnar_init_write_state allocates the write state in a longer @@ -765,7 +782,7 @@ static void columnar_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); ColumnarWriteState 
*writeState = columnar_init_write_state(relation, RelationGetDescr(relation), @@ -841,7 +858,7 @@ columnar_relation_set_new_filenode(Relation rel, TransactionId *freezeXid, MultiXactId *minmulti) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); if (persistence == RELPERSISTENCE_UNLOGGED) { @@ -878,8 +895,7 @@ columnar_relation_set_new_filenode(Relation rel, static void columnar_relation_nontransactional_truncate(Relation rel) { - CheckCitusVersion(ERROR); - + CheckCitusColumnarVersion(ERROR); RelFileNode relfilenode = rel->rd_node; NonTransactionDropWriteState(relfilenode.relNode); @@ -926,7 +942,7 @@ columnar_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, double *tups_vacuumed, double *tups_recently_dead) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); TupleDesc sourceDesc = RelationGetDescr(OldHeap); TupleDesc targetDesc = RelationGetDescr(NewHeap); @@ -1024,7 +1040,7 @@ static void columnar_vacuum_rel(Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy) { - if (!CheckCitusVersion(WARNING)) + if (!CheckCitusColumnarVersion(WARNING)) { /* * Skip if the extension catalogs are not up-to-date, but avoid @@ -1342,7 +1358,7 @@ columnar_index_build_range_scan(Relation columnarRelation, void *callback_state, TableScanDesc scan) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); if (start_blockno != 0 || numblocks != InvalidBlockNumber) { @@ -1592,7 +1608,7 @@ columnar_index_validate_scan(Relation columnarRelation, ValidateIndexState * validateIndexState) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); ColumnarReportTotalVirtualBlocks(columnarRelation, snapshot, PROGRESS_SCAN_BLOCKS_TOTAL); @@ -1764,7 +1780,7 @@ TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetIt static uint64 columnar_relation_size(Relation rel, ForkNumber forkNumber) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); uint64 nblocks = 0; @@ -1791,7 +1807,7 @@ 
columnar_relation_size(Relation rel, ForkNumber forkNumber) static bool columnar_relation_needs_toast_table(Relation rel) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); return false; } @@ -1802,8 +1818,7 @@ columnar_estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac) { - CheckCitusVersion(ERROR); - + CheckCitusColumnarVersion(ERROR); RelationOpenSmgr(rel); *pages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); *tuples = ColumnarTableRowCount(rel); @@ -1910,6 +1925,15 @@ columnar_tableam_init() TTSOpsColumnar = TTSOpsVirtual; TTSOpsColumnar.copy_heap_tuple = ColumnarSlotCopyHeapTuple; + DefineCustomBoolVariable( + "columnar.enable_version_checks", + gettext_noop("Enables Version Check for Columnar"), + NULL, + &EnableVersionChecksColumnar, + true, + PGC_USERSET, + GUC_NO_SHOW_ALL, + NULL, NULL, NULL); } @@ -1968,7 +1992,7 @@ ColumnarTableDropHook(Oid relid) if (IsColumnarTableAmTable(relid)) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); /* * Drop metadata. 
No need to drop storage here since for @@ -2093,8 +2117,7 @@ ColumnarProcessUtility(PlannedStmt *pstmt, if (rel->rd_tableam == GetColumnarTableAmRoutine()) { - CheckCitusVersion(ERROR); - + CheckCitusColumnarVersion(ERROR); if (!ColumnarSupportsIndexAM(indexStmt->accessMethod)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -2316,7 +2339,7 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_set); Datum alter_columnar_table_set(PG_FUNCTION_ARGS) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); Oid relationId = PG_GETARG_OID(0); @@ -2440,7 +2463,7 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_reset); Datum alter_columnar_table_reset(PG_FUNCTION_ARGS) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); Oid relationId = PG_GETARG_OID(0); @@ -2579,3 +2602,318 @@ downgrade_columnar_storage(PG_FUNCTION_ARGS) table_close(rel, AccessExclusiveLock); PG_RETURN_VOID(); } + + +/* + * Code to check the Citus Version, helps remove dependency from Citus + */ + +/* + * CitusColumnarHasBeenLoaded returns true if the citus extension has been created + * in the current database and the extension script has been executed. Otherwise, + * it returns false. The result is cached as this is called very frequently. + */ +bool +CitusColumnarHasBeenLoaded(void) +{ + if (!extensionLoadedColumnar || creating_extension) + { + /* + * Refresh if we have not determined whether the extension has been + * loaded yet, or in case of ALTER EXTENSION since we want to treat + * Citus as "not loaded" during ALTER EXTENSION citus. + */ + bool extensionLoaded = CitusColumnarHasBeenLoadedInternal(); + extensionLoadedColumnar = extensionLoaded; + } + + return extensionLoadedColumnar; +} + + +/* + * CitusColumnarHasBeenLoadedInternal returns true if the citus extension has been created + * in the current database and the extension script has been executed. Otherwise, + * it returns false. 
+ */ +static bool +CitusColumnarHasBeenLoadedInternal(void) +{ + if (IsBinaryUpgrade) + { + /* never use Citus logic during pg_upgrade */ + return false; + } + + Oid citusExtensionOid = get_extension_oid("citus", true); + if (citusExtensionOid == InvalidOid) + { + /* Citus extension does not exist yet */ + return false; + } + + if (creating_extension && CurrentExtensionObject == citusExtensionOid) + { + /* + * We do not use Citus hooks during CREATE/ALTER EXTENSION citus + * since the objects used by the C code might not be there yet. + */ + return false; + } + + /* citus extension exists and has been created */ + return true; +} + + +/* + * CheckCitusColumnarVersion checks whether there is a version mismatch between the + * available version and the loaded version or between the installed version + * and the loaded version. Returns true if compatible, false otherwise. + * + * As a side effect, this function also sets citusVersionKnownCompatibleColumnar global + * variable to true which reduces version check cost of next calls. + */ +bool +CheckCitusColumnarVersion(int elevel) +{ + if (citusVersionKnownCompatibleColumnar || + !CitusColumnarHasBeenLoaded() || + !EnableVersionChecksColumnar) + { + return true; + } + + if (CheckAvailableVersionColumnar(elevel) && CheckInstalledVersionColumnar(elevel)) + { + citusVersionKnownCompatibleColumnar = true; + return true; + } + else + { + return false; + } +} + + +/* + * CheckAvailableVersion compares CITUS_EXTENSIONVERSION and the currently + * available version from the citus.control file. If they are not compatible, + * this function logs an error with the specified elevel and returns false, + * otherwise it returns true. 
+ */ +bool +CheckAvailableVersionColumnar(int elevel) +{ + if (!EnableVersionChecksColumnar) + { + return true; + } + + char *availableVersion = AvailableExtensionVersionColumnar(); + + if (!MajorVersionsCompatibleColumnar(availableVersion, CITUS_EXTENSIONVERSION)) + { + ereport(elevel, (errmsg("loaded Citus library version differs from latest " + "available extension version"), + errdetail("Loaded library requires %s, but the latest control " + "file specifies %s.", CITUS_MAJORVERSION, + availableVersion), + errhint("Restart the database to load the latest Citus " + "library."))); + pfree(availableVersion); + return false; + } + pfree(availableVersion); + return true; +} + + +/* + * CheckInstalledVersion compares CITUS_EXTENSIONVERSION and the + * extension's current version from the pg_extension catalog table. If they + * are not compatible, this function logs an error with the specified elevel, + * otherwise it returns true. + */ +static bool +CheckInstalledVersionColumnar(int elevel) +{ + Assert(CitusColumnarHasBeenLoaded()); + Assert(EnableVersionChecksColumnar); + + char *installedVersion = InstalledExtensionVersionColumnar(); + + if (!MajorVersionsCompatibleColumnar(installedVersion, CITUS_EXTENSIONVERSION)) + { + ereport(elevel, (errmsg("loaded Citus library version differs from installed " + "extension version"), + errdetail("Loaded library requires %s, but the installed " + "extension version is %s.", CITUS_MAJORVERSION, + installedVersion), + errhint("Run ALTER EXTENSION citus UPDATE and try again."))); + pfree(installedVersion); + return false; + } + pfree(installedVersion); + return true; +} + + +/* + * MajorVersionsCompatible checks whether both versions are compatible. They + * are if major and minor version numbers match, the schema version is + * ignored. Returns true if compatible, false otherwise. 
+ */ +bool +MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion) +{ + const char schemaVersionSeparator = '-'; + + char *leftSeperatorPosition = strchr(leftVersion, schemaVersionSeparator); + char *rightSeperatorPosition = strchr(rightVersion, schemaVersionSeparator); + int leftComparisionLimit = 0; + int rightComparisionLimit = 0; + + if (leftSeperatorPosition != NULL) + { + leftComparisionLimit = leftSeperatorPosition - leftVersion; + } + else + { + leftComparisionLimit = strlen(leftVersion); + } + + if (rightSeperatorPosition != NULL) + { + rightComparisionLimit = rightSeperatorPosition - rightVersion; + } + else + { + rightComparisionLimit = strlen(rightVersion); + } + + /* we can error out early if hyphens are not in the same position */ + if (leftComparisionLimit != rightComparisionLimit) + { + return false; + } + + return strncmp(leftVersion, rightVersion, leftComparisionLimit) == 0; +} + + +/* + * AvailableExtensionVersion returns the Citus version from citus.control file. It also + * saves the result, thus consecutive calls to CitusExtensionAvailableVersion will + * not read the citus.control file again. 
+ */ +static char * +AvailableExtensionVersionColumnar(void) +{ + LOCAL_FCINFO(fcinfo, 0); + FmgrInfo flinfo; + + bool goForward = true; + bool doCopy = false; + char *availableExtensionVersion; + + EState *estate = CreateExecutorState(); + ReturnSetInfo *extensionsResultSet = makeNode(ReturnSetInfo); + extensionsResultSet->econtext = GetPerTupleExprContext(estate); + extensionsResultSet->allowedModes = SFRM_Materialize; + + fmgr_info(F_PG_AVAILABLE_EXTENSIONS, &flinfo); + InitFunctionCallInfoData(*fcinfo, &flinfo, 0, InvalidOid, NULL, + (Node *) extensionsResultSet); + + /* pg_available_extensions returns result set containing all available extensions */ + (*pg_available_extensions)(fcinfo); + + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( + extensionsResultSet->setDesc, + &TTSOpsMinimalTuple); + bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, + doCopy, + tupleTableSlot); + while (hasTuple) + { + bool isNull = false; + + Datum extensionNameDatum = slot_getattr(tupleTableSlot, 1, &isNull); + char *extensionName = NameStr(*DatumGetName(extensionNameDatum)); + if (strcmp(extensionName, "citus") == 0) + { + Datum availableVersion = slot_getattr(tupleTableSlot, 2, &isNull); + + + availableExtensionVersion = text_to_cstring(DatumGetTextPP(availableVersion)); + + + ExecClearTuple(tupleTableSlot); + ExecDropSingleTupleTableSlot(tupleTableSlot); + + return availableExtensionVersion; + } + + ExecClearTuple(tupleTableSlot); + hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, + doCopy, tupleTableSlot); + } + + ExecDropSingleTupleTableSlot(tupleTableSlot); + + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("citus extension is not found"))); +} + + +/* + * InstalledExtensionVersion returns the Citus version in PostgreSQL pg_extension table. 
+ */ +static char * +InstalledExtensionVersionColumnar(void) +{ + ScanKeyData entry[1]; + char *installedExtensionVersion = NULL; + + Relation relation = table_open(ExtensionRelationId, AccessShareLock); + + ScanKeyInit(&entry[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum("citus")); + + SysScanDesc scandesc = systable_beginscan(relation, ExtensionNameIndexId, true, + NULL, 1, entry); + + HeapTuple extensionTuple = systable_getnext(scandesc); + + /* We assume that there can be at most one matching tuple */ + if (HeapTupleIsValid(extensionTuple)) + { + int extensionIndex = Anum_pg_extension_extversion; + TupleDesc tupleDescriptor = RelationGetDescr(relation); + bool isNull = false; + + Datum installedVersion = heap_getattr(extensionTuple, extensionIndex, + tupleDescriptor, &isNull); + + if (isNull) + { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("citus extension version is null"))); + } + + + installedExtensionVersion = text_to_cstring(DatumGetTextPP(installedVersion)); + } + else + { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("citus extension is not loaded"))); + } + + systable_endscan(scandesc); + + table_close(relation, AccessShareLock); + + return installedExtensionVersion; +} diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index ddbe63f52..7f047ec1d 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -37,8 +37,7 @@ static char * CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollationName); static List * FilterNameListForDistributedCollations(List *objects, bool missing_ok, List **addresses); -static void EnsureSequentialModeForCollationDDL(void); - +static bool ShouldPropagateDefineCollationStmt(void); /* * GetCreateCollationDDLInternal returns a CREATE COLLATE sql string for the @@ -256,7 +255,7 @@ 
PreprocessDropCollationStmt(Node *node, const char *queryString, char *dropStmtSql = DeparseTreeNode((Node *) stmt); stmt->objects = oldCollations; - EnsureSequentialModeForCollationDDL(); + EnsureSequentialMode(OBJECT_COLLATION); /* to prevent recursion with mx we disable ddl propagation */ List *commands = list_make3(DISABLE_DDL_PROPAGATION, @@ -292,7 +291,7 @@ PreprocessAlterCollationOwnerStmt(Node *node, const char *queryString, QualifyTreeNode((Node *) stmt); char *sql = DeparseTreeNode((Node *) stmt); - EnsureSequentialModeForCollationDDL(); + EnsureSequentialMode(OBJECT_COLLATION); List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) sql, ENABLE_DDL_PROPAGATION); @@ -328,7 +327,7 @@ PreprocessRenameCollationStmt(Node *node, const char *queryString, /* deparse sql*/ char *renameStmtSql = DeparseTreeNode((Node *) stmt); - EnsureSequentialModeForCollationDDL(); + EnsureSequentialMode(OBJECT_COLLATION); /* to prevent recursion with mx we disable ddl propagation */ List *commands = list_make3(DISABLE_DDL_PROPAGATION, @@ -363,7 +362,7 @@ PreprocessAlterCollationSchemaStmt(Node *node, const char *queryString, QualifyTreeNode((Node *) stmt); char *sql = DeparseTreeNode((Node *) stmt); - EnsureSequentialModeForCollationDDL(); + EnsureSequentialMode(OBJECT_COLLATION); List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) sql, @@ -453,47 +452,6 @@ AlterCollationSchemaStmtObjectAddress(Node *node, bool missing_ok) } -/* - * EnsureSequentialModeForCollationDDL makes sure that the current transaction is already in - * sequential mode, or can still safely be put in sequential mode, it errors if that is - * not possible. The error contains information for the user to retry the transaction with - * sequential mode set from the beginning. - * - * As collations are node scoped objects there exists only 1 instance of the collation used by - * potentially multiple shards. 
To make sure all shards in the transaction can interact - * with the type the type needs to be visible on all connections used by the transaction, - * meaning we can only use 1 connection per node. - */ -static void -EnsureSequentialModeForCollationDDL(void) -{ - if (!IsTransactionBlock()) - { - /* we do not need to switch to sequential mode if we are not in a transaction */ - return; - } - - if (ParallelQueryExecutedInTransaction()) - { - ereport(ERROR, (errmsg("cannot create or modify collation because there was a " - "parallel operation on a distributed table in the " - "transaction"), - errdetail("When creating or altering a collation, Citus needs to " - "perform all operations over a single connection per " - "node to ensure consistency."), - errhint("Try re-running the transaction with " - "\"SET LOCAL citus.multi_shard_modify_mode TO " - "\'sequential\';\""))); - } - - ereport(DEBUG1, (errmsg("switching to sequential query execution mode"), - errdetail("Collation is created or altered. To make sure subsequent " - "commands see the collation correctly we need to make sure to " - "use only one connection for all future commands"))); - SetLocalMultiShardModifyModeToSequential(); -} - - /* * GenerateBackupNameForCollationCollision generates a new collation name for an existing collation. 
* The name is generated in such a way that the new name doesn't overlap with an existing collation @@ -561,6 +519,26 @@ DefineCollationStmtObjectAddress(Node *node, bool missing_ok) } +/* + * PreprocessDefineCollationStmt executed before the collation has been + * created locally to ensure that if the collation create statement will + * be propagated, the node is a coordinator node + */ +List * +PreprocessDefineCollationStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION); + + if (ShouldPropagateDefineCollationStmt()) + { + EnsureCoordinator(); + } + + return NIL; +} + + /* * PostprocessDefineCollationStmt executed after the collation has been * created locally and before we create it on the worker nodes. @@ -573,16 +551,7 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString) { Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION); - if (!ShouldPropagate()) - { - return NIL; - } - - /* - * If the create collation command is a part of a multi-statement transaction, - * do not propagate it - */ - if (IsMultiStatementTransaction()) + if (!ShouldPropagateDefineCollationStmt()) { return NIL; } @@ -590,13 +559,38 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString) ObjectAddress collationAddress = DefineCollationStmtObjectAddress(node, false); - if (IsObjectDistributed(&collationAddress)) - { - EnsureCoordinator(); - } - EnsureDependenciesExistOnAllNodes(&collationAddress); - return NodeDDLTaskList(NON_COORDINATOR_NODES, CreateCollationDDLsIdempotent( + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make1(DISABLE_DDL_PROPAGATION); + commands = list_concat(commands, CreateCollationDDLsIdempotent( collationAddress.objectId)); + commands = lappend(commands, ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * ShouldPropagateDefineCollationStmt 
checks if collation define + * statement should be propagated. Don't propagate if: + * - metadata syncing if off + * - statement is part of a multi stmt transaction and the multi shard connection + * type is not sequential + */ +static bool +ShouldPropagateDefineCollationStmt() +{ + if (!ShouldPropagate()) + { + return false; + } + + if (IsMultiStatementTransaction() && + MultiShardConnectionType != SEQUENTIAL_CONNECTION) + { + return false; + } + + return true; } diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 43bd1ebd6..eb51195bf 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -60,6 +60,7 @@ #include "distributed/remote_commands.h" #include "distributed/shared_library_init.h" #include "distributed/worker_protocol.h" +#include "distributed/worker_shard_visibility.h" #include "distributed/worker_transaction.h" #include "distributed/version_compat.h" #include "executor/executor.h" @@ -327,6 +328,7 @@ create_reference_table(PG_FUNCTION_ARGS) * - we are on the coordinator * - the current user is the owner of the table * - relation kind is supported + * - relation is not a shard */ static void EnsureCitusTableCanBeCreated(Oid relationOid) @@ -343,6 +345,14 @@ EnsureCitusTableCanBeCreated(Oid relationOid) * will be performed in CreateDistributedTable. */ EnsureRelationKindSupported(relationOid); + + /* + * When coordinator is added to the metadata, or on the workers, + * some of the relations of the coordinator node may/will be shards. + * We disallow creating distributed tables from shard relations, by + * erroring out here. 
+ */ + ErrorIfRelationIsAKnownShard(relationOid); } diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index b4fb15110..59902b038 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -29,7 +29,6 @@ #include "distributed/relation_access_tracking.h" #include "distributed/worker_transaction.h" -static void EnsureSequentialModeForDatabaseDDL(void); static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid); static Oid get_database_owner(Oid db_oid); @@ -66,7 +65,7 @@ PreprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString, QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); - EnsureSequentialModeForDatabaseDDL(); + EnsureSequentialMode(OBJECT_DATABASE); List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) sql, ENABLE_DDL_PROPAGATION); @@ -177,39 +176,3 @@ get_database_owner(Oid db_oid) return dba; } - - -/* - * EnsureSequentialModeForDatabaseDDL makes sure that the current transaction is already - * in sequential mode, or can still safely be put in sequential mode, it errors if that is - * not possible. The error contains information for the user to retry the transaction with - * sequential mode set from the beginning. 
- */ -static void -EnsureSequentialModeForDatabaseDDL(void) -{ - if (!IsTransactionBlock()) - { - /* we do not need to switch to sequential mode if we are not in a transaction */ - return; - } - - if (ParallelQueryExecutedInTransaction()) - { - ereport(ERROR, (errmsg("cannot create or modify database because there was a " - "parallel operation on a distributed table in the " - "transaction"), - errdetail("When creating or altering a database, Citus needs to " - "perform all operations over a single connection per " - "node to ensure consistency."), - errhint("Try re-running the transaction with " - "\"SET LOCAL citus.multi_shard_modify_mode TO " - "\'sequential\';\""))); - } - - ereport(DEBUG1, (errmsg("switching to sequential query execution mode"), - errdetail("Database is created or altered. To make sure subsequent " - "commands see the type correctly we need to make sure to " - "use only one connection for all future commands"))); - SetLocalMultiShardModifyModeToSequential(); -} diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index f82ddf065..84e875602 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -241,6 +241,17 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) return NIL; } + /* + * Indices are created separately, however, they do show up in the dependency + * list for a table since they will have potentially their own dependencies. + * The commands will be added to both shards and metadata tables via the table + * creation commands. 
+ */ + if (relKind == RELKIND_INDEX) + { + return NIL; + } + if (relKind == RELKIND_RELATION || relKind == RELKIND_PARTITIONED_TABLE || relKind == RELKIND_FOREIGN_TABLE) { @@ -317,6 +328,11 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) return DDLCommands; } + case OCLASS_TSCONFIG: + { + return CreateTextSearchConfigDDLCommandsIdempotent(dependency); + } + case OCLASS_TYPE: { return CreateTypeDDLCommandsIdempotent(dependency); @@ -396,6 +412,15 @@ ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort) ObjectAddress *dependency = NULL; foreach_ptr(dependency, dependencies) { + if (IsObjectAddressOwnedByExtension(dependency, NULL)) + { + /* + * we expect extension-owned objects to be created as a result + * of the extension being created. + */ + continue; + } + ddlCommands = list_concat(ddlCommands, GetDependencyCreateDDLCommands(dependency)); } diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 755286ffb..37758aec3 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -175,7 +175,7 @@ static DistributeObjectOps Any_CreateFunction = { .preprocess = PreprocessCreateFunctionStmt, .postprocess = PostprocessCreateFunctionStmt, .address = CreateFunctionStmtObjectAddress, - .markDistributed = false, + .markDistributed = true, }; static DistributeObjectOps Any_CreatePolicy = { .deparse = NULL, @@ -193,6 +193,14 @@ static DistributeObjectOps Any_CreateForeignServer = { .address = CreateForeignServerStmtObjectAddress, .markDistributed = true, }; +static DistributeObjectOps Any_CreateSchema = { + .deparse = DeparseCreateSchemaStmt, + .qualify = NULL, + .preprocess = PreprocessCreateSchemaStmt, + .postprocess = NULL, + .address = CreateSchemaStmtObjectAddress, + .markDistributed = true, +}; static DistributeObjectOps Any_CreateStatistics = { .deparse = 
DeparseCreateStatisticsStmt, .qualify = QualifyCreateStatisticsStmt, @@ -268,7 +276,7 @@ static DistributeObjectOps Collation_AlterOwner = { static DistributeObjectOps Collation_Define = { .deparse = NULL, .qualify = NULL, - .preprocess = NULL, + .preprocess = PreprocessDefineCollationStmt, .postprocess = PostprocessDefineCollationStmt, .address = DefineCollationStmtObjectAddress, .markDistributed = true, @@ -497,6 +505,62 @@ static DistributeObjectOps Sequence_Rename = { .address = RenameSequenceStmtObjectAddress, .markDistributed = false, }; +static DistributeObjectOps TextSearchConfig_Alter = { + .deparse = DeparseAlterTextSearchConfigurationStmt, + .qualify = QualifyAlterTextSearchConfigurationStmt, + .preprocess = PreprocessAlterTextSearchConfigurationStmt, + .postprocess = NULL, + .address = AlterTextSearchConfigurationStmtObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_AlterObjectSchema = { + .deparse = DeparseAlterTextSearchConfigurationSchemaStmt, + .qualify = QualifyAlterTextSearchConfigurationSchemaStmt, + .preprocess = PreprocessAlterTextSearchConfigurationSchemaStmt, + .postprocess = PostprocessAlterTextSearchConfigurationSchemaStmt, + .address = AlterTextSearchConfigurationSchemaStmtObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_AlterOwner = { + .deparse = DeparseAlterTextSearchConfigurationOwnerStmt, + .qualify = QualifyAlterTextSearchConfigurationOwnerStmt, + .preprocess = PreprocessAlterTextSearchConfigurationOwnerStmt, + .postprocess = PostprocessAlterTextSearchConfigurationOwnerStmt, + .address = AlterTextSearchConfigurationOwnerObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_Comment = { + .deparse = DeparseTextSearchConfigurationCommentStmt, + .qualify = QualifyTextSearchConfigurationCommentStmt, + .preprocess = PreprocessTextSearchConfigurationCommentStmt, + .postprocess = NULL, + .address = 
TextSearchConfigurationCommentObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_Define = { + .deparse = DeparseCreateTextSearchStmt, + .qualify = NULL, + .preprocess = NULL, + .postprocess = PostprocessCreateTextSearchConfigurationStmt, + .address = CreateTextSearchConfigurationObjectAddress, + .markDistributed = true, +}; +static DistributeObjectOps TextSearchConfig_Drop = { + .deparse = DeparseDropTextSearchConfigurationStmt, + .qualify = QualifyDropTextSearchConfigurationStmt, + .preprocess = PreprocessDropTextSearchConfigurationStmt, + .postprocess = NULL, + .address = NULL, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_Rename = { + .deparse = DeparseRenameTextSearchConfigurationStmt, + .qualify = QualifyRenameTextSearchConfigurationStmt, + .preprocess = PreprocessRenameTextSearchConfigurationStmt, + .postprocess = NULL, + .address = RenameTextSearchConfigurationStmtObjectAddress, + .markDistributed = false, +}; static DistributeObjectOps Trigger_AlterObjectDepends = { .deparse = NULL, .qualify = NULL, @@ -538,7 +602,7 @@ static DistributeObjectOps Routine_Rename = { .markDistributed = false, }; static DistributeObjectOps Schema_Drop = { - .deparse = NULL, + .deparse = DeparseDropSchemaStmt, .qualify = NULL, .preprocess = PreprocessDropSchemaStmt, .postprocess = NULL, @@ -803,6 +867,11 @@ GetDistributeObjectOps(Node *node) return &Table_AlterObjectSchema; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_AlterObjectSchema; + } + case OBJECT_TYPE: { return &Type_AlterObjectSchema; @@ -860,6 +929,11 @@ GetDistributeObjectOps(Node *node) return &Statistics_AlterOwner; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_AlterOwner; + } + case OBJECT_TYPE: { return &Type_AlterOwner; @@ -941,11 +1015,33 @@ GetDistributeObjectOps(Node *node) return &Any_AlterTableMoveAll; } + case T_AlterTSConfigurationStmt: + { + return &TextSearchConfig_Alter; + } + case T_ClusterStmt: 
{ return &Any_Cluster; } + case T_CommentStmt: + { + CommentStmt *stmt = castNode(CommentStmt, node); + switch (stmt->objtype) + { + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_Comment; + } + + default: + { + return &NoDistributeOps; + } + } + } + case T_CompositeTypeStmt: { return &Any_CompositeType; @@ -976,6 +1072,11 @@ GetDistributeObjectOps(Node *node) return &Any_CreatePolicy; } + case T_CreateSchemaStmt: + { + return &Any_CreateSchema; + } + case T_CreateStatsStmt: { return &Any_CreateStatistics; @@ -1001,6 +1102,11 @@ GetDistributeObjectOps(Node *node) return &Collation_Define; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_Define; + } + default: { return &NoDistributeOps; @@ -1078,6 +1184,11 @@ GetDistributeObjectOps(Node *node) return &Table_Drop; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_Drop; + } + case OBJECT_TYPE: { return &Type_Drop; @@ -1177,6 +1288,11 @@ GetDistributeObjectOps(Node *node) return &Statistics_Rename; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_Rename; + } + case OBJECT_TYPE: { return &Type_Rename; diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index ef5b6c1b1..dc1363a65 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -37,7 +37,6 @@ static void AddSchemaFieldIfMissing(CreateExtensionStmt *stmt); static List * FilterDistributedExtensions(List *extensionObjectList); static List * ExtensionNameListToObjectAddressList(List *extensionObjectList); static void MarkExistingObjectDependenciesDistributedIfSupported(void); -static void EnsureSequentialModeForExtensionDDL(void); static bool ShouldPropagateExtensionCommand(Node *parseTree); static bool IsAlterExtensionSetSchemaCitus(Node *parseTree); static Node * RecreateExtensionStmt(Oid extensionOid); @@ -163,7 +162,7 @@ PostprocessCreateExtensionStmt(Node *node, const char *queryString) * Make sure 
that the current transaction is already in sequential mode, * or can still safely be put in sequential mode */ - EnsureSequentialModeForExtensionDDL(); + EnsureSequentialMode(OBJECT_EXTENSION); /* * Here we append "schema" field to the "options" list (if not specified) @@ -274,7 +273,7 @@ PreprocessDropExtensionStmt(Node *node, const char *queryString, * Make sure that the current transaction is already in sequential mode, * or can still safely be put in sequential mode */ - EnsureSequentialModeForExtensionDDL(); + EnsureSequentialMode(OBJECT_EXTENSION); List *distributedExtensionAddresses = ExtensionNameListToObjectAddressList( distributedExtensions); @@ -409,7 +408,7 @@ PreprocessAlterExtensionSchemaStmt(Node *node, const char *queryString, * Make sure that the current transaction is already in sequential mode, * or can still safely be put in sequential mode */ - EnsureSequentialModeForExtensionDDL(); + EnsureSequentialMode(OBJECT_EXTENSION); const char *alterExtensionStmtSql = DeparseTreeNode(node); @@ -478,7 +477,7 @@ PreprocessAlterExtensionUpdateStmt(Node *node, const char *queryString, * Make sure that the current transaction is already in sequential mode, * or can still safely be put in sequential mode */ - EnsureSequentialModeForExtensionDDL(); + EnsureSequentialMode(OBJECT_EXTENSION); const char *alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt); @@ -603,44 +602,6 @@ PreprocessAlterExtensionContentsStmt(Node *node, const char *queryString, } -/* - * EnsureSequentialModeForExtensionDDL makes sure that the current transaction is already in - * sequential mode, or can still safely be put in sequential mode, it errors if that is - * not possible. The error contains information for the user to retry the transaction with - * sequential mode set from the beginning. - * - * As extensions are node scoped objects there exists only 1 instance of the - * extension used by potentially multiple shards. 
To make sure all shards in - * the transaction can interact with the extension the extension needs to be - * visible on all connections used by the transaction, meaning we can only use - * 1 connection per node. - */ -static void -EnsureSequentialModeForExtensionDDL(void) -{ - if (ParallelQueryExecutedInTransaction()) - { - ereport(ERROR, (errmsg("cannot run extension command because there was a " - "parallel operation on a distributed table in the " - "transaction"), - errdetail( - "When running command on/for a distributed extension, Citus needs to " - "perform all operations over a single connection per " - "node to ensure consistency."), - errhint("Try re-running the transaction with " - "\"SET LOCAL citus.multi_shard_modify_mode TO " - "\'sequential\';\""))); - } - - ereport(DEBUG1, (errmsg("switching to sequential query execution mode"), - errdetail( - "A command for a distributed extension is run. To make sure subsequent " - "commands see the type correctly we need to make sure to " - "use only one connection for all future commands"))); - SetLocalMultiShardModifyModeToSequential(); -} - - /* * ShouldPropagateExtensionCommand determines whether to propagate an extension * command to the worker nodes. 
diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 497a32dbb..80b870b62 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -25,6 +25,7 @@ #include "access/htup_details.h" #include "access/xact.h" #include "catalog/pg_aggregate.h" +#include "catalog/dependency.h" #include "catalog/namespace.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" @@ -38,6 +39,7 @@ #include "distributed/listutils.h" #include "distributed/maintenanced.h" #include "distributed/metadata_utility.h" +#include "distributed/metadata/dependency.h" #include "distributed/coordinator_protocol.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata/pg_dist_object.h" @@ -77,15 +79,14 @@ static int GetFunctionColocationId(Oid functionOid, char *colocateWithName, Oid static void EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid distributionColumnType, Oid sourceRelationId); -static void EnsureSequentialModeForFunctionDDL(void); static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt); static bool ShouldPropagateAlterFunction(const ObjectAddress *address); static bool ShouldAddFunctionSignature(FunctionParameterMode mode); +static ObjectAddress * GetUndistributableDependency(ObjectAddress *functionAddress); static ObjectAddress FunctionToObjectAddress(ObjectType objectType, ObjectWithArgs *objectWithArgs, bool missing_ok); static void ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt); -static void ErrorIfFunctionDependsOnExtension(const ObjectAddress *functionAddress); static char * quote_qualified_func_name(Oid funcOid); static void DistributeFunctionWithDistributionArgument(RegProcedure funcOid, char *distributionArgumentName, @@ -101,6 +102,9 @@ static void DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid static void DistributeFunctionColocatedWithReferenceTable(const ObjectAddress *functionAddress); 
+static void EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress, + const ObjectAddress extensionAddress, + char *distributionArgumentName); PG_FUNCTION_INFO_V1(create_distributed_function); @@ -127,6 +131,7 @@ create_distributed_function(PG_FUNCTION_ARGS) char *colocateWithTableName = NULL; bool *forceDelegationAddress = NULL; bool forceDelegation = false; + ObjectAddress extensionAddress = { 0 }; /* if called on NULL input, error out */ if (funcOid == InvalidOid) @@ -187,22 +192,35 @@ create_distributed_function(PG_FUNCTION_ARGS) EnsureFunctionOwner(funcOid); ObjectAddressSet(functionAddress, ProcedureRelationId, funcOid); - ErrorIfFunctionDependsOnExtension(&functionAddress); /* - * when we allow propagation within a transaction block we should make sure to only - * allow this in sequential mode + * If the function is owned by an extension, only update the + * pg_dist_object, and not propagate the CREATE FUNCTION. Function + * will be created by the virtue of the extension creation. */ - EnsureSequentialModeForFunctionDDL(); + if (IsObjectAddressOwnedByExtension(&functionAddress, &extensionAddress)) + { + EnsureExtensionFunctionCanBeDistributed(functionAddress, extensionAddress, + distributionArgumentName); + } + else + { + /* + * when we allow propagation within a transaction block we should make sure + * to only allow this in sequential mode. 
+ */ + EnsureSequentialMode(OBJECT_FUNCTION); - EnsureDependenciesExistOnAllNodes(&functionAddress); + EnsureDependenciesExistOnAllNodes(&functionAddress); - const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true); - const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid); - initStringInfo(&ddlCommand); - appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC, - createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC); - SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(), ddlCommand.data); + const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true); + const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid); + initStringInfo(&ddlCommand); + appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC, + createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC); + SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(), + ddlCommand.data); + } MarkObjectDistributed(&functionAddress); @@ -744,7 +762,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress, /* * GetFunctionDDLCommand returns the complete "CREATE OR REPLACE FUNCTION ..." statement for - * the specified function followed by "ALTER FUNCTION .. SET OWNER ..". + * the specified function. * * useCreateOrReplace is ignored for non-aggregate functions. */ @@ -1153,84 +1171,25 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) } -/* - * EnsureSequentialModeForFunctionDDL makes sure that the current transaction is already in - * sequential mode, or can still safely be put in sequential mode, it errors if that is - * not possible. The error contains information for the user to retry the transaction with - * sequential mode set from the beginning. - * - * As functions are node scoped objects there exists only 1 instance of the function used by - * potentially multiple shards. 
To make sure all shards in the transaction can interact - * with the function the function needs to be visible on all connections used by the transaction, - * meaning we can only use 1 connection per node. - */ -static void -EnsureSequentialModeForFunctionDDL(void) -{ - if (ParallelQueryExecutedInTransaction()) - { - ereport(ERROR, (errmsg("cannot create function because there was a " - "parallel operation on a distributed table in the " - "transaction"), - errdetail("When creating a distributed function, Citus needs to " - "perform all operations over a single connection per " - "node to ensure consistency."), - errhint("Try re-running the transaction with " - "\"SET LOCAL citus.multi_shard_modify_mode TO " - "\'sequential\';\""))); - } - - ereport(DEBUG1, (errmsg("switching to sequential query execution mode"), - errdetail( - "A distributed function is created. To make sure subsequent " - "commands see the type correctly we need to make sure to " - "use only one connection for all future commands"))); - SetLocalMultiShardModifyModeToSequential(); -} - - /* * ShouldPropagateCreateFunction tests if we need to propagate a CREATE FUNCTION - * statement. We only propagate replace's of distributed functions to keep the function on - * the workers in sync with the one on the coordinator. + * statement. */ static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt) { - if (creating_extension) + if (!ShouldPropagate()) { - /* - * extensions should be created separately on the workers, functions cascading - * from an extension should therefore not be propagated. - */ - return false; - } - - if (!EnableMetadataSync) - { - /* - * we are configured to disable object propagation, should not propagate anything - */ - return false; - } - - if (!stmt->replace) - { - /* - * Since we only care for a replace of distributed functions if the statement is - * not a replace we are going to ignore. 
- */ return false; } /* - * Even though its a replace we should accept an non-existing function, it will just - * not be distributed + * If the create command is a part of a multi-statement transaction that is not in + * sequential mode, don't propagate. */ - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, true); - if (!IsObjectDistributed(&address)) + if (IsMultiStatementTransaction() && + MultiShardConnectionType != SEQUENTIAL_CONNECTION) { - /* do not propagate alter function for non-distributed functions */ return false; } @@ -1274,12 +1233,10 @@ ShouldPropagateAlterFunction(const ObjectAddress *address) /* * PreprocessCreateFunctionStmt is called during the planning phase for CREATE [OR REPLACE] - * FUNCTION. We primarily care for the replace variant of this statement to keep - * distributed functions in sync. We bail via a check on ShouldPropagateCreateFunction - * which checks for the OR REPLACE modifier. + * FUNCTION before it is created on the local node internally. * * Since we use pg_get_functiondef to get the ddl command we actually do not do any - * planning here, instead we defer the plan creation to the processing step. + * planning here, instead we defer the plan creation to the postprocessing step. * * Instead we do our basic housekeeping where we make sure we are on the coordinator and * can propagate the function in sequential mode. 
@@ -1297,10 +1254,10 @@ PreprocessCreateFunctionStmt(Node *node, const char *queryString, EnsureCoordinator(); - EnsureSequentialModeForFunctionDDL(); + EnsureSequentialMode(OBJECT_FUNCTION); /* - * ddl jobs will be generated during the Processing phase as we need the function to + * ddl jobs will be generated during the postprocessing phase as we need the function to * be updated in the catalog to get its sql representation */ return NIL; @@ -1311,6 +1268,11 @@ PreprocessCreateFunctionStmt(Node *node, const char *queryString, * PostprocessCreateFunctionStmt actually creates the plan we need to execute for function * propagation. This is the downside of using pg_get_functiondef to get the sql statement. * + * If function depends on any non-distributed relation (except sequence and composite type), + * Citus cannot distribute it. In order not to prevent users from creating local + * functions on the coordinator, a WARNING message will be sent to the customer about the case + * instead of erroring out. + * * Besides creating the plan we also make sure all (new) dependencies of the function are * created on all nodes. */ @@ -1324,18 +1286,113 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString) return NIL; } - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - EnsureDependenciesExistOnAllNodes(&address); + ObjectAddress functionAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - List *commands = list_make4(DISABLE_DDL_PROPAGATION, - GetFunctionDDLCommand(address.objectId, true), - GetFunctionAlterOwnerCommand(address.objectId), - ENABLE_DDL_PROPAGATION); + if (IsObjectAddressOwnedByExtension(&functionAddress, NULL)) + { + return NIL; + } + + /* + * This check should have been valid for all objects not only for functions. Though, + * we do this limited check for now as functions are more likely to be used with + * such dependencies, and we want to scope it for now. 
+ */ + ObjectAddress *undistributableDependency = GetUndistributableDependency( + &functionAddress); + if (undistributableDependency != NULL) + { + if (SupportedDependencyByCitus(undistributableDependency)) + { + /* + * Citus can't distribute some relations as dependency, although those + * types as supported by Citus. So we can use get_rel_name directly + */ + RangeVar *functionRangeVar = makeRangeVarFromNameList(stmt->funcname); + char *functionName = functionRangeVar->relname; + char *dependentRelationName = + get_rel_name(undistributableDependency->objectId); + + ereport(WARNING, (errmsg("Citus can't distribute function \"%s\" having " + "dependency on non-distributed relation \"%s\"", + functionName, dependentRelationName), + errdetail("Function will be created only locally"), + errhint("To distribute function, distribute dependent " + "relations first. Then, re-create the function"))); + } + else + { + char *objectType = NULL; + #if PG_VERSION_NUM >= PG_VERSION_14 + objectType = getObjectTypeDescription(undistributableDependency, false); + #else + objectType = getObjectTypeDescription(undistributableDependency); + #endif + ereport(WARNING, (errmsg("Citus can't distribute functions having " + "dependency on unsupported object of type \"%s\"", + objectType), + errdetail("Function will be created only locally"))); + } + + return NIL; + } + + EnsureDependenciesExistOnAllNodes(&functionAddress); + + List *commands = list_make1(DISABLE_DDL_PROPAGATION); + commands = list_concat(commands, CreateFunctionDDLCommandsIdempotent( + &functionAddress)); + commands = list_concat(commands, list_make1(ENABLE_DDL_PROPAGATION)); return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } +/* + * GetUndistributableDependency checks whether object has any non-distributable + * dependency. If any one found, it will be returned. 
+ */ +static ObjectAddress * +GetUndistributableDependency(ObjectAddress *objectAddress) +{ + List *dependencies = GetAllDependenciesForObject(objectAddress); + ObjectAddress *dependency = NULL; + foreach_ptr(dependency, dependencies) + { + if (IsObjectDistributed(dependency)) + { + continue; + } + + if (!SupportedDependencyByCitus(dependency)) + { + /* + * Since roles should be handled manually with Citus community, skip them. + */ + if (getObjectClass(dependency) != OCLASS_ROLE) + { + return dependency; + } + } + + if (getObjectClass(dependency) == OCLASS_CLASS) + { + /* + * Citus can only distribute dependent non-distributed sequence + * and composite types. + */ + char relKind = get_rel_relkind(dependency->objectId); + if (relKind != RELKIND_SEQUENCE && relKind != RELKIND_COMPOSITE_TYPE) + { + return dependency; + } + } + } + + return NULL; +} + + /* * CreateFunctionStmtObjectAddress returns the ObjectAddress for the subject of the * CREATE [OR REPLACE] FUNCTION statement. If missing_ok is false it will error with the @@ -1416,7 +1473,7 @@ PreprocessAlterFunctionStmt(Node *node, const char *queryString, EnsureCoordinator(); ErrorIfUnsupportedAlterFunctionStmt(stmt); - EnsureSequentialModeForFunctionDDL(); + EnsureSequentialMode(OBJECT_FUNCTION); QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); @@ -1450,7 +1507,7 @@ PreprocessRenameFunctionStmt(Node *node, const char *queryString, } EnsureCoordinator(); - EnsureSequentialModeForFunctionDDL(); + EnsureSequentialMode(OBJECT_FUNCTION); QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); @@ -1482,7 +1539,7 @@ PreprocessAlterFunctionSchemaStmt(Node *node, const char *queryString, } EnsureCoordinator(); - EnsureSequentialModeForFunctionDDL(); + EnsureSequentialMode(OBJECT_FUNCTION); QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); @@ -1515,7 +1572,7 @@ PreprocessAlterFunctionOwnerStmt(Node *node, const char *queryString, } 
EnsureCoordinator(); - EnsureSequentialModeForFunctionDDL(); + EnsureSequentialMode(OBJECT_FUNCTION); QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); @@ -1605,7 +1662,7 @@ PreprocessDropFunctionStmt(Node *node, const char *queryString, * types, so we block the call. */ EnsureCoordinator(); - EnsureSequentialModeForFunctionDDL(); + EnsureSequentialMode(OBJECT_FUNCTION); /* remove the entries for the distributed objects on dropping */ ObjectAddress *address = NULL; @@ -2013,33 +2070,6 @@ ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt) } -/* - * ErrorIfFunctionDependsOnExtension functions depending on extensions should raise an - * error informing the user why they can't be distributed. - */ -static void -ErrorIfFunctionDependsOnExtension(const ObjectAddress *functionAddress) -{ - /* captures the extension address during lookup */ - ObjectAddress extensionAddress = { 0 }; - - if (IsObjectAddressOwnedByExtension(functionAddress, &extensionAddress)) - { - char *functionName = - getObjectIdentity_compat(functionAddress, /* missingOk: */ false); - char *extensionName = - getObjectIdentity_compat(&extensionAddress, /* missingOk: */ false); - ereport(ERROR, (errmsg("unable to create a distributed function from functions " - "owned by an extension"), - errdetail("Function \"%s\" has a dependency on extension \"%s\". " - "Functions depending on an extension cannot be " - "distributed. Create the function by creating the " - "extension on the workers.", functionName, - extensionName))); - } -} - - /* returns the quoted qualified name of a given function oid */ static char * quote_qualified_func_name(Oid funcOid) @@ -2048,3 +2078,54 @@ quote_qualified_func_name(Oid funcOid) get_namespace_name(get_func_namespace(funcOid)), get_func_name(funcOid)); } + + +/* + * EnsureExtensionFunctionCanBeDistributed checks if the dependent objects + * (including extension) exist on all nodes, if not, creates them. 
In + * addition, it also checks if distribution argument is passed. + */ +static void +EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress, + const ObjectAddress extensionAddress, + char *distributionArgumentName) +{ + if (CitusExtensionObject(&extensionAddress)) + { + /* + * Citus extension is a special case. It's the extension that + * provides the 'distributed capabilities' in the first place. + * Trying to distribute it's own function(s) doesn't make sense. + */ + ereport(ERROR, (errmsg("Citus extension functions(%s) " + "cannot be distributed.", + get_func_name(functionAddress.objectId)))); + } + + /* + * Distributing functions from extensions has the most benefit when + * distribution argument is specified. + */ + if (distributionArgumentName == NULL) + { + ereport(ERROR, (errmsg("Extension functions(%s) " + "without distribution argument " + "are not supported.", + get_func_name(functionAddress.objectId)))); + } + + /* + * Ensure corresponding extension is in pg_dist_object. + * Functions owned by an extension are depending internally on that extension, + * hence EnsureDependenciesExistOnAllNodes() creates the extension, which in + * turn creates the function, and thus we don't have to create it ourself like + * we do for non-extension functions. 
+ */ + ereport(DEBUG1, (errmsg("Extension(%s) owning the " + "function(%s) is not distributed, " + "attempting to propagate the extension", + get_extension_name(extensionAddress.objectId), + get_func_name(functionAddress.objectId)))); + + EnsureDependenciesExistOnAllNodes(&functionAddress); +} diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index cfdd6ad63..5ff984f66 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -725,12 +725,6 @@ PostprocessIndexStmt(Node *node, const char *queryString) { IndexStmt *indexStmt = castNode(IndexStmt, node); - /* we are only processing CONCURRENT index statements */ - if (!indexStmt->concurrent) - { - return NIL; - } - /* this logic only applies to the coordinator */ if (!IsCoordinator()) { @@ -747,14 +741,36 @@ PostprocessIndexStmt(Node *node, const char *queryString) return NIL; } + Oid indexRelationId = get_relname_relid(indexStmt->idxname, schemaId); + + /* ensure dependencies of index exist on all nodes */ + ObjectAddress address = { 0 }; + ObjectAddressSet(address, RelationRelationId, indexRelationId); + EnsureDependenciesExistOnAllNodes(&address); + + /* further on we are only processing CONCURRENT index statements */ + if (!indexStmt->concurrent) + { + return NIL; + } + + /* + * EnsureDependenciesExistOnAllNodes could have distributed objects that are required + * by this index. During the propagation process an active snapshot might be left as + * a side effect of inserting the local tuples via SPI. To not leak a snapshot like + * that we will pop any snapshot if we have any right before we commit. 
+ */ + if (ActiveSnapshotSet()) + { + PopActiveSnapshot(); + } + /* commit the current transaction and start anew */ CommitTransactionCommand(); StartTransactionCommand(); /* get the affected relation and index */ Relation relation = table_openrv(indexStmt->relation, ShareUpdateExclusiveLock); - Oid indexRelationId = get_relname_relid(indexStmt->idxname, - schemaId); Relation indexRelation = index_open(indexRelationId, RowExclusiveLock); /* close relations but retain locks */ diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index fff2b1c94..e0c0f2b11 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -40,8 +40,39 @@ #include "utils/relcache.h" +static ObjectAddress GetObjectAddressBySchemaName(char *schemaName, bool missing_ok); static List * FilterDistributedSchemas(List *schemas); -static void EnsureSequentialModeForSchemaDDL(void); +static bool SchemaHasDistributedTableWithFKey(char *schemaName); +static bool ShouldPropagateCreateSchemaStmt(void); + + +/* + * PreprocessCreateSchemaStmt is called during the planning phase for + * CREATE SCHEMA .. 
+ */ +List * +PreprocessCreateSchemaStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + if (!ShouldPropagateCreateSchemaStmt()) + { + return NIL; + } + + EnsureCoordinator(); + + EnsureSequentialMode(OBJECT_SCHEMA); + + /* deparse sql*/ + const char *sql = DeparseTreeNode(node); + + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} /* @@ -53,76 +84,54 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { DropStmt *dropStatement = castNode(DropStmt, node); - Relation pgClass = NULL; - HeapTuple heapTuple = NULL; - SysScanDesc scanDescriptor = NULL; - ScanKeyData scanKey[1]; - int scanKeyCount = 1; - Oid scanIndexId = InvalidOid; - bool useIndex = false; + Assert(dropStatement->removeType == OBJECT_SCHEMA); - if (dropStatement->behavior != DROP_CASCADE) + if (!ShouldPropagate()) { return NIL; } - Value *schemaValue = NULL; - foreach_ptr(schemaValue, dropStatement->objects) + EnsureCoordinator(); + + List *distributedSchemas = FilterDistributedSchemas(dropStatement->objects); + + if (list_length(distributedSchemas) < 1) { - const char *schemaString = strVal(schemaValue); - Oid namespaceOid = get_namespace_oid(schemaString, true); - - if (namespaceOid == InvalidOid) - { - continue; - } - - pgClass = table_open(RelationRelationId, AccessShareLock); - - ScanKeyInit(&scanKey[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber, - F_OIDEQ, namespaceOid); - scanDescriptor = systable_beginscan(pgClass, scanIndexId, useIndex, NULL, - scanKeyCount, scanKey); - - heapTuple = systable_getnext(scanDescriptor); - while (HeapTupleIsValid(heapTuple)) - { - Form_pg_class relationForm = (Form_pg_class) GETSTRUCT(heapTuple); - char *relationName = NameStr(relationForm->relname); - Oid relationId = 
get_relname_relid(relationName, namespaceOid); - - /* we're not interested in non-valid, non-distributed relations */ - if (relationId == InvalidOid || !IsCitusTable(relationId)) - { - heapTuple = systable_getnext(scanDescriptor); - continue; - } - - if (IsCitusTableType(relationId, REFERENCE_TABLE)) - { - /* prevent concurrent EnsureReferenceTablesExistOnAllNodes */ - int colocationId = CreateReferenceTableColocationId(); - LockColocationId(colocationId, ExclusiveLock); - } - - /* invalidate foreign key cache if the table involved in any foreign key */ - if (TableReferenced(relationId) || TableReferencing(relationId)) - { - MarkInvalidateForeignKeyGraph(); - - systable_endscan(scanDescriptor); - table_close(pgClass, NoLock); - return NIL; - } - - heapTuple = systable_getnext(scanDescriptor); - } - - systable_endscan(scanDescriptor); - table_close(pgClass, NoLock); + return NIL; } - return NIL; + EnsureSequentialMode(OBJECT_SCHEMA); + + Value *schemaVal = NULL; + foreach_ptr(schemaVal, distributedSchemas) + { + if (SchemaHasDistributedTableWithFKey(strVal(schemaVal))) + { + MarkInvalidateForeignKeyGraph(); + break; + } + } + + /* + * We swap around the schema's in the statement to only contain the distributed + * schemas before deparsing. We need to restore the original list as postgres + * will execute on this statement locally, which requires all original schemas + * from the user to be present. 
+ */ + List *originalObjects = dropStatement->objects; + + dropStatement->objects = distributedSchemas; + + const char *sql = DeparseTreeNode(node); + + dropStatement->objects = originalObjects; + + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -194,7 +203,7 @@ PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString, /* deparse sql*/ const char *renameStmtSql = DeparseTreeNode(node); - EnsureSequentialModeForSchemaDDL(); + EnsureSequentialMode(OBJECT_SCHEMA); /* to prevent recursion with mx we disable ddl propagation */ List *commands = list_make3(DISABLE_DDL_PROPAGATION, @@ -205,6 +214,19 @@ PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString, } +/* + * CreateSchemaStmtObjectAddress returns the ObjectAddress of the schema that is + * the object of the CreateSchemaStmt. Errors if missing_ok is false. + */ +ObjectAddress +CreateSchemaStmtObjectAddress(Node *node, bool missing_ok) +{ + CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node); + + return GetObjectAddressBySchemaName(stmt->schemaname, missing_ok); +} + + /* * AlterSchemaRenameStmtObjectAddress returns the ObjectAddress of the schema that is * the object of the RenameStmt. Errors if missing_ok is false. @@ -215,7 +237,17 @@ AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok) RenameStmt *stmt = castNode(RenameStmt, node); Assert(stmt->renameType == OBJECT_SCHEMA); - const char *schemaName = stmt->subname; + return GetObjectAddressBySchemaName(stmt->subname, missing_ok); +} + + +/* + * GetObjectAddressBySchemaName returns the ObjectAddress of the schema with the + * given name. Errors out if schema is not found and missing_ok is false. 
+ */ +ObjectAddress +GetObjectAddressBySchemaName(char *schemaName, bool missing_ok) +{ Oid schemaOid = get_namespace_oid(schemaName, missing_ok); ObjectAddress address = { 0 }; @@ -261,38 +293,85 @@ FilterDistributedSchemas(List *schemas) /* - * EnsureSequentialModeForSchemaDDL makes sure that the current transaction is already in - * sequential mode, or can still safely be put in sequential mode, it errors if that is - * not possible. The error contains information for the user to retry the transaction with - * sequential mode set from the beginning. - * - * Copy-pasted from type.c + * SchemaHasDistributedTableWithFKey takes a schema name and scans the relations within + * that schema. If any one of the relations has a foreign key relationship, it returns + * true. Returns false otherwise. */ -static void -EnsureSequentialModeForSchemaDDL(void) +static bool +SchemaHasDistributedTableWithFKey(char *schemaName) { - if (!IsTransactionBlock()) + ScanKeyData scanKey[1]; + int scanKeyCount = 1; + Oid scanIndexId = InvalidOid; + bool useIndex = false; + + Oid namespaceOid = get_namespace_oid(schemaName, true); + + if (namespaceOid == InvalidOid) { - /* we do not need to switch to sequential mode if we are not in a transaction */ - return; + return false; } - if (ParallelQueryExecutedInTransaction()) + Relation pgClass = table_open(RelationRelationId, AccessShareLock); + + ScanKeyInit(&scanKey[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber, + F_OIDEQ, namespaceOid); + SysScanDesc scanDescriptor = systable_beginscan(pgClass, scanIndexId, useIndex, NULL, + scanKeyCount, scanKey); + + HeapTuple heapTuple = systable_getnext(scanDescriptor); + while (HeapTupleIsValid(heapTuple)) { - ereport(ERROR, (errmsg("cannot create or modify schema because there was a " - "parallel operation on a distributed table in the " - "transaction"), - errdetail("When creating or altering a schema, Citus needs to " - "perform all operations over a single connection per " - "node to ensure 
consistency."), - errhint("Try re-running the transaction with " - "\"SET LOCAL citus.multi_shard_modify_mode TO " - "\'sequential\';\""))); + Form_pg_class relationForm = (Form_pg_class) GETSTRUCT(heapTuple); + char *relationName = NameStr(relationForm->relname); + Oid relationId = get_relname_relid(relationName, namespaceOid); + + /* we're not interested in non-valid, non-distributed relations */ + if (relationId == InvalidOid || !IsCitusTable(relationId)) + { + heapTuple = systable_getnext(scanDescriptor); + continue; + } + + /* invalidate foreign key cache if the table involved in any foreign key */ + if (TableReferenced(relationId) || TableReferencing(relationId)) + { + systable_endscan(scanDescriptor); + table_close(pgClass, NoLock); + return true; + } + + heapTuple = systable_getnext(scanDescriptor); } - ereport(DEBUG1, (errmsg("switching to sequential query execution mode"), - errdetail("Schema is created or altered. To make sure subsequent " - "commands see the schema correctly we need to make sure to " - "use only one connection for all future commands"))); - SetLocalMultiShardModifyModeToSequential(); + systable_endscan(scanDescriptor); + table_close(pgClass, NoLock); + + return false; +} + + +/* + * ShouldPropagateCreateSchemaStmt gets called only for CreateSchemaStmt's. + * This function wraps the ShouldPropagate function which is commonly used + * for all object types; additionally it checks whether there's a multi-statement + * transaction ongoing or not. For transaction blocks, we require sequential mode + * with this function, for CREATE SCHEMA statements. If Citus has not already + * switched to sequential mode, we don't propagate. 
+ */ +static bool +ShouldPropagateCreateSchemaStmt() +{ + if (!ShouldPropagate()) + { + return false; + } + + if (IsMultiStatementTransaction() && + MultiShardConnectionType != SEQUENTIAL_CONNECTION) + { + return false; + } + + return true; } diff --git a/src/backend/distributed/commands/text_search.c b/src/backend/distributed/commands/text_search.c new file mode 100644 index 000000000..be78057f7 --- /dev/null +++ b/src/backend/distributed/commands/text_search.c @@ -0,0 +1,935 @@ +/*------------------------------------------------------------------------- + * + * text_search.c + * Commands for creating and altering TEXT SEARCH objects + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/genam.h" +#include "access/xact.h" +#include "catalog/namespace.h" +#include "catalog/objectaddress.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_config_map.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_parser.h" +#include "commands/comment.h" +#include "commands/extension.h" +#include "fmgr.h" +#include "nodes/makefuncs.h" +#include "tsearch/ts_cache.h" +#include "tsearch/ts_public.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" +#include "distributed/multi_executor.h" +#include "distributed/relation_access_tracking.h" +#include "distributed/worker_create_or_replace.h" + + +static List * GetDistributedTextSearchConfigurationNames(DropStmt *stmt); +static DefineStmt * GetTextSearchConfigDefineStmt(Oid tsconfigOid); +static List * GetTextSearchConfigCommentStmt(Oid tsconfigOid); +static List * get_ts_parser_namelist(Oid tsparserOid); +static List * 
GetTextSearchConfigMappingStmt(Oid tsconfigOid); +static List * GetTextSearchConfigOwnerStmts(Oid tsconfigOid); + +static List * get_ts_dict_namelist(Oid tsdictOid); +static Oid get_ts_config_parser_oid(Oid tsconfigOid); +static char * get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype); + +/* + * PostprocessCreateTextSearchConfigurationStmt is called after the TEXT SEARCH + * CONFIGURATION has been created locally. + * + * Contrary to many other objects a text search configuration is often created as a copy + * of an existing configuration. After the copy there is no relation to the configuration + * that has been copied. This prevents our normal approach of ensuring dependencies to + * exist before forwarding a close ressemblance of the statement the user executed. + * + * Instead we recreate the object based on what we find in our own catalog, hence the + * amount of work we perform in the postprocess function, contrary to other objects. + */ +List * +PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString) +{ + DefineStmt *stmt = castNode(DefineStmt, node); + Assert(stmt->kind == OBJECT_TSCONFIGURATION); + + if (!ShouldPropagate()) + { + return NIL; + } + + /* + * If the create command is a part of a multi-statement transaction that is not in + * sequential mode, don't propagate. Instead we will rely on back filling. 
+ */
+	if (IsMultiStatementTransaction())
+	{
+		if (MultiShardConnectionType != SEQUENTIAL_CONNECTION)
+		{
+			return NIL;
+		}
+	}
+
+	EnsureCoordinator();
+	EnsureSequentialMode(OBJECT_TSCONFIGURATION);
+
+	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
+	EnsureDependenciesExistOnAllNodes(&address);
+
+	/*
+	 * TEXT SEARCH CONFIGURATION objects are more complex with their mappings and the
+	 * possibility of copying from existing templates that we will require the idempotent
+	 * recreation commands to be run for successful propagation
+	 */
+	List *commands = CreateTextSearchConfigDDLCommandsIdempotent(&address);
+
+	commands = lcons(DISABLE_DDL_PROPAGATION, commands);
+	commands = lappend(commands, ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
+
+
+List *
+GetCreateTextSearchConfigStatements(const ObjectAddress *address)
+{
+	Assert(address->classId == TSConfigRelationId);
+	List *stmts = NIL;
+
+	/* CREATE TEXT SEARCH CONFIGURATION ...*/
+	stmts = lappend(stmts, GetTextSearchConfigDefineStmt(address->objectId));
+
+	/* ALTER TEXT SEARCH CONFIGURATION ... OWNER TO ...*/
+	stmts = list_concat(stmts, GetTextSearchConfigOwnerStmts(address->objectId));
+
+	/* COMMENT ON TEXT SEARCH CONFIGURATION ... */
+	stmts = list_concat(stmts, GetTextSearchConfigCommentStmt(address->objectId));
+
+
+	/* ALTER TEXT SEARCH CONFIGURATION ... ADD MAPPING FOR ... WITH ... */
+	stmts = list_concat(stmts, GetTextSearchConfigMappingStmt(address->objectId));
+
+	return stmts;
+}
+
+
+/*
+ * CreateTextSearchConfigDDLCommandsIdempotent creates a list of ddl commands to recreate
+ * a TEXT SEARCH CONFIGURATION object in an idempotent manner on workers.
+ */ +List * +CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address) +{ + List *stmts = GetCreateTextSearchConfigStatements(address); + List *sqls = DeparseTreeNodes(stmts); + return list_make1(WrapCreateOrReplaceList(sqls)); +} + + +/* + * PreprocessDropTextSearchConfigurationStmt prepares the statements we need to send to + * the workers. After we have dropped the schema's locally they also got removed from + * pg_dist_object so it is important to do all distribution checks before the change is + * made locally. + */ +List * +PreprocessDropTextSearchConfigurationStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + DropStmt *stmt = castNode(DropStmt, node); + Assert(stmt->removeType == OBJECT_TSCONFIGURATION); + + if (!ShouldPropagate()) + { + return NIL; + } + + List *distributedObjects = GetDistributedTextSearchConfigurationNames(stmt); + if (list_length(distributedObjects) == 0) + { + /* no distributed objects to remove */ + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + /* + * Temporarily replace the list of objects being dropped with only the list + * containing the distributed objects. After we have created the sql statement we + * restore the original list of objects to execute on locally. + * + * Because searchpaths on coordinator and workers might not be in sync we fully + * qualify the list before deparsing. This is safe because qualification doesn't + * change the original names in place, but insteads creates new ones. 
+ */
+	List *originalObjects = stmt->objects;
+	stmt->objects = distributedObjects;
+	QualifyTreeNode((Node *) stmt);
+	const char *dropStmtSql = DeparseTreeNode((Node *) stmt);
+	stmt->objects = originalObjects;
+
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) dropStmtSql,
+								ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
+}
+
+
+/*
+ * GetDistributedTextSearchConfigurationNames iterates over all text search configurations
+ * dropped, and creates a list containing all configurations that are distributed.
+ */
+static List *
+GetDistributedTextSearchConfigurationNames(DropStmt *stmt)
+{
+	List *objName = NULL;
+	List *distributedObjects = NIL;
+	foreach_ptr(objName, stmt->objects)
+	{
+		Oid tsconfigOid = get_ts_config_oid(objName, stmt->missing_ok);
+		if (!OidIsValid(tsconfigOid))
+		{
+			/* skip missing configuration names, they can't be distributed */
+			continue;
+		}
+
+		ObjectAddress address = { 0 };
+		ObjectAddressSet(address, TSConfigRelationId, tsconfigOid);
+		if (!IsObjectDistributed(&address))
+		{
+			continue;
+		}
+		distributedObjects = lappend(distributedObjects, objName);
+	}
+	return distributedObjects;
+}
+
+
+/*
+ * PreprocessAlterTextSearchConfigurationStmt verifies if the configuration being altered
+ * is distributed in the cluster. If that is the case it will prepare the list of commands
+ * to send to the worker to apply the same changes remotely.
+ */ +List * +PreprocessAlterTextSearchConfigurationStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + const char *alterStmtSql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) alterStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * PreprocessRenameTextSearchConfigurationStmt verifies if the configuration being altered + * is distributed in the cluster. If that is the case it will prepare the list of commands + * to send to the worker to apply the same changes remote. + */ +List * +PreprocessRenameTextSearchConfigurationStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + + char *ddlCommand = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) ddlCommand, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * PreprocessAlterTextSearchConfigurationSchemaStmt verifies if the configuration being + * altered is distributed in the cluster. If that is the case it will prepare the list of + * commands to send to the worker to apply the same changes remote. 
+ */ +List * +PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, + stmt->missing_ok); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * PostprocessAlterTextSearchConfigurationSchemaStmt is invoked after the schema has been + * changed locally. Since changing the schema could result in new dependencies being found + * for this object we re-ensure all the dependencies for the configuration do exist. This + * is solely to propagate the new schema (and all its dependencies) if it was not already + * distributed in the cluster. + */ +List * +PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, + stmt->missing_ok); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + /* dependencies have changed (schema) let's ensure they exist */ + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + +/* + * PreprocessTextSearchConfigurationCommentStmt propagates any comment on a distributed + * configuration to the workers. Since comments for configurations are promenently shown + * when listing all text search configurations this is purely a cosmetic thing when + * running in MX. 
+ */ +List * +PreprocessTextSearchConfigurationCommentStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * PreprocessAlterTextSearchConfigurationOwnerStmt verifies if the configuration being + * altered is distributed in the cluster. If that is the case it will prepare the list of + * commands to send to the worker to apply the same changes remote. + */ +List * +PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PostprocessAlterTextSearchConfigurationOwnerStmt is invoked after the owner has been + * changed locally. Since changing the owner could result in new dependencies being found + * for this object we re-ensure all the dependencies for the configuration do exist. 
This + * is solely to propagate the new owner (and all its dependencies) if it was not already + * distributed in the cluster. + */ +List * +PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + /* dependencies have changed (owner) let's ensure they exist */ + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + +/* + * GetTextSearchConfigDefineStmt returns the DefineStmt for a TEXT SEARCH CONFIGURATION + * based on the configuration as defined in the catalog identified by tsconfigOid. + * + * This statement will only contain the parser, as all other properties for text search + * configurations are stored as mappings in a different catalog. + */ +static DefineStmt * +GetTextSearchConfigDefineStmt(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search configuration %u", + tsconfigOid); + } + Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup); + + DefineStmt *stmt = makeNode(DefineStmt); + stmt->kind = OBJECT_TSCONFIGURATION; + + stmt->defnames = get_ts_config_namelist(tsconfigOid); + + List *parserNameList = get_ts_parser_namelist(config->cfgparser); + TypeName *parserTypeName = makeTypeNameFromNameList(parserNameList); + stmt->definition = list_make1(makeDefElem("parser", (Node *) parserTypeName, -1)); + + ReleaseSysCache(tup); + return stmt; +} + + +/* + * GetTextSearchConfigCommentStmt returns a list containing all entries to recreate a + * comment on the configuration identified by tsconfigOid. The list could be empty if + * there is no comment on a configuration. 
+ * + * The reason for a list is for easy use when building a list of all statements to invoke + * to recreate the text search configuration. An empty list can easily be concatinated + * without inspection, contrary to a NULL ptr if we would return the CommentStmt struct. + */ +static List * +GetTextSearchConfigCommentStmt(Oid tsconfigOid) +{ + char *comment = GetComment(tsconfigOid, TSConfigRelationId, 0); + if (!comment) + { + return NIL; + } + + CommentStmt *stmt = makeNode(CommentStmt); + stmt->objtype = OBJECT_TSCONFIGURATION; + + stmt->object = (Node *) get_ts_config_namelist(tsconfigOid); + stmt->comment = comment; + return list_make1(stmt); +} + + +/* + * GetTextSearchConfigMappingStmt returns a list of all mappings from token_types to + * dictionaries configured on a text search configuration identified by tsconfigOid. + * + * Many mappings can exist on a configuration which all require their own statement to + * recreate. + */ +static List * +GetTextSearchConfigMappingStmt(Oid tsconfigOid) +{ + ScanKeyData mapskey = { 0 }; + + /* mapcfg = tsconfigOid */ + ScanKeyInit(&mapskey, + Anum_pg_ts_config_map_mapcfg, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(tsconfigOid)); + + Relation maprel = table_open(TSConfigMapRelationId, AccessShareLock); + Relation mapidx = index_open(TSConfigMapIndexId, AccessShareLock); + SysScanDesc mapscan = systable_beginscan_ordered(maprel, mapidx, NULL, 1, &mapskey); + + List *stmts = NIL; + AlterTSConfigurationStmt *stmt = NULL; + + /* + * We iterate the config mappings on the index order filtered by mapcfg. Meaning we + * get equal maptokentype's in 1 run. By comparing the current tokentype to the last + * we know when we can create a new stmt and append the previous constructed one to + * the list. + */ + int lastTokType = -1; + + /* + * We read all mappings filtered by config id, hence we only need to load the name + * once and can reuse for every statement. 
+ */ + List *configName = get_ts_config_namelist(tsconfigOid); + + Oid parserOid = get_ts_config_parser_oid(tsconfigOid); + + HeapTuple maptup = NULL; + while ((maptup = systable_getnext_ordered(mapscan, ForwardScanDirection)) != NULL) + { + Form_pg_ts_config_map cfgmap = (Form_pg_ts_config_map) GETSTRUCT(maptup); + if (lastTokType != cfgmap->maptokentype) + { + /* creating a new statement, appending the previous one (if existing) */ + if (stmt != NULL) + { + stmts = lappend(stmts, stmt); + } + + stmt = makeNode(AlterTSConfigurationStmt); + stmt->cfgname = configName; + stmt->kind = ALTER_TSCONFIG_ADD_MAPPING; + stmt->tokentype = list_make1(makeString( + get_ts_parser_tokentype_name(parserOid, + cfgmap-> + maptokentype))); + + lastTokType = cfgmap->maptokentype; + } + + stmt->dicts = lappend(stmt->dicts, get_ts_dict_namelist(cfgmap->mapdict)); + } + + /* + * If we have ran atleast 1 iteration above we have the last stmt not added to the + * stmts list. + */ + if (stmt != NULL) + { + stmts = lappend(stmts, stmt); + stmt = NULL; + } + + systable_endscan_ordered(mapscan); + index_close(mapidx, NoLock); + table_close(maprel, NoLock); + + return stmts; +} + + +/* + * GetTextSearchConfigOwnerStmts returns a potentially empty list of statements to change + * the ownership of a TEXT SEARCH CONFIGURATION object. + * + * The list is for convenienve when building a full list of statements to recreate the + * configuration. 
+ */ +static List * +GetTextSearchConfigOwnerStmts(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search configuration %u", + tsconfigOid); + } + Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup); + + AlterOwnerStmt *stmt = makeNode(AlterOwnerStmt); + stmt->objectType = OBJECT_TSCONFIGURATION; + stmt->object = (Node *) get_ts_config_namelist(tsconfigOid); + stmt->newowner = GetRoleSpecObjectForUser(config->cfgowner); + + ReleaseSysCache(tup); + return list_make1(stmt); +} + + +/* + * get_ts_config_namelist based on the tsconfigOid this function creates the namelist that + * identifies the configuration in a fully qualified manner, irregardless of the schema + * existing on the search_path. + */ +List * +get_ts_config_namelist(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search configuration %u", + tsconfigOid); + } + Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup); + + char *schema = get_namespace_name(config->cfgnamespace); + char *configName = pstrdup(NameStr(config->cfgname)); + List *names = list_make2(makeString(schema), makeString(configName)); + + ReleaseSysCache(tup); + return names; +} + + +/* + * get_ts_dict_namelist based on the tsdictOid this function creates the namelist that + * identifies the dictionary in a fully qualified manner, irregardless of the schema + * existing on the search_path. 
+ */ +static List * +get_ts_dict_namelist(Oid tsdictOid) +{ + HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search dictionary %u", tsdictOid); + } + Form_pg_ts_dict dict = (Form_pg_ts_dict) GETSTRUCT(tup); + + char *schema = get_namespace_name(dict->dictnamespace); + char *dictName = pstrdup(NameStr(dict->dictname)); + List *names = list_make2(makeString(schema), makeString(dictName)); + + ReleaseSysCache(tup); + return names; +} + + +/* + * get_ts_config_parser_oid based on the tsconfigOid this function returns the Oid of the + * parser used in the configuration. + */ +static Oid +get_ts_config_parser_oid(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search configuration %u", tsconfigOid); + } + Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup); + Oid parserOid = config->cfgparser; + + ReleaseSysCache(tup); + return parserOid; +} + + +/* + * get_ts_parser_tokentype_name returns the name of the token as known to the parser by + * its tokentype identifier. The parser used to resolve the token name is identified by + * parserOid and should be the same that emitted the tokentype to begin with. 
+ */
+static char *
+get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype)
+{
+	TSParserCacheEntry *parserCache = lookup_ts_parser_cache(parserOid);
+	if (!OidIsValid(parserCache->lextypeOid))
+	{
+		elog(ERROR, "method lextype isn't defined for text search parser %u", parserOid);
+	}
+
+	/* take lextypes from parser */
+	LexDescr *tokenlist = (LexDescr *) DatumGetPointer(
+		OidFunctionCall1(parserCache->lextypeOid, Int32GetDatum(0)));
+
+	/* and find the one with lexid = tokentype */
+	int tokenIndex = 0;
+	while (tokenlist && tokenlist[tokenIndex].lexid)
+	{
+		if (tokenlist[tokenIndex].lexid == tokentype)
+		{
+			return pstrdup(tokenlist[tokenIndex].alias);
+		}
+		tokenIndex++;
+	}
+
+	/* we haven't found the token */
+	ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+					errmsg("token type \"%d\" does not exist in parser", tokentype)));
+}
+
+
+/*
+ * get_ts_parser_namelist based on the tsparserOid this function creates the namelist that
+ * identifies the parser in a fully qualified manner, regardless of the schema existing
+ * on the search_path.
+ */
+static List *
+get_ts_parser_namelist(Oid tsparserOid)
+{
+	HeapTuple tup = SearchSysCache1(TSPARSEROID, ObjectIdGetDatum(tsparserOid));
+	if (!HeapTupleIsValid(tup)) /* should not happen */
+	{
+		elog(ERROR, "cache lookup failed for text search parser %u",
+			 tsparserOid);
+	}
+	Form_pg_ts_parser parser = (Form_pg_ts_parser) GETSTRUCT(tup);
+
+	char *schema = get_namespace_name(parser->prsnamespace);
+	char *parserName = pstrdup(NameStr(parser->prsname));
+	List *names = list_make2(makeString(schema), makeString(parserName));
+
+	ReleaseSysCache(tup);
+	return names;
+}
+
+
+/*
+ * CreateTextSearchConfigurationObjectAddress resolves the ObjectAddress for the object
+ * being created. If missing_ok is false the function will error, explaining to the user
+ * the text search configuration described in the statement doesn't exist.
+ */ +ObjectAddress +CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok) +{ + DefineStmt *stmt = castNode(DefineStmt, node); + Assert(stmt->kind == OBJECT_TSCONFIGURATION); + + Oid objid = get_ts_config_oid(stmt->defnames, missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSConfigRelationId, objid); + return address; +} + + +/* + * RenameTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH CONFIGURATION being renamed. Optionally errors if the configuration does not + * exist based on the missing_ok flag passed in by the caller. + */ +ObjectAddress +RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSCONFIGURATION); + + Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSConfigRelationId, objid); + return address; +} + + +/* + * AlterTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH CONFIGURATION being altered. Optionally errors if the configuration does not + * exist based on the missing_ok flag passed in by the caller. + */ +ObjectAddress +AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); + + Oid objid = get_ts_config_oid(stmt->cfgname, missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSConfigRelationId, objid); + return address; +} + + +/* + * AlterTextSearchConfigurationSchemaStmtObjectAddress resolves the ObjectAddress for the + * TEXT SEARCH CONFIGURATION being moved to a different schema. Optionally errors if the + * configuration does not exist based on the missing_ok flag passed in by the caller. 
+ * + * This can be called, either before or after the move of schema has been executed, hence + * the triple checking before the error might be thrown. Errors for non-existing schema's + * in edgecases will be raised by postgres while executing the move. + */ +ObjectAddress +AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + Oid objid = get_ts_config_oid(castNode(List, stmt->object), true); + + if (!OidIsValid(objid)) + { + /* + * couldn't find the text search configuration, might have already been moved to + * the new schema, we construct a new sequence name that uses the new schema to + * search in. + */ + char *schemaname = NULL; + char *config_name = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaname, &config_name); + + char *newSchemaName = stmt->newschema; + List *names = list_make2(makeString(newSchemaName), makeString(config_name)); + objid = get_ts_config_oid(names, true); + + if (!missing_ok && !OidIsValid(objid)) + { + /* + * if the text search config id is still invalid we couldn't find it, error + * with the same message postgres would error with if missing_ok is false + * (not ok to miss) + */ + + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("text search configuration \"%s\" does not exist", + NameListToString(castNode(List, stmt->object))))); + } + } + + ObjectAddress sequenceAddress = { 0 }; + ObjectAddressSet(sequenceAddress, TSConfigRelationId, objid); + return sequenceAddress; +} + + +/* + * TextSearchConfigurationCommentObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH CONFIGURATION on which the comment is placed. Optionally errors if the + * configuration does not exist based on the missing_ok flag passed in by the caller. 
+ */ +ObjectAddress +TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSCONFIGURATION); + + Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSConfigRelationId, objid); + return address; +} + + +/* + * AlterTextSearchConfigurationOwnerObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH CONFIGURATION for which the owner is changed. Optionally errors if the + * configuration does not exist based on the missing_ok flag passed in by the caller. + */ +ObjectAddress +AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Relation relation = NULL; + + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + return get_object_address(stmt->objectType, stmt->object, &relation, AccessShareLock, + missing_ok); +} + + +/* + * GenerateBackupNameForTextSearchConfiguration generates a safe name that is not in use + * already that can be used to rename an existing TEXT SEARCH CONFIGURATION to allow the + * configuration with a specific name to be created, even if this would not have been + * possible due to name collisions. 
+ */ +char * +GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address) +{ + Assert(address->classId == TSConfigRelationId); + List *names = get_ts_config_namelist(address->objectId); + + RangeVar *rel = makeRangeVarFromNameList(names); + + char *newName = palloc0(NAMEDATALEN); + char suffix[NAMEDATALEN] = { 0 }; + char *baseName = rel->relname; + int baseLength = strlen(baseName); + int count = 0; + + while (true) + { + int suffixLength = SafeSnprintf(suffix, NAMEDATALEN - 1, "(citus_backup_%d)", + count); + + /* trim the base name at the end to leave space for the suffix and trailing \0 */ + baseLength = Min(baseLength, NAMEDATALEN - suffixLength - 1); + + /* clear newName before copying the potentially trimmed baseName and suffix */ + memset(newName, 0, NAMEDATALEN); + strncpy_s(newName, NAMEDATALEN, baseName, baseLength); + strncpy_s(newName + baseLength, NAMEDATALEN - baseLength, suffix, + suffixLength); + + + rel->relname = newName; + List *newNameList = MakeNameListFromRangeVar(rel); + + Oid tsconfigOid = get_ts_config_oid(newNameList, true); + if (!OidIsValid(tsconfigOid)) + { + return newName; + } + + count++; + } +} diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c index 815a90f93..48d1dd10e 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -267,13 +267,17 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement) ErrorIfIllegallyChangingKnownShard(relationId); - if (IsCitusTable(relationId) && IsForeignTable(relationId)) + /* + * We allow truncating foreign tables that are added to metadata + * only on the coordinator, as user mappings are not propagated. 
+	 */
+	if (IsForeignTable(relationId) &&
+		IsCitusTableType(relationId, CITUS_LOCAL_TABLE) &&
+		!IsCoordinator())
 	{
 		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						errmsg("truncating distributed foreign tables is "
-							   "currently unsupported"),
-						errhint("Consider undistributing table before TRUNCATE, "
-								"and then distribute or add to metadata again")));
+						errmsg("truncating foreign tables that are added to metadata "
+							   "can only be executed on the coordinator")));
 	}
 }
 }
diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c
index 78b56c2ff..eb59e8522 100644
--- a/src/backend/distributed/commands/type.c
+++ b/src/backend/distributed/commands/type.c
@@ -92,7 +92,6 @@ bool EnableCreateTypePropagation = true;
 static List * FilterNameListForDistributedTypes(List *objects, bool missing_ok);
 static List * TypeNameListToObjectAddresses(List *objects);
 static TypeName * MakeTypeNameFromRangeVar(const RangeVar *relation);
-static void EnsureSequentialModeForTypeDDL(void);
 static Oid GetTypeOwner(Oid typeOid);
 
 /* recreate functions */
@@ -158,7 +157,7 @@ PreprocessCompositeTypeStmt(Node *node, const char *queryString,
 	 * when we allow propagation within a transaction block we should make sure to only
 	 * allow this in sequential mode
 	 */
-	EnsureSequentialModeForTypeDDL();
+	EnsureSequentialMode(OBJECT_TYPE);
 
 	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
 								(void *) compositeTypeStmtSql,
@@ -223,7 +222,7 @@ PreprocessAlterTypeStmt(Node *node, const char *queryString,
 	 * regardless if in a transaction or not. If we would not propagate the alter
 	 * statement the types would be different on worker and coordinator.
*/ - EnsureSequentialModeForTypeDDL(); + EnsureSequentialMode(OBJECT_TYPE); List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) alterTypeStmtSql, @@ -266,7 +265,7 @@ PreprocessCreateEnumStmt(Node *node, const char *queryString, * when we allow propagation within a transaction block we should make sure to only * allow this in sequential mode */ - EnsureSequentialModeForTypeDDL(); + EnsureSequentialMode(OBJECT_TYPE); /* to prevent recursion with mx we disable ddl propagation */ List *commands = list_make3(DISABLE_DDL_PROPAGATION, @@ -325,7 +324,7 @@ PreprocessAlterEnumStmt(Node *node, const char *queryString, * (adding values to an enum can not run in a transaction anyway and would error by * postgres already). */ - EnsureSequentialModeForTypeDDL(); + EnsureSequentialMode(OBJECT_TYPE); /* * managing types can only be done on the coordinator if ddl propagation is on. when @@ -405,7 +404,7 @@ PreprocessDropTypeStmt(Node *node, const char *queryString, char *dropStmtSql = DeparseTreeNode((Node *) stmt); stmt->objects = oldTypes; - EnsureSequentialModeForTypeDDL(); + EnsureSequentialMode(OBJECT_TYPE); /* to prevent recursion with mx we disable ddl propagation */ List *commands = list_make3(DISABLE_DDL_PROPAGATION, @@ -442,7 +441,7 @@ PreprocessRenameTypeStmt(Node *node, const char *queryString, /* deparse sql*/ const char *renameStmtSql = DeparseTreeNode(node); - EnsureSequentialModeForTypeDDL(); + EnsureSequentialMode(OBJECT_TYPE); /* to prevent recursion with mx we disable ddl propagation */ List *commands = list_make3(DISABLE_DDL_PROPAGATION, @@ -480,7 +479,7 @@ PreprocessRenameTypeAttributeStmt(Node *node, const char *queryString, const char *sql = DeparseTreeNode((Node *) stmt); - EnsureSequentialModeForTypeDDL(); + EnsureSequentialMode(OBJECT_TYPE); List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) sql, ENABLE_DDL_PROPAGATION); @@ -513,7 +512,7 @@ PreprocessAlterTypeSchemaStmt(Node *node, const char *queryString, QualifyTreeNode((Node *) 
stmt); const char *sql = DeparseTreeNode((Node *) stmt); - EnsureSequentialModeForTypeDDL(); + EnsureSequentialMode(OBJECT_TYPE); List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) sql, @@ -572,7 +571,7 @@ PreprocessAlterTypeOwnerStmt(Node *node, const char *queryString, QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); - EnsureSequentialModeForTypeDDL(); + EnsureSequentialMode(OBJECT_TYPE); List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) sql, ENABLE_DDL_PROPAGATION); @@ -958,6 +957,20 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress) return NIL; } + HeapTuple tup = SearchSysCacheCopy1(TYPEOID, ObjectIdGetDatum(typeAddress->objectId)); + if (!HeapTupleIsValid(tup)) + { + elog(ERROR, "cache lookup failed for type %u", typeAddress->objectId); + } + + /* Don't send any command if the type is a table's row type */ + Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup); + if (typTup->typtype == TYPTYPE_COMPOSITE && + get_rel_relkind(typTup->typrelid) != RELKIND_COMPOSITE_TYPE) + { + return NIL; + } + Node *stmt = CreateTypeStmtByObjectAddress(typeAddress); /* capture ddl command for recreation and wrap in create if not exists construct */ @@ -1116,47 +1129,6 @@ MakeTypeNameFromRangeVar(const RangeVar *relation) } -/* - * EnsureSequentialModeForTypeDDL makes sure that the current transaction is already in - * sequential mode, or can still safely be put in sequential mode, it errors if that is - * not possible. The error contains information for the user to retry the transaction with - * sequential mode set from the beginning. - * - * As types are node scoped objects there exists only 1 instance of the type used by - * potentially multiple shards. To make sure all shards in the transaction can interact - * with the type the type needs to be visible on all connections used by the transaction, - * meaning we can only use 1 connection per node. 
- */ -static void -EnsureSequentialModeForTypeDDL(void) -{ - if (!IsTransactionBlock()) - { - /* we do not need to switch to sequential mode if we are not in a transaction */ - return; - } - - if (ParallelQueryExecutedInTransaction()) - { - ereport(ERROR, (errmsg("cannot create or modify type because there was a " - "parallel operation on a distributed table in the " - "transaction"), - errdetail("When creating or altering a type, Citus needs to " - "perform all operations over a single connection per " - "node to ensure consistency."), - errhint("Try re-running the transaction with " - "\"SET LOCAL citus.multi_shard_modify_mode TO " - "\'sequential\';\""))); - } - - ereport(DEBUG1, (errmsg("switching to sequential query execution mode"), - errdetail("Type is created or altered. To make sure subsequent " - "commands see the type correctly we need to make sure to " - "use only one connection for all future commands"))); - SetLocalMultiShardModifyModeToSequential(); -} - - /* * ShouldPropagateTypeCreate returns if we should propagate the creation of a type. 
* diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c index 32dc21e40..41017fab9 100644 --- a/src/backend/distributed/connection/connection_configuration.c +++ b/src/backend/distributed/connection/connection_configuration.c @@ -10,6 +10,7 @@ #include "postgres.h" +#include "distributed/backend_data.h" #include "distributed/citus_safe_lib.h" #include "distributed/connection_management.h" #include "distributed/metadata_cache.h" @@ -232,6 +233,10 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, */ char nodePortString[12] = ""; + StringInfo applicationName = makeStringInfo(); + appendStringInfo(applicationName, "%s%ld", CITUS_APPLICATION_NAME_PREFIX, + GetGlobalPID()); + /* * This function has three sections: * - Initialize the keywords and values (to be copied later) of global parameters @@ -260,7 +265,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, key->database, key->user, GetDatabaseEncodingName(), - CITUS_APPLICATION_NAME + applicationName->data }; /* diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index 89a863109..13b52790a 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -19,6 +19,7 @@ #include "access/hash.h" #include "commands/dbcommands.h" +#include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/errormessage.h" #include "distributed/error_codes.h" @@ -1459,7 +1460,7 @@ IsRebalancerInternalBackend(void) bool IsCitusInternalBackend(void) { - return application_name && strcmp(application_name, CITUS_APPLICATION_NAME) == 0; + return ExtractGlobalPID(application_name) != INVALID_CITUS_INTERNAL_BACKEND_GPID; } diff --git a/src/backend/distributed/deparser/deparse.c 
b/src/backend/distributed/deparser/deparse.c index cff1d0b16..8312d6407 100644 --- a/src/backend/distributed/deparser/deparse.c +++ b/src/backend/distributed/deparser/deparse.c @@ -17,6 +17,7 @@ #include "distributed/commands.h" #include "distributed/deparser.h" +#include "distributed/listutils.h" /* * DeparseTreeNode aims to be the inverse of postgres' ParseTreeNode. Currently with @@ -35,3 +36,20 @@ DeparseTreeNode(Node *stmt) return ops->deparse(stmt); } + + +/* + * DeparseTreeNodes deparses all stmts in the list from the statement datastructure into + * sql statements. + */ +List * +DeparseTreeNodes(List *stmts) +{ + List *sqls = NIL; + Node *stmt = NULL; + foreach_ptr(stmt, stmts) + { + sqls = lappend(sqls, DeparseTreeNode(stmt)); + } + return sqls; +} diff --git a/src/backend/distributed/deparser/deparse_schema_stmts.c b/src/backend/distributed/deparser/deparse_schema_stmts.c index 426146f56..f8c3d35a1 100644 --- a/src/backend/distributed/deparser/deparse_schema_stmts.c +++ b/src/backend/distributed/deparser/deparse_schema_stmts.c @@ -14,16 +14,47 @@ #include "distributed/citus_ruleutils.h" #include "distributed/deparser.h" +#include "distributed/listutils.h" #include "lib/stringinfo.h" #include "nodes/nodes.h" #include "utils/builtins.h" +static void AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt); +static void AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt); static void AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt); static void AppendGrantOnSchemaPrivileges(StringInfo buf, GrantStmt *stmt); static void AppendGrantOnSchemaSchemas(StringInfo buf, GrantStmt *stmt); static void AppendGrantOnSchemaGrantees(StringInfo buf, GrantStmt *stmt); static void AppendAlterSchemaRenameStmt(StringInfo buf, RenameStmt *stmt); +char * +DeparseCreateSchemaStmt(Node *node) +{ + CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node); + + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendCreateSchemaStmt(&str, stmt); + + return 
str.data; +} + + +char * +DeparseDropSchemaStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendDropSchemaStmt(&str, stmt); + + return str.data; +} + + char * DeparseGrantOnSchemaStmt(Node *node) { @@ -53,6 +84,70 @@ DeparseAlterSchemaRenameStmt(Node *node) } +static void +AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt) +{ + if (stmt->schemaElts != NIL) + { + elog(ERROR, "schema creating is not supported with other create commands"); + } + + if (stmt->schemaname == NULL) + { + elog(ERROR, "schema name should be specified"); + } + + appendStringInfoString(buf, "CREATE SCHEMA "); + + if (stmt->if_not_exists) + { + appendStringInfoString(buf, "IF NOT EXISTS "); + } + + appendStringInfo(buf, "%s ", quote_identifier(stmt->schemaname)); + + if (stmt->authrole != NULL) + { + appendStringInfo(buf, "AUTHORIZATION %s", RoleSpecString(stmt->authrole, true)); + } +} + + +static void +AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt) +{ + Assert(stmt->removeType == OBJECT_SCHEMA); + + appendStringInfoString(buf, "DROP SCHEMA "); + + if (stmt->missing_ok) + { + appendStringInfoString(buf, "IF EXISTS "); + } + + Value *schemaValue = NULL; + foreach_ptr(schemaValue, stmt->objects) + { + const char *schemaString = quote_identifier(strVal(schemaValue)); + appendStringInfo(buf, "%s", schemaString); + + if (schemaValue != llast(stmt->objects)) + { + appendStringInfoString(buf, ", "); + } + } + + if (stmt->behavior == DROP_CASCADE) + { + appendStringInfoString(buf, " CASCADE"); + } + else if (stmt->behavior == DROP_RESTRICT) + { + appendStringInfoString(buf, " RESTRICT"); + } +} + + static void AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt) { diff --git a/src/backend/distributed/deparser/deparse_text_search.c b/src/backend/distributed/deparser/deparse_text_search.c new file mode 100644 index 000000000..e1ac44f5a --- /dev/null +++ 
b/src/backend/distributed/deparser/deparse_text_search.c @@ -0,0 +1,377 @@ +/*------------------------------------------------------------------------- + * + * deparse_text_search.c + * All routines to deparse text search statements. + * This file contains all entry points specific for text search statement deparsing. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/namespace.h" +#include "utils/builtins.h" + +#include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" + +static void AppendDefElemList(StringInfo buf, List *defelms); + +static void AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes); +static void AppendStringInfoDictnames(StringInfo buf, List *dicts); + + +/* + * DeparseCreateTextSearchStmt returns the sql for a DefineStmt defining a TEXT SEARCH + * CONFIGURATION + * + * Although the syntax is mutually exclusive on the two arguments that can be passed in + * the deparser will syntactically correct multiple definitions if provided. * + */ +char * +DeparseCreateTextSearchStmt(Node *node) +{ + DefineStmt *stmt = castNode(DefineStmt, node); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + const char *identifier = NameListToQuotedString(stmt->defnames); + appendStringInfo(&buf, "CREATE TEXT SEARCH CONFIGURATION %s ", identifier); + appendStringInfoString(&buf, "("); + AppendDefElemList(&buf, stmt->definition); + appendStringInfoString(&buf, ");"); + + return buf.data; +} + + +/* + * AppendDefElemList specialization to append a comma separated list of definitions to a + * define statement. + * + * Currently only supports String and TypeName entries. Will error on others. 
+ */
+static void
+AppendDefElemList(StringInfo buf, List *defelems)
+{
+	DefElem *defelem = NULL;
+	bool first = true;
+	foreach_ptr(defelem, defelems)
+	{
+		if (!first)
+		{
+			appendStringInfoString(buf, ", ");
+		}
+		first = false;
+
+		/* extract identifier from defelem */
+		const char *identifier = NULL;
+		switch (nodeTag(defelem->arg))
+		{
+			case T_String:
+			{
+				identifier = quote_identifier(strVal(defelem->arg));
+				break;
+			}
+
+			case T_TypeName:
+			{
+				TypeName *typeName = castNode(TypeName, defelem->arg);
+				identifier = NameListToQuotedString(typeName->names);
+				break;
+			}
+
+			default:
+			{
+				ereport(ERROR, (errmsg("unexpected argument during deparsing of "
+									   "TEXT SEARCH CONFIGURATION definition")));
+			}
+		}
+
+		/* stringify */
+		appendStringInfo(buf, "%s = %s", defelem->defname, identifier);
+	}
+}
+
+
+/*
+ * DeparseDropTextSearchConfigurationStmt returns the sql representation for a DROP TEXT
+ * SEARCH CONFIGURATION ... statement. Supports dropping multiple configurations at once.
+ */
+char *
+DeparseDropTextSearchConfigurationStmt(Node *node)
+{
+	DropStmt *stmt = castNode(DropStmt, node);
+	Assert(stmt->removeType == OBJECT_TSCONFIGURATION);
+
+	StringInfoData buf = { 0 };
+	initStringInfo(&buf);
+
+	appendStringInfoString(&buf, "DROP TEXT SEARCH CONFIGURATION ");
+	List *nameList = NIL;
+	bool first = true;
+	foreach_ptr(nameList, stmt->objects)
+	{
+		if (!first)
+		{
+			appendStringInfoString(&buf, ", ");
+		}
+		first = false;
+
+		appendStringInfoString(&buf, NameListToQuotedString(nameList));
+	}
+
+	if (stmt->behavior == DROP_CASCADE)
+	{
+		appendStringInfoString(&buf, " CASCADE");
+	}
+
+	appendStringInfoString(&buf, ";");
+
+	return buf.data;
+}
+
+
+/*
+ * DeparseRenameTextSearchConfigurationStmt returns the sql representation of an ALTER TEXT
+ * SEARCH CONFIGURATION ... RENAME TO ... statement.
+ */
+char *
+DeparseRenameTextSearchConfigurationStmt(Node *node)
+{
+	RenameStmt *stmt = castNode(RenameStmt, node);
+	Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
+
+	StringInfoData buf = { 0 };
+	initStringInfo(&buf);
+
+	char *identifier = NameListToQuotedString(castNode(List, stmt->object));
+	appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s RENAME TO %s;",
+					 identifier, quote_identifier(stmt->newname));
+
+	return buf.data;
+}
+
+
+/*
+ * DeparseAlterTextSearchConfigurationStmt returns the sql representation of any generic
+ * ALTER TEXT SEARCH CONFIGURATION .... statement. The statements supported include:
+ * - ALTER TEXT SEARCH CONFIGURATIONS ... ADD MAPPING FOR [, ...] WITH [, ...]
+ * - ALTER TEXT SEARCH CONFIGURATIONS ... ALTER MAPPING FOR [, ...] WITH [, ...]
+ * - ALTER TEXT SEARCH CONFIGURATIONS ... ALTER MAPPING REPLACE ... WITH ...
+ * - ALTER TEXT SEARCH CONFIGURATIONS ... ALTER MAPPING FOR [, ...] REPLACE ... WITH ...
+ * - ALTER TEXT SEARCH CONFIGURATIONS ... DROP MAPPING [ IF EXISTS ] FOR ...
+ */ +char * +DeparseAlterTextSearchConfigurationStmt(Node *node) +{ + AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + char *identifier = NameListToQuotedString(castNode(List, stmt->cfgname)); + appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s", identifier); + + switch (stmt->kind) + { + case ALTER_TSCONFIG_ADD_MAPPING: + { + appendStringInfoString(&buf, " ADD MAPPING FOR "); + AppendStringInfoTokentypeList(&buf, stmt->tokentype); + + appendStringInfoString(&buf, " WITH "); + AppendStringInfoDictnames(&buf, stmt->dicts); + + break; + } + + case ALTER_TSCONFIG_ALTER_MAPPING_FOR_TOKEN: + { + appendStringInfoString(&buf, " ALTER MAPPING FOR "); + AppendStringInfoTokentypeList(&buf, stmt->tokentype); + + appendStringInfoString(&buf, " WITH "); + AppendStringInfoDictnames(&buf, stmt->dicts); + + break; + } + + case ALTER_TSCONFIG_REPLACE_DICT: + case ALTER_TSCONFIG_REPLACE_DICT_FOR_TOKEN: + { + appendStringInfoString(&buf, " ALTER MAPPING"); + if (list_length(stmt->tokentype) > 0) + { + appendStringInfoString(&buf, " FOR "); + AppendStringInfoTokentypeList(&buf, stmt->tokentype); + } + + if (list_length(stmt->dicts) != 2) + { + elog(ERROR, "unexpected number of dictionaries while deparsing ALTER " + "TEXT SEARCH CONFIGURATION ... ALTER MAPPING [FOR ...] 
REPLACE " + "statement."); + } + + appendStringInfo(&buf, " REPLACE %s", + NameListToQuotedString(linitial(stmt->dicts))); + + appendStringInfo(&buf, " WITH %s", + NameListToQuotedString(lsecond(stmt->dicts))); + + break; + } + + case ALTER_TSCONFIG_DROP_MAPPING: + { + appendStringInfoString(&buf, " DROP MAPPING"); + + if (stmt->missing_ok) + { + appendStringInfoString(&buf, " IF EXISTS"); + } + + appendStringInfoString(&buf, " FOR "); + AppendStringInfoTokentypeList(&buf, stmt->tokentype); + break; + } + + default: + { + elog(ERROR, "unable to deparse unsupported ALTER TEXT SEARCH STATEMENT"); + } + } + + appendStringInfoString(&buf, ";"); + + return buf.data; +} + + +/* + * DeparseAlterTextSearchConfigurationSchemaStmt returns the sql statement representing + * ALTER TEXT SEARCH CONFIGURATION ... SET SCHEMA ... statements. + */ +char * +DeparseAlterTextSearchConfigurationSchemaStmt(Node *node) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s SET SCHEMA %s;", + NameListToQuotedString(castNode(List, stmt->object)), + quote_identifier(stmt->newschema)); + + return buf.data; +} + + +/* + * DeparseTextSearchConfigurationCommentStmt returns the sql statement representing + * COMMENT ON TEXT SEARCH CONFIGURATION ... IS ... 
+ */
+char *
+DeparseTextSearchConfigurationCommentStmt(Node *node)
+{
+	CommentStmt *stmt = castNode(CommentStmt, node);
+	Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
+
+	StringInfoData buf = { 0 };
+	initStringInfo(&buf);
+
+	appendStringInfo(&buf, "COMMENT ON TEXT SEARCH CONFIGURATION %s IS ",
+					 NameListToQuotedString(castNode(List, stmt->object)));
+
+	if (stmt->comment == NULL)
+	{
+		appendStringInfoString(&buf, "NULL");
+	}
+	else
+	{
+		appendStringInfoString(&buf, quote_literal_cstr(stmt->comment));
+	}
+
+	appendStringInfoString(&buf, ";");
+
+	return buf.data;
+}
+
+
+/*
+ * AppendStringInfoTokentypeList specializes in adding a comma separated list of
+ * token_type's to TEXT SEARCH CONFIGURATION commands
+ */
+static void
+AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes)
+{
+	Value *tokentype = NULL;
+	bool first = true;
+	foreach_ptr(tokentype, tokentypes)
+	{
+		if (nodeTag(tokentype) != T_String)
+		{
+			elog(ERROR,
+				 "unexpected tokentype for deparsing in text search configuration");
+		}
+
+		if (!first)
+		{
+			appendStringInfoString(buf, ", ");
+		}
+		first = false;
+
+		appendStringInfoString(buf, strVal(tokentype));
+	}
+}
+
+
+/*
+ * AppendStringInfoDictnames specializes in appending a comma separated list of
+ * dictionaries to TEXT SEARCH CONFIGURATION commands.
+ */
+static void
+AppendStringInfoDictnames(StringInfo buf, List *dicts)
+{
+	List *dictNames = NIL;
+	bool first = true;
+	foreach_ptr(dictNames, dicts)
+	{
+		if (!first)
+		{
+			appendStringInfoString(buf, ", ");
+		}
+		first = false;
+
+		char *dictIdentifier = NameListToQuotedString(dictNames);
+		appendStringInfoString(buf, dictIdentifier);
+	}
+}
+
+
+/*
+ * DeparseAlterTextSearchConfigurationOwnerStmt returns the sql statement representing
+ * ALTER TEXT SEARCH CONFIGURATION ... OWNER TO ... commands.
+ */ +char * +DeparseAlterTextSearchConfigurationOwnerStmt(Node *node) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s OWNER TO %s;", + NameListToQuotedString(castNode(List, stmt->object)), + RoleSpecString(stmt->newowner, true)); + + return buf.data; +} diff --git a/src/backend/distributed/deparser/qualify_text_search_stmts.c b/src/backend/distributed/deparser/qualify_text_search_stmts.c new file mode 100644 index 000000000..42c98039a --- /dev/null +++ b/src/backend/distributed/deparser/qualify_text_search_stmts.c @@ -0,0 +1,278 @@ +/*------------------------------------------------------------------------- + * + * qualify_text_search_stmts.c + * Functions specialized in fully qualifying all text search statements. These + * functions are dispatched from qualify.c + * + * Fully qualifying text search statements consists of adding the schema name + * to the subject of the types as well as any other branch of the parsetree. + * + * Goal would be that the deparser functions for these statements can + * serialize the statement without any external lookups. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/htup_details.h" +#include "catalog/namespace.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_dict.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "distributed/deparser.h" +#include "distributed/listutils.h" + +static Oid get_ts_config_namespace(Oid tsconfigOid); +static Oid get_ts_dict_namespace(Oid tsdictOid); + + +/* + * QualifyDropTextSearchConfigurationStmt adds any missing schema names to text search + * configurations being dropped. All configurations are expected to exists before fully + * qualifying the statement. 
Errors will be raised for objects not existing. Non-existing + * objects are expected to not be distributed. + */ +void +QualifyDropTextSearchConfigurationStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + Assert(stmt->removeType == OBJECT_TSCONFIGURATION); + + List *qualifiedObjects = NIL; + List *objName = NIL; + + foreach_ptr(objName, stmt->objects) + { + char *schemaName = NULL; + char *tsconfigName = NULL; + DeconstructQualifiedName(objName, &schemaName, &tsconfigName); + + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(objName, false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + objName = list_make2(makeString(schemaName), + makeString(tsconfigName)); + } + + qualifiedObjects = lappend(qualifiedObjects, objName); + } + + stmt->objects = qualifiedObjects; +} + + +/* + * QualifyAlterTextSearchConfigurationStmt adds the schema name (if missing) to the name + * of the text search configurations, as well as the dictionaries referenced. 
+ */ +void +QualifyAlterTextSearchConfigurationStmt(Node *node) +{ + AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(stmt->cfgname, &schemaName, &objName); + + /* fully qualify the cfgname being altered */ + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(stmt->cfgname, false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->cfgname = list_make2(makeString(schemaName), + makeString(objName)); + } + + /* fully qualify the dicts */ + bool useNewDicts = false; + List *dicts = NULL; + List *dictName = NIL; + foreach_ptr(dictName, stmt->dicts) + { + DeconstructQualifiedName(dictName, &schemaName, &objName); + + /* fully qualify the cfgname being altered */ + if (!schemaName) + { + Oid dictOid = get_ts_dict_oid(dictName, false); + Oid namespaceOid = get_ts_dict_namespace(dictOid); + schemaName = get_namespace_name(namespaceOid); + + useNewDicts = true; + dictName = list_make2(makeString(schemaName), makeString(objName)); + } + + dicts = lappend(dicts, dictName); + } + + if (useNewDicts) + { + /* swap original dicts with the new list */ + stmt->dicts = dicts; + } + else + { + /* we don't use the new list, everything was already qualified, free-ing */ + list_free(dicts); + } +} + + +/* + * QualifyRenameTextSearchConfigurationStmt adds the schema name (if missing) to the + * configuration being renamed. The new name will kept be without schema name since this + * command cannot be used to change the schema of a configuration. 
+ */ +void +QualifyRenameTextSearchConfigurationStmt(Node *node) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSCONFIGURATION); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + /* fully qualify the cfgname being altered */ + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + +/* + * QualifyAlterTextSearchConfigurationSchemaStmt adds the schema name (if missing) for the + * text search being moved to a new schema. + */ +void +QualifyAlterTextSearchConfigurationSchemaStmt(Node *node) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + +/* + * QualifyTextSearchConfigurationCommentStmt adds the schema name (if missing) to the + * configuration name on which the comment is created. 
+ */ +void +QualifyTextSearchConfigurationCommentStmt(Node *node) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSCONFIGURATION); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + +/* + * QualifyAlterTextSearchConfigurationOwnerStmt adds the schema name (if missing) to the + * configuration for which the owner is changing. + */ +void +QualifyAlterTextSearchConfigurationOwnerStmt(Node *node) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + +/* + * get_ts_config_namespace returns the oid of the namespace which is housing the text + * search configuration identified by tsconfigOid. 
+ */ +static Oid +get_ts_config_namespace(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + + if (HeapTupleIsValid(tup)) + { + Form_pg_ts_config cfgform = (Form_pg_ts_config) GETSTRUCT(tup); + Oid namespaceOid = cfgform->cfgnamespace; + ReleaseSysCache(tup); + + return namespaceOid; + } + + return InvalidOid; +} + + +/* + * get_ts_dict_namespace returns the oid of the namespace which is housing the text + * search dictionary identified by tsdictOid. + */ +static Oid +get_ts_dict_namespace(Oid tsdictOid) +{ + HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid)); + + if (HeapTupleIsValid(tup)) + { + Form_pg_ts_dict cfgform = (Form_pg_ts_dict) GETSTRUCT(tup); + Oid namespaceOid = cfgform->dictnamespace; + ReleaseSysCache(tup); + + return namespaceOid; + } + + return InvalidOid; +} diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index a101d2968..28498e0f2 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -32,6 +32,7 @@ #include "distributed/distributed_planner.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_server_executor.h" +#include "distributed/relation_access_tracking.h" #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" #include "distributed/version_compat.h" @@ -81,6 +82,7 @@ int ExecutorLevel = 0; /* local function forward declarations */ static Relation StubRelation(TupleDesc tupleDescriptor); +static char * GetObjectTypeString(ObjectType objType); static bool AlterTableConstraintCheck(QueryDesc *queryDesc); static List * FindCitusCustomScanStates(PlanState *planState); static bool CitusCustomScanStateWalker(PlanState *planState, @@ -235,16 +237,17 @@ CitusExecutorRun(QueryDesc *queryDesc, * transactions. 
*/ CitusTableCacheFlushInvalidatedEntries(); - - /* - * Within a 2PC, when a function is delegated to a remote node, we pin - * the distribution argument as the shard key for all the SQL in the - * function's block. The restriction is imposed to not to access other - * nodes from the current node and violate the transactional integrity - * of the 2PC. Now that the query is ending, reset the shard key to NULL. - */ - ResetAllowedShardKeyValue(); + InTopLevelDelegatedFunctionCall = false; } + + /* + * Within a 2PC, when a function is delegated to a remote node, we pin + * the distribution argument as the shard key for all the SQL in the + * function's block. The restriction is imposed to not to access other + * nodes from the current node, and violate the transactional integrity + * of the 2PC. Now that the query is ending, reset the shard key to NULL. + */ + CheckAndResetAllowedShardKeyValueIfNeeded(); } PG_CATCH(); { @@ -258,13 +261,15 @@ CitusExecutorRun(QueryDesc *queryDesc, if (ExecutorLevel == 0 && PlannerLevel == 0) { - /* - * In case of an exception, reset the pinned shard-key, for more - * details see the function header. - */ - ResetAllowedShardKeyValue(); + InTopLevelDelegatedFunctionCall = false; } + /* + * In case of an exception, reset the pinned shard-key, for more + * details see the function header. + */ + CheckAndResetAllowedShardKeyValueIfNeeded(); + PG_RE_THROW(); } PG_END_TRY(); @@ -691,6 +696,103 @@ SetLocalMultiShardModifyModeToSequential() } +/* + * EnsureSequentialMode makes sure that the current transaction is already in + * sequential mode, or can still safely be put in sequential mode, it errors if that is + * not possible. The error contains information for the user to retry the transaction with + * sequential mode set from the beginning. + * + * Takes an ObjectType to use in the error/debug messages. 
+ */ +void +EnsureSequentialMode(ObjectType objType) +{ + char *objTypeString = GetObjectTypeString(objType); + + if (ParallelQueryExecutedInTransaction()) + { + ereport(ERROR, (errmsg("cannot run %s command because there was a " + "parallel operation on a distributed table in the " + "transaction", objTypeString), + errdetail("When running command on/for a distributed %s, Citus " + "needs to perform all operations over a single " + "connection per node to ensure consistency.", + objTypeString), + errhint("Try re-running the transaction with " + "\"SET LOCAL citus.multi_shard_modify_mode TO " + "\'sequential\';\""))); + } + + ereport(DEBUG1, (errmsg("switching to sequential query execution mode"), + errdetail( + "A command for a distributed %s is run. To make sure subsequent " + "commands see the %s correctly we need to make sure to " + "use only one connection for all future commands", + objTypeString, objTypeString))); + + SetLocalMultiShardModifyModeToSequential(); +} + + +/* + * GetObjectTypeString takes an ObjectType and returns the string version of it. + * We (for now) call this function only in EnsureSequentialMode, and use the returned + * string to generate error/debug messages. + * + * If GetObjectTypeString gets called with an ObjectType that is not in the switch + * statement, the function will return the string "object", and emit a debug message. + * In that case, make sure you've added the newly supported type to the switch statement. 
+ */ +static char * +GetObjectTypeString(ObjectType objType) +{ + switch (objType) + { + case OBJECT_COLLATION: + { + return "collation"; + } + + case OBJECT_DATABASE: + { + return "database"; + } + + case OBJECT_EXTENSION: + { + return "extension"; + } + + case OBJECT_FUNCTION: + { + return "function"; + } + + case OBJECT_SCHEMA: + { + return "schema"; + } + + case OBJECT_TSCONFIGURATION: + { + return "text search configuration"; + } + + case OBJECT_TYPE: + { + return "type"; + } + + default: + { + ereport(DEBUG1, (errmsg("unsupported object type"), + errdetail("Please add string conversion for the object."))); + return "object"; + } + } +} + + /* * AlterTableConstraintCheck returns if the given query is an ALTER TABLE * constraint check query. diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index a9a154242..2d1f67ad3 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -124,6 +124,7 @@ typedef struct ViewDependencyNode static List * GetRelationSequenceDependencyList(Oid relationId); static List * GetRelationTriggerFunctionDependencyList(Oid relationId); static List * GetRelationStatsSchemaDependencyList(Oid relationId); +static List * GetRelationIndicesDependencyList(Oid relationId); static DependencyDefinition * CreateObjectAddressDependencyDef(Oid classId, Oid objectId); static List * CreateObjectAddressDependencyDefList(Oid classId, List *objectIdList); static ObjectAddress DependencyDefinitionObjectAddress(DependencyDefinition *definition); @@ -155,6 +156,8 @@ static bool FollowAllSupportedDependencies(ObjectAddressCollector *collector, DependencyDefinition *definition); static bool FollowNewSupportedDependencies(ObjectAddressCollector *collector, DependencyDefinition *definition); +static bool FollowAllDependencies(ObjectAddressCollector *collector, + DependencyDefinition *definition); static void 
ApplyAddToDependencyList(ObjectAddressCollector *collector, DependencyDefinition *definition); static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector, @@ -211,15 +214,42 @@ GetDependenciesForObject(const ObjectAddress *target) /* - * GetAllDependenciesForObject returns a list of all the ObjectAddresses to be - * created in order before the target object could safely be created on a - * worker. As a caller, you probably need GetDependenciesForObject() which - * eliminates already distributed objects from the returned list. + * GetAllSupportedDependenciesForObject returns a list of all the ObjectAddresses to be + * created in order before the target object could safely be created on a worker, if all + * dependent objects are distributable. As a caller, you probably need to use + * GetDependenciesForObject() which eliminates already distributed objects from the returned + * list. * * Some of the object might already be created on a worker. It should be created * in an idempotent way. */ List * +GetAllSupportedDependenciesForObject(const ObjectAddress *target) +{ + ObjectAddressCollector collector = { 0 }; + InitObjectAddressCollector(&collector); + + RecurseObjectDependencies(*target, + &ExpandCitusSupportedTypes, + &FollowAllSupportedDependencies, + &ApplyAddToDependencyList, + &collector); + + return collector.dependencyList; +} + + +/* + * GetAllDependenciesForObject returns a list of all the dependent objects of the given + * object irrespective of whether the dependent object is supported by Citus or not, if + * the object can be found as dependency with RecurseObjectDependencies and + * ExpandCitusSupportedTypes. + * + * This function will be used to provide meaningful error messages if any dependent + * object for a given object is not supported. If you want to create dependencies for + * an object, you probably need to use GetDependenciesForObject(). 
+ */ +List * GetAllDependenciesForObject(const ObjectAddress *target) { ObjectAddressCollector collector = { 0 }; @@ -227,7 +257,7 @@ GetAllDependenciesForObject(const ObjectAddress *target) RecurseObjectDependencies(*target, &ExpandCitusSupportedTypes, - &FollowAllSupportedDependencies, + &FollowAllDependencies, &ApplyAddToDependencyList, &collector); @@ -639,6 +669,11 @@ SupportedDependencyByCitus(const ObjectAddress *address) return true; } + case OCLASS_TSCONFIG: + { + return true; + } + case OCLASS_TYPE: { switch (get_typtype(address->objectId)) @@ -686,7 +721,8 @@ SupportedDependencyByCitus(const ObjectAddress *address) relKind == RELKIND_RELATION || relKind == RELKIND_PARTITIONED_TABLE || relKind == RELKIND_FOREIGN_TABLE || - relKind == RELKIND_SEQUENCE) + relKind == RELKIND_SEQUENCE || + relKind == RELKIND_INDEX) { return true; } @@ -896,10 +932,61 @@ FollowAllSupportedDependencies(ObjectAddressCollector *collector, /* - * ApplyAddToDependencyList is an apply function for RecurseObjectDependencies that will collect - * all the ObjectAddresses for pg_depend entries to the context. The context here is - * assumed to be a (ObjectAddressCollector *) to the location where all ObjectAddresses - * will be collected. + * FollowAllDependencies applies filters on pg_depend entries to follow the dependency + * tree of objects in depth first order. We will visit all objects irrespective of it is + * supported by Citus or not. + */ +static bool +FollowAllDependencies(ObjectAddressCollector *collector, + DependencyDefinition *definition) +{ + if (definition->mode == DependencyPgDepend) + { + /* + * For dependencies found in pg_depend: + * + * Follow only normal and extension dependencies. The latter is used to reach the + * extensions, the objects that directly depend on the extension are eliminated + * during the "apply" phase. + * + * Other dependencies are internal dependencies and managed by postgres. 
+		 */
+		if (definition->data.pg_depend.deptype != DEPENDENCY_NORMAL &&
+			definition->data.pg_depend.deptype != DEPENDENCY_EXTENSION)
+		{
+			return false;
+		}
+	}
+
+	/* rest of the tests are to see if we want to follow the actual dependency */
+	ObjectAddress address = DependencyDefinitionObjectAddress(definition);
+
+	/*
+	 * If the object is already in our dependency list we do not have to follow any
+	 * further
+	 */
+	if (IsObjectAddressCollected(address, collector))
+	{
+		return false;
+	}
+
+	if (CitusExtensionObject(&address))
+	{
+		/* following citus extension could complicate role management */
+		return false;
+	}
+
+	return true;
+}
+
+
+/*
+ * ApplyAddToDependencyList is an apply function for RecurseObjectDependencies that will
+ * collect all the ObjectAddresses for pg_depend entries to the context, except when it
+ * is an extension-owned one.
+ *
+ * The context here is assumed to be a (ObjectAddressCollector *) to the location where
+ * all ObjectAddresses will be collected.
  */
 static void
 ApplyAddToDependencyList(ObjectAddressCollector *collector,
@@ -1005,6 +1092,17 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
 
 			List *sequenceDependencyList = GetRelationSequenceDependencyList(relationId);
 			result = list_concat(result, sequenceDependencyList);
+
+			/*
+			 * Tables could have indexes. Indexes themselves could have dependencies that
+			 * need to be propagated. e.g. TEXT SEARCH CONFIGURATIONS. Here we add the
+			 * addresses of all indices to the list of objects to visit, so as to make sure
+			 * we create all objects required by the indices before we create the table
+			 * including indices.
+ */ + + List *indexDependencyList = GetRelationIndicesDependencyList(relationId); + result = list_concat(result, indexDependencyList); } default: @@ -1048,6 +1146,28 @@ GetRelationStatsSchemaDependencyList(Oid relationId) } +/* + * CollectIndexOids implements PGIndexProcessor to create a list of all index oids + */ +static void +CollectIndexOids(Form_pg_index formPgIndex, List **oids, int flags) +{ + *oids = lappend_oid(*oids, formPgIndex->indexrelid); +} + + +/* + * GetRelationIndicesDependencyList creates a list of ObjectAddressDependencies for the + * indexes on a given relation. + */ +static List * +GetRelationIndicesDependencyList(Oid relationId) +{ + List *indexIds = ExecuteFunctionOnEachTableIndex(relationId, CollectIndexOids, 0); + return CreateObjectAddressDependencyDefList(RelationRelationId, indexIds); +} + + /* * GetRelationTriggerFunctionDependencyList returns a list of DependencyDefinition * objects for the functions that triggers of the relation with relationId depends. diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index ba67a073b..41b3b372d 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -405,6 +405,21 @@ GetDistributedObjectAddressList(void) } +/* + * GetRoleSpecObjectForUser creates a RoleSpec object for the given roleOid. + */ +RoleSpec * +GetRoleSpecObjectForUser(Oid roleOid) +{ + RoleSpec *roleSpec = makeNode(RoleSpec); + roleSpec->roletype = OidIsValid(roleOid) ? ROLESPEC_CSTRING : ROLESPEC_PUBLIC; + roleSpec->rolename = OidIsValid(roleOid) ? 
GetUserNameFromId(roleOid, false) : NULL; + roleSpec->location = -1; + + return roleSpec; +} + + /* * UpdateDistributedObjectColocationId gets an old and a new colocationId * and updates the colocationId of all tuples in citus.pg_dist_object which diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index ac6db8c61..161a56942 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -201,6 +201,9 @@ static bool workerNodeHashValid = false; /* default value is -1, for coordinator it's 0 and for worker nodes > 0 */ static int32 LocalGroupId = -1; +/* default value is -1, increases with every node starting from 1 */ +static int32 LocalNodeId = -1; + /* built first time through in InitializeDistCache */ static ScanKeyData DistPartitionScanKey[1]; static ScanKeyData DistShardScanKey[1]; @@ -3618,6 +3621,62 @@ GetLocalGroupId(void) } +/* + * GetNodeId returns the node identifier of the local node. + */ +int32 +GetLocalNodeId(void) +{ + InitializeCaches(); + + /* + * Already set the node id, no need to read the heap again. + */ + if (LocalNodeId != -1) + { + return LocalNodeId; + } + + uint32 nodeId = -1; + + int32 localGroupId = GetLocalGroupId(); + + bool includeNodesFromOtherClusters = false; + List *workerNodeList = ReadDistNode(includeNodesFromOtherClusters); + + WorkerNode *workerNode = NULL; + foreach_ptr(workerNode, workerNodeList) + { + if (workerNode->groupId == localGroupId && + workerNode->isActive) + { + nodeId = workerNode->nodeId; + break; + } + } + + /* + * nodeId is -1 if we cannot find an active node whose group id is + * localGroupId in pg_dist_node. + */ + if (nodeId == -1) + { + elog(DEBUG4, "there is no active node with group id '%d' on pg_dist_node", + localGroupId); + + /* + * This is expected if the coordinator is not added to the metadata. 
+ * We'll return 0 for this case and for all cases so views can function almost normally + */ + nodeId = 0; + } + + LocalNodeId = nodeId; + + return nodeId; +} + + /* * RegisterLocalGroupIdCacheCallbacks registers the callbacks required to * maintain LocalGroupId at a consistent value. It's separate from @@ -4019,6 +4078,7 @@ InvalidateMetadataSystemCache(void) memset(&MetadataCache, 0, sizeof(MetadataCache)); workerNodeHashValid = false; LocalGroupId = -1; + LocalNodeId = -1; } @@ -4110,6 +4170,7 @@ InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId) if (relationId == InvalidOid || relationId == MetadataCache.distNodeRelationId) { workerNodeHashValid = false; + LocalNodeId = -1; } } diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index ec5ffb3f9..e43bf4e43 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -102,7 +102,6 @@ static GrantStmt * GenerateGrantStmtForRights(ObjectType objectType, bool withGrantOption); static List * GetObjectsForGrantStmt(ObjectType objectType, Oid objectId); static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission); -static RoleSpec * GetRoleSpecObjectForGrantStmt(Oid roleOid); static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid, AclItem *aclItem); static void SetLocalEnableMetadataSync(bool state); @@ -1782,7 +1781,7 @@ GenerateGrantStmtForRights(ObjectType objectType, stmt->objtype = objectType; stmt->objects = GetObjectsForGrantStmt(objectType, objectId); stmt->privileges = list_make1(GetAccessPrivObjectForGrantStmt(permission)); - stmt->grantees = list_make1(GetRoleSpecObjectForGrantStmt(roleOid)); + stmt->grantees = list_make1(GetRoleSpecObjectForUser(roleOid)); stmt->grant_option = withGrantOption; return stmt; @@ -1831,22 +1830,6 @@ GetAccessPrivObjectForGrantStmt(char *permission) } -/* - * GetRoleSpecObjectForGrantStmt creates a RoleSpec object for the 
given roleOid. - * It will be used when creating GrantStmt objects. - */ -static RoleSpec * -GetRoleSpecObjectForGrantStmt(Oid roleOid) -{ - RoleSpec *roleSpec = makeNode(RoleSpec); - roleSpec->roletype = OidIsValid(roleOid) ? ROLESPEC_CSTRING : ROLESPEC_PUBLIC; - roleSpec->rolename = OidIsValid(roleOid) ? GetUserNameFromId(roleOid, false) : NULL; - roleSpec->location = -1; - - return roleSpec; -} - - /* * SetLocalEnableMetadataSync sets the enable_metadata_sync locally */ diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c index c4da6764a..c2ec4db3a 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c +++ b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c @@ -410,6 +410,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, case OBJECT_FUNCTION: case OBJECT_PROCEDURE: case OBJECT_AGGREGATE: + case OBJECT_TSCONFIGURATION: case OBJECT_TYPE: case OBJECT_FOREIGN_SERVER: case OBJECT_SEQUENCE: diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index 716c5357c..3ca22f3b1 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -313,16 +313,6 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext) return NULL; } - if (fromFuncExpr && !IsMultiStatementTransaction()) - { - /* - * For now, let's not push the function from the FROM clause unless it's in a - * multistatement transaction with the forceDelegation flag ON. 
- */ - ereport(DEBUG2, (errmsg("function from the FROM clause is not pushed"))); - return NULL; - } - /* dissuade the planner from trying a generic plan with parameters */ (void) expression_tree_walker((Node *) funcExpr->args, contain_param_walker, &walkerParamContext); @@ -733,6 +723,16 @@ FunctionInFromClause(List *fromlist, Query *query) static void EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId) { + /* + * If the distribution key is already set, the key is fixed until + * the force-delegation function returns. All nested force-delegation + * functions must use the same key. + */ + if (AllowedDistributionColumnValue.isActive) + { + return; + } + /* * The saved distribution argument need to persist through the life * of the query, both during the planning (where we save) and execution @@ -744,6 +744,7 @@ EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId) colocationId)); AllowedDistributionColumnValue.distributionColumnValue = copyObject(distArgument); AllowedDistributionColumnValue.colocationId = colocationId; + AllowedDistributionColumnValue.executorLevel = ExecutorLevel; AllowedDistributionColumnValue.isActive = true; MemoryContextSwitchTo(oldcontext); } @@ -757,15 +758,22 @@ EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId) * the 2PC. Reset the distribution argument value once the function ends. */ void -ResetAllowedShardKeyValue(void) +CheckAndResetAllowedShardKeyValueIfNeeded(void) { - if (AllowedDistributionColumnValue.isActive) + /* + * If no distribution argument is pinned or the pinned argument was + * set by a nested-executor from upper level, nothing to reset. 
+ */ + if (!AllowedDistributionColumnValue.isActive || + ExecutorLevel > AllowedDistributionColumnValue.executorLevel) { - pfree(AllowedDistributionColumnValue.distributionColumnValue); - AllowedDistributionColumnValue.isActive = false; + return; } - InTopLevelDelegatedFunctionCall = false; + Assert(ExecutorLevel == AllowedDistributionColumnValue.executorLevel); + pfree(AllowedDistributionColumnValue.distributionColumnValue); + AllowedDistributionColumnValue.isActive = false; + AllowedDistributionColumnValue.executorLevel = 0; } @@ -777,6 +785,7 @@ bool IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId) { Assert(AllowedDistributionColumnValue.isActive); + Assert(ExecutorLevel > AllowedDistributionColumnValue.executorLevel); ereport(DEBUG4, errmsg("Comparing saved:%s with Shard key: %s colocationid:%d:%d", pretty_format_node_dump( diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 16769393c..abc11cf39 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -460,6 +460,7 @@ StartupCitusBackend(void) InitializeMaintenanceDaemonBackend(); InitializeBackendData(); RegisterConnectionCleanup(); + AssignGlobalPID(); } @@ -514,6 +515,9 @@ CitusCleanupConnectionsAtExit(int code, Datum arg) * are already given away. 
 	 */
 	DeallocateReservedConnections();
+
+	/* we don't want any monitoring view/udf to show already exited backends */
+	UnSetGlobalPID();
 }
 
 
@@ -1985,7 +1989,6 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source)
 {
 	/* this array _must_ be kept in an order usable by bsearch */
 	const char *allowedConninfoKeywords[] = {
-		"application_name",
 		"connect_timeout",
 #if defined(ENABLE_GSS) && defined(ENABLE_SSPI)
 		"gsslib",
diff --git a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql
index 9c23b31c2..d81b2c719 100644
--- a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql
+++ b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql
@@ -14,6 +14,26 @@
 #include "udfs/worker_drop_sequence_dependency/11.0-1.sql"
 #include "udfs/worker_drop_shell_table/11.0-1.sql"
 
+#include "udfs/get_all_active_transactions/11.0-1.sql"
+#include "udfs/get_global_active_transactions/11.0-1.sql"
+
+#include "udfs/citus_worker_stat_activity/11.0-1.sql"
+#include "udfs/worker_create_or_replace_object/11.0-1.sql"
+
+CREATE VIEW citus.citus_worker_stat_activity AS
+SELECT * FROM pg_catalog.citus_worker_stat_activity();
+ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC;
+
+#include "udfs/citus_dist_stat_activity/11.0-1.sql"
+
+CREATE VIEW citus.citus_dist_stat_activity AS
+SELECT * FROM pg_catalog.citus_dist_stat_activity();
+ALTER VIEW citus.citus_dist_stat_activity SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC;
+
+-- we have to recreate this view because we recreated citus_dist_stat_activity, which this view depends on
+#include "udfs/citus_lock_waits/11.0-1.sql"
 
 DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text);
 DROP FUNCTION pg_catalog.master_get_table_metadata(text);
diff --git a/src/backend/distributed/sql/citus--8.3-1--9.0-1.sql b/src/backend/distributed/sql/citus--8.3-1--9.0-1.sql
index dccc66d16..359360981 100644 --- a/src/backend/distributed/sql/citus--8.3-1--9.0-1.sql +++ b/src/backend/distributed/sql/citus--8.3-1--9.0-1.sql @@ -21,13 +21,7 @@ ALTER FUNCTION citus.restore_isolation_tester_func SET SCHEMA citus_internal; GRANT USAGE ON SCHEMA citus TO public; #include "udfs/pg_dist_shard_placement_trigger_func/9.0-1.sql" - -CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text) - RETURNS bool - LANGUAGE C STRICT - AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$; -COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text) - IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object'; +#include "udfs/worker_create_or_replace_object/9.0-1.sql" CREATE OR REPLACE FUNCTION pg_catalog.master_unmark_object_distributed(classid oid, objid oid, objsubid int) RETURNS void diff --git a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql index 6fbe07ea3..204548c51 100644 --- a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql @@ -84,3 +84,131 @@ DROP FUNCTION pg_catalog.citus_shards_on_worker(); DROP FUNCTION pg_catalog.citus_shard_indexes_on_worker(); #include "../udfs/create_distributed_function/9.0-1.sql" ALTER TABLE citus.pg_dist_object DROP COLUMN force_delegation; + + +SET search_path = 'pg_catalog'; + + +DROP FUNCTION IF EXISTS get_all_active_transactions(); + + +CREATE OR REPLACE FUNCTION get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp timestamptz) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$get_all_active_transactions$$; + +COMMENT ON FUNCTION 
get_all_active_transactions(OUT datid oid, OUT datname text, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp timestamptz) +IS 'returns distributed transaction ids of active distributed transactions'; + +DROP FUNCTION IF EXISTS get_global_active_transactions(); + +CREATE FUNCTION get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz) + RETURNS SETOF RECORD + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$get_global_active_transactions$$; + COMMENT ON FUNCTION get_global_active_transactions(OUT database_id oid, OUT process_id int, OUT initiator_node_identifier int4, OUT transaction_number int8, OUT transaction_stamp timestamptz) + IS 'returns distributed transaction ids of active distributed transactions from each node of the cluster'; + +RESET search_path; + +DROP FUNCTION pg_catalog.citus_dist_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$citus_dist_stat_activity$$; + +COMMENT ON FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT 
distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) +IS 'returns distributed transaction activity on distributed tables'; + +CREATE VIEW citus.citus_dist_stat_activity AS +SELECT * FROM pg_catalog.citus_dist_stat_activity(); +ALTER VIEW citus.citus_dist_stat_activity SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC; + +SET search_path = 'pg_catalog'; + +-- we have to recreate this view because we drop citus_dist_stat_activity that this view depends +CREATE VIEW citus.citus_lock_waits AS + +WITH +citus_dist_stat_activity AS +( + SELECT * FROM citus_dist_stat_activity +), +unique_global_wait_edges AS +( + SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() +), +citus_dist_stat_activity_with_node_id AS +( + SELECT + citus_dist_stat_activity.*, (CASE citus_dist_stat_activity.distributed_query_host_name WHEN 'coordinator_host' THEN 0 ELSE pg_dist_node.nodeid END) as initiator_node_id + FROM + citus_dist_stat_activity LEFT JOIN pg_dist_node + ON + citus_dist_stat_activity.distributed_query_host_name = pg_dist_node.nodename AND + citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport +) +SELECT + waiting.pid AS waiting_pid, + blocking.pid AS blocking_pid, + waiting.query AS blocked_statement, + blocking.query AS current_statement_in_blocking_process, + waiting.initiator_node_id AS waiting_node_id, + blocking.initiator_node_id AS 
blocking_node_id, + waiting.distributed_query_host_name AS waiting_node_name, + blocking.distributed_query_host_name AS blocking_node_name, + waiting.distributed_query_host_port AS waiting_node_port, + blocking.distributed_query_host_port AS blocking_node_port +FROM + unique_global_wait_edges +JOIN + citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) +JOIN + citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +DROP FUNCTION citus_worker_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$citus_worker_stat_activity$$; + +COMMENT ON FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename 
name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) +IS 'returns distributed transaction activity on shards of distributed tables'; + +CREATE VIEW citus.citus_worker_stat_activity AS +SELECT * FROM pg_catalog.citus_worker_stat_activity(); +ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC; + +DROP FUNCTION pg_catalog.worker_create_or_replace_object(text[]); +#include "../udfs/worker_create_or_replace_object/9.0-1.sql" + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_dist_stat_activity/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_dist_stat_activity/11.0-1.sql new file mode 100644 index 000000000..7b38f627d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_dist_stat_activity/11.0-1.sql @@ -0,0 +1,19 @@ +DROP FUNCTION IF EXISTS pg_catalog.citus_dist_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 
'MODULE_PATHNAME', +$$citus_dist_stat_activity$$; + +COMMENT ON FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +IS 'returns distributed transaction activity on distributed tables'; diff --git a/src/backend/distributed/sql/udfs/citus_dist_stat_activity/latest.sql b/src/backend/distributed/sql/udfs/citus_dist_stat_activity/latest.sql new file mode 100644 index 000000000..7b38f627d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_dist_stat_activity/latest.sql @@ -0,0 +1,19 @@ +DROP FUNCTION IF EXISTS pg_catalog.citus_dist_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 
'MODULE_PATHNAME', +$$citus_dist_stat_activity$$; + +COMMENT ON FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +IS 'returns distributed transaction activity on distributed tables'; diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql new file mode 100644 index 000000000..2ae40374a --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql @@ -0,0 +1,44 @@ +SET search_path = 'pg_catalog'; + +CREATE VIEW citus.citus_lock_waits AS +WITH +citus_dist_stat_activity AS +( + SELECT * FROM citus_dist_stat_activity +), +unique_global_wait_edges AS +( + SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() +), +citus_dist_stat_activity_with_node_id AS +( + SELECT + citus_dist_stat_activity.*, (CASE citus_dist_stat_activity.distributed_query_host_name WHEN 'coordinator_host' THEN 0 ELSE pg_dist_node.nodeid END) as initiator_node_id + FROM + citus_dist_stat_activity LEFT JOIN pg_dist_node + ON + citus_dist_stat_activity.distributed_query_host_name = pg_dist_node.nodename AND + citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport +) +SELECT + waiting.pid AS waiting_pid, + blocking.pid AS blocking_pid, + waiting.query AS 
blocked_statement, + blocking.query AS current_statement_in_blocking_process, + waiting.initiator_node_id AS waiting_node_id, + blocking.initiator_node_id AS blocking_node_id, + waiting.distributed_query_host_name AS waiting_node_name, + blocking.distributed_query_host_name AS blocking_node_name, + waiting.distributed_query_host_port AS waiting_node_port, + blocking.distributed_query_host_port AS blocking_node_port +FROM + unique_global_wait_edges +JOIN + citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) +JOIN + citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql new file mode 100644 index 000000000..2ae40374a --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql @@ -0,0 +1,44 @@ +SET search_path = 'pg_catalog'; + +CREATE VIEW citus.citus_lock_waits AS +WITH +citus_dist_stat_activity AS +( + SELECT * FROM citus_dist_stat_activity +), +unique_global_wait_edges AS +( + SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() +), +citus_dist_stat_activity_with_node_id AS +( + SELECT + citus_dist_stat_activity.*, (CASE citus_dist_stat_activity.distributed_query_host_name WHEN 'coordinator_host' THEN 0 ELSE pg_dist_node.nodeid END) as initiator_node_id + FROM + citus_dist_stat_activity LEFT JOIN pg_dist_node + ON + citus_dist_stat_activity.distributed_query_host_name = pg_dist_node.nodename 
AND + citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport +) +SELECT + waiting.pid AS waiting_pid, + blocking.pid AS blocking_pid, + waiting.query AS blocked_statement, + blocking.query AS current_statement_in_blocking_process, + waiting.initiator_node_id AS waiting_node_id, + blocking.initiator_node_id AS blocking_node_id, + waiting.distributed_query_host_name AS waiting_node_name, + blocking.distributed_query_host_name AS blocking_node_name, + waiting.distributed_query_host_port AS waiting_node_port, + blocking.distributed_query_host_port AS blocking_node_port +FROM + unique_global_wait_edges +JOIN + citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) +JOIN + citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_worker_stat_activity/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_worker_stat_activity/11.0-1.sql new file mode 100644 index 000000000..6f585b2e8 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_worker_stat_activity/11.0-1.sql @@ -0,0 +1,19 @@ +DROP FUNCTION IF EXISTS pg_catalog.citus_worker_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT 
client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$citus_worker_stat_activity$$; + +COMMENT ON FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +IS 'returns distributed transaction activity on shards of distributed tables'; diff --git a/src/backend/distributed/sql/udfs/citus_worker_stat_activity/latest.sql b/src/backend/distributed/sql/udfs/citus_worker_stat_activity/latest.sql new file mode 100644 index 000000000..6f585b2e8 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_worker_stat_activity/latest.sql @@ -0,0 +1,19 @@ +DROP FUNCTION IF EXISTS pg_catalog.citus_worker_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname 
TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$citus_worker_stat_activity$$; + +COMMENT ON FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +IS 'returns distributed transaction activity on shards of distributed tables'; diff --git a/src/backend/distributed/sql/udfs/get_all_active_transactions/11.0-1.sql b/src/backend/distributed/sql/udfs/get_all_active_transactions/11.0-1.sql new file mode 100644 index 000000000..636abb0dd --- /dev/null +++ b/src/backend/distributed/sql/udfs/get_all_active_transactions/11.0-1.sql @@ -0,0 +1,12 @@ +DROP FUNCTION IF EXISTS pg_catalog.get_all_active_transactions(); +CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, + OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, + OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$get_all_active_transactions$$; + +COMMENT ON FUNCTION 
pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, + OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, + OUT global_pid int8) +IS 'returns transaction information for all Citus initiated transactions'; diff --git a/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql b/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql new file mode 100644 index 000000000..636abb0dd --- /dev/null +++ b/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql @@ -0,0 +1,12 @@ +DROP FUNCTION IF EXISTS pg_catalog.get_all_active_transactions(); +CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, + OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, + OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$get_all_active_transactions$$; + +COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, + OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, + OUT global_pid int8) +IS 'returns transaction information for all Citus initiated transactions'; diff --git a/src/backend/distributed/sql/udfs/get_global_active_transactions/11.0-1.sql b/src/backend/distributed/sql/udfs/get_global_active_transactions/11.0-1.sql new file mode 100644 index 000000000..c0831b521 --- /dev/null +++ b/src/backend/distributed/sql/udfs/get_global_active_transactions/11.0-1.sql @@ -0,0 +1,9 @@ +DROP FUNCTION IF EXISTS pg_catalog.get_global_active_transactions(); +CREATE OR REPLACE FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp
timestamptz, OUT global_pid int8) + RETURNS SETOF RECORD + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$get_global_active_transactions$$; +COMMENT ON FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8) + IS 'returns transaction information for all Citus initiated transactions from each node of the cluster'; diff --git a/src/backend/distributed/sql/udfs/get_global_active_transactions/latest.sql b/src/backend/distributed/sql/udfs/get_global_active_transactions/latest.sql new file mode 100644 index 000000000..c0831b521 --- /dev/null +++ b/src/backend/distributed/sql/udfs/get_global_active_transactions/latest.sql @@ -0,0 +1,9 @@ +DROP FUNCTION IF EXISTS pg_catalog.get_global_active_transactions(); +CREATE OR REPLACE FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8) + RETURNS SETOF RECORD + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$get_global_active_transactions$$; +COMMENT ON FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8) + IS 'returns transaction information for all Citus initiated transactions from each node of the cluster'; diff --git a/src/backend/distributed/sql/udfs/worker_create_or_replace_object/11.0-1.sql b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/11.0-1.sql new file mode 100644 index 000000000..d9e21a9b2 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/11.0-1.sql @@ -0,0 +1,15 @@ +CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement 
text) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$; + +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text) + IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object'; + +CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statements text[]) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object_array$$; + +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statements text[]) + IS 'takes a list of sql statements, before executing these it will check if the object already exists in that exact state otherwise replaces that named object with the new object'; diff --git a/src/backend/distributed/sql/udfs/worker_create_or_replace_object/9.0-1.sql b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/9.0-1.sql new file mode 100644 index 000000000..d4ab612f0 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/9.0-1.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$; +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text) + IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object'; diff --git a/src/backend/distributed/sql/udfs/worker_create_or_replace_object/latest.sql b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/latest.sql new file mode 100644 index 000000000..d9e21a9b2 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/latest.sql @@ -0,0 +1,15 @@ +CREATE OR REPLACE FUNCTION
pg_catalog.worker_create_or_replace_object(statement text) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$; + +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text) + IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object'; + +CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statements text[]) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object_array$$; + +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statements text[]) + IS 'takes a list of sql statements, before executing these it will check if the object already exists in that exact state otherwise replaces that named object with the new object'; diff --git a/src/backend/distributed/test/dependency.c b/src/backend/distributed/test/dependency.c index 79ad1c139..82e818b8c 100644 --- a/src/backend/distributed/test/dependency.c +++ b/src/backend/distributed/test/dependency.c @@ -47,7 +47,7 @@ citus_get_all_dependencies_for_object(PG_FUNCTION_ARGS) ObjectAddress address = { 0 }; ObjectAddressSubSet(address, classid, objid, objsubid); - List *dependencies = GetAllDependenciesForObject(&address); + List *dependencies = GetAllSupportedDependenciesForObject(&address); ObjectAddress *dependency = NULL; foreach_ptr(dependency, dependencies) { diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index c76a80460..bc54e1da9 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -15,6 +15,9 @@ #include "distributed/pg_version_constants.h" #include "miscadmin.h" +#include "unistd.h" + +#include "safe_lib.h" #include "funcapi.h" #include "access/htup_details.h" @@ -43,7 +46,7 @@ #define GET_ACTIVE_TRANSACTION_QUERY "SELECT * FROM
get_all_active_transactions();" -#define ACTIVE_TRANSACTION_COLUMN_COUNT 6 +#define ACTIVE_TRANSACTION_COLUMN_COUNT 7 /* * Each backend's data reside in the shared memory @@ -78,6 +81,7 @@ typedef struct BackendManagementShmemData static void StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescriptor); +static uint64 GenerateGlobalPID(void); static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static BackendManagementShmemData *backendManagementShmemData = NULL; @@ -315,6 +319,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS) values[3] = ParseBoolField(result, rowIndex, 3); values[4] = ParseIntField(result, rowIndex, 4); values[5] = ParseTimestampTzField(result, rowIndex, 5); + values[6] = ParseIntField(result, rowIndex, 6); tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls); } @@ -384,8 +389,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto SpinLockAcquire(¤tBackend->mutex); - /* we're only interested in backends initiated by Citus */ - if (currentBackend->citusBackend.initiatorNodeIdentifier < 0) + if (currentBackend->globalPID == INVALID_CITUS_INTERNAL_BACKEND_GPID) { SpinLockRelease(¤tBackend->mutex); continue; @@ -427,6 +431,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto values[3] = !coordinatorOriginatedQuery; values[4] = UInt64GetDatum(transactionNumber); values[5] = TimestampTzGetDatum(transactionIdTimestamp); + values[6] = UInt64GetDatum(currentBackend->globalPID); tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls); @@ -631,6 +636,7 @@ InitializeBackendData(void) /* zero out the backend data */ UnSetDistributedTransactionId(); + UnSetGlobalPID(); UnlockBackendSharedMemory(); } @@ -664,6 +670,24 @@ UnSetDistributedTransactionId(void) } +/* + * UnSetGlobalPID resets the global pid for the current backend. 
+ */ +void +UnSetGlobalPID(void) +{ + /* backend does not exist if the extension is not created */ + if (MyBackendData) + { + SpinLockAcquire(&MyBackendData->mutex); + + MyBackendData->globalPID = 0; + + SpinLockRelease(&MyBackendData->mutex); + } +} + + /* * LockBackendSharedMemory is a simple wrapper around LWLockAcquire on the * shared memory lock. @@ -780,6 +804,109 @@ MarkCitusInitiatedCoordinatorBackend(void) } +/* + * AssignGlobalPID assigns a global process id for the current backend. + * If this is a Citus initiated backend, which means it is distributed part of a distributed + * query, then this function assigns the global pid extracted from the application name. + * If not, this function assigns a new generated global pid. + */ +void +AssignGlobalPID(void) +{ + uint64 globalPID = INVALID_CITUS_INTERNAL_BACKEND_GPID; + + if (!IsCitusInternalBackend()) + { + globalPID = GenerateGlobalPID(); + } + else + { + globalPID = ExtractGlobalPID(application_name); + } + + SpinLockAcquire(&MyBackendData->mutex); + MyBackendData->globalPID = globalPID; + SpinLockRelease(&MyBackendData->mutex); +} + + +/* + * GetGlobalPID returns the global process id of the current backend. + */ +uint64 +GetGlobalPID(void) +{ + uint64 globalPID = INVALID_CITUS_INTERNAL_BACKEND_GPID; + + if (MyBackendData) + { + SpinLockAcquire(&MyBackendData->mutex); + globalPID = MyBackendData->globalPID; + SpinLockRelease(&MyBackendData->mutex); + } + + return globalPID; +} + + +/* + * GenerateGlobalPID generates the global process id for the current backend. + */ +static uint64 +GenerateGlobalPID(void) +{ + /* + * We try to create a human readable global pid that consists of node id and process id. + * By multiplying node id with 10^10 and adding pid we generate a number where the smallest + * 10 digit represent the pid and the remaining digits are the node id. + * + * Both node id and pid are 32 bit. We use 10^10 to fit all possible pids. Some very large + * node ids might cause overflow. 
But even for the applications that scale around 50 nodes every + * day it'd take about 100K years. So we are not worried. + */ + return (((uint64) GetLocalNodeId()) * 10000000000) + getpid(); +} + + +/* + * ExtractGlobalPID extracts the global process id from the application name and returns it; + * if the application name is not compatible with Citus' application names, returns 0. + */ +uint64 +ExtractGlobalPID(char *applicationName) +{ + /* does application name exist */ + if (!applicationName) + { + return INVALID_CITUS_INTERNAL_BACKEND_GPID; + } + + /* we create our own copy of application name in case the original changes */ + char *applicationNameCopy = pstrdup(applicationName); + + uint64 prefixLength = strlen(CITUS_APPLICATION_NAME_PREFIX); + + /* does application name start with Citus's application name prefix */ + if (strncmp(applicationNameCopy, CITUS_APPLICATION_NAME_PREFIX, prefixLength) != 0) + { + return INVALID_CITUS_INTERNAL_BACKEND_GPID; + } + + /* are the remaining characters of the application name numbers */ + uint64 numberOfRemainingChars = strlen(applicationNameCopy) - prefixLength; + if (numberOfRemainingChars <= 0 || + !strisdigit_s(applicationNameCopy + prefixLength, numberOfRemainingChars)) + { + return INVALID_CITUS_INTERNAL_BACKEND_GPID; + } + + char *globalPIDString = &applicationNameCopy[prefixLength]; + uint64 globalPID = strtoul(globalPIDString, NULL, 10); + + return globalPID; +} + + +/* + * CurrentDistributedTransactionNumber returns the transaction number of the + * current distributed transaction.
The caller must make sure a distributed diff --git a/src/backend/distributed/transaction/citus_dist_stat_activity.c b/src/backend/distributed/transaction/citus_dist_stat_activity.c index d85959925..3fb15295f 100644 --- a/src/backend/distributed/transaction/citus_dist_stat_activity.c +++ b/src/backend/distributed/transaction/citus_dist_stat_activity.c @@ -108,7 +108,7 @@ * showing the initiator_node_id we expand it to initiator_node_host and * initiator_node_port. */ -#define CITUS_DIST_STAT_ACTIVITY_QUERY_COLS 23 +#define CITUS_DIST_STAT_ACTIVITY_QUERY_COLS 24 #define CITUS_DIST_STAT_ADDITIONAL_COLS 3 #define CITUS_DIST_STAT_ACTIVITY_COLS \ CITUS_DIST_STAT_ACTIVITY_QUERY_COLS + CITUS_DIST_STAT_ADDITIONAL_COLS @@ -147,14 +147,20 @@ SELECT \ pg_stat_activity.backend_xid, \ pg_stat_activity.backend_xmin, \ pg_stat_activity.query, \ - pg_stat_activity.backend_type \ + pg_stat_activity.backend_type, \ + dist_txs.global_pid \ FROM \ pg_stat_activity \ INNER JOIN \ - get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \ + get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_pid) \ ON pg_stat_activity.pid = dist_txs.process_id \ WHERE \ - dist_txs.worker_query = false;" + backend_type = 'client backend' \ + AND \ + pg_stat_activity.query NOT ILIKE '%stat_activity%' \ + AND \ + pg_stat_activity.application_name NOT SIMILAR TO 'citus_internal gpid=\\d+'; \ +" #define CITUS_WORKER_STAT_ACTIVITY_QUERY \ "\ @@ -181,14 +187,15 @@ SELECT \ pg_stat_activity.backend_xid, \ pg_stat_activity.backend_xmin, \ pg_stat_activity.query, \ - pg_stat_activity.backend_type \ + pg_stat_activity.backend_type, \ + dist_txs.global_id \ FROM \ pg_stat_activity \ - LEFT JOIN \ - get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, 
transaction_stamp) \ + JOIN \ + get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_id) \ ON pg_stat_activity.pid = dist_txs.process_id \ WHERE \ - pg_stat_activity.application_name = 'citus_internal' \ + pg_stat_activity.application_name SIMILAR TO 'citus_internal gpid=\\d+' \ AND \ pg_stat_activity.query NOT ILIKE '%stat_activity%';" @@ -223,6 +230,7 @@ typedef struct CitusDistStat TransactionId backend_xmin; text *query; text *backend_type; + uint64 global_pid; } CitusDistStat; @@ -501,6 +509,7 @@ ParseCitusDistStat(PGresult *result, int64 rowIndex) citusDistStat->backend_xmin = ParseXIDField(result, rowIndex, 20); citusDistStat->query = ParseTextField(result, rowIndex, 21); citusDistStat->backend_type = ParseTextField(result, rowIndex, 22); + citusDistStat->global_pid = ParseIntField(result, rowIndex, 23); return citusDistStat; } @@ -688,6 +697,7 @@ HeapTupleToCitusDistStat(HeapTuple result, TupleDesc rowDescriptor) citusDistStat->backend_xmin = ParseXIDFieldFromHeapTuple(result, rowDescriptor, 21); citusDistStat->query = ParseTextFieldFromHeapTuple(result, rowDescriptor, 22); citusDistStat->backend_type = ParseTextFieldFromHeapTuple(result, rowDescriptor, 23); + citusDistStat->global_pid = ParseIntFieldFromHeapTuple(result, rowDescriptor, 24); return citusDistStat; } @@ -1098,6 +1108,8 @@ ReturnCitusDistStats(List *citusStatsList, FunctionCallInfo fcinfo) nulls[25] = true; } + values[26] = Int32GetDatum(citusDistStat->global_pid); + tuplestore_putvalues(tupleStore, tupleDesc, values, nulls); } } diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 4fe97e421..78e14367a 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -557,7 +557,8 @@ ResetGlobalVariables() MetadataSyncOnCommit = 
false; InTopLevelDelegatedFunctionCall = false; ResetWorkerErrorIndication(); - AllowedDistributionColumnValue.isActive = false; + memset(&AllowedDistributionColumnValue, 0, + sizeof(AllowedDistributionColumn)); } diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index 1c005e582..6c78a9389 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -1012,8 +1012,8 @@ CitusRangeVarCallbackForLockTable(const RangeVar *rangeVar, Oid relationId, return; } - /* we only allow tables and views to be locked */ - if (!RegularTable(relationId)) + /* we only allow tables, views and foreign tables to be locked */ + if (!RegularTable(relationId) && !IsForeignTable(relationId)) { ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a table", rangeVar->relname))); diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index 942cabba5..6ce96bd9f 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -13,8 +13,10 @@ #include "catalog/dependency.h" #include "catalog/pg_collation.h" #include "catalog/pg_proc.h" +#include "catalog/pg_ts_config.h" #include "catalog/pg_type.h" #include "fmgr.h" +#include "funcapi.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" #include "parser/parse_type.h" @@ -28,13 +30,17 @@ #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/deparser.h" +#include "distributed/listutils.h" #include "distributed/metadata/distobject.h" #include "distributed/worker_create_or_replace.h" #include "distributed/worker_protocol.h" -static const char * CreateStmtByObjectAddress(const ObjectAddress *address); +static List * CreateStmtListByObjectAddress(const ObjectAddress *address); +static bool CompareStringList(List *list1, List 
*list2); PG_FUNCTION_INFO_V1(worker_create_or_replace_object); +PG_FUNCTION_INFO_V1(worker_create_or_replace_object_array); +static bool WorkerCreateOrReplaceObject(List *sqlStatements); /* @@ -51,6 +57,37 @@ WrapCreateOrReplace(const char *sql) } +/* + * WrapCreateOrReplaceList takes a list of sql commands and wraps it in a call to citus' + * udf to create or replace the existing object based on its create commands. + */ +char * +WrapCreateOrReplaceList(List *sqls) +{ + StringInfoData textArrayLitteral = { 0 }; + initStringInfo(&textArrayLitteral); + + appendStringInfoString(&textArrayLitteral, "ARRAY["); + const char *sql = NULL; + bool first = true; + foreach_ptr(sql, sqls) + { + if (!first) + { + appendStringInfoString(&textArrayLitteral, ", "); + } + appendStringInfoString(&textArrayLitteral, quote_literal_cstr(sql)); + first = false; + } + appendStringInfoString(&textArrayLitteral, "]::text[]"); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + appendStringInfo(&buf, CREATE_OR_REPLACE_COMMAND, textArrayLitteral.data); + return buf.data; +} + + /* * worker_create_or_replace_object(statement text) * @@ -73,35 +110,102 @@ Datum worker_create_or_replace_object(PG_FUNCTION_ARGS) { text *sqlStatementText = PG_GETARG_TEXT_P(0); - const char *sqlStatement = text_to_cstring(sqlStatementText); - Node *parseTree = ParseTreeNode(sqlStatement); + char *sqlStatement = text_to_cstring(sqlStatementText); + List *sqlStatements = list_make1(sqlStatement); + PG_RETURN_BOOL(WorkerCreateOrReplaceObject(sqlStatements)); +} + + +/* + * worker_create_or_replace_object(statements text[]) + * + * function is called, by the coordinator, with a CREATE statement for an object. This + * function implements the CREATE ... IF NOT EXISTS functionality for objects that do not + * have this functionality or where their implementation is not sufficient. 
+ * + * Besides checking if an object of said name exists it tries to compare the object to be + * created with the one in the local catalog. If there is a difference the one in the local + * catalog will be renamed after which the statement can be executed on this worker to + * create the object. If more statements are provided, all are compared in order with the + * statements generated on the worker. This works assuming a) both citus versions are the + * same, b) the objects are exactly the same. + * + * Renaming has two purposes + * - free the identifier for creation + * - non destructive if there is data store that would be destroyed if the object was + * used in a table on this node, eg. types. If the type would be dropped with a cascade + * it would drop any column holding user data for this type. + */ +Datum +worker_create_or_replace_object_array(PG_FUNCTION_ARGS) +{ + List *sqlStatements = NIL; + Datum *textArray = NULL; + int length = 0; + deconstruct_array(PG_GETARG_ARRAYTYPE_P(0), TEXTOID, -1, false, 'i', &textArray, + NULL, &length); + + for (int i = 0; i < length; i++) + { + sqlStatements = lappend(sqlStatements, TextDatumGetCString(textArray[i])); + } + + if (list_length(sqlStatements) < 1) + { + ereport(ERROR, (errmsg("expected atleast 1 statement to be provided"))); + } + + PG_RETURN_BOOL(WorkerCreateOrReplaceObject(sqlStatements)); +} + + +/* + * WorkerCreateOrReplaceObject implements the logic used by both variants of + * worker_create_or_replace_object to either create the object or coming to the conclusion + * the object already exists in the correct state. + * + * Returns true if the object has been created, false if it was already in the exact state + * it was asked for. + */ +static bool +WorkerCreateOrReplaceObject(List *sqlStatements) +{ /* - * since going to the drop statement might require some resolving we will do a check - * if the type actually exists instead of adding the IF EXISTS keyword to the - * statement. 
+ * To check which object we are changing we find the object address from the first + * statement passed into the UDF. Later we will check if all object addresses are the + * same. + * + * Although many of the objects will only have one statement in this call, more + * complex objects might come with a list of statements. We assume they all are on the + * same subject. */ + Node *parseTree = ParseTreeNode(linitial(sqlStatements)); ObjectAddress address = GetObjectAddressFromParseTree(parseTree, true); if (ObjectExists(&address)) { - const char *localSqlStatement = CreateStmtByObjectAddress(&address); + /* + * Object with name from statement is already found locally, check if states are + * identical. If objects differ we will rename the old object (non- destructively) + * as to make room to create the new object according to the spec sent. + */ - if (strcmp(sqlStatement, localSqlStatement) == 0) + /* + * Based on the local catalog we generate the list of commands we would send to + * recreate our version of the object. This we can compare to what the coordinator + * sent us. If they match we don't do anything. + */ + List *localSqlStatements = CreateStmtListByObjectAddress(&address); + if (CompareStringList(sqlStatements, localSqlStatements)) { /* - * TODO string compare is a poor man's comparison, but calling equal on the - * parsetree's returns false because there is extra information list character - * position of some sort - */ - - /* - * parseTree sent by the coordinator is the same as we would create for our - * object, therefore we can omit the create statement locally and not create - * the object as it already exists. + * statements sent by the coordinator are the same as we would create for our + * object, therefore we can omit the statements locally and not create the + * object as it already exists in the correct shape. * * We let the coordinator know we didn't create the object. 
*/ - PG_RETURN_BOOL(false); + return false; } char *newName = GenerateBackupNameForCollision(&address); @@ -113,12 +217,47 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS) NULL, None_Receiver, NULL); } - /* apply create statement locally */ - ProcessUtilityParseTree(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL, - None_Receiver, NULL); + /* apply all statements locally */ + char *sqlStatement = NULL; + foreach_ptr(sqlStatement, sqlStatements) + { + parseTree = ParseTreeNode(sqlStatement); + ProcessUtilityParseTree(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL, + None_Receiver, NULL); + + /* TODO verify all statements are about exactly 1 subject, mostly a sanity check + * to prevent unintentional use of this UDF, needs to come after the local + * execution to be able to actually resolve the ObjectAddress of the newly created + * object */ + } /* type has been created */ - PG_RETURN_BOOL(true); + return true; +} + + +static bool +CompareStringList(List *list1, List *list2) +{ + if (list_length(list1) != list_length(list2)) + { + return false; + } + + ListCell *cell1 = NULL; + ListCell *cell2 = NULL; + forboth(cell1, list1, cell2, list2) + { + const char *str1 = lfirst(cell1); + const char *str2 = lfirst(cell2); + + if (strcmp(str1, str2) != 0) + { + return false; + } + } + + return true; } @@ -130,24 +269,38 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS) * therefore you cannot equal this tree against parsed statement. Instead it can be * deparsed to do a string comparison. 
*/ -static const char * -CreateStmtByObjectAddress(const ObjectAddress *address) +static List * +CreateStmtListByObjectAddress(const ObjectAddress *address) { switch (getObjectClass(address)) { case OCLASS_COLLATION: { - return CreateCollationDDL(address->objectId); + return list_make1(CreateCollationDDL(address->objectId)); } case OCLASS_PROC: { - return GetFunctionDDLCommand(address->objectId, false); + return list_make1(GetFunctionDDLCommand(address->objectId, false)); + } + + case OCLASS_TSCONFIG: + { + /* + * We do support TEXT SEARCH CONFIGURATION, however, we can't recreate the + * object in 1 command. Instead we return the full list of deparsed + * statements that would recreate our local version of the object, so + * the caller can compare them one-by-one against the statements the + * coordinator has sent for the creation of the object. + */ + + List *stmts = GetCreateTextSearchConfigStatements(address); + return DeparseTreeNodes(stmts); } case OCLASS_TYPE: { - return DeparseTreeNode(CreateTypeStmtByObjectAddress(address)); + return list_make1(DeparseTreeNode(CreateTypeStmtByObjectAddress(address))); } default: @@ -179,6 +332,11 @@ GenerateBackupNameForCollision(const ObjectAddress *address) return GenerateBackupNameForProcCollision(address); } + case OCLASS_TSCONFIG: + { + return GenerateBackupNameForTextSearchConfiguration(address); + } + case OCLASS_TYPE: { return GenerateBackupNameForTypeCollision(address); @@ -256,6 +414,25 @@ CreateRenameTypeStmt(const ObjectAddress *address, char *newName) } +/* + * CreateRenameTextSearchStmt creates a rename statement for a text search configuration + * based on its ObjectAddress. The rename statement will rename the existing object on its + * address to the value provided in newName. 
+ */ +static RenameStmt * +CreateRenameTextSearchStmt(const ObjectAddress *address, char *newName) +{ + Assert(address->classId == TSConfigRelationId); + RenameStmt *stmt = makeNode(RenameStmt); + + stmt->renameType = OBJECT_TSCONFIGURATION; + stmt->object = (Node *) get_ts_config_namelist(address->objectId); + stmt->newname = newName; + + return stmt; +} + + /* * CreateRenameTypeStmt creates a rename statement for a type based on its ObjectAddress. * The rename statement will rename the existing object on its address to the value @@ -325,6 +502,11 @@ CreateRenameStatement(const ObjectAddress *address, char *newName) return CreateRenameProcStmt(address, newName); } + case OCLASS_TSCONFIG: + { + return CreateRenameTextSearchStmt(address, newName); + } + case OCLASS_TYPE: { return CreateRenameTypeStmt(address, newName); diff --git a/src/include/columnar/columnar_tableam.h b/src/include/columnar/columnar_tableam.h index 784cf0341..2f06e0972 100644 --- a/src/include/columnar/columnar_tableam.h +++ b/src/include/columnar/columnar_tableam.h @@ -50,7 +50,6 @@ typedef struct ColumnarScanDescData *ColumnarScanDesc; const TableAmRoutine * GetColumnarTableAmRoutine(void); extern void columnar_tableam_init(void); -extern bool CheckCitusVersion(int elevel); extern TableScanDesc columnar_beginscan_extended(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, diff --git a/src/include/distributed/backend_data.h b/src/include/distributed/backend_data.h index c68bfcf16..7f3a81e88 100644 --- a/src/include/distributed/backend_data.h +++ b/src/include/distributed/backend_data.h @@ -50,6 +50,7 @@ typedef struct BackendData Oid userId; slock_t mutex; bool cancelledDueToDeadlock; + uint64 globalPID; CitusInitiatedBackend citusBackend; DistributedTransactionId transactionId; } BackendData; @@ -61,8 +62,12 @@ extern void InitializeBackendData(void); extern void LockBackendSharedMemory(LWLockMode lockMode); extern void 
UnlockBackendSharedMemory(void); extern void UnSetDistributedTransactionId(void); +extern void UnSetGlobalPID(void); extern void AssignDistributedTransactionId(void); extern void MarkCitusInitiatedCoordinatorBackend(void); +extern void AssignGlobalPID(void); +extern uint64 GetGlobalPID(void); +extern uint64 ExtractGlobalPID(char *applicationName); extern void GetBackendDataForProc(PGPROC *proc, BackendData *result); extern void CancelTransactionDueToDeadlock(PGPROC *proc); extern bool MyBackendGotCancelledDueToDeadlock(bool clearState); @@ -73,4 +78,6 @@ extern int GetAllActiveClientBackendCount(void); extern void IncrementClientBackendCounter(void); extern void DecrementClientBackendCounter(void); +#define INVALID_CITUS_INTERNAL_BACKEND_GPID 0 + #endif /* BACKEND_DATA_H */ diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 41141ee8a..1b2b92590 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -151,6 +151,8 @@ extern ObjectAddress AlterCollationSchemaStmtObjectAddress(Node *stmt, extern List * PostprocessAlterCollationSchemaStmt(Node *stmt, const char *queryString); extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *address); extern ObjectAddress DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * PreprocessDefineCollationStmt(Node *stmt, const char *queryString, + ProcessUtilityContext processUtilityContext); extern List * PostprocessDefineCollationStmt(Node *stmt, const char *queryString); /* database.c - forward declarations */ @@ -366,6 +368,8 @@ extern ObjectAddress AlterRoleSetStmtObjectAddress(Node *node, extern List * GenerateCreateOrAlterRoleCommand(Oid roleOid); /* schema.c - forward declarations */ +extern List * PreprocessCreateSchemaStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); extern List * PreprocessDropSchemaStmt(Node *dropSchemaStatement, const char *queryString, 
ProcessUtilityContext processUtilityContext); @@ -375,6 +379,7 @@ extern List * PreprocessGrantOnSchemaStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern ObjectAddress CreateSchemaStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok); /* sequence.c - forward declarations */ @@ -462,6 +467,54 @@ extern Oid GetSequenceOid(Oid relationId, AttrNumber attnum); extern bool ConstrTypeUsesIndex(ConstrType constrType); +/* text_search.c - forward declarations */ +extern List * PostprocessCreateTextSearchConfigurationStmt(Node *node, + const char *queryString); +extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address); +extern List * CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address); +extern List * PreprocessDropTextSearchConfigurationStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessAlterTextSearchConfigurationStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessRenameTextSearchConfigurationStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, + const char *queryString); +extern List * PreprocessTextSearchConfigurationCommentStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * 
PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, + const char *queryString); +extern ObjectAddress CreateTextSearchConfigurationObjectAddress(Node *node, + bool missing_ok); +extern ObjectAddress RenameTextSearchConfigurationStmtObjectAddress(Node *node, + bool missing_ok); +extern ObjectAddress AlterTextSearchConfigurationStmtObjectAddress(Node *node, + bool missing_ok); +extern ObjectAddress AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, + bool missing_ok); +extern ObjectAddress TextSearchConfigurationCommentObjectAddress(Node *node, + bool missing_ok); +extern ObjectAddress AlterTextSearchConfigurationOwnerObjectAddress(Node *node, + bool missing_ok); +extern char * GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address); +extern List * get_ts_config_namelist(Oid tsconfigOid); + /* truncate.c - forward declarations */ extern void PreprocessTruncateStatement(TruncateStmt *truncateStatement); diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index 721617474..ad575cfe5 100644 --- a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -29,7 +29,7 @@ #define ERROR_BUFFER_SIZE 256 /* application name used for internal connections in Citus */ -#define CITUS_APPLICATION_NAME "citus_internal" +#define CITUS_APPLICATION_NAME_PREFIX "citus_internal gpid=" /* application name used for internal connections in rebalancer */ #define CITUS_REBALANCER_NAME "citus_rebalancer" diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 8934323f0..ebf4a6147 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -31,6 +31,7 @@ extern void AssertObjectTypeIsFunctional(ObjectType type); extern void QualifyTreeNode(Node *stmt); extern char * DeparseTreeNode(Node *stmt); +extern List * DeparseTreeNodes(List *stmts); /* forward declarations for 
deparse_attribute_stmts.c */ extern char * DeparseRenameAttributeStmt(Node *); @@ -59,7 +60,18 @@ extern char * DeparseAlterTableStmt(Node *node); extern void QualifyAlterTableSchemaStmt(Node *stmt); +/* forward declarations for deparse_text_search.c */ +extern char * DeparseCreateTextSearchStmt(Node *node); +extern char * DeparseDropTextSearchConfigurationStmt(Node *node); +extern char * DeparseRenameTextSearchConfigurationStmt(Node *node); +extern char * DeparseAlterTextSearchConfigurationStmt(Node *node); +extern char * DeparseAlterTextSearchConfigurationSchemaStmt(Node *node); +extern char * DeparseTextSearchConfigurationCommentStmt(Node *node); +extern char * DeparseAlterTextSearchConfigurationOwnerStmt(Node *node); + /* forward declarations for deparse_schema_stmts.c */ +extern char * DeparseCreateSchemaStmt(Node *node); +extern char * DeparseDropSchemaStmt(Node *node); extern char * DeparseGrantOnSchemaStmt(Node *stmt); extern char * DeparseAlterSchemaRenameStmt(Node *stmt); @@ -138,6 +150,14 @@ extern char * DeparseAlterExtensionStmt(Node *stmt); /* forward declarations for deparse_database_stmts.c */ extern char * DeparseAlterDatabaseOwnerStmt(Node *node); +/* forward declarations for deparse_text_search_stmts.c */ +extern void QualifyDropTextSearchConfigurationStmt(Node *node); +extern void QualifyAlterTextSearchConfigurationStmt(Node *node); +extern void QualifyRenameTextSearchConfigurationStmt(Node *node); +extern void QualifyAlterTextSearchConfigurationSchemaStmt(Node *node); +extern void QualifyTextSearchConfigurationCommentStmt(Node *node); +extern void QualifyAlterTextSearchConfigurationOwnerStmt(Node *node); + /* forward declarations for deparse_sequence_stmts.c */ extern char * DeparseDropSequenceStmt(Node *node); extern char * DeparseRenameSequenceStmt(Node *node); diff --git a/src/include/distributed/function_call_delegation.h b/src/include/distributed/function_call_delegation.h index 5471a3f1d..bf3dbe170 100644 --- 
a/src/include/distributed/function_call_delegation.h +++ b/src/include/distributed/function_call_delegation.h @@ -23,7 +23,7 @@ extern bool InTopLevelDelegatedFunctionCall; extern bool InDelegatedProcedureCall; PlannedStmt * TryToDelegateFunctionCall(DistributedPlanningContext *planContext); -extern void ResetAllowedShardKeyValue(void); +extern void CheckAndResetAllowedShardKeyValueIfNeeded(void); extern bool IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId); #endif /* FUNCTION_CALL_DELEGATION_H */ diff --git a/src/include/distributed/metadata/dependency.h b/src/include/distributed/metadata/dependency.h index d20103011..141b2a628 100644 --- a/src/include/distributed/metadata/dependency.h +++ b/src/include/distributed/metadata/dependency.h @@ -19,6 +19,7 @@ extern List * GetUniqueDependenciesList(List *objectAddressesList); extern List * GetDependenciesForObject(const ObjectAddress *target); +extern List * GetAllSupportedDependenciesForObject(const ObjectAddress *target); extern List * GetAllDependenciesForObject(const ObjectAddress *target); extern List * OrderObjectAddressListInDependencyOrder(List *objectAddressList); extern bool SupportedDependencyByCitus(const ObjectAddress *address); diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h index 472cd83e2..5ea04ec73 100644 --- a/src/include/distributed/metadata/distobject.h +++ b/src/include/distributed/metadata/distobject.h @@ -30,8 +30,8 @@ extern bool IsObjectAddressOwnedByExtension(const ObjectAddress *target, ObjectAddress *extensionAddress); extern ObjectAddress PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr); - extern List * GetDistributedObjectAddressList(void); +extern RoleSpec * GetRoleSpecObjectForUser(Oid roleOid); extern void UpdateDistributedObjectColocationId(uint32 oldColocationId, uint32 newColocationId); #endif /* CITUS_METADATA_DISTOBJECT_H */ diff --git a/src/include/distributed/metadata_cache.h 
b/src/include/distributed/metadata_cache.h index 6dac101af..46ba72a49 100644 --- a/src/include/distributed/metadata_cache.h +++ b/src/include/distributed/metadata_cache.h @@ -165,6 +165,7 @@ extern CitusTableCacheEntry * LookupCitusTableCacheEntry(Oid relationId); extern DistObjectCacheEntry * LookupDistObjectCacheEntry(Oid classid, Oid objid, int32 objsubid); extern int32 GetLocalGroupId(void); +extern int32 GetLocalNodeId(void); extern void CitusTableCacheFlushInvalidatedEntries(void); extern Oid LookupShardRelationFromCatalog(int64 shardId, bool missing_ok); extern List * ShardPlacementListIncludingOrphanedPlacements(uint64 shardId); diff --git a/src/include/distributed/multi_executor.h b/src/include/distributed/multi_executor.h index 3648dbc1b..dd10c511d 100644 --- a/src/include/distributed/multi_executor.h +++ b/src/include/distributed/multi_executor.h @@ -139,6 +139,7 @@ extern void ExecuteQueryIntoDestReceiver(Query *query, ParamListInfo params, extern void ExecutePlanIntoDestReceiver(PlannedStmt *queryPlan, ParamListInfo params, DestReceiver *dest); extern void SetLocalMultiShardModifyModeToSequential(void); +extern void EnsureSequentialMode(ObjectType objType); extern void SetLocalForceMaxQueryParallelization(void); extern void SortTupleStore(CitusScanState *scanState); extern bool DistributedPlanModifiesDatabase(DistributedPlan *plan); diff --git a/src/include/distributed/transaction_management.h b/src/include/distributed/transaction_management.h index 48bce27a5..5ff34e21a 100644 --- a/src/include/distributed/transaction_management.h +++ b/src/include/distributed/transaction_management.h @@ -70,6 +70,9 @@ typedef struct AllowedDistributionColumn Const *distributionColumnValue; uint32 colocationId; bool isActive; + + /* In nested executor, track the level at which value is set */ + int executorLevel; } AllowedDistributionColumn; /* diff --git a/src/include/distributed/worker_create_or_replace.h b/src/include/distributed/worker_create_or_replace.h index 
60323d172..148cee138 100644 --- a/src/include/distributed/worker_create_or_replace.h +++ b/src/include/distributed/worker_create_or_replace.h @@ -19,6 +19,7 @@ #define CREATE_OR_REPLACE_COMMAND "SELECT worker_create_or_replace_object(%s);" extern char * WrapCreateOrReplace(const char *sql); +extern char * WrapCreateOrReplaceList(List *sqls); extern char * GenerateBackupNameForCollision(const ObjectAddress *address); extern RenameStmt * CreateRenameStatement(const ObjectAddress *address, char *newName); diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index 89ce53c70..bf35d7f4b 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -875,6 +875,7 @@ BEGIN RETURN $1 * $1; END; $function$; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION square_func(int) RETURNS int LANGUAGE plpgsql @@ -883,6 +884,7 @@ BEGIN RETURN $1 * $1; END; $function$; +RESET citus.enable_metadata_sync; SELECT const_function(1), string_agg(a::character, ',') FROM t1; NOTICE: stable_fn called CONTEXT: PL/pgSQL function const_function(integer) line XX at RAISE diff --git a/src/test/regress/expected/alter_database_owner.out b/src/test/regress/expected/alter_database_owner.out index 2e5e54aca..1fb118242 100644 --- a/src/test/regress/expected/alter_database_owner.out +++ b/src/test/regress/expected/alter_database_owner.out @@ -165,8 +165,8 @@ SELECT count(*) FROM t; -- parallel execution; (1 row) ALTER DATABASE regression OWNER TO database_owner_2; -- should ERROR -ERROR: cannot create or modify database because there was a parallel operation on a distributed table in the transaction -DETAIL: When creating or altering a database, Citus needs to perform all operations over a single connection per node to ensure consistency. 
+ERROR: cannot run database command because there was a parallel operation on a distributed table in the transaction +DETAIL: When running command on/for a distributed database, Citus needs to perform all operations over a single connection per node to ensure consistency. HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" ROLLBACK; -- list the owners of the current database on all nodes diff --git a/src/test/regress/expected/citus_local_table_triggers.out b/src/test/regress/expected/citus_local_table_triggers.out index 1a269c649..007986fab 100644 --- a/src/test/regress/expected/citus_local_table_triggers.out +++ b/src/test/regress/expected/citus_local_table_triggers.out @@ -193,8 +193,8 @@ FOR EACH STATEMENT EXECUTE FUNCTION dummy_function(); NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1507008, 'interesting!schema', E'CREATE TRIGGER "trigger\\''name" BEFORE INSERT ON "interesting!schema"."citus_local!_table" FOR EACH STATEMENT EXECUTE FUNCTION dummy_function();') +CREATE EXTENSION seg; BEGIN; - CREATE EXTENSION seg; -- ALTER TRIGGER DEPENDS ON ALTER TRIGGER "trigger\'name" ON "interesting!schema"."citus_local!_table" DEPENDS ON EXTENSION seg; NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1507008, 'interesting!schema', E'ALTER TRIGGER "trigger\\''name" ON "interesting!schema"."citus_local!_table" DEPENDS ON EXTENSION seg;') @@ -578,5 +578,5 @@ NOTICE: executing the command locally: SELECT val FROM citus_local_table_trigge ROLLBACK; -- cleanup at exit +SET client_min_messages TO ERROR; DROP SCHEMA citus_local_table_triggers, "interesting!schema" CASCADE; -NOTICE: drop cascades to 22 other objects diff --git a/src/test/regress/expected/citus_local_tables_queries_mx.out b/src/test/regress/expected/citus_local_tables_queries_mx.out index 684644476..52b73e7df 100644 --- a/src/test/regress/expected/citus_local_tables_queries_mx.out +++ 
b/src/test/regress/expected/citus_local_tables_queries_mx.out @@ -58,6 +58,7 @@ CREATE TABLE postgres_local_table(a int, b int); -- We shouldn't use LIMIT in INSERT SELECT queries to make the test faster as -- LIMIT would force planner to wrap SELECT query in an intermediate result and -- this might reduce the coverage of the test cases. +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$ BEGIN SET client_min_messages to ERROR; @@ -74,6 +75,7 @@ CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$ RESET client_min_messages; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; --------------------------------------------------------------------- ---- SELECT ---- --------------------------------------------------------------------- diff --git a/src/test/regress/expected/columnar_truncate.out b/src/test/regress/expected/columnar_truncate.out index 5914cfb0f..fe5169535 100644 --- a/src/test/regress/expected/columnar_truncate.out +++ b/src/test/regress/expected/columnar_truncate.out @@ -265,6 +265,13 @@ set columnar.compression = 'pglz'; INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100); set columnar.compression to default; -- create a user that can not truncate +SELECT run_command_on_workers($$CREATE USER truncate_user;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE ROLE") + (localhost,57638,t,"CREATE ROLE") +(2 rows) + CREATE USER truncate_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
diff --git a/src/test/regress/expected/coordinator_evaluation.out b/src/test/regress/expected/coordinator_evaluation.out index bd309bdb2..f675c316f 100644 --- a/src/test/regress/expected/coordinator_evaluation.out +++ b/src/test/regress/expected/coordinator_evaluation.out @@ -523,6 +523,7 @@ BEGIN RETURN trunc(random() * (end_int-start_int) + start_int); END; $$ LANGUAGE 'plpgsql' STRICT; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE coordinator_evaluation.test_procedure(int) LANGUAGE plpgsql AS $procedure$ @@ -532,6 +533,7 @@ BEGIN PERFORM DISTINCT value FROM coordinator_evaluation_table_2 WHERE key = filterKey; END; $procedure$; +RESET citus.enable_metadata_sync; -- we couldn't find a meaningful query to write for this -- however this query fails before https://github.com/citusdata/citus/pull/3454 SET client_min_messages TO DEBUG2; @@ -564,6 +566,7 @@ BEGIN INSERT INTO coordinator_evaluation_table_2 VALUES (filterKey, filterKey); END; $procedure$; +DEBUG: switching to sequential query execution mode RESET citus.log_remote_commands ; RESET client_min_messages; -- these calls would INSERT key = 101, so test if insert succeeded diff --git a/src/test/regress/expected/coordinator_evaluation_modify.out b/src/test/regress/expected/coordinator_evaluation_modify.out index 906dbffef..e75ac60b1 100644 --- a/src/test/regress/expected/coordinator_evaluation_modify.out +++ b/src/test/regress/expected/coordinator_evaluation_modify.out @@ -833,11 +833,13 @@ EXECUTE router_with_only_function; SET citus.log_local_commands TO ON; SET search_path TO coordinator_evaluation_combinations_modify; -- returns 2 on the worker +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_constant_stable() RETURNS INT AS $$ BEGIN RETURN 2; END; $$ language plpgsql STABLE; +RESET citus.enable_metadata_sync; -- all local values INSERT INTO user_info_data (user_id, u_data) VALUES (3, '(''test3'', 3)'), (4, '(''test4'', 4)'), (7, '(''test7'', 7)'), diff --git 
a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out index 2cd9f2f20..5d5fa982c 100644 --- a/src/test/regress/expected/coordinator_shouldhaveshards.out +++ b/src/test/regress/expected/coordinator_shouldhaveshards.out @@ -898,9 +898,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in -- a helper function which return true if the coordinated -- trannsaction uses 2PC +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC() RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus', $$coordinated_transaction_should_use_2PC$$; +RESET citus.enable_metadata_sync; -- a local SELECT followed by remote SELECTs -- does not trigger 2PC BEGIN; diff --git a/src/test/regress/expected/distributed_collations.out b/src/test/regress/expected/distributed_collations.out index 7ca02b9b1..bc6a5a859 100644 --- a/src/test/regress/expected/distributed_collations.out +++ b/src/test/regress/expected/distributed_collations.out @@ -155,16 +155,6 @@ ORDER BY 1,2,3; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA collation_tests CASCADE; DROP SCHEMA collation_tests2 CASCADE; --- This is hacky, but we should clean-up the resources as below -\c - - - :worker_1_port -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA collation_tests CASCADE; -DROP SCHEMA collation_tests2 CASCADE; -\c - - - :worker_2_port -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA collation_tests CASCADE; -DROP SCHEMA collation_tests2 CASCADE; -\c - - - :master_port DROP USER collationuser; SELECT run_command_on_workers($$DROP USER collationuser;$$); run_command_on_workers @@ -173,3 +163,19 @@ SELECT run_command_on_workers($$DROP USER collationuser;$$); (localhost,57638,t,"DROP ROLE") (2 rows) +\c - - - :worker_1_port +-- test creating a collation on a worker +CREATE COLLATION 
another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. +-- test if creating a collation on a worker on a local +-- schema raises the right error +SET citus.enable_ddl_propagation TO off; +CREATE SCHEMA collation_creation_on_worker; +SET citus.enable_ddl_propagation TO on; +CREATE COLLATION collation_creation_on_worker.another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. +SET citus.enable_ddl_propagation TO off; +DROP SCHEMA collation_creation_on_worker; +SET citus.enable_ddl_propagation TO on; diff --git a/src/test/regress/expected/distributed_collations_conflict.out b/src/test/regress/expected/distributed_collations_conflict.out index 821521457..818de2697 100644 --- a/src/test/regress/expected/distributed_collations_conflict.out +++ b/src/test/regress/expected/distributed_collations_conflict.out @@ -1,11 +1,4 @@ CREATE SCHEMA collation_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - \c - - - :worker_1_port SET search_path TO collation_conflict; SET citus.enable_metadata_sync TO off; diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out index 2ab760014..614f19288 100644 --- a/src/test/regress/expected/distributed_functions.out +++ b/src/test/regress/expected/distributed_functions.out @@ -469,8 +469,7 @@ ALTER FUNCTION eq(macaddr,macaddr) DEPENDS ON EXTENSION citus; ERROR: distrtibuted functions are not allowed to depend on an extension DETAIL: Function "function_tests.eq(pg_catalog.macaddr,pg_catalog.macaddr)" is already distributed. 
Functions from extensions are expected to be created on the workers by the extension they depend on. SELECT create_distributed_function('pg_catalog.citus_drop_trigger()'); -ERROR: unable to create a distributed function from functions owned by an extension -DETAIL: Function "pg_catalog.citus_drop_trigger()" has a dependency on extension "citus". Functions depending on an extension cannot be distributed. Create the function by creating the extension on the workers. +ERROR: Citus extension functions(citus_drop_trigger) cannot be distributed. DROP FUNCTION eq(macaddr,macaddr); -- call should fail as function should have been dropped SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('0123456789ab','ba9876543210');$$) ORDER BY 1,2; @@ -1064,16 +1063,9 @@ SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isa \c - - - :worker_1_port UPDATE pg_dist_local_group SET groupid = 0; TRUNCATE pg_dist_node; -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA function_tests CASCADE; -DROP SCHEMA function_tests2 CASCADE; -SET search_path TO function_tests, function_tests2; \c - - - :worker_2_port UPDATE pg_dist_local_group SET groupid = 0; TRUNCATE pg_dist_node; -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA function_tests CASCADE; -DROP SCHEMA function_tests2 CASCADE; \c - - - :master_port SET client_min_messages TO ERROR; DROP USER functionuser; diff --git a/src/test/regress/expected/distributed_functions_conflict.out b/src/test/regress/expected/distributed_functions_conflict.out index 537da8c4e..354aea9c0 100644 --- a/src/test/regress/expected/distributed_functions_conflict.out +++ b/src/test/regress/expected/distributed_functions_conflict.out @@ -1,15 +1,9 @@ -- This is designed to test worker_create_or_replace_object in PG11 with aggregates -- Note in PG12 we use CREATE OR REPLACE AGGREGATE, thus the renaming does not occur CREATE SCHEMA proc_conflict; -SELECT 
run_command_on_workers($$CREATE SCHEMA proc_conflict;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - \c - - - :worker_1_port SET search_path TO proc_conflict; +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ BEGIN RETURN state * 2 + i; @@ -19,6 +13,7 @@ CREATE AGGREGATE existing_agg(int) ( SFUNC = existing_func, STYPE = int ); +RESET citus.enable_metadata_sync; \c - - - :master_port SET search_path TO proc_conflict; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ @@ -69,6 +64,7 @@ DROP AGGREGATE existing_agg(int) CASCADE; DROP FUNCTION existing_func(int, int) CASCADE; \c - - - :worker_1_port SET search_path TO proc_conflict; +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ BEGIN RETURN state * 3 + i; @@ -78,6 +74,7 @@ CREATE AGGREGATE existing_agg(int) ( SFUNC = existing_func, STYPE = int ); +RESET citus.enable_metadata_sync; \c - - - :master_port SET search_path TO proc_conflict; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ diff --git a/src/test/regress/expected/distributed_procedure.out b/src/test/regress/expected/distributed_procedure.out index 9331badff..3a14ed048 100644 --- a/src/test/regress/expected/distributed_procedure.out +++ b/src/test/regress/expected/distributed_procedure.out @@ -173,21 +173,7 @@ SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello'); SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP SCHEMA procedure_tests2 
CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP USER procedureuser; SELECT 1 FROM run_command_on_workers($$DROP USER procedureuser;$$); ?column? diff --git a/src/test/regress/expected/distributed_types.out b/src/test/regress/expected/distributed_types.out index 6d22ee2a2..dcf8dd8b2 100644 --- a/src/test/regress/expected/distributed_types.out +++ b/src/test/regress/expected/distributed_types.out @@ -456,21 +456,7 @@ SELECT * FROM field_indirection_test_2 ORDER BY 1,2,3; -- clear objects SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA type_tests CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP SCHEMA type_tests2 CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP USER typeuser; SELECT run_command_on_workers($$DROP USER typeuser;$$); run_command_on_workers diff --git a/src/test/regress/expected/distributed_types_conflict.out b/src/test/regress/expected/distributed_types_conflict.out index c106f64c9..17d259e76 100644 --- a/src/test/regress/expected/distributed_types_conflict.out +++ b/src/test/regress/expected/distributed_types_conflict.out @@ -1,12 +1,5 @@ SET citus.next_shard_id TO 20020000; CREATE SCHEMA type_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$); - run_command_on_workers ---------------------------------------------------------------------- - 
(localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - -- create a type on a worker that should not cause data loss once overwritten with a type -- from the coordinator \c - - :public_worker_1_host :worker_1_port diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value.out b/src/test/regress/expected/distributed_types_xact_add_enum_value.out index fbfee78ac..abc2c88d0 100644 --- a/src/test/regress/expected/distributed_types_xact_add_enum_value.out +++ b/src/test/regress/expected/distributed_types_xact_add_enum_value.out @@ -64,10 +64,3 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso -- clear objects SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA xact_enum_type CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - diff --git a/src/test/regress/expected/drop_partitioned_table.out b/src/test/regress/expected/drop_partitioned_table.out index b77e6f80b..edc535020 100644 --- a/src/test/regress/expected/drop_partitioned_table.out +++ b/src/test/regress/expected/drop_partitioned_table.out @@ -47,7 +47,6 @@ WHERE n.nspname IN ('drop_partitioned_table', 'schema1') AND c.relkind IN ('r','p') ORDER BY 1, 2; \c - - - :worker_1_port -CREATE SCHEMA drop_partitioned_table; SET search_path = drop_partitioned_table; CREATE VIEW tables_info AS SELECT n.nspname as "Schema", @@ -395,11 +394,4 @@ NOTICE: issuing ROLLBACK NOTICE: issuing ROLLBACK DROP SCHEMA drop_partitioned_table CASCADE; NOTICE: drop cascades to 3 other objects -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS drop_partitioned_table CASCADE'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - 
(localhost,57638,t,"DROP SCHEMA") -(2 rows) - SET search_path TO public; diff --git a/src/test/regress/expected/failure_add_disable_node.out b/src/test/regress/expected/failure_add_disable_node.out index ef78e451b..ca1c8f838 100644 --- a/src/test/regress/expected/failure_add_disable_node.out +++ b/src/test/regress/expected/failure_add_disable_node.out @@ -253,11 +253,3 @@ ORDER BY placementid; RESET SEARCH_PATH; DROP SCHEMA add_remove_node CASCADE; NOTICE: drop cascades to table add_remove_node.user_table -SELECT * FROM run_command_on_workers('DROP SCHEMA IF EXISTS add_remove_node CASCADE') -ORDER BY nodeport; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 9060 | t | DROP SCHEMA - localhost | 57637 | t | DROP SCHEMA -(2 rows) - diff --git a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out index 6284107d2..9c44269a3 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ b/src/test/regress/expected/failure_connection_establishment.out @@ -225,8 +225,8 @@ SELECT count(*) FROM single_replicatated WHERE key = 100; RESET client_min_messages; -- verify get_global_active_transactions works when a timeout happens on a connection -SELECT get_global_active_transactions(); - get_global_active_transactions +SELECT * FROM get_global_active_transactions() WHERE transaction_number != 0; + datid | process_id | initiator_node_identifier | worker_query | transaction_number | transaction_stamp | global_pid --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out index 13f6cdffa..3a88e0192 100644 --- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out +++ 
b/src/test/regress/expected/failure_create_distributed_table_non_empty.out @@ -4,7 +4,9 @@ -- We have to keep two copies of this failure test -- because if the shards are created via the executor -- cancellations are processed, otherwise they are not +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA create_distributed_table_non_empty_failure; +SET citus.enable_ddl_propagation TO ON; SET search_path TO 'create_distributed_table_non_empty_failure'; SET citus.next_shard_id TO 11000000; SELECT citus.mitmproxy('conn.allow()'); @@ -100,13 +102,6 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata (localhost,57637,t,1) (2 rows) -SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,9060,t,"DROP SCHEMA") - (localhost,57637,t,"DROP SCHEMA") -(2 rows) - -- this triggers a schema creation which prevents further transactions around dependency propagation SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_create_reference_table.out b/src/test/regress/expected/failure_create_reference_table.out index bb9ce32f5..432290c4b 100644 --- a/src/test/regress/expected/failure_create_reference_table.out +++ b/src/test/regress/expected/failure_create_reference_table.out @@ -193,7 +193,9 @@ SELECT citus.mitmproxy('conn.allow()'); DROP TABLE ref_table; DROP SCHEMA failure_reference_table; +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA failure_reference_table; +SET citus.enable_ddl_propagation TO ON; CREATE TABLE ref_table(id int); INSERT INTO ref_table VALUES(1),(2),(3); -- Test in transaction diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out index 14b3daa66..4a575ed19 100644 --- a/src/test/regress/expected/failure_create_table.out +++ 
b/src/test/regress/expected/failure_create_table.out @@ -1,7 +1,9 @@ -- -- failure_create_table adds failure tests for creating table without data. -- +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA failure_create_table; +SET citus.enable_ddl_propagation TO ON; SET search_path TO 'failure_create_table'; SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/fast_path_router_modify.out b/src/test/regress/expected/fast_path_router_modify.out index f99aac5d4..bf7e07362 100644 --- a/src/test/regress/expected/fast_path_router_modify.out +++ b/src/test/regress/expected/fast_path_router_modify.out @@ -228,7 +228,10 @@ BEGIN DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT modify_fast_path_plpsql(1,1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -241,6 +244,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(2,2); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -253,6 +257,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(3,3); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE 
FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -265,6 +270,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(4,4); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -277,6 +283,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(5,5); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -289,6 +296,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(6,6); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -301,6 +309,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(6,6); +DEBUG: function does not have co-located tables modify_fast_path_plpsql --------------------------------------------------------------------- diff --git a/src/test/regress/expected/fkeys_between_local_ref.out b/src/test/regress/expected/fkeys_between_local_ref.out index b142bd4d3..3df007cb3 100644 --- a/src/test/regress/expected/fkeys_between_local_ref.out +++ b/src/test/regress/expected/fkeys_between_local_ref.out @@ -332,6 +332,7 @@ BEGIN; ERROR: insert 
or update on table "local_table_5_1518073" violates foreign key constraint "local_table_5_col_1_fkey1_1518073" ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA another_schema_fkeys_between_local_ref; CREATE TABLE another_schema_fkeys_between_local_ref.local_table_6 (col_1 INT PRIMARY KEY); -- first convert local tables to citus local tables in graph @@ -376,6 +377,7 @@ BEGIN; ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE local_table_6 (col_1 INT PRIMARY KEY); -- first convert local tables to citus local tables in graph ALTER TABLE local_table_2 ADD CONSTRAINT fkey_11 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_1) ON DELETE CASCADE; diff --git a/src/test/regress/expected/forcedelegation_functions.out b/src/test/regress/expected/forcedelegation_functions.out index ad3b6cb8e..c26f7b75b 100644 --- a/src/test/regress/expected/forcedelegation_functions.out +++ b/src/test/regress/expected/forcedelegation_functions.out @@ -293,6 +293,8 @@ BEGIN RETURN ret_val; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands CREATE OR REPLACE FUNCTION func_calls_forcepush_func() RETURNS NUMERIC AS $$ DECLARE incremented_val NUMERIC; @@ -302,9 +304,11 @@ BEGIN RETURN incremented_val; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('func_calls_forcepush_func()'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -312,7 +316,7 @@ DETAIL: A distributed function is created. To make sure subsequent commands see SELECT create_distributed_function('inner_force_delegation_function(int)', '$1', colocate_with := 'test_nested', force_delegation := true); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -354,12 +358,16 @@ PL/pgSQL function func_calls_forcepush_func() line XX at SQL statement 101 (1 row) +-- Block distributing that function as distributing it causes +-- different test output on PG 14. +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_val() RETURNS INT AS $$ BEGIN RETURN 100::INT; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- -- UDF calling another UDF in a FROM clause -- fn() @@ -377,7 +385,10 @@ BEGIN RETURN incremented_val; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT func_calls_forcepush_func_infrom(); +DEBUG: function does not have co-located tables DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT inner_force_delegation_function FROM inner_force_delegation_function(add_val + 100)" PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement @@ -395,6 +406,7 @@ PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement BEGIN; SELECT func_calls_forcepush_func_infrom(); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT inner_force_delegation_function FROM inner_force_delegation_function(add_val + 100)" PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement @@ -428,7 +440,10 @@ BEGIN RETURN incremented_val; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT func_calls_forcepush_func_intarget(); +DEBUG: function does not have co-located tables DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT inner_force_delegation_function(100 + 100) OFFSET 0" PL/pgSQL function func_calls_forcepush_func_intarget() line XX at SQL statement @@ -446,6 +461,7 @@ PL/pgSQL function func_calls_forcepush_func_intarget() line XX at SQL statement BEGIN; SELECT func_calls_forcepush_func_intarget(); +DEBUG: not pushing down function calls in a multi-statement transaction NOTICE: inner_force_delegation_function():201 DETAIL: from localhost:xxxxx CONTEXT: SQL statement "SELECT inner_force_delegation_function(100 + 100) OFFSET 0" @@ -473,9 +489,11 @@ BEGIN END if; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('test_recursive(int)', '$1', colocate_with := 'test_nested', force_delegation := true); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -544,13 +562,15 @@ BEGIN RETURN x + y; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'test_non_constant(int,bigint)', '$1', colocate_with := 'test_forcepushdown', force_delegation := true); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -610,6 +630,8 @@ BEGIN INSERT INTO emp VALUES (empname, 33); END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands CREATE OR REPLACE FUNCTION outer_emp() RETURNS void AS $$ @@ -618,15 +640,18 @@ BEGIN PERFORM inner_emp('hello'); END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('inner_emp(text)','empname', force_delegation := true); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- (1 row) SELECT outer_emp(); +DEBUG: function does not have co-located tables DEBUG: Skipping pushdown of function from a PL/PgSQL simple expression CONTEXT: SQL statement "SELECT inner_emp('hello')" PL/pgSQL function outer_emp() line XX at PERFORM @@ -650,13 +675,15 @@ BEGIN INSERT INTO forcepushdown_schema.test_forcepushdown SELECT(a+1); END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_select_data(int)', 'a', colocate_with := 'test_forcepushdown', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -725,13 +752,15 @@ BEGIN SELECT intcol FROM forcepushdown_schema.test_forcepushdown_noncolocate; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_select_data_nonlocal(int)', 'a', colocate_with := 'test_forcepushdown', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -803,13 +832,15 @@ BEGIN INSERT INTO forcepushdown_schema.test_forcepushdown_char VALUES (a); END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_data_char(char)', 'a', colocate_with := 'test_forcepushdown_char', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -821,13 +852,15 @@ BEGIN INSERT INTO forcepushdown_schema.test_forcepushdown_varchar VALUES (a); END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_data_varchar(varchar)', 'a', colocate_with := 'test_forcepushdown_varchar', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -839,13 +872,15 @@ BEGIN INSERT INTO forcepushdown_schema.test_forcepushdown_text VALUES (a); END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_data_text(text)', 'a', colocate_with := 'test_forcepushdown_text', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -947,13 +982,15 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'select_data(int)', 'a', colocate_with := 'test_subquery', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -969,13 +1006,15 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'select_data_noncolocate(int)', 'a', colocate_with := 'test_subquery', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -990,13 +1029,15 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_select_data_cte1(int)', 'a', colocate_with := 'test_subquery', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -1011,13 +1052,15 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_select_data_cte2(int)', 'a', colocate_with := 'test_subquery', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -1033,13 +1076,15 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_data_cte_nondist(int)', 'a', colocate_with := 'test_subquery', force_delegation := true ); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -1162,18 +1207,11 @@ SELECT * FROM forcepushdown_schema.test_subquery ORDER BY 1; (5 rows) -- Query with targetList greater than 1 --- Function from FROM clause is not delegated outside of a BEGIN (for now) +-- Function from FROM clause is delegated outside of a BEGIN SELECT 1,2,3 FROM select_data(100); -DEBUG: generating subplan XXX_1 for subquery SELECT data FROM forcepushdown_schema.test_subquery WHERE (data OPERATOR(pg_catalog.=) 100) -CONTEXT: SQL statement "SELECT result FROM forcepushdown_schema.test_subquery WHERE data = - (SELECT data FROM forcepushdown_schema.test_subquery WHERE data = a)" -PL/pgSQL function select_data(integer) line XX at SQL statement -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT result FROM forcepushdown_schema.test_subquery WHERE (data OPERATOR(pg_catalog.=) (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer))) -CONTEXT: SQL statement "SELECT result FROM forcepushdown_schema.test_subquery WHERE data = - (SELECT data FROM forcepushdown_schema.test_subquery WHERE data = a)" -PL/pgSQL function select_data(integer) line XX at SQL statement +DEBUG: pushing down the 
function call NOTICE: Result: -1 -CONTEXT: PL/pgSQL function select_data(integer) line XX at RAISE +DETAIL: from localhost:xxxxx ?column? | ?column? | ?column? --------------------------------------------------------------------- 1 | 2 | 3 @@ -1210,9 +1248,11 @@ BEGIN RETURN x + y; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('test_prepare(int,int)','x',force_delegation :=true, colocate_with := 'table_test_prepare'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -1230,9 +1270,12 @@ BEGIN PERFORM 1, 1 + a FROM test_prepare(x + 1, y + 1) a; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- First 5 get delegated and succeeds BEGIN; SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1251,6 +1294,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1269,6 +1313,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1287,6 +1332,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1305,6 +1351,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT 
FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1331,6 +1378,7 @@ SELECT COUNT(*) FROM table_test_prepare; -- 6th execution will be generic plan and should get delegated SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1349,6 +1397,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1369,6 +1418,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM END; -- Fails as expected SELECT outer_test_prepare(1,2); +DEBUG: function does not have co-located tables DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1404,10 +1454,12 @@ BEGIN RETURN x; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('test(int)', 'x', colocate_with := 'test_perform', force_delegation := true); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -1425,10 +1477,471 @@ NOTICE: INPUT 3 CONTEXT: PL/pgSQL function test(integer) line XX at RAISE SQL statement "SELECT test(3)" PL/pgSQL function inline_code_block line XX at PERFORM +CREATE TABLE testnested_table (x int, y int); +SELECT create_distributed_table('testnested_table','x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE FUNCTION inner_fn(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +-- Non-force function calling force-delegation function +CREATE OR REPLACE FUNCTION outer_local_fn() +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM 1 FROM inner_fn(1); + INSERT INTO forcepushdown_schema.testnested_table VALUES (2,3); + PERFORM 1 FROM inner_fn(4); + INSERT INTO forcepushdown_schema.testnested_table VALUES (5,6); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function('inner_fn(int)','x', + colocate_with:='testnested_table', force_delegation := true); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT outer_local_fn(); +DEBUG: function does not have co-located tables +DEBUG: pushing down function call in a multi-statement transaction +CONTEXT: SQL statement "SELECT 1 FROM inner_fn(1)" +PL/pgSQL function outer_local_fn() line XX at PERFORM +DEBUG: pushing down the function call +CONTEXT: SQL statement "SELECT 1 FROM inner_fn(1)" +PL/pgSQL function outer_local_fn() line XX at PERFORM +DEBUG: pushing down function call in a multi-statement transaction +CONTEXT: SQL statement "SELECT 1 FROM inner_fn(4)" +PL/pgSQL function outer_local_fn() line XX at PERFORM +DEBUG: pushing down the function call +CONTEXT: SQL statement "SELECT 1 FROM inner_fn(4)" +PL/pgSQL function outer_local_fn() line XX at PERFORM + outer_local_fn +--------------------------------------------------------------------- + +(1 row) + +-- Rows from 1-6 should appear +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- + 1 | 1 + 2 | 3 + 4 | 4 + 5 | 6 +(4 rows) + +BEGIN; +SELECT outer_local_fn(); +DEBUG: not pushing down function calls in a multi-statement transaction + outer_local_fn +--------------------------------------------------------------------- + +(1 row) + +END; +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- + 1 | 1 + 1 | 1 + 2 | 3 + 2 | 3 + 4 | 4 
+ 4 | 4 + 5 | 6 + 5 | 6 +(8 rows) + +DROP FUNCTION inner_fn(int); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +DROP FUNCTION outer_local_fn(); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +TRUNCATE TABLE testnested_table; +CREATE OR REPLACE FUNCTION inner_fn(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +-- Force-delegation function calling non-force function +CREATE OR REPLACE FUNCTION outer_fn(y int, z int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM 1 FROM forcepushdown_schema.inner_fn(y); + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); + PERFORM 1 FROM forcepushdown_schema.inner_fn(z); + INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function('inner_fn(int)','x', + colocate_with:='testnested_table', force_delegation := false); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function('outer_fn(int, int)','y', + colocate_with:='testnested_table', force_delegation := true); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT outer_fn(1, 2); +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x)" +PL/pgSQL function forcepushdown_schema.inner_fn(integer) line XX at SQL statement +SQL statement "SELECT 1 FROM forcepushdown_schema.inner_fn(z)" +PL/pgSQL function forcepushdown_schema.outer_fn(integer,integer) line XX at PERFORM +while executing command on localhost:xxxxx +BEGIN; +SELECT outer_fn(1, 2); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x)" +PL/pgSQL function forcepushdown_schema.inner_fn(integer) line XX at SQL statement +SQL statement "SELECT 1 FROM forcepushdown_schema.inner_fn(z)" 
+PL/pgSQL function forcepushdown_schema.outer_fn(integer,integer) line XX at PERFORM +while executing command on localhost:xxxxx +END; +-- No rows +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- +(0 rows) + +-- Force-delegation function calling force-delegation function +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); + PERFORM forcepushdown_schema.force_push_inner(x+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function( + 'force_push_outer(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function( + 'force_push_inner(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- Keys 7,8,9,14 fall on one node and 15 on a different node +-- Function gets delegated to node with shard-key = 7 and inner function +-- will not be delegated but inserts shard-key = 8 locally +SELECT force_push_outer(7); +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM +while executing command on localhost:xxxxx +BEGIN; +-- Function gets delegated to node with shard-key = 8 and inner function +-- will not be delegated but inserts shard-key = 9 locally +SELECT force_push_outer(8); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM +while executing 
command on localhost:xxxxx +END; +BEGIN; +-- Function gets delegated to node with shard-key = 14 and inner function +-- will not be delegated but fails to insert shard-key = 15 remotely +SELECT force_push_outer(14); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM +while executing command on localhost:xxxxx +END; +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- +(0 rows) + +-- +-- Function-1() --> function-2() --> function-3() +-- +CREATE OR REPLACE FUNCTION force_push_1(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); + PERFORM forcepushdown_schema.force_push_2(x+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_2(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); + PERFORM forcepushdown_schema.force_push_3(y+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_3(z int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function( + 'force_push_1(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function( + 'force_push_2(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function( + 'force_push_3(int)', 'z', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +TRUNCATE TABLE testnested_table; +BEGIN; +-- All local inserts +SELECT force_push_1(7); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_2(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_2(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_1(integer) line XX at PERFORM +while executing command on localhost:xxxxx +END; +BEGIN; +-- Local(shard-keys 13, 15) + remote insert (shard-key 14) +SELECT force_push_1(13); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_2(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_2(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_1(integer) line XX at PERFORM +while executing command on localhost:xxxxx +END; +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- +(0 
rows) + +TRUNCATE TABLE testnested_table; +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM FROM forcepushdown_schema.force_push_inner(x); + INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function( + 'force_push_inner(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function( + 'force_push_outer(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +BEGIN; +SELECT force_push_outer(7); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1)" +PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at SQL statement +while executing command on localhost:xxxxx +END; +TABLE testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- +(0 rows) + +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + RAISE NOTICE '%', y; +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM FROM forcepushdown_schema.force_push_inner(x+1); + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +BEGIN; +SELECT force_push_outer(9); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +NOTICE: 10 +DETAIL: from localhost:xxxxx + force_push_outer +--------------------------------------------------------------------- + +(1 row) + +END; +TABLE testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- + 9 | 9 +(1 row) + RESET client_min_messages; SET citus.log_remote_commands TO off; DROP SCHEMA forcepushdown_schema CASCADE; -NOTICE: drop cascades to 38 other objects +NOTICE: drop cascades to 46 other objects DETAIL: drop cascades to table test_forcepushdown drop cascades to table test_forcepushdown_noncolocate drop cascades to function insert_data(integer) @@ -1467,3 +1980,11 @@ drop cascades to function test_prepare(integer,integer) drop cascades to function outer_test_prepare(integer,integer) drop cascades to table test_perform drop cascades to function test(integer) +drop cascades to table testnested_table +drop cascades to function inner_fn(integer) +drop cascades to function outer_fn(integer,integer) +drop cascades to function force_push_inner(integer) +drop cascades to function force_push_outer(integer) +drop cascades to function force_push_1(integer) +drop cascades to function force_push_2(integer) +drop cascades to function force_push_3(integer) diff --git a/src/test/regress/expected/function_propagation.out b/src/test/regress/expected/function_propagation.out new file mode 100644 index 000000000..a40f1f9a2 --- /dev/null +++ b/src/test/regress/expected/function_propagation.out @@ -0,0 +1,321 @@ +CREATE SCHEMA function_propagation_schema; +SET search_path TO 'function_propagation_schema'; +-- Check whether supported dependencies can be distributed while propagating functions +-- Check types +SET citus.enable_metadata_sync 
TO OFF; + CREATE TYPE function_prop_type AS (a int, b int); +RESET citus.enable_metadata_sync; +CREATE OR REPLACE FUNCTION func_1(param_1 function_prop_type) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +-- Check all dependent objects and function depends on all nodes +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (schema,{function_propagation_schema},{}) +(1 row) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (type,{function_propagation_schema.function_prop_type},{}) +(1 row) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (schema,{function_propagation_schema},{}) + localhost | 57638 | t | (schema,{function_propagation_schema},{}) +(2 rows) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid;$$) 
ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (type,{function_propagation_schema.function_prop_type},{}) + localhost | 57638 | t | (type,{function_propagation_schema.function_prop_type},{}) +(2 rows) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type}) +(2 rows) + +SET citus.enable_metadata_sync TO OFF; + CREATE TYPE function_prop_type_2 AS (a int, b int); +RESET citus.enable_metadata_sync; +CREATE OR REPLACE FUNCTION func_2(param_1 int) +RETURNS function_prop_type_2 +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (type,{function_propagation_schema.function_prop_type_2},{}) +(1 row) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_2}",{integer}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 
'function_propagation_schema.function_prop_type_2'::regtype::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (type,{function_propagation_schema.function_prop_type_2},{}) + localhost | 57638 | t | (type,{function_propagation_schema.function_prop_type_2},{}) +(2 rows) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_2}",{integer}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_2}",{integer}) +(2 rows) + +-- Have a separate check for type created in transaction +BEGIN; + CREATE TYPE function_prop_type_3 AS (a int, b int); +COMMIT; +-- Objects in the body part is not found as dependency +CREATE OR REPLACE FUNCTION func_3(param_1 int) +RETURNS int +LANGUAGE plpgsql AS +$$ +DECLARE + internal_param1 function_prop_type_3; +BEGIN + return 1; +END; +$$; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_3'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_3}",{integer}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 
'function_propagation_schema.func_3'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_3}",{integer}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_3}",{integer}) +(2 rows) + +-- Check table +CREATE TABLE function_prop_table(a int, b int); +-- Non-distributed table is not distributed as dependency +CREATE OR REPLACE FUNCTION func_4(param_1 function_prop_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +WARNING: Citus can't distribute function "func_4" having dependency on non-distributed relation "function_prop_table" +DETAIL: Function will be created only locally +HINT: To distribute function, distribute dependent relations first. Then, re-create the function +CREATE OR REPLACE FUNCTION func_5(param_1 int) +RETURNS function_prop_table +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +WARNING: Citus can't distribute function "func_5" having dependency on non-distributed relation "function_prop_table" +DETAIL: Function will be created only locally +HINT: To distribute function, distribute dependent relations first. 
Then, re-create the function +-- Functions can be created with distributed table dependency +SELECT create_distributed_table('function_prop_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE FUNCTION func_6(param_1 function_prop_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table}) +(2 rows) + +-- Views are not supported +CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table; +CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +WARNING: Citus can't distribute functions having dependency on unsupported object of type "view" +DETAIL: Function will be created only locally +CREATE OR REPLACE FUNCTION func_8(param_1 int) +RETURNS function_prop_view +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +WARNING: Citus can't distribute functions having dependency on unsupported object of type "view" +DETAIL: Function will be created only 
locally +-- Check within transaction +BEGIN; + CREATE TYPE type_in_transaction AS (a int, b int); + CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction) + RETURNS int + LANGUAGE plpgsql AS + $$ + BEGIN + return 1; + END; + $$; + -- Within transaction functions are not distributed + SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + + SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + +COMMIT; +-- Show that recreating it outside transaction distributes the function and dependencies +CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (type,{function_propagation_schema.type_in_transaction},{}) +(1 row) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where 
objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (type,{function_propagation_schema.type_in_transaction},{}) + localhost | 57638 | t | (type,{function_propagation_schema.type_in_transaction},{}) +(2 rows) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction}) +(2 rows) + +-- Test for SQL function with unsupported object in function body +CREATE TABLE table_in_sql_body(id int); +CREATE FUNCTION max_of_table() +RETURNS int +LANGUAGE SQL AS +$$ + SELECT max(id) FROM table_in_sql_body +$$; +-- Show that only function has propagated, since the table is not resolved as dependency +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regclass::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,max_of_table}",{}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT 
pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,max_of_table}",{}) + localhost | 57638 | t | (function,"{function_propagation_schema,max_of_table}",{}) +(2 rows) + +-- Check extension owned table +CREATE TABLE extension_owned_table(a int); +SELECT run_command_on_workers($$ +CREATE TABLE function_propagation_schema.extension_owned_table(a int); +$$ +); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE TABLE") + (localhost,57638,t,"CREATE TABLE") +(2 rows) + +CREATE EXTENSION seg; +ALTER EXTENSION seg ADD TABLE extension_owned_table; +NOTICE: Citus does not propagate adding/dropping member objects +HINT: You can add/drop the member objects on the workers as well. 
+SELECT run_command_on_workers($$ +ALTER EXTENSION seg ADD TABLE function_propagation_schema.extension_owned_table; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"ALTER EXTENSION") + (localhost,57638,t,"ALTER EXTENSION") +(2 rows) + +CREATE OR REPLACE FUNCTION func_for_ext_check(param_1 extension_owned_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +RESET search_path; +SET client_min_messages TO WARNING; +DROP SCHEMA function_propagation_schema CASCADE; diff --git a/src/test/regress/expected/grant_on_schema_propagation.out b/src/test/regress/expected/grant_on_schema_propagation.out index 96816714d..97c02f074 100644 --- a/src/test/regress/expected/grant_on_schema_propagation.out +++ b/src/test/regress/expected/grant_on_schema_propagation.out @@ -6,7 +6,9 @@ CREATE SCHEMA dist_schema; CREATE TABLE dist_schema.dist_table (id int); CREATE SCHEMA another_dist_schema; CREATE TABLE another_dist_schema.dist_table (id int); +SET citus.enable_ddl_propagation TO off; CREATE SCHEMA non_dist_schema; +SET citus.enable_ddl_propagation TO on; -- create roles on all nodes SELECT run_command_on_coordinator_and_workers('CREATE USER role_1'); NOTICE: not propagating CREATE ROLE/USER commands to worker nodes @@ -193,24 +195,9 @@ SET citus.enable_alter_role_propagation TO ON; ALTER ROLE role_1 NOSUPERUSER; SET citus.enable_alter_role_propagation TO OFF; DROP TABLE dist_schema.dist_table, another_dist_schema.dist_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA another_dist_schema'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA 
non_dist_schema'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +DROP SCHEMA dist_schema; +DROP SCHEMA another_dist_schema; +DROP SCHEMA non_dist_schema; -- test if the grantors are propagated correctly -- first remove one of the worker nodes SET citus.shard_replication_factor TO 1; @@ -319,12 +306,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER \c - - - :master_port DROP TABLE grantor_schema.grantor_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA grantor_schema CASCADE'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +DROP SCHEMA grantor_schema CASCADE; -- test distributing the schema with another user CREATE SCHEMA dist_schema; GRANT ALL ON SCHEMA dist_schema TO role_1 WITH GRANT OPTION; @@ -352,12 +334,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY \c - - - :master_port DROP TABLE dist_schema.dist_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema CASCADE'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +DROP SCHEMA dist_schema CASCADE; -- test grants on public schema -- first remove one of the worker nodes SET citus.shard_replication_factor TO 1; diff --git a/src/test/regress/expected/isolation_citus_dist_activity.out b/src/test/regress/expected/isolation_citus_dist_activity.out index c003d5ec9..f6cd7da9b 100644 --- a/src/test/regress/expected/isolation_citus_dist_activity.out +++ b/src/test/regress/expected/isolation_citus_dist_activity.out @@ -15,16 +15,16 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-alter-table: ALTER TABLE test_table ADD COLUMN x INT; step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep 
--------------------------------------------------------------------- @@ -32,7 +32,7 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC; query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -42,7 +42,7 @@ query |query_hostname |query_hostport|d (1 row) step s3-view-worker: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -61,13 +61,13 @@ SELECT worker_apply_shard_ddl_command (1300001, 'public', ' (4 rows) step s2-rollback: - 
ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-insert s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback @@ -85,16 +85,16 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-insert: - INSERT INTO test_table VALUES (100, 100); + INSERT INTO test_table VALUES (100, 100); step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep --------------------------------------------------------------------- @@ -102,17 +102,17 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC; query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- - INSERT INTO test_table VALUES (100, 100); + INSERT INTO test_table VALUES (100, 100); |coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression (1 row) step s3-view-worker: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' 
AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -120,13 +120,13 @@ INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)|local (1 row) step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback @@ -144,10 +144,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-select: SELECT count(*) FROM test_table; @@ -158,7 +158,7 @@ count (1 row) step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep --------------------------------------------------------------------- @@ -166,7 +166,7 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC; query 
|query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -176,7 +176,7 @@ query |query_hostname |query_hostport|distribute (1 row) step s3-view-worker: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -187,13 +187,13 @@ SELECT count(*) AS count FROM public.test_table_1300011 test_table WHERE true|lo (4 rows) step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select-router s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback @@ -211,10 +211,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-select-router: SELECT count(*) FROM test_table WHERE column1 = 55; @@ -225,7 +225,7 @@ count (1 row) step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep --------------------------------------------------------------------- @@ -233,7 +233,7 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, 
state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC; query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -243,7 +243,7 @@ query |query_hostname |query_ (1 row) step s3-view-worker: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -251,11 +251,11 @@ SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column (1 row) step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; diff --git a/src/test/regress/expected/isolation_distributed_transaction_id.out 
b/src/test/regress/expected/isolation_distributed_transaction_id.out index 1e41f8957..3d44f0069 100644 --- a/src/test/regress/expected/isolation_distributed_transaction_id.out +++ b/src/test/regress/expected/isolation_distributed_transaction_id.out @@ -94,7 +94,8 @@ step s1-verify-current-xact-is-on-worker: get_current_transaction_id() as xact, run_command_on_workers($$ SELECT row(initiator_node_identifier, transaction_number) - FROM get_all_active_transactions(); + FROM get_all_active_transactions() + WHERE transaction_number != 0; $$) as remote ORDER BY remote.nodeport ASC; diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out index d036468b7..d8c1a5a74 100644 --- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out +++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out @@ -1,6 +1,6 @@ Parsed test spec with 3 sessions -starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -164,22 +164,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -187,7 +171,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -357,22 +341,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -380,7 +348,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -550,22 +518,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -573,7 +525,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -739,22 +691,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -762,7 +698,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -934,22 +870,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -957,7 +877,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -1129,22 +1049,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1152,7 +1056,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s1-add-worker s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s1-add-worker s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -1324,22 +1228,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1347,7 +1235,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -1534,22 +1422,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1557,7 +1429,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s3-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s3-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -1753,22 +1625,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1776,7 +1632,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -1932,22 +1788,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1955,7 +1795,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -2110,22 +1950,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -2133,7 +1957,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -2309,22 +2133,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -2332,7 +2140,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -2508,22 +2316,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -2531,7 +2323,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -2714,22 +2506,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -2737,7 +2513,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -2922,22 +2698,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_extension_commands.out b/src/test/regress/expected/isolation_extension_commands.out index 4c5bfe3f0..711c854c7 100644 --- a/src/test/regress/expected/isolation_extension_commands.out +++ b/src/test/regress/expected/isolation_extension_commands.out @@ -28,7 +28,7 @@ step s1-print: count --------------------------------------------------------------------- - 4 + 7 (1 row) extname|extversion|nspname @@ -89,7 +89,7 @@ step s1-print: count --------------------------------------------------------------------- - 4 + 7 (1 row) extname|extversion|nspname @@ -158,7 +158,7 @@ step s1-print: count --------------------------------------------------------------------- - 3 + 6 (1 row) extname|extversion|nspname @@ -214,7 +214,7 @@ step s1-print: count --------------------------------------------------------------------- - 5 + 7 (1 row) extname|extversion|nspname @@ -275,7 +275,7 @@ step s1-print: count --------------------------------------------------------------------- - 4 + 6 (1 row) extname|extversion|nspname diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out 
b/src/test/regress/expected/isolation_get_all_active_transactions.out index 9d94470ec..d5c4765b8 100644 --- a/src/test/regress/expected/isolation_get_all_active_transactions.out +++ b/src/test/regress/expected/isolation_get_all_active_transactions.out @@ -35,8 +35,8 @@ step s2-begin-insert: step s3-as-admin: -- Admin should be able to see all transactions - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- @@ -51,8 +51,8 @@ count step s3-as-user-1: -- User should only be able to see its own transactions SET ROLE test_user_1; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- @@ -67,8 +67,8 @@ count step s3-as-readonly: -- Other user should not see transactions SET ROLE test_readonly; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- @@ -83,8 +83,8 @@ count step s3-as-monitor: -- Monitor should see all transactions SET ROLE test_monitor; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM 
get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_global_pid.out b/src/test/regress/expected/isolation_global_pid.out new file mode 100644 index 000000000..99ca48693 --- /dev/null +++ b/src/test/regress/expected/isolation_global_pid.out @@ -0,0 +1,145 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-start-session-level-connection s1-worker-begin s1-worker-select s2-coordinator-citus_dist_stat_activity s2-coordinator-citus_worker_stat_activity s1-worker-commit s1-stop-session-level-connection +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-worker-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-worker-select: + SELECT run_commands_on_session_level_connection_to_node('SET citus.enable_local_execution TO off; SET citus.force_max_query_parallelization TO ON; SELECT * FROM dist_table'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-coordinator-citus_dist_stat_activity: + SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' and query NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; + +?column? 
+--------------------------------------------------------------------- +t +(1 row) + +step s2-coordinator-citus_worker_stat_activity: + SELECT query FROM citus_worker_stat_activity() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + ORDER BY 1; + +query +--------------------------------------------------------------------- +SELECT a, b FROM public.dist_table_12345000 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345001 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345002 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345003 dist_table WHERE true +(4 rows) + +step s1-worker-commit: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-stop-session-level-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-coordinator-begin s1-coordinator-select s2-coordinator-citus_dist_stat_activity s2-coordinator-citus_worker_stat_activity s2-coordinator-get_all_active_transactions s2-coordinator-get_global_active_transactions s1-coordinator-commit +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-coordinator-begin: + BEGIN; + +step s1-coordinator-select: + SET citus.enable_local_execution TO off; + SET citus.force_max_query_parallelization TO ON; + SELECT * FROM dist_table; + +a|b +--------------------------------------------------------------------- +(0 rows) + +step s2-coordinator-citus_dist_stat_activity: + SELECT global_pid != 0 FROM citus_dist_stat_activity() 
WHERE query LIKE '%SELECT * FROM dist\_table%' and query NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; + +?column? +--------------------------------------------------------------------- +t +(1 row) + +step s2-coordinator-citus_worker_stat_activity: + SELECT query FROM citus_worker_stat_activity() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + ORDER BY 1; + +query +--------------------------------------------------------------------- +SELECT a, b FROM public.dist_table_12345000 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345001 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345002 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345003 dist_table WHERE true +(4 rows) + +step s2-coordinator-get_all_active_transactions: + SELECT count(*) FROM get_all_active_transactions() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ); + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-coordinator-get_global_active_transactions: + SELECT count(*) FROM get_global_active_transactions() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + AND transaction_number != 0; + +count +--------------------------------------------------------------------- + 5 +(1 row) + +step s1-coordinator-commit: + COMMIT; + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_metadata_sync_vs_all.out b/src/test/regress/expected/isolation_metadata_sync_vs_all.out new file mode 100644 index 000000000..087dcbe93 --- /dev/null +++ b/src/test/regress/expected/isolation_metadata_sync_vs_all.out @@ -0,0 +1,716 @@ +unused step name: s3-debug +Parsed test spec with 3 
sessions + +starting permutation: s3-compare-snapshot +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-start-metadata-sync-to-same-node s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-start-metadata-sync-to-same-node: + SELECT start_metadata_sync_to_node('localhost', 57638); + +step s1-commit: + COMMIT; + +step s2-start-metadata-sync-to-same-node: <... 
completed> +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-start-metadata-sync-to-another-node s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-start-metadata-sync-to-another-node: + SELECT start_metadata_sync_to_node('localhost', 57637); + +step s1-commit: + COMMIT; + +step s2-start-metadata-sync-to-another-node: <... 
completed> +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-alter-table s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-alter-table: + ALTER TABLE dist_table ADD COLUMN z int; + +step s1-commit: + COMMIT; + +step s2-alter-table: <... 
completed> +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-drop-table s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-drop-table: + DROP TABLE dist_table; + +step s1-commit: + COMMIT; + +step s2-drop-table: <... 
completed> +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-create-dist-table s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-create-dist-table: + CREATE TABLE new_dist_table(id int, data int); + SELECT create_distributed_table('new_dist_table', 'id'); + +step s1-commit: + COMMIT; + +step s2-create-dist-table: <... 
completed> +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-create-ref-table s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-create-ref-table: + CREATE TABLE new_ref_table(id int, data int); + SELECT create_reference_table('new_ref_table'); + +step s1-commit: + COMMIT; + +step s2-create-ref-table: <... 
completed> +create_reference_table +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-attach-partition s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-attach-partition: + ALTER TABLE dist_partitioned_table ATTACH PARTITION dist_partitioned_table_p1 FOR VALUES FROM (1) TO (9); + +step s1-commit: + COMMIT; + +step s2-attach-partition: <... 
completed> +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s2-attach-partition s1-begin s2-begin s1-start-metadata-sync s2-detach-partition s1-commit s2-commit s3-compare-snapshot +step s2-attach-partition: + ALTER TABLE dist_partitioned_table ATTACH PARTITION dist_partitioned_table_p1 FOR VALUES FROM (1) TO (9); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-detach-partition: + ALTER TABLE dist_partitioned_table DETACH PARTITION dist_partitioned_table_p1; + +step s1-commit: + COMMIT; + +step s2-detach-partition: <... 
completed> +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s2-attach-partition s1-begin s2-begin s1-start-metadata-sync s2-create-partition-of s1-commit s2-commit s3-compare-snapshot +step s2-attach-partition: + ALTER TABLE dist_partitioned_table ATTACH PARTITION dist_partitioned_table_p1 FOR VALUES FROM (1) TO (9); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-create-partition-of: + CREATE TABLE dist_partitioned_table_p2 PARTITION OF dist_partitioned_table FOR VALUES FROM (10) TO (20); + +step s1-commit: + COMMIT; + +step s2-create-partition-of: <... 
completed> +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-add-fk s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fk: + ALTER TABLE dist_table ADD CONSTRAINT y_fk FOREIGN KEY (y) REFERENCES ref_table(y); + +step s1-commit: + COMMIT; + +step s2-add-fk: <... 
completed> +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s2-add-fk s1-begin s2-begin s1-start-metadata-sync s2-drop-fk s1-commit s2-commit s3-compare-snapshot +step s2-add-fk: + ALTER TABLE dist_table ADD CONSTRAINT y_fk FOREIGN KEY (y) REFERENCES ref_table(y); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-drop-fk: + ALTER TABLE dist_table DROP CONSTRAINT y_fk; + +step s1-commit: + COMMIT; + +step s2-drop-fk: <... 
completed> +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-create-type s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-create-type: + CREATE TYPE my_type AS (a int, b int); + +step s1-commit: + COMMIT; + +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-create-dist-func s1-commit s2-commit s3-compare-snapshot +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-start-metadata-sync: + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node 
+--------------------------------------------------------------------- + +(1 row) + +step s2-create-dist-func: + CREATE FUNCTION squares(int) RETURNS SETOF RECORD + AS $$ SELECT i, i * i FROM generate_series(1, $1) i $$ + LANGUAGE SQL; + SELECT create_distributed_function('squares(int)'); + +step s1-commit: + COMMIT; + +step s2-create-dist-func: <... completed> +create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s3-compare-snapshot: + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; + +same_metadata_in_workers +--------------------------------------------------------------------- +t +(1 row) + diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index d28a6b714..e83e71919 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -83,14 +83,17 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity 
WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' AND query NOT ILIKE '%pg_isolation_test_session_is_blocked%' AND query NOT ILIKE '%BEGIN%' ORDER BY query DESC; -query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname +query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- + SELECT check_distributed_deadlocks(); +|coordinator_host| 57636| | 0|idle |Client |ClientRead|postgres|regression + update ref_table set a = a + 1; -|coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression -(1 row) + |coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +(2 rows) step s2-view-worker: SELECT query, query_hostname, query_hostport, distributed_query_host_name, @@ -101,7 +104,7 @@ step s2-view-worker: query NOT ILIKE '%dump_local_wait_edges%' ORDER BY query, query_hostport DESC; -query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname +query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression @@ -136,8 +139,8 @@ step s1-update-ref-table: step s2-active-transactions: -- Admin should be able to see all 
transactions - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/local_dist_join_mixed.out b/src/test/regress/expected/local_dist_join_mixed.out index 0e3e0aad4..5566186b5 100644 --- a/src/test/regress/expected/local_dist_join_mixed.out +++ b/src/test/regress/expected/local_dist_join_mixed.out @@ -1602,6 +1602,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c (1 row) DROP SCHEMA local_dist_join_mixed CASCADE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed schema is run. To make sure subsequent commands see the schema correctly we need to make sure to use only one connection for all future commands NOTICE: drop cascades to 7 other objects DETAIL: drop cascades to table distributed drop cascades to table reference diff --git a/src/test/regress/expected/local_shard_copy.out b/src/test/regress/expected/local_shard_copy.out index ec9828a04..33ef9f7df 100644 --- a/src/test/regress/expected/local_shard_copy.out +++ b/src/test/regress/expected/local_shard_copy.out @@ -62,6 +62,7 @@ SET citus.log_local_commands TO ON; -- returns true of the distribution key filter -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- placement which is local to this not +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ DECLARE shard_is_local BOOLEAN := FALSE; @@ -84,6 +85,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R RETURN shard_is_local; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- pick some example values that 
reside on the shards locally and remote -- distribution key values of 1,6, 500 and 701 are LOCAL to shards, -- we'll use these values in the tests diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index 51daa4046..781a9c86c 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -94,6 +94,7 @@ SET search_path TO local_shard_execution; -- returns true of the distribution key filter -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- placement which is local to this not +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ DECLARE shard_is_local BOOLEAN := FALSE; @@ -116,6 +117,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R RETURN shard_is_local; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- test case for issue #3556 SET citus.log_intermediate_results TO TRUE; SET client_min_messages TO DEBUG1; @@ -801,6 +803,7 @@ BEGIN; ERROR: VACUUM cannot run inside a transaction block ROLLBACK; -- make sure that functions can use local execution +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ DECLARE cnt INT; BEGIN @@ -896,6 +899,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$ SELECT count(*) INTO cnt FROM distributed_table; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CALL local_execution_followed_by_dist(); NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text CONTEXT: SQL statement "INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29'" diff --git 
a/src/test/regress/expected/local_shard_execution_replicated.out b/src/test/regress/expected/local_shard_execution_replicated.out index 9d880f44b..731c825c3 100644 --- a/src/test/regress/expected/local_shard_execution_replicated.out +++ b/src/test/regress/expected/local_shard_execution_replicated.out @@ -770,6 +770,7 @@ NOTICE: executing the command locally: INSERT INTO local_shard_execution_replic ERROR: VACUUM cannot run inside a transaction block ROLLBACK; -- make sure that functions can use local execution +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ DECLARE cnt INT; BEGIN @@ -865,6 +866,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$ SELECT count(*) INTO cnt FROM distributed_table; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CALL local_execution_followed_by_dist(); NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text CONTEXT: SQL statement "INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29'" diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index cb3b113e2..9651fcc63 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -5,6 +5,13 @@ SET citus.next_placement_id TO 1500000; -- supress notice messages to make sure that the tests -- do not diverge with enterprise SET client_min_messages TO WARNING; +SELECT run_command_on_workers($$CREATE ROLE metadata_sync_helper_role WITH LOGIN;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE ROLE") + (localhost,57638,t,"CREATE ROLE") +(2 rows) + CREATE ROLE metadata_sync_helper_role WITH LOGIN; GRANT ALL 
ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; RESET client_min_messages; @@ -36,30 +43,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; --- connect back as super user, and then connect to the worker --- with the superuser to make sure we can ingest metadata with --- a regular user under the certain conditions \c - postgres - --- we don't need the table/schema anymore -SET client_min_messages TO ERROR; -DROP SCHEMA metadata_sync_helpers CASCADE; -DROP ROLE metadata_sync_helper_role; \c - - - :worker_1_port -CREATE SCHEMA metadata_sync_helpers; SET search_path TO metadata_sync_helpers; CREATE TABLE test(col_1 int, col_2 int); --- supress notice messages to make sure that the tests --- do not diverge with enterprise -SET client_min_messages TO WARNING; -SET citus.enable_ddl_propagation TO OFF; -CREATE ROLE metadata_sync_helper_role WITH LOGIN; -GRANT ALL ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; -RESET client_min_messages; -RESET citus.enable_ddl_propagation; -- connect back with the regular user \c - metadata_sync_helper_role - :worker_1_port SET search_path TO metadata_sync_helpers; @@ -73,7 +64,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: must be owner of table test ROLLBACK; @@ -85,7 +76,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation 
('test'::regclass, 10); ERROR: must be owner of table test ROLLBACK; @@ -99,7 +90,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -112,6 +103,54 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; 1 (1 row) +ROLLBACK; +-- application_name with incorrect gpid +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + assign_distributed_transaction_id +--------------------------------------------------------------------- + +(1 row) + + SET application_name to 'citus_internal gpid=not a correct gpid'; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ERROR: This is an internal Citus function can only be used in a distributed transaction +ROLLBACK; +-- application_name with empty gpid +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + assign_distributed_transaction_id +--------------------------------------------------------------------- + +(1 row) + + SET application_name to 'citus_internal gpid='; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ERROR: This is an internal Citus function can only be used in a distributed transaction +ROLLBACK; +-- empty application_name +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + assign_distributed_transaction_id +--------------------------------------------------------------------- + +(1 row) + + SET application_name to ''; + SELECT citus_internal_add_partition_metadata 
('test_2'::regclass, 'h', 'col_1', 0, 's'); +ERROR: This is an internal Citus function can only be used in a distributed transaction +ROLLBACK; +-- application_name with incorrect prefix +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + assign_distributed_transaction_id +--------------------------------------------------------------------- + +(1 row) + + SET application_name to 'citus gpid=10000000001'; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- fails because there is no X distribution method BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -121,7 +160,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; @@ -133,7 +172,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ERROR: column "non_existing_col" of relation "test_2" does not exist ROLLBACK; @@ -145,7 +184,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ERROR: relation cannot be NULL ROLLBACK; @@ -157,7 +196,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; 
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ERROR: Metadata syncing is only allowed for valid colocation id values. ROLLBACK; @@ -169,7 +208,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; @@ -181,7 +220,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata @@ -200,7 +239,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata @@ -219,7 +258,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ERROR: Distribution column cannot be NULL for relation "test_2" @@ -252,7 +291,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -268,7 +307,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; 
(1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); ERROR: could not find valid entry for shard xxxxx @@ -298,7 +337,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: role "non_existing_user" does not exist ROLLBACK; @@ -329,7 +368,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); ERROR: Reference or local tables cannot have distribution columns ROLLBACK; @@ -341,7 +380,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ERROR: Metadata syncing is only allowed for known replication models. ROLLBACK; @@ -353,7 +392,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ERROR: Local or references tables can only have 's' or 't' as the replication model. 
ROLLBACK; @@ -368,7 +407,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -387,7 +426,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -402,7 +441,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -417,7 +456,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -445,7 +484,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); citus_internal_update_relation_colocation --------------------------------------------------------------------- @@ -461,7 +500,7 @@ BEGIN TRANSACTION ISOLATION 
LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -476,7 +515,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -491,7 +530,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) @@ -506,7 +545,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) @@ -521,7 +560,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) @@ -536,7 +575,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET 
application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), @@ -548,13 +587,13 @@ ROLLBACK; -- Now let's check valid pg_dist_object updates -- check with non-existing object type BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id --------------------------------------------------------------------- (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -569,7 +608,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false)) @@ -583,7 +622,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false)) @@ -592,13 +631,13 @@ ERROR: colocationId must be a positive number ROLLBACK; -- check with non-existing object BEGIN TRANSACTION ISOLATION LEVEL READ 
COMMITTED; - SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id --------------------------------------------------------------------- (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -608,13 +647,13 @@ ROLLBACK; -- since citus_internal_add_object_metadata is strict function returns NULL -- if any parameter is NULL BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id --------------------------------------------------------------------- (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) @@ -635,7 +674,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE TABLE publication_test_table(id int); CREATE PUBLICATION publication_test FOR TABLE publication_test_table; @@ -653,7 +692,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE FUNCTION distribution_test_function(int) RETURNS int AS 
$$ SELECT $1 $$ @@ -671,7 +710,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE TYPE distributed_test_type AS (a int, b int); SET ROLE metadata_sync_helper_role; @@ -690,7 +729,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse UPDATE pg_dist_partition SET partmethod = 'X'; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) @@ -709,7 +748,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) @@ -736,7 +775,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '11'::text, '20'::text), @@ -767,7 +806,7 @@ BEGIN; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; @@ -779,7 +818,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_3'::regclass, 1420009::bigint, 
't'::"char", '21'::text, '30'::text), @@ -806,7 +845,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) @@ -821,7 +860,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), @@ -837,7 +876,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) @@ -858,7 +897,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) @@ -880,7 +919,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) @@ -895,7 +934,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET 
application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, 1::int, -10)) @@ -910,7 +949,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1430100, 1, 0::bigint, 1::int, 10)) @@ -925,7 +964,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 10, 0::bigint, 1::int, 1500000)) @@ -940,7 +979,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES ( 1420000, 1, 0::bigint, 123123123::int, 1500000)) @@ -948,6 +987,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ERROR: Node with group id 123123123 for shard placement xxxxx does not exist ROLLBACK; -- create a volatile function that returns the local node id +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_node_id() RETURNS INT AS $$ DECLARE localGroupId int; @@ -960,6 +1000,7 @@ BEGIN nodeport = 57637 AND nodename = 'localhost' AND isactive AND nodecluster = 'default'; RETURN localGroupId; END; $$ language plpgsql; +RESET citus.enable_metadata_sync; -- fails because we ingest more placements for the same shards to the same worker node BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); @@ -968,7 +1009,7 @@ BEGIN TRANSACTION 
ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -984,7 +1025,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420007, 1, 0::bigint, get_node_id(), 1500000)) @@ -999,7 +1040,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -1040,7 +1081,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); citus_internal_update_relation_colocation --------------------------------------------------------------------- @@ -1057,7 +1098,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); ERROR: Node with group id 1014 for shard placement xxxxx does not exist @@ -1070,7 +1111,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, 
get_node_id()+10000, get_node_id()); ERROR: Active placement for shard xxxxx is not found on group:14 @@ -1083,7 +1124,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 0 @@ -1096,7 +1137,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 213123123123 @@ -1109,7 +1150,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); ERROR: must be owner of table super_user_table @@ -1122,7 +1163,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -1131,7 +1172,7 @@ ERROR: must be owner of table super_user_table ROLLBACK; -- the user only allowed to delete shards in a distributed transaction BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -1146,7 +1187,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) @@ 
-1173,7 +1214,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) @@ -1207,7 +1248,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the repmodel -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' @@ -1222,7 +1263,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the vartype of table from int to bigint -- so that making two tables colocated fails UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}' @@ -1237,7 +1278,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' @@ -1252,7 +1293,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' @@ -1270,7 +1311,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 
'int_col', 500, 's'); citus_internal_add_partition_metadata @@ -1293,7 +1334,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); citus_internal_add_partition_metadata @@ -1316,4 +1357,7 @@ SET client_min_messages TO ERROR; SET citus.enable_ddl_propagation TO OFF; DROP OWNED BY metadata_sync_helper_role; DROP ROLE metadata_sync_helper_role; +\c - - - :master_port +-- cleanup +SET client_min_messages TO ERROR; DROP SCHEMA metadata_sync_helpers CASCADE; diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index a09f1fbcc..4ec8a27da 100644 --- a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -7,6 +7,7 @@ -- reference and hash-distributed version of orders, customer and part tables. 
SET citus.next_shard_id TO 360000; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -23,6 +24,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE lineitem ( l_orderkey bigint not null, l_partkey integer not null, diff --git a/src/test/regress/expected/multi_deparse_function.out b/src/test/regress/expected/multi_deparse_function.out index cdf002e8e..656c89c40 100644 --- a/src/test/regress/expected/multi_deparse_function.out +++ b/src/test/regress/expected/multi_deparse_function.out @@ -520,19 +520,11 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE (localhost,57638,t,"DROP FUNCTION") (2 rows) +set citus.enable_ddl_propagation to on; -- create schema with weird names CREATE SCHEMA "CiTuS.TeeN"; CREATE SCHEMA "CiTUS.TEEN2"; -SELECT run_command_on_workers($$ - CREATE SCHEMA IF NOT EXISTS "CiTuS.TeeN"; - CREATE SCHEMA IF NOT EXISTS "CiTUS.TEEN2"; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - +set citus.enable_ddl_propagation to off; -- create table with weird names CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() RETURNS TEXT AS $$ SELECT 'test function without params' $$ @@ -697,17 +689,7 @@ SELECT create_distributed_function('func_custom_param(intpair)'); (1 row) RESET citus.enable_metadata_sync; -SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param; -$cmd$); -INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_custom_param(function_tests.intpair) RENAME TO 
func_with_custom_param; -CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE - deparse_and_run_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") -(2 rows) - -- a function that returns TABLE CREATE FUNCTION func_returns_table(IN count INT) RETURNS TABLE (x INT, y INT) @@ -721,31 +703,10 @@ SELECT create_distributed_function('func_returns_table(INT)'); (1 row) RESET citus.enable_metadata_sync; -SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_returns_table ROWS 100; -$cmd$); -INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_returns_table(integer) ROWS 100.000000; -CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE - deparse_and_run_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") -(2 rows) - -- clear objects SET client_min_messages TO WARNING; -- suppress cascading objects dropping DROP SCHEMA "CiTuS.TeeN" CASCADE; DROP SCHEMA "CiTUS.TEEN2" CASCADE; DROP SCHEMA function_tests CASCADE; -SELECT run_command_on_workers($$ - DROP SCHEMA "CiTuS.TeeN" CASCADE; - DROP SCHEMA "CiTUS.TEEN2" CASCADE; - DROP SCHEMA function_tests CASCADE; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP ROLE function_role; diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 70dc4c2a0..0b185659b 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -77,6 +77,8 @@ END $func$ LANGUAGE plpgsql; CREATE SCHEMA test; :create_function_test_maintenance_worker +WARNING: Citus can't distribute functions having dependency on unsupported object of type 
"view" +DETAIL: Function will be created only locally -- check maintenance daemon is started SELECT datname, current_database(), usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') @@ -107,6 +109,7 @@ DROP EXTENSION citus; -- these tests switch between citus versions and call ddl's that require pg_dist_object to be created SET citus.enable_metadata_sync TO 'false'; SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.0-1'; ALTER EXTENSION citus UPDATE TO '8.0-2'; ALTER EXTENSION citus UPDATE TO '8.0-3'; @@ -757,6 +760,7 @@ SELECT * FROM multi_extension.print_extension_changes(); \set VERBOSITY terse CREATE TABLE columnar_table(a INT, b INT) USING columnar; SET citus.enable_version_checks TO ON; +SET columnar.enable_version_checks TO ON; -- all should throw an error due to version mismatch VACUUM FULL columnar_table; ERROR: loaded Citus library version differs from installed extension version @@ -785,6 +789,7 @@ CREATE TABLE new_columnar_table (a int) USING columnar; ERROR: loaded Citus library version differs from installed extension version -- do cleanup for the rest of the tests SET citus.enable_version_checks TO OFF; +SET columnar.enable_version_checks TO OFF; DROP TABLE columnar_table; RESET columnar.enable_custom_scan; \set VERBOSITY default @@ -1011,9 +1016,10 @@ SELECT * FROM multi_extension.print_extension_changes(); | function citus_shard_indexes_on_worker() SETOF record | function citus_shards_on_worker() SETOF record | function create_distributed_function(regprocedure,text,text,boolean) void + | function worker_create_or_replace_object(text[]) boolean | function worker_drop_sequence_dependency(text) void | function worker_drop_shell_table(text) void -(15 rows) +(16 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version @@ -1040,6 +1046,7 @@ ORDER BY 1, 2; -- see incompatible version errors out RESET 
citus.enable_version_checks; +RESET columnar.enable_version_checks; DROP EXTENSION citus; CREATE EXTENSION citus VERSION '8.0-1'; ERROR: specified version incompatible with loaded Citus library @@ -1047,8 +1054,10 @@ DETAIL: Loaded library requires 11.0, but 8.0-1 was specified. HINT: If a newer library is present, restart the database and try the command again. -- Test non-distributed queries work even in version mismatch SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.1-1'; SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; -- Test CREATE TABLE CREATE TABLE version_mismatch_table(column1 int); -- Test COPY @@ -1098,15 +1107,18 @@ $function$; ERROR: cannot change return type of existing function HINT: Use DROP FUNCTION relation_is_a_known_shard(regclass) first. SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; -- This will fail because of previous function declaration ALTER EXTENSION citus UPDATE TO '8.1-1'; NOTICE: version "8.1-1" of extension "citus" is already installed -- We can DROP problematic function and continue ALTER EXTENSION even when version checks are on SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; DROP FUNCTION pg_catalog.relation_is_a_known_shard(regclass); ERROR: cannot drop function relation_is_a_known_shard(regclass) because extension citus requires it HINT: You can drop extension citus instead. 
SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; ALTER EXTENSION citus UPDATE TO '8.1-1'; NOTICE: version "8.1-1" of extension "citus" is already installed -- Test updating to the latest version without specifying the version number @@ -1119,8 +1131,10 @@ CREATE EXTENSION citus; \c - - - :worker_1_port DROP EXTENSION citus; SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.0-1'; SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; -- during ALTER EXTENSION, we should invalidate the cache ALTER EXTENSION citus UPDATE; -- if cache is invalidated succesfull, this \d should work without any problem @@ -1197,6 +1211,8 @@ HINT: You can manually create a database and its extensions on workers. CREATE EXTENSION citus; CREATE SCHEMA test; :create_function_test_maintenance_worker +WARNING: Citus can't distribute functions having dependency on unsupported object of type "view" +DETAIL: Function will be created only locally -- see that the daemon started SELECT datname, current_database(), usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index 98e5437a6..ab0e62964 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -741,10 +741,3 @@ SELECT citus_remove_node('localhost', :master_port); (1 row) -SELECT run_command_on_workers($$ DROP SCHEMA IF EXISTS fix_idx_names CASCADE $$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - diff --git a/src/test/regress/expected/multi_foreign_key_relation_graph.out 
b/src/test/regress/expected/multi_foreign_key_relation_graph.out index 41de227c7..735cb0596 100644 --- a/src/test/regress/expected/multi_foreign_key_relation_graph.out +++ b/src/test/regress/expected/multi_foreign_key_relation_graph.out @@ -440,6 +440,7 @@ drop cascades to constraint fkey_1 on table test_4 ROLLBACK; -- Test schemas BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA fkey_intermediate_schema_1; CREATE SCHEMA fkey_intermediate_schema_2; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; @@ -526,6 +527,7 @@ drop cascades to constraint test_8_id_fkey on table test_8 ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA fkey_intermediate_schema_1; CREATE SCHEMA fkey_intermediate_schema_2; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; diff --git a/src/test/regress/expected/multi_function_in_join.out b/src/test/regress/expected/multi_function_in_join.out index 5d1b2a672..7d62e286b 100644 --- a/src/test/regress/expected/multi_function_in_join.out +++ b/src/test/regress/expected/multi_function_in_join.out @@ -39,14 +39,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL; -SELECT create_distributed_function('add(integer,integer)'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC; id | data | sum --------------------------------------------------------------------- @@ -60,7 +54,10 @@ BEGIN RETURN i + 1; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT val FROM functions_in_joins.increment(2) val(val) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, val.val FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.val FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(val integer)) val ON ((table1.id OPERATOR(pg_catalog.=) val.val))) ORDER BY table1.id id | data | val @@ -69,6 +66,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta (1 row) -- a function that returns a set of integers +-- Block distributing function as we have tests below to test it locally +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION next_k_integers(IN first_value INTEGER, IN k INTEGER DEFAULT 3, OUT result INTEGER) @@ -77,6 +76,7 @@ BEGIN RETURN QUERY SELECT x FROM generate_series(first_value, first_value+k-1) f(x); END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT * FROM table1 JOIN next_k_integers(3,2) next_integers ON (id = next_integers.result) ORDER BY id ASC; @@ -93,7 +93,10 @@ CREATE FUNCTION get_set_of_records() RETURNS SETOF RECORD AS $cmd$ SELECT x, x+1 FROM generate_series(0,4) f(x) $cmd$ LANGUAGE SQL; +DEBUG: switching 
to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN get_set_of_records() AS t2(x int, y int) ON (id = x) ORDER BY id ASC; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM functions_in_joins.get_set_of_records() t2(x integer, y integer) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, t2.x, t2.y FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) t2 ON ((table1.id OPERATOR(pg_catalog.=) t2.x))) ORDER BY table1.id id | data | x | y @@ -108,7 +111,10 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text) AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT f.* FROM table1 t JOIN dup(32) f ON (f1 = id); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT f1, f2 FROM functions_in_joins.dup(32) f(f1, f2) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM (functions_in_joins.table1 t JOIN (SELECT intermediate_result.f1, intermediate_result.f2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(f1 integer, f2 text)) f ON ((f.f1 OPERATOR(pg_catalog.=) t.id))) f1 | f2 @@ -119,7 +125,10 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FR -- a stable function CREATE OR REPLACE FUNCTION the_minimum_id() RETURNS INTEGER STABLE AS 'SELECT min(id) FROM table1' LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN the_minimum_id() min_id ON (id = min_id); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT min_id FROM functions_in_joins.the_minimum_id() min_id(min_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, min_id.min_id FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.min_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min_id integer)) min_id ON ((table1.id OPERATOR(pg_catalog.=) min_id.min_id))) id | data | min_id @@ -180,7 +189,10 @@ begin return result; end; $$ language plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN max_and_min() m ON (m.maximum = data OR m.minimum = data) ORDER BY 1,2,3,4; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT minimum, maximum FROM functions_in_joins.max_and_min() m(minimum, maximum) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, m.minimum, m.maximum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.minimum, intermediate_result.maximum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(minimum integer, maximum integer)) m ON (((m.maximum OPERATOR(pg_catalog.=) table1.data) OR (m.minimum OPERATOR(pg_catalog.=) table1.data)))) ORDER BY table1.id, table1.data, m.minimum, m.maximum id | data | minimum | maximum @@ -198,6 +210,7 @@ SET client_min_messages TO ERROR; -- function joins in CTE results can create lateral joins that are not supported -- we execute the query within a function to consolidate the error messages -- between different executors +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION raise_failed_execution_func_join(query text) RETURNS void AS $$ BEGIN EXECUTE query; @@ -209,6 +222,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ WITH one_row AS ( SELECT * FROM table1 WHERE id=52 @@ -219,8 +233,10 @@ SELECT raise_failed_execution_func_join($$ $$); ERROR: Task failed to execute -- a user-defined immutable function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION the_answer_to_life() RETURNS INTEGER IMMUTABLE AS 'SELECT 42' LANGUAGE SQL; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ SELECT * FROM table1 JOIN the_answer_to_life() the_answer ON (id = the_answer); $$); diff --git 
a/src/test/regress/expected/multi_function_in_join_0.out b/src/test/regress/expected/multi_function_in_join_0.out index 21279ab8d..5f2bd70c7 100644 --- a/src/test/regress/expected/multi_function_in_join_0.out +++ b/src/test/regress/expected/multi_function_in_join_0.out @@ -39,14 +39,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL; -SELECT create_distributed_function('add(integer,integer)'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC; DEBUG: generating subplan XXX_1 for subquery SELECT sum FROM functions_in_joins.add(3, 5) sum(sum) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, sum.sum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum integer)) sum ON ((table1.id OPERATOR(pg_catalog.=) sum.sum))) ORDER BY table1.id @@ -62,7 +56,10 @@ BEGIN RETURN i + 1; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT val FROM functions_in_joins.increment(2) val(val) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, val.val FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.val FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(val integer)) val ON ((table1.id OPERATOR(pg_catalog.=) val.val))) ORDER BY table1.id id | data | val @@ -71,6 +68,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta (1 row) -- a function that returns a set of integers +-- Block distributing function as we have tests below to test it locally +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION next_k_integers(IN first_value INTEGER, IN k INTEGER DEFAULT 3, OUT result INTEGER) @@ -79,6 +78,7 @@ BEGIN RETURN QUERY SELECT x FROM generate_series(first_value, first_value+k-1) f(x); END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT * FROM table1 JOIN next_k_integers(3,2) next_integers ON (id = next_integers.result) ORDER BY id ASC; @@ -95,7 +95,10 @@ CREATE FUNCTION get_set_of_records() RETURNS SETOF RECORD AS $cmd$ SELECT x, x+1 FROM generate_series(0,4) f(x) $cmd$ LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN get_set_of_records() AS t2(x int, y int) ON (id = x) ORDER BY id ASC; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM functions_in_joins.get_set_of_records() t2(x integer, y integer) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, t2.x, t2.y FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) t2 ON ((table1.id OPERATOR(pg_catalog.=) t2.x))) ORDER BY table1.id id | data | x | y @@ -110,7 +113,10 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text) AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT f.* FROM table1 t JOIN dup(32) f ON (f1 = id); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT f1, f2 FROM functions_in_joins.dup(32) f(f1, f2) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM (functions_in_joins.table1 t JOIN (SELECT intermediate_result.f1, intermediate_result.f2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(f1 integer, f2 text)) f ON ((f.f1 OPERATOR(pg_catalog.=) t.id))) f1 | f2 @@ -121,7 +127,10 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FR -- a stable function CREATE OR REPLACE FUNCTION the_minimum_id() RETURNS INTEGER STABLE AS 'SELECT min(id) FROM table1' LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN the_minimum_id() min_id ON (id = min_id); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT min_id FROM functions_in_joins.the_minimum_id() min_id(min_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, min_id.min_id FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.min_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min_id integer)) min_id ON ((table1.id OPERATOR(pg_catalog.=) min_id.min_id))) id | data | min_id @@ -182,7 +191,10 @@ begin return result; end; $$ language plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN max_and_min() m ON (m.maximum = data OR m.minimum = data) ORDER BY 1,2,3,4; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT minimum, maximum FROM functions_in_joins.max_and_min() m(minimum, maximum) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, m.minimum, m.maximum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.minimum, intermediate_result.maximum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(minimum integer, maximum integer)) m ON (((m.maximum OPERATOR(pg_catalog.=) table1.data) OR (m.minimum OPERATOR(pg_catalog.=) table1.data)))) ORDER BY table1.id, table1.data, m.minimum, m.maximum id | data | minimum | maximum @@ -200,6 +212,7 @@ SET client_min_messages TO ERROR; -- function joins in CTE results can create lateral joins that are not supported -- we execute the query within a function to consolidate the error messages -- between different executors +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION raise_failed_execution_func_join(query text) RETURNS void AS $$ BEGIN EXECUTE query; @@ -211,6 +224,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ WITH one_row AS ( SELECT * FROM table1 WHERE id=52 @@ -221,8 +235,10 @@ SELECT raise_failed_execution_func_join($$ $$); ERROR: Task failed to execute -- a user-defined immutable function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION the_answer_to_life() RETURNS INTEGER IMMUTABLE AS 'SELECT 42' LANGUAGE SQL; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ SELECT * FROM table1 JOIN the_answer_to_life() the_answer ON (id = the_answer); $$); diff --git 
a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index 99c5e8e56..c7679d02e 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -127,6 +127,7 @@ WHERE user_id < 0; NOTICE: evaluating on master -- make sure we don't evaluate stable functions with column arguments +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION evaluate_on_master(x int) RETURNS int LANGUAGE plpgsql STABLE AS $function$ @@ -135,6 +136,7 @@ BEGIN RETURN x; END; $function$; +RESET citus.enable_metadata_sync; INSERT INTO raw_events_second (user_id, value_1) SELECT user_id, evaluate_on_master(value_1) diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 5680b73c3..be923eee3 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -26,10 +26,12 @@ ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000; SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset -- Create the necessary test utility function +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION activate_node_snapshot() RETURNS text[] LANGUAGE C STRICT AS 'citus'; +RESET citus.enable_metadata_sync; COMMENT ON FUNCTION activate_node_snapshot() IS 'commands to activate node snapshot'; -- Show that none of the existing tables are qualified to be MX tables @@ -89,12 +91,14 @@ SELECT unnest(activate_node_snapshot()) order by 1; (27 rows) -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) RETURNS void LANGUAGE C STRICT AS 'citus', 
$$master_create_distributed_table$$; +RESET citus.enable_metadata_sync; COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out index c6323c793..5b5764593 100644 --- a/src/test/regress/expected/multi_modifications.out +++ b/src/test/regress/expected/multi_modifications.out @@ -467,13 +467,6 @@ UPDATE limit_orders SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id UPDATE limit_orders SET array_of_values = 1 || array_of_values WHERE id = 246; CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -\c - - - :worker_1_port -CREATE FUNCTION immutable_append(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -\c - - - :worker_2_port -CREATE FUNCTION immutable_append(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -\c - - - :master_port -- immutable function calls with vars are also allowed UPDATE limit_orders SET array_of_values = immutable_append(array_of_values, 2) WHERE id = 246; diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out index 226529040..47b113378 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -329,22 +329,15 @@ SELECT count(*) FROM pg_dist_transaction; (1 row) -- create a check function -SELECT * from run_command_on_workers('CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ +CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ BEGIN IF (NEW.id > 30) THEN - RAISE ''illegal value''; + RAISE 'illegal value'; END IF; RETURN NEW; END; -$rli$ LANGUAGE plpgsql;') -ORDER BY 
nodeport; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | CREATE FUNCTION - localhost | 57638 | t | CREATE FUNCTION -(2 rows) - +$rli$ LANGUAGE plpgsql; -- register after insert trigger SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGER reject_large_researcher_id AFTER INSERT ON %s DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_large_id()') ORDER BY nodeport, shardid; @@ -418,14 +411,7 @@ ORDER BY nodeport, shardid; localhost | 57638 | 1200001 | t | DROP TRIGGER (4 rows) -SELECT * FROM run_command_on_workers('drop function reject_large_id()') -ORDER BY nodeport; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | DROP FUNCTION - localhost | 57638 | t | DROP FUNCTION -(2 rows) - +DROP FUNCTION reject_large_id(); -- ALTER and copy are compatible BEGIN; ALTER TABLE labs ADD COLUMN motto text; @@ -522,6 +508,7 @@ AND s.logicalrelid = 'objects'::regclass; -- create trigger on one worker to reject certain values \c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -531,6 +518,7 @@ CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON objects_1200003 DEFERRABLE INITIALLY IMMEDIATE @@ -573,6 +561,7 @@ DELETE FROM objects; -- there cannot be errors on different shards at different times -- because the first failure will fail the whole transaction \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -582,6 +571,7 @@ CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET 
citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON labs_1200002 DEFERRABLE INITIALLY IMMEDIATE @@ -850,6 +840,7 @@ SELECT * FROM reference_modifying_xacts; -- lets fail on of the workers at before the commit time \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 999) THEN @@ -859,6 +850,7 @@ CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 DEFERRABLE INITIALLY IMMEDIATE @@ -938,6 +930,7 @@ INSERT INTO hash_modifying_xacts VALUES (2, 2); ABORT; -- lets fail one of the workers before COMMIT time for the hash table \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 997) THEN @@ -947,6 +940,7 @@ CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_hash AFTER INSERT ON hash_modifying_xacts_1200007 DEFERRABLE INITIALLY IMMEDIATE diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index 01417a974..0e25a38d2 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -75,11 +75,6 @@ GRANT ALL ON TABLE test_1420000 TO full_access; GRANT SELECT ON TABLE test_1420000 TO read_access; GRANT ALL ON TABLE test_1420002 TO full_access; GRANT SELECT ON TABLE test_1420002 TO read_access; -CREATE SCHEMA full_access_user_schema; -REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC; -GRANT USAGE ON SCHEMA full_access_user_schema TO full_access; -GRANT ALL ON SCHEMA full_access_user_schema TO full_access; -GRANT USAGE ON SCHEMA full_access_user_schema TO 
usage_access; \c - - - :worker_2_port CREATE USER full_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes @@ -102,11 +97,6 @@ GRANT ALL ON TABLE test_1420001 TO full_access; GRANT SELECT ON TABLE test_1420001 TO read_access; GRANT ALL ON TABLE test_1420003 TO full_access; GRANT SELECT ON TABLE test_1420003 TO read_access; -CREATE SCHEMA full_access_user_schema; -REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC; -GRANT USAGE ON SCHEMA full_access_user_schema TO full_access; -GRANT ALL ON SCHEMA full_access_user_schema TO full_access; -GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access; \c - - - :master_port SET citus.shard_replication_factor TO 1; -- create prepare tests @@ -632,10 +622,12 @@ ERROR: permission denied for function worker_cleanup_job_schema_cache RESET ROLE; -- to test access to files created during repartition we will create some on worker 1 \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION citus_rm_job_directory(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; SET ROLE full_access; SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS a', 'a', 23, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); worker_hash_partition_table @@ -646,10 +638,12 @@ SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS RESET ROLE; -- all attempts for transfer are initiated from other workers \c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION citus_rm_job_directory(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; -- super user should not be able to copy files created by a user SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.xxxx": No such file or directory diff --git 
a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index 33aec18d8..ff6debaf9 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -139,9 +139,11 @@ BEGIN INSERT INTO mx_add_coordinator.ref(groupid) VALUES (gid); END; $$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('my_group_id()', colocate_with := 'ref'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_call.out b/src/test/regress/expected/multi_mx_call.out index 64b033d41..5d440e67b 100644 --- a/src/test/regress/expected/multi_mx_call.out +++ b/src/test/regress/expected/multi_mx_call.out @@ -402,6 +402,7 @@ select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass -- Test that we handle transactional constructs correctly inside a procedure -- that is routed to the workers. +SET citus.enable_metadata_sync TO OFF; CREATE PROCEDURE mx_call_proc_tx(x int) LANGUAGE plpgsql AS $$ BEGIN INSERT INTO multi_mx_call.mx_call_dist_table_1 VALUES (x, -1), (x+1, 4); @@ -411,12 +412,13 @@ BEGIN -- Now do the final update! 
UPDATE multi_mx_call.mx_call_dist_table_1 SET val = val-1 WHERE id >= x; END;$$; +RESET citus.enable_metadata_sync; -- before distribution ... CALL multi_mx_call.mx_call_proc_tx(10); -- after distribution ... select create_distributed_function('mx_call_proc_tx(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -486,9 +488,11 @@ BEGIN RAISE WARNING 'warning'; RAISE EXCEPTION 'error'; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_proc_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -552,9 +556,11 @@ SET client_min_messages TO DEBUG1; -- CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_call_0.out b/src/test/regress/expected/multi_mx_call_0.out index 496e735c9..474d3a637 100644 --- a/src/test/regress/expected/multi_mx_call_0.out +++ b/src/test/regress/expected/multi_mx_call_0.out @@ -402,6 +402,7 @@ select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass -- Test that we handle transactional constructs correctly inside a procedure -- that is routed to the workers. +SET citus.enable_metadata_sync TO OFF; CREATE PROCEDURE mx_call_proc_tx(x int) LANGUAGE plpgsql AS $$ BEGIN INSERT INTO multi_mx_call.mx_call_dist_table_1 VALUES (x, -1), (x+1, 4); @@ -411,12 +412,13 @@ BEGIN -- Now do the final update! UPDATE multi_mx_call.mx_call_dist_table_1 SET val = val-1 WHERE id >= x; END;$$; +RESET citus.enable_metadata_sync; -- before distribution ... CALL multi_mx_call.mx_call_proc_tx(10); -- after distribution ... select create_distributed_function('mx_call_proc_tx(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -486,9 +488,11 @@ BEGIN RAISE WARNING 'warning'; RAISE EXCEPTION 'error'; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_proc_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -552,9 +556,11 @@ SET client_min_messages TO DEBUG1; -- CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index 92c43b363..6036bd325 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -63,33 +63,7 @@ CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. --- create schema to test schema support -CREATE SCHEMA citus_mx_test_schema_join_1; -CREATE SCHEMA citus_mx_test_schema_join_2; --- create UDFs in worker node -CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO citus_mx_test_schema; -CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, @@ -101,33 +75,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( ); -- now create required stuff in the worker 2 \c - - - :worker_2_port --- create schema to test schema support -CREATE SCHEMA citus_mx_test_schema_join_1; -CREATE SCHEMA citus_mx_test_schema_join_2; --- create UDF -CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' 
IMMUTABLE; SET search_path TO citus_mx_test_schema; -CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, diff --git a/src/test/regress/expected/multi_mx_ddl.out b/src/test/regress/expected/multi_mx_ddl.out index 14318c3d8..e6f3f6e0f 100644 --- a/src/test/regress/expected/multi_mx_ddl.out +++ b/src/test/regress/expected/multi_mx_ddl.out @@ -304,13 +304,6 @@ SELECT * FROM seg_test; \c - - - :master_port CREATE SCHEMA ext_owned_tables; -SELECT run_command_on_workers($$CREATE SCHEMA ext_owned_tables;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - SET search_path TO ext_owned_tables; CREATE sequence my_seq_ext_1; SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_1;$$); diff --git a/src/test/regress/expected/multi_mx_explain.out b/src/test/regress/expected/multi_mx_explain.out index 2c58dd003..1c585a027 100644 --- a/src/test/regress/expected/multi_mx_explain.out +++ b/src/test/regress/expected/multi_mx_explain.out @@ -13,6 +13,7 @@ VACUUM ANALYZE customer_mx; VACUUM ANALYZE supplier_mx; \c - - - :worker_1_port -- Function that parses explain output as JSON +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ @@ -36,6 +37,7 @@ END; $BODY$ LANGUAGE plpgsql; \c - - - :worker_2_port -- Function that parses explain output as JSON +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ @@ -57,6 +59,7 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE 
plpgsql; +RESET citus.enable_metadata_sync; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index d48f001bf..552af7bf5 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -213,7 +213,7 @@ select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); select create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x', colocate_with := 'mx_call_dist_table_bigint'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -224,7 +224,7 @@ select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', ' colocate_with := 'mx_call_dist_table_2', force_delegation := true); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -409,8 +409,11 @@ BEGIN WHERE id >= x ORDER BY 1, 2; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- before distribution ... select mx_call_func_tbl(10); +DEBUG: function does not have co-located tables mx_call_func_tbl --------------------------------------------------------------------- (10,-1) @@ -420,7 +423,7 @@ select mx_call_func_tbl(10); -- after distribution ... select create_distributed_function('mx_call_func_tbl(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -441,9 +444,11 @@ BEGIN RAISE WARNING 'warning'; RAISE EXCEPTION 'error'; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_func_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -593,9 +598,11 @@ SET citus.shard_replication_factor = 1; -- CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)', '$1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -630,18 +637,6 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment (1 row) -- test forms we don't distribute -select * from mx_call_func(2, 0); -DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment - y ---------------------------------------------------------------------- - 29 -(1 row) - select mx_call_func(2, 0) where mx_call_func(0, 2) = 0; DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from 
multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" @@ -671,6 +666,58 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment 29 | 27 (1 row) +-- regular call in FROM can be pushed down +select * from mx_call_func(2, 0); +DEBUG: pushing down the function call + y +--------------------------------------------------------------------- + 28 +(1 row) + +-- prepared statement with 6 invocations to trigger generic plan +prepare call_func(int, int) as select $1 from mx_call_func($1, $2); +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? 
+--------------------------------------------------------------------- + 2 +(1 row) + -- we do not delegate the call, but do push down the query -- that result in remote execution from workers select mx_call_func(id, 0) from mx_call_dist_table_1; diff --git a/src/test/regress/expected/multi_mx_function_call_delegation_0.out b/src/test/regress/expected/multi_mx_function_call_delegation_0.out index 06a7b320d..cab2f6394 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation_0.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation_0.out @@ -213,7 +213,7 @@ select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); select create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x', colocate_with := 'mx_call_dist_table_bigint'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -224,7 +224,7 @@ select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', ' colocate_with := 'mx_call_dist_table_2', force_delegation := true); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -409,8 +409,11 @@ BEGIN WHERE id >= x ORDER BY 1, 2; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- before distribution ... select mx_call_func_tbl(10); +DEBUG: function does not have co-located tables mx_call_func_tbl --------------------------------------------------------------------- (10,-1) @@ -420,7 +423,7 @@ select mx_call_func_tbl(10); -- after distribution ... select create_distributed_function('mx_call_func_tbl(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -441,9 +444,11 @@ BEGIN RAISE WARNING 'warning'; RAISE EXCEPTION 'error'; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_func_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. 
To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -593,9 +598,11 @@ SET citus.shard_replication_factor = 1; -- CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)', '$1'); DEBUG: switching to sequential query execution mode -DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -630,18 +637,6 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment (1 row) -- test forms we don't distribute -select * from mx_call_func(2, 0); -DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment - y ---------------------------------------------------------------------- - 29 -(1 row) - select mx_call_func(2, 0) where mx_call_func(0, 2) = 0; DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from 
multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" @@ -671,6 +666,58 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment 29 | 27 (1 row) +-- regular call in FROM can be pushed down +select * from mx_call_func(2, 0); +DEBUG: pushing down the function call + y +--------------------------------------------------------------------- + 28 +(1 row) + +-- prepared statement with 6 invocations to trigger generic plan +prepare call_func(int, int) as select $1 from mx_call_func($1, $2); +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? 
+--------------------------------------------------------------------- + 2 +(1 row) + -- we do not delegate the call, but do push down the query -- that result in remote execution from workers select mx_call_func(id, 0) from mx_call_dist_table_1; diff --git a/src/test/regress/expected/multi_mx_metadata.out b/src/test/regress/expected/multi_mx_metadata.out index dc2365dda..a35b8edb1 100644 --- a/src/test/regress/expected/multi_mx_metadata.out +++ b/src/test/regress/expected/multi_mx_metadata.out @@ -344,16 +344,6 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx (3 rows) \c - no_access_mx - :worker_1_port --- see the comment in the top of the file -CREATE OR REPLACE FUNCTION raise_failed_aclcheck(query text) RETURNS void AS $$ -BEGIN - EXECUTE query; - EXCEPTION WHEN OTHERS THEN - IF SQLERRM LIKE 'must be owner of%' THEN - RAISE 'must be owner of the object'; - END IF; -END; -$$LANGUAGE plpgsql; SELECT raise_failed_aclcheck($$ DROP TABLE distributed_mx_table; $$); diff --git a/src/test/regress/expected/multi_mx_modifications.out b/src/test/regress/expected/multi_mx_modifications.out index c290c4e5a..276766c30 100644 --- a/src/test/regress/expected/multi_mx_modifications.out +++ b/src/test/regress/expected/multi_mx_modifications.out @@ -266,6 +266,7 @@ UPDATE limit_orders_mx SET array_of_values = 1 || array_of_values WHERE id = 246 -- immutable function calls with vars are also allowed UPDATE limit_orders_mx SET array_of_values = immutable_append_mx(array_of_values, 2) WHERE id = 246; +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION stable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$ LANGUAGE plpgsql STABLE; @@ -282,6 +283,7 @@ SELECT array_of_values FROM limit_orders_mx WHERE id = 246; -- STRICT functions work as expected CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS 'SELECT COALESCE($1, 2) + COALESCE($1, 3);' LANGUAGE SQL STABLE STRICT; 
+RESET citus.enable_metadata_sync; UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246; ERROR: null value in column "bidder_id" violates not-null constraint SELECT array_of_values FROM limit_orders_mx WHERE id = 246; diff --git a/src/test/regress/expected/multi_mx_modifying_xacts.out b/src/test/regress/expected/multi_mx_modifying_xacts.out index 8c08673b2..e486b8b1b 100644 --- a/src/test/regress/expected/multi_mx_modifying_xacts.out +++ b/src/test/regress/expected/multi_mx_modifying_xacts.out @@ -227,6 +227,7 @@ SELECT * FROM objects_mx WHERE id = 1; -- create trigger on one worker to reject certain values \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -236,6 +237,7 @@ CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON objects_mx_1220103 DEFERRABLE INITIALLY IMMEDIATE diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index d46f49caf..a4f49f320 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -829,10 +829,12 @@ SELECT master_update_node(nodeid, 'localhost', 12345) FROM pg_dist_node; (1 row) +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION trigger_metadata_sync() RETURNS void LANGUAGE C STRICT AS 'citus'; +RESET citus.enable_metadata_sync; SELECT trigger_metadata_sync(); trigger_metadata_sync --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out index 114e55a0f..c9357e954 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out +++ 
b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out @@ -52,13 +52,7 @@ CREATE TABLE repartition_udt_other ( -- so that the OID is off. \c - - - :worker_1_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -68,13 +62,6 @@ CREATE OPERATOR = ( ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 @@ -87,13 +74,7 @@ FUNCTION 1 test_udt_hash(test_udt); -- END type creation \c - - - :worker_2_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -103,13 +84,6 @@ CREATE OPERATOR = ( ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. 
Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index 4dcbac57e..d006b4bb8 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ b/src/test/regress/expected/multi_mx_router_planner.out @@ -1363,6 +1363,7 @@ DEBUG: query has a single distribution column value: 1 (5 rows) -- queries inside plpgsql functions could be router plannable +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$ DECLARE max_id integer; @@ -1418,6 +1419,7 @@ PL/pgSQL function author_articles_id_word_count() line XX at RETURN QUERY 41 | 11814 (5 rows) +RESET citus.enable_metadata_sync; -- materialized views can be created for router plannable queries CREATE MATERIALIZED VIEW mv_articles_hash_mx AS SELECT * FROM articles_hash_mx WHERE author_id = 1; diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out index bc41b5c24..71dfde3fd 100644 --- a/src/test/regress/expected/multi_mx_schema_support.out +++ b/src/test/regress/expected/multi_mx_schema_support.out @@ -444,11 +444,9 @@ ALTER TABLE "CiTuS.TeAeN"."TeeNTabLE.1!?!" ADD COLUMN new_col INT; -- set the search_path to not existing schema SET search_path TO not_existing_schema; ALTER TABLE "CiTuS.TeAeN"."TeeNTabLE.1!?!" DROP COLUMN new_col; +SET client_min_messages TO ERROR; DROP SCHEMA mx_ddl_schema_1, mx_ddl_schema_2, "CiTuS.TeAeN" CASCADE; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to table "CiTuS.TeAeN"."TeeNTabLE.1!?!" 
-drop cascades to table mx_ddl_schema_2.table_2 -drop cascades to table mx_ddl_schema_1.table_1 +RESET client_min_messages; -- test if ALTER TABLE SET SCHEMA sets the original table in the worker SET search_path TO public; CREATE SCHEMA mx_old_schema; @@ -462,11 +460,13 @@ SELECT create_distributed_table('mx_old_schema.table_set_schema', 'id'); CREATE SCHEMA mx_new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM citus.pg_dist_object - WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema'); + WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema') + ORDER BY "Distributed Schemas"; Distributed Schemas --------------------------------------------------------------------- mx_old_schema -(1 row) + mx_new_schema +(2 rows) \c - - - :worker_1_port SELECT table_schema AS "Table's Schema" FROM information_schema.tables WHERE table_name='table_set_schema'; diff --git a/src/test/regress/expected/multi_name_lengths.out b/src/test/regress/expected/multi_name_lengths.out index e100ba7d8..e3cbca911 100644 --- a/src/test/regress/expected/multi_name_lengths.out +++ b/src/test/regress/expected/multi_name_lengths.out @@ -4,6 +4,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000; SET citus.shard_count TO 2; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -20,6 +21,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; -- Verify that a table name > 56 characters gets hashed properly. 
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out index 4f30bc716..6681f4112 100644 --- a/src/test/regress/expected/multi_prepare_sql.out +++ b/src/test/regress/expected/multi_prepare_sql.out @@ -1132,10 +1132,6 @@ DROP TABLE http_request; -- (re-)planning, but not when executing. -- first create helper function CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; -\c - - - :worker_1_port -CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; -\c - - - :worker_2_port -CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :master_port -- test table CREATE TABLE test_table (test_id integer NOT NULL, data text); diff --git a/src/test/regress/expected/multi_repartition_udt.out b/src/test/regress/expected/multi_repartition_udt.out index c37bbba02..ee6ac8800 100644 --- a/src/test/regress/expected/multi_repartition_udt.out +++ b/src/test/regress/expected/multi_repartition_udt.out @@ -53,13 +53,7 @@ CREATE TABLE repartition_udt_other ( -- so that the OID is off. \c - - :public_worker_1_host :worker_1_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -69,13 +63,6 @@ CREATE OPERATOR = ( ); -- ... and create a custom operator family for hash indexes... 
CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 @@ -88,13 +75,7 @@ FUNCTION 1 test_udt_hash(test_udt); -- END type creation \c - - :public_worker_2_host :worker_2_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -104,13 +85,6 @@ CREATE OPERATOR = ( ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. 
Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 diff --git a/src/test/regress/expected/multi_repartitioned_subquery_udf.out b/src/test/regress/expected/multi_repartitioned_subquery_udf.out index 0048bb7eb..f6017d2ea 100644 --- a/src/test/regress/expected/multi_repartitioned_subquery_udf.out +++ b/src/test/regress/expected/multi_repartitioned_subquery_udf.out @@ -13,28 +13,6 @@ LANGUAGE sql IMMUTABLE AS $_$ ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; -\c - - :public_worker_1_host :worker_1_port -DROP FUNCTION IF EXISTS median(double precision[]); -NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; -$_$; -\c - - :public_worker_2_host :worker_2_port -DROP FUNCTION IF EXISTS median(double precision[]); -NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; -$_$; --- Run query on master -\c - - :master_host :master_port SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*) FROM lineitem GROUP BY l_partkey) AS a WHERE median > 2; diff --git a/src/test/regress/expected/multi_router_planner.out 
b/src/test/regress/expected/multi_router_planner.out index 405659544..cc503f766 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -1592,10 +1592,13 @@ BEGIN RETURN QUERY SELECT 1; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT 1 FROM authors_reference r JOIN ( SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid ) num_db ON (r.id = num_db.datid) LIMIT 1; DEBUG: found no worker with all shard placements +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT datid FROM public.number1() s(datid) DEBUG: Creating router plan DEBUG: generating subplan XXX_2 for subquery SELECT s.datid FROM ((SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) s LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid))) @@ -1610,6 +1613,7 @@ CREATE VIEW num_db AS SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid; SELECT 1 FROM authors_reference r JOIN num_db ON (r.id = num_db.datid) LIMIT 1; DEBUG: found no worker with all shard placements +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT datid FROM public.number1() s(datid) DEBUG: Creating router plan DEBUG: generating subplan XXX_2 for subquery SELECT s.datid FROM ((SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) s LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid))) @@ -2014,6 +2018,8 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode 
+DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SET client_min_messages TO ERROR; \set VERBOSITY terse SELECT raise_failed_execution_router($$ @@ -2307,7 +2313,10 @@ BEGIN return max_id; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables DEBUG: Creating router plan CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1" @@ -2332,7 +2341,10 @@ BEGIN END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM author_articles_id_word_count() ORDER BY 1; +DEBUG: function does not have co-located tables DEBUG: Creating router plan CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash ah diff --git a/src/test/regress/expected/multi_router_planner_fast_path.out b/src/test/regress/expected/multi_router_planner_fast_path.out index eb4a91f7e..8fb8917ab 100644 --- a/src/test/regress/expected/multi_router_planner_fast_path.out +++ b/src/test/regress/expected/multi_router_planner_fast_path.out @@ -1225,6 +1225,7 @@ DEBUG: query has a single distribution column value: 1 (0 rows) -- create a dummy function to be used in filtering +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION someDummyFunction(regclass) RETURNS text AS $$ @@ -1247,6 +1248,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SET client_min_messages TO ERROR; \set VERBOSITY terse -- fast 
path router plannable, but errors @@ -1611,11 +1613,14 @@ BEGIN return max_id; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- we don't want too many details. though we're omitting -- "DETAIL: distribution column value:", we see it acceptable -- since the query results verifies the correctness \set VERBOSITY terse SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: query has a single distribution column value: 1 @@ -1625,30 +1630,35 @@ DEBUG: query has a single distribution column value: 1 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 @@ -1665,7 +1675,9 @@ BEGIN return max_id; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for 
a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1674,6 +1686,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1682,6 +1695,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1690,6 +1704,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1698,6 +1713,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1706,6 +1722,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1724,7 +1741,9 @@ BEGIN END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: query has a single distribution column value: 1 @@ -1738,6 +1757,7 @@ DEBUG: query has a single distribution column value: 1 (5 rows) SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1748,6 +1768,7 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) 
SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1758,6 +1779,7 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1768,6 +1790,7 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1778,6 +1801,7 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1798,7 +1822,9 @@ BEGIN END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count @@ -1811,6 +1837,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count @@ -1823,6 +1850,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count @@ -1835,6 +1863,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a 
fast-path router query DEBUG: Creating router plan id | word_count @@ -1847,6 +1876,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count @@ -1859,6 +1889,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index b23c609ec..eb1c47401 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -194,31 +194,6 @@ BEGIN END; $$ LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 1 -\c - - - :worker_1_port -CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 2 -\c - - - :worker_2_port -CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; -\c - - - :master_port -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction @@ -260,33 +235,6 @@ BEGIN END; $$ LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 1 in schema -\c - - - :worker_1_port -SET search_path TO test_schema_support; -CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 2 in 
schema -\c - - - :worker_2_port -SET search_path TO test_schema_support; -CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; -\c - - - :master_port -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; @@ -941,7 +889,8 @@ SELECT objid::oid::regnamespace as "Distributed Schemas" Distributed Schemas --------------------------------------------------------------------- old_schema -(1 row) + new_schema +(2 rows) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema" @@ -999,7 +948,8 @@ SELECT objid::oid::regnamespace as "Distributed Schemas" WHERE objid='new_schema'::regnamespace::oid; Distributed Schemas --------------------------------------------------------------------- -(0 rows) + new_schema +(1 row) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema" @@ -1065,7 +1015,8 @@ SELECT objid::oid::regnamespace as "Distributed Schemas" Distributed Schemas --------------------------------------------------------------------- old_schema -(1 row) + new_schema +(2 rows) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema", COUNT(*) AS "Counts" @@ -1425,8 +1376,8 @@ BEGIN; (1 row) ALTER SCHEMA bar RENAME TO foo; -ERROR: cannot create or modify schema because there was a parallel operation on a distributed table in the transaction -DETAIL: When creating or altering a schema, Citus needs to perform all operations over a single connection per node to ensure consistency. +ERROR: cannot run schema command because there was a parallel operation on a distributed table in the transaction +DETAIL: When running command on/for a distributed schema, Citus needs to perform all operations over a single connection per node to ensure consistency. 
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" ROLLBACK; BEGIN; @@ -1441,10 +1392,18 @@ BEGIN; ROLLBACK; -- Clean up the created schema SET client_min_messages TO WARNING; -DROP SCHEMA run_test_schema CASCADE; -DROP SCHEMA test_schema_support_join_1 CASCADE; -DROP SCHEMA test_schema_support_join_2 CASCADE; -DROP SCHEMA "Citus'Teen123" CASCADE; -DROP SCHEMA "CiTUS.TEEN2" CASCADE; -DROP SCHEMA bar CASCADE; -DROP SCHEMA test_schema_support CASCADE; +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object + WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); + pg_identify_object_as_address +--------------------------------------------------------------------- + (schema,{run_test_schema},{}) +(1 row) + +DROP SCHEMA run_test_schema, test_schema_support_join_1, test_schema_support_join_2, "Citus'Teen123", "CiTUS.TEEN2", bar, test_schema_support CASCADE; +-- verify that the dropped schema is removed from worker's pg_dist_object +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object + WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + diff --git a/src/test/regress/expected/multi_sequence_default.out b/src/test/regress/expected/multi_sequence_default.out index 7f3c23673..27f083e92 100644 --- a/src/test/regress/expected/multi_sequence_default.out +++ b/src/test/regress/expected/multi_sequence_default.out @@ -370,70 +370,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); DROP SEQUENCE sequence_default_0.sequence_3 CASCADE; NOTICE: drop cascades to default value for column y of table seq_test_3 DROP SCHEMA sequence_default_0; --- DROP SCHEMA problem: expected since we don't propagate DROP SCHEMA -CREATE TABLE seq_test_5 (x int, y int); 
-SELECT create_distributed_table('seq_test_5','x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE SCHEMA sequence_default_1; -CREATE SEQUENCE sequence_default_1.seq_5; -ALTER TABLE seq_test_5 ADD COLUMN a bigint DEFAULT nextval('sequence_default_1.seq_5'); -DROP SCHEMA sequence_default_1 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to sequence sequence_default_1.seq_5 -drop cascades to default value for column a of table seq_test_5 --- sequence is gone from coordinator -INSERT INTO seq_test_5 VALUES (1, 2) RETURNING *; - x | y | a ---------------------------------------------------------------------- - 1 | 2 | -(1 row) - --- but is still present on worker -\c - - - :worker_1_port -INSERT INTO sequence_default.seq_test_5 VALUES (1, 2) RETURNING *; - x | y | a ---------------------------------------------------------------------- - 1 | 2 | 281474976710657 -(1 row) - -\c - - - :master_port -SET citus.shard_replication_factor TO 1; -SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - --- apply workaround -SELECT run_command_on_workers('DROP SCHEMA sequence_default_1 CASCADE'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - --- now the sequence is gone from the worker as well -\c - - - :worker_1_port -INSERT INTO sequence_default.seq_test_5 VALUES (1, 2) RETURNING *; - x | y | a ---------------------------------------------------------------------- - 1 | 2 | -(1 row) - -\c - - - :master_port -SET citus.shard_replication_factor TO 1; -SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node 
---------------------------------------------------------------------- - -(1 row) - -- check some more complex cases CREATE SEQUENCE seq_6; CREATE TABLE seq_test_6 (x int, t timestamptz DEFAULT now(), s int DEFAULT nextval('seq_6'), m int) PARTITION BY RANGE (t); @@ -569,14 +505,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); (1 row) -DROP SCHEMA sequence_default_8; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default_8 CASCADE'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - +DROP SCHEMA sequence_default_8 CASCADE; -- cannot use more than one sequence in a column default CREATE SEQUENCE seq_9; CREATE SEQUENCE seq_10; @@ -914,17 +843,6 @@ ERROR: nextval: reached maximum value of sequence "seq_14" (32767) -- table with default column is added CREATE SCHEMA test_schema_for_sequence_default_propagation; CREATE SEQUENCE test_schema_for_sequence_default_propagation.seq_10; --- Both should return 0 rows -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation.seq_10'::regclass); - pg_identify_object_as_address ---------------------------------------------------------------------- -(0 rows) - -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation'::regnamespace); - pg_identify_object_as_address ---------------------------------------------------------------------- -(0 rows) - -- Create distributed table with default column to propagate dependencies CREATE TABLE test_seq_dist(a int, x BIGINT DEFAULT nextval('test_schema_for_sequence_default_propagation.seq_10')); SELECT create_distributed_table('test_seq_dist', 'a'); @@ -998,13 +916,6 @@ DROP TABLE test_seq_dist; DROP TABLE sequence_default.seq_test_7_par; SET 
client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA sequence_default CASCADE; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default CASCADE'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - SELECT master_remove_node('localhost', :master_port); master_remove_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_subquery_behavioral_analytics.out b/src/test/regress/expected/multi_subquery_behavioral_analytics.out index e357132b4..9284b7f92 100644 --- a/src/test/regress/expected/multi_subquery_behavioral_analytics.out +++ b/src/test/regress/expected/multi_subquery_behavioral_analytics.out @@ -1563,22 +1563,8 @@ LIMIT 10; (10 rows) -- this is one complex join query derived from a user's production query --- first declare the function on workers on master +-- declare the function on workers and master -- With array_index: -SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) - RETURNS INT AS $$ - SELECT i - FROM (SELECT generate_series(array_lower($1, 1), array_upper($1, 1))) g(i) - WHERE $1 [i] = $2 - LIMIT 1; - $$ LANGUAGE sql') -ORDER BY 1,2; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | CREATE FUNCTION - localhost | 57638 | t | CREATE FUNCTION -(2 rows) - CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) RETURNS INT AS $$ SELECT i @@ -1745,14 +1731,6 @@ LIMIT 10; (10 rows) -- drop created functions -SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, ANYELEMENT)') -ORDER BY 1,2; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | DROP FUNCTION - localhost | 57638 | t | DROP 
FUNCTION -(2 rows) - DROP FUNCTION array_index(ANYARRAY, ANYELEMENT); -- a query with a constant subquery SELECT count(*) as subquery_count @@ -1883,6 +1861,8 @@ CREATE OR REPLACE FUNCTION volatile_func_test() RETURNS INT AS $$ SELECT 1; $$ LANGUAGE sql VOLATILE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- Citus should be able to evalute functions/row comparisons on the LIMIT/OFFSET SELECT user_id, array_length(events_table, 1) FROM ( @@ -1987,21 +1967,6 @@ CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -SELECT run_command_on_workers($f$ - -CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool - AS 'select $1 > $2;' - LANGUAGE SQL - IMMUTABLE - RETURNS NULL ON NULL INPUT; - -$f$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") - (localhost,57638,t,"CREATE FUNCTION") -(2 rows) - -- we don't support joins via functions SELECT user_id, array_length(events_table, 1) FROM ( @@ -2285,16 +2250,5 @@ LIMIT 1; (1 row) DROP FUNCTION test_join_function_2(integer, integer); -SELECT run_command_on_workers($f$ - - DROP FUNCTION test_join_function_2(integer, integer); - -$f$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP FUNCTION") - (localhost,57638,t,"DROP FUNCTION") -(2 rows) - SET citus.enable_router_execution TO TRUE; SET citus.subquery_pushdown to OFF; diff --git a/src/test/regress/expected/multi_test_helpers_superuser.out b/src/test/regress/expected/multi_test_helpers_superuser.out index 238ed9f2d..4bd8b941d 100644 --- a/src/test/regress/expected/multi_test_helpers_superuser.out +++ 
b/src/test/regress/expected/multi_test_helpers_superuser.out @@ -38,6 +38,7 @@ SELECT dist_node_check.matches AND dist_placement_check.matches FROM dist_node_check CROSS JOIN dist_placement_check $$; -- partition_task_list_results tests the internal PartitionTasklistResults function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix text, query text, target_table regclass, @@ -63,3 +64,4 @@ BEGIN PERFORM public.wait_until_metadata_sync(30000); END; $function$; +RESET citus.enable_metadata_sync; diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out index 5d59069a3..fd06e5d14 100644 --- a/src/test/regress/expected/multi_transactional_drop_shards.out +++ b/src/test/regress/expected/multi_transactional_drop_shards.out @@ -292,11 +292,13 @@ SET citus.override_table_visibility TO false; (1 row) -- test DROP table with failing worker +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION fail_drop_table() RETURNS event_trigger AS $fdt$ BEGIN RAISE 'illegal value'; END; $fdt$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER fail_drop_table ON sql_drop EXECUTE PROCEDURE fail_drop_table(); \c - - - :master_port \set VERBOSITY terse diff --git a/src/test/regress/expected/multi_unsupported_worker_operations.out b/src/test/regress/expected/multi_unsupported_worker_operations.out index 2e412a4a8..d54e5e84b 100644 --- a/src/test/regress/expected/multi_unsupported_worker_operations.out +++ b/src/test/regress/expected/multi_unsupported_worker_operations.out @@ -62,6 +62,7 @@ SELECT * FROM mx_table ORDER BY col_1; -- Try commands from metadata worker \c - - - :worker_1_port -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, 
distribution_method citus.distribution_type) @@ -78,6 +79,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE mx_table_worker(col_1 text); -- master_create_distributed_table SELECT master_create_distributed_table('mx_table_worker', 'col_1', 'hash'); diff --git a/src/test/regress/expected/mx_regular_user.out b/src/test/regress/expected/mx_regular_user.out index 2538929cc..9b60132e0 100644 --- a/src/test/regress/expected/mx_regular_user.out +++ b/src/test/regress/expected/mx_regular_user.out @@ -174,16 +174,6 @@ ERROR: unsupported lockmode 8 \c - postgres - :master_port; SET client_min_messages TO ERROR; DROP SCHEMA "Mx Super User" CASCADE; -\c - postgres - :worker_1_port; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO OFF; -CREATE SCHEMA "Mx Regular User"; -GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user; -\c - postgres - :worker_2_port; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO OFF; -CREATE SCHEMA "Mx Regular User"; -GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user; -- now connect with that user \c - regular_mx_user - :master_port SET search_path TO "Mx Regular User"; diff --git a/src/test/regress/expected/node_conninfo_reload.out b/src/test/regress/expected/node_conninfo_reload.out index 932610bc9..d7b853226 100644 --- a/src/test/regress/expected/node_conninfo_reload.out +++ b/src/test/regress/expected/node_conninfo_reload.out @@ -76,6 +76,10 @@ select count(*) from test where a = 0; (1 row) ALTER SYSTEM SET citus.node_conninfo = 'sslmode=doesnotexist'; +-- we cannot set application name +ALTER SYSTEM SET citus.node_conninfo = 'application_name=XXX'; +ERROR: invalid value for parameter "citus.node_conninfo": "application_name=XXX" +DETAIL: Prohibited conninfo keyword detected: application_name BEGIN; -- Should still work (no SIGHUP yet); 
select count(*) from test where a = 0; diff --git a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out index 1a4f20529..976f3d438 100644 --- a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out @@ -22,6 +22,7 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode SHOW log_error_verbosity; log_error_verbosity --------------------------------------------------------------------- @@ -36,6 +37,7 @@ FROM (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8)) as bar WHERE foo.user_id = bar.user_id;$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) valid @@ -51,6 +53,7 @@ FROM (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6,7,8)) as bar WHERE foo.user_id = bar.user_id;$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan 
XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) @@ -73,6 +76,7 @@ WHERE users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6));$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) valid @@ -89,6 +93,7 @@ SELECT count(*) FROM q1, (SELECT users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); +DEBUG: function does 
not have co-located tables DEBUG: CTE q1 is going to be inlined via distributed planning DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id FROM public.users_table) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) @@ -101,6 +106,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c SELECT true AS valid FROM explain_json($$ (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) UNION (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8));$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) 
UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) @@ -137,6 +143,7 @@ FROM ( ) q ORDER BY 2 DESC, 1; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: push down of limit count: 5 DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) LIMIT 5 @@ -158,6 +165,7 @@ FROM (SELECT users_table.user_id, value_1 FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8)) as bar WHERE foo.user_id = bar.value_1;$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) 
bar.value_1) valid diff --git a/src/test/regress/expected/non_colocated_subquery_joins.out b/src/test/regress/expected/non_colocated_subquery_joins.out index 4fa460f4a..c14502569 100644 --- a/src/test/regress/expected/non_colocated_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_subquery_joins.out @@ -8,6 +8,8 @@ -- =================================================================== SET client_min_messages TO DEBUG1; CREATE SCHEMA non_colocated_subquery; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed schema is run. To make sure subsequent commands see the schema correctly we need to make sure to use only one connection for all future commands SET search_path TO non_colocated_subquery, public; -- we don't use the data anyway CREATE TABLE users_table_local AS SELECT * FROM users_table LIMIT 0; @@ -30,6 +32,7 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode -- leaf queries contain colocated joins -- but not the subquery SELECT true AS valid FROM explain_json_2($$ @@ -41,6 +44,7 @@ SELECT true AS valid FROM explain_json_2($$ WHERE foo.value_2 = bar.value_2; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.value_2 FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar WHERE (foo.value_2 
OPERATOR(pg_catalog.=) bar.value_2) valid @@ -61,6 +65,7 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT event_type FROM events_table WHERE user_id < 100); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 100) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) valid @@ -80,6 +85,7 @@ SELECT true AS valid FROM explain_json_2($$ NOT IN (SELECT user_id FROM events_table WHERE event_type = 2); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) 2) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) valid @@ -101,6 +107,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.event_type IN (SELECT event_type FROM events_table WHERE user_id < 3); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT users_table.user_id FROM public.users_table, 
public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) valid @@ -121,6 +128,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id AND foo.user_id IN (SELECT user_id FROM events_table WHERE user_id < 10); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 10)))) valid @@ -141,6 +149,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id AND foo.user_id NOT IN (SELECT user_id FROM events_table WHERE user_id < 10); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS 
user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 10) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (NOT (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) @@ -162,6 +171,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id AND foo.event_type IN (SELECT event_type FROM events_table WHERE user_id < 4); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 4) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type 
integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) @@ -186,6 +196,7 @@ SELECT true AS valid FROM explain_json_2($$ ) as foo_top, events_table WHERE events_table.user_id = foo_top.user_id; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.event_type) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan XXX_3 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) 5) @@ -220,6 +231,7 @@ SELECT true AS valid FROM explain_json_2($$ ) as foo_top; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM 
public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top valid @@ -251,6 +263,7 @@ SELECT true AS valid FROM explain_json_2($$ foo1.user_id = foo5.user_id ) as foo_top; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM 
(SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.user_id))) foo_top valid @@ -280,6 +293,7 @@ SELECT true AS valid FROM explain_json_2($$ foo1.user_id = foo5.value_1 ) as foo_top; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 
8]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top @@ -311,6 +325,7 @@ SELECT true AS valid FROM explain_json_2($$ foo2.user_id = foo5.value_1 ) as foo_top; $$); 
+DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND 
(foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo2.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top @@ -344,6 +359,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id) as bar_top ON (foo_top.user_id = bar_top.user_id); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) bar_top ON 
((foo_top.user_id OPERATOR(pg_catalog.=) bar_top.user_id))) @@ -378,6 +394,7 @@ SELECT true AS valid FROM explain_json_2($$ ON (foo_top.value_2 = bar_top.user_id); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) valid @@ -409,6 +426,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id) as bar_top ON (foo_top.value_2 = bar_top.user_id); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT 
DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16]))) DEBUG: generating subplan XXX_2 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) @@ -430,6 +448,7 @@ SELECT true AS valid FROM explain_json_2($$ WHERE foo.my_users = users_table.user_id) as mid_level_query ) as bar; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT events_table.user_id AS my_users FROM public.events_table, public.users_table WHERE 
(events_table.event_type OPERATOR(pg_catalog.=) users_table.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid @@ -449,6 +468,7 @@ SELECT true AS valid FROM explain_json_2($$ WHERE foo.my_users = users_table.user_id) as mid_level_query ) as bar; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT events_table.event_type AS my_users, random() AS random FROM public.events_table, public.users_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer, random double precision)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid @@ -472,6 +492,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c WHERE foo.my_users = users_table.user_id) as mid_level_query ) as bar; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT events_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT 
users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) selected_users WHERE (events_table.event_type OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid @@ -505,6 +526,7 @@ SELECT true AS valid FROM explain_json_2($$ ) as bar; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.events_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT events_table_1.user_id FROM public.users_table users_table_1, public.events_table events_table_1 WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table_1.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))))) selected_users WHERE (events_table.user_id OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid @@ -526,6 +548,7 @@ WHERE users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6));$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) DEBUG: Plan 
XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) valid @@ -542,6 +565,7 @@ SELECT count(*) FROM q1, (SELECT users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); +DEBUG: function does not have co-located tables DEBUG: CTE q1 is going to be inlined via distributed planning DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id FROM public.users_table) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) @@ -559,6 +583,7 @@ SELECT true AS valid FROM explain_json_2($$ users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for CTE q1: SELECT user_id FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE 
((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) valid @@ -570,6 +595,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c SELECT true AS valid FROM explain_json_2($$ (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) UNION (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8));$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) @@ -606,6 +632,7 @@ FROM ( ) q ORDER BY 2 DESC, 1; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: push down of limit count: 5 DEBUG: 
generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) LIMIT 5 @@ -627,6 +654,7 @@ SELECT true AS valid FROM explain_json_2($$ FROM (users_table u1 JOIN users_table u2 using(value_1)) a JOIN (SELECT value_1, random() FROM users_table) as u3 USING (value_1); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_1, random() AS random FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((public.users_table u1 JOIN public.users_table u2 USING (value_1)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT intermediate_result.value_1, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, random double precision)) u3 USING (value_1)) ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns @@ -639,6 +667,7 @@ SELECT true AS valid FROM explain_json_2($$ FROM (SELECT * FROM users_table u1 JOIN users_table u2 using(value_1)) a JOIN (SELECT value_1, random() FROM users_table) as u3 USING (value_1); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT u1.value_1, u1.user_id, u1."time", u1.value_2, u1.value_3, u1.value_4, u2.user_id, u2."time", u2.value_2, u2.value_3, u2.value_4 FROM (public.users_table u1 JOIN public.users_table u2 USING (value_1)) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_1, intermediate_result.user_id, intermediate_result."time", intermediate_result.value_2, 
intermediate_result.value_3, intermediate_result.value_4, intermediate_result.user_id_1 AS user_id, intermediate_result.time_1 AS "time", intermediate_result.value_2_1 AS value_2, intermediate_result.value_3_1 AS value_3, intermediate_result.value_4_1 AS value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, user_id integer, "time" timestamp without time zone, value_2 integer, value_3 double precision, value_4 bigint, user_id_1 integer, time_1 timestamp without time zone, value_2_1 integer, value_3_1 double precision, value_4_1 bigint)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT users_table.value_1, random() AS random FROM public.users_table) u3 USING (value_1)) valid @@ -658,6 +687,7 @@ SELECT true AS valid FROM explain_json_2($$ events_table using (value_2); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u1 JOIN public.events_table USING (value_2)) valid @@ -676,6 +706,7 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT value_2, random() FROM users_table) as u2 USING(value_2); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 LEFT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 
'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u2 USING (value_2)) valid @@ -696,6 +727,7 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT value_2, random() FROM users_table) as u2 USING(value_2); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 RIGHT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u2 USING (value_2)) ERROR: cannot pushdown the subquery @@ -715,6 +747,7 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT value_1 FROM users_table) as foo ON (a.user_id = foo.value_1) ); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) @@ -739,6 +772,7 @@ SELECT true AS valid FROM explain_json_2($$ users_table as foo ON (a.user_id = foo.value_1) ); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) @@ -772,6 +806,7 @@ SELECT true AS valid FROM explain_json_2($$ ON(foo.user_id = bar.value_1) ); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_1 FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id, a.user_id, bar.value_1 FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) valid @@ -811,6 +846,7 @@ SELECT true AS valid FROM explain_json_2($$ WHERE non_colocated_subquery.value_2 != non_colocated_subquery_2.cnt $$); +DEBUG: function does not have co-located tables DEBUG: CTE non_colocated_subquery is going to be inlined via distributed planning DEBUG: CTE non_colocated_subquery_2 is going to be inlined via distributed planning DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) @@ -835,6 +871,7 @@ SELECT true AS valid FROM explain_json_2($$ AND foo.value_2 = baz.value_2 $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 
for subquery SELECT users_table_local.value_2 FROM non_colocated_subquery.users_table_local, non_colocated_subquery.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) AND (foo.value_2 OPERATOR(pg_catalog.=) baz.value_2)) @@ -870,6 +907,7 @@ SELECT true AS valid FROM explain_json_2($$ AND foo.user_id IN (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2)) $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_1, value_2 FROM public.users_table DEBUG: generating subplan XXX_2 for subquery SELECT value_1 FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.<) 1) DEBUG: generating subplan XXX_3 for subquery SELECT value_2 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.<) 2) @@ -892,6 +930,7 @@ SELECT true AS valid FROM explain_json_2($$ 
users_table_ref.user_id = foo.user_id AND foo.user_id = bar.value_2; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT user_id, value_2 FROM public.events_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table users_table_ref, (SELECT users_table.user_id FROM public.users_table) foo, (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) bar WHERE ((users_table_ref.user_id OPERATOR(pg_catalog.=) foo.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) bar.value_2)) valid @@ -940,6 +979,7 @@ JOIN LATERAL WHERE user_id = users_table.user_id) AS bar LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE $$); +DEBUG: function does not have co-located tables DEBUG: Router planner cannot handle multi-shard select queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries diff --git a/src/test/regress/expected/non_super_user_object_metadata.out b/src/test/regress/expected/non_super_user_object_metadata.out index 0ff03c0b8..0923d873a 100644 --- a/src/test/regress/expected/non_super_user_object_metadata.out +++ b/src/test/regress/expected/non_super_user_object_metadata.out @@ -412,7 +412,7 @@ SELECT * FROM run_command_on_workers($$ SELECT distribution_argument_index FROM localhost | 57638 | t | 1 (2 rows) --- Show that dropping schema doesn't affect the worker node +-- Show that the schema is dropped on worker node as well DROP SCHEMA local_schema CASCADE; NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to function plpgsql_dist_function(text) @@ -426,8 +426,8 @@ SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as SELECT 
* FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%';$$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | (schema,{local_schema},{}) - localhost | 57638 | t | (schema,{local_schema},{}) + localhost | 57637 | t | + localhost | 57638 | t | (2 rows) -- Show that extension and dependent sequence also created and marked as distributed diff --git a/src/test/regress/expected/object_propagation_debug.out b/src/test/regress/expected/object_propagation_debug.out index 248ec4fd4..020fa2629 100644 --- a/src/test/regress/expected/object_propagation_debug.out +++ b/src/test/regress/expected/object_propagation_debug.out @@ -81,22 +81,5 @@ ON TRUE --------------------------------------------------------------------- (0 rows) --- find non-distributed dependencies of the local table test2 -CREATE SCHEMA objectprop2; -create table objectprop2.test2(a int, b t1); -SET search_path TO objectprop2; -SELECT - pg_identify_object(t.classid, t.objid, t.objsubid) -FROM - (SELECT * FROM pg_get_object_address('table', '{test2}', '{}')) as addr -JOIN LATERAL - "object prop".citus_get_dependencies_for_object(addr.classid, addr.objid, addr.objsubid) as t(classid oid, objid oid, objsubid int) -ON TRUE - ORDER BY 1; - pg_identify_object ---------------------------------------------------------------------- - (schema,,objectprop2,objectprop2) -(1 row) - SET client_min_messages TO ERROR; -DROP SCHEMA "object prop", objectprop2 CASCADE; +DROP SCHEMA "object prop" CASCADE; diff --git a/src/test/regress/expected/pg13.out b/src/test/regress/expected/pg13.out index 7e91e3d2a..2ba782aaf 100644 --- a/src/test/regress/expected/pg13.out +++ b/src/test/regress/expected/pg13.out @@ -145,9 +145,13 @@ CREATE TYPE myvarchar; CREATE FUNCTION 
myvarcharin(cstring, oid, integer) RETURNS myvarchar LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharin'; NOTICE: return type myvarchar is only a shell +WARNING: Citus can't distribute functions having dependency on unsupported object of type "type" +DETAIL: Function will be created only locally CREATE FUNCTION myvarcharout(myvarchar) RETURNS cstring LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharout'; NOTICE: argument type myvarchar is only a shell +WARNING: Citus can't distribute functions having dependency on unsupported object of type "type" +DETAIL: Function will be created only locally CREATE TYPE myvarchar ( input = myvarcharin, output = myvarcharout, diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out index cbdd70e05..134190dda 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -1272,3 +1272,72 @@ SELECT count(*) FROM set client_min_messages to error; drop schema pg14 cascade; +create schema pg14; +set search_path to pg14; +select 1 from citus_add_node('localhost',:master_port,groupid=>0); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- test adding foreign table to metadata with the guc +-- will test truncating foreign tables later +CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial); +INSERT INTO foreign_table_test VALUES (1, 'text_test'); +SELECT citus_add_local_table_to_metadata('foreign_table_test'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +CREATE EXTENSION postgres_fdw; +CREATE SERVER foreign_server + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'localhost', port :'master_port', dbname 'regression'); +CREATE USER MAPPING FOR CURRENT_USER + SERVER foreign_server + OPTIONS (user 'postgres'); +CREATE FOREIGN TABLE foreign_table ( + id integer NOT NULL, + data text, + a bigserial +) + SERVER foreign_server + OPTIONS (schema_name 'pg14', table_name 'foreign_table_test'); +SELECT citus_add_local_table_to_metadata('foreign_table'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) FROM foreign_table; + count +--------------------------------------------------------------------- + 1 +(1 row) + +TRUNCATE foreign_table; +\c - - - :worker_1_port +set search_path to pg14; +-- verify the foreign table is truncated +SELECT count(*) FROM pg14.foreign_table; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- should error out +TRUNCATE foreign_table; +ERROR: truncating foreign tables that are added to metadata can only be excuted on the coordinator +\c - - - :master_port +-- cleanup +set client_min_messages to error; +drop extension postgres_fdw cascade; +drop schema pg14 cascade; +reset client_min_messages; +select 1 from citus_remove_node('localhost',:master_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out index b7e0618e3..1c2fd19b4 100644 --- a/src/test/regress/expected/propagate_extension_commands.out +++ b/src/test/regress/expected/propagate_extension_commands.out @@ -186,22 +186,6 @@ CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") -(1 row) - -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") -(1 row) - SELECT run_command_on_workers($$ CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); $$); @@ -398,10 +382,34 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port); 1 (1 row) +-- Test extension function incorrect distribution argument +CREATE TABLE test_extension_function(col varchar); +CREATE EXTENSION seg; +-- Missing distribution argument +SELECT create_distributed_function('seg_in(cstring)'); +ERROR: Extension functions(seg_in) without distribution argument are not supported. 
+-- Missing colocation argument +SELECT create_distributed_function('seg_in(cstring)', '$1'); +ERROR: cannot distribute the function "seg_in" since there is no table to colocate with +HINT: Provide a distributed table via "colocate_with" option to create_distributed_function() +-- Incorrect distribution argument +SELECT create_distributed_function('seg_in(cstring)', '$2', colocate_with:='test_extension_function'); +ERROR: cannot distribute the function "seg_in" since the distribution argument is not valid +HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() +-- Colocated table is not distributed +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); +ERROR: relation test_extension_function is not distributed +DROP EXTENSION seg; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('test_extension_function', 'col', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + -- now, create a type that depends on another type, which -- finally depends on an extension BEGIN; - SET citus.shard_replication_factor TO 1; CREATE EXTENSION seg; CREATE EXTENSION isn; CREATE TYPE test_type AS (a int, b seg); @@ -419,9 +427,39 @@ BEGIN; create_reference_table --------------------------------------------------------------------- +(1 row) + + -- Distribute an extension-function + SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + (1 row) COMMIT; +-- Check the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction 
+--------------------------------------------------------------------- + seg_in +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) +(1 row) + -- add the node back SELECT 1 from master_add_node('localhost', :worker_2_port); ?column? @@ -443,5 +481,145 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname (localhost,57638,t,2) (2 rows) +-- Check the pg_dist_object on the both nodes +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +DROP EXTENSION seg CASCADE; +-- Recheck the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- +(0 rows) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,0) + (localhost,57638,t,0) +(2 rows) + +-- Distribute an extension-function where extension is not in pg_dist_object +SET citus.enable_ddl_propagation TO false; +CREATE EXTENSION seg; +SET citus.enable_ddl_propagation TO true; +-- Check the 
extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,0) + (localhost,57638,t,0) +(2 rows) + +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- Recheck the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- + seg_in +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +DROP EXTENSION seg; +DROP TABLE test_extension_function; +-- Test extension function altering distribution argument +BEGIN; +SET 
citus.shard_replication_factor = 1; +CREATE TABLE test_extension_function(col1 float8[], col2 float8[]); +SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE EXTENSION cube; +SELECT create_distributed_function('cube(float8[], float8[])', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + distribution_argument_index +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT create_distributed_function('cube(float8[], float8[])', '$2', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + distribution_argument_index +--------------------------------------------------------------------- + 1 +(1 row) + +ROLLBACK; -- drop the schema and all the objects DROP SCHEMA "extension'test" CASCADE; diff --git a/src/test/regress/expected/propagate_extension_commands_1.out b/src/test/regress/expected/propagate_extension_commands_1.out index 99b8ef3a2..e6c85bee4 100644 --- a/src/test/regress/expected/propagate_extension_commands_1.out +++ b/src/test/regress/expected/propagate_extension_commands_1.out @@ -186,22 +186,6 @@ CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); CREATE TEXT SEARCH DICTIONARY intdict 
(TEMPLATE = intdict_template); COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") -(1 row) - -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") -(1 row) - SELECT run_command_on_workers($$ CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); $$); @@ -397,10 +381,34 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port); 1 (1 row) +-- Test extension function incorrect distribution argument +CREATE TABLE test_extension_function(col varchar); +CREATE EXTENSION seg; +-- Missing distribution argument +SELECT create_distributed_function('seg_in(cstring)'); +ERROR: Extension functions(seg_in) without distribution argument are not supported. 
+-- Missing colocation argument +SELECT create_distributed_function('seg_in(cstring)', '$1'); +ERROR: cannot distribute the function "seg_in" since there is no table to colocate with +HINT: Provide a distributed table via "colocate_with" option to create_distributed_function() +-- Incorrect distribution argument +SELECT create_distributed_function('seg_in(cstring)', '$2', colocate_with:='test_extension_function'); +ERROR: cannot distribute the function "seg_in" since the distribution argument is not valid +HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() +-- Colocated table is not distributed +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); +ERROR: relation test_extension_function is not distributed +DROP EXTENSION seg; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('test_extension_function', 'col', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + -- now, create a type that depends on another type, which -- finally depends on an extension BEGIN; - SET citus.shard_replication_factor TO 1; CREATE EXTENSION seg; CREATE EXTENSION isn; CREATE TYPE test_type AS (a int, b seg); @@ -418,9 +426,39 @@ BEGIN; create_reference_table --------------------------------------------------------------------- +(1 row) + + -- Distribute an extension-function + SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + (1 row) COMMIT; +-- Check the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction 
+--------------------------------------------------------------------- + seg_in +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) +(1 row) + -- add the node back SELECT 1 from master_add_node('localhost', :worker_2_port); ?column? @@ -442,5 +480,145 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname (localhost,57638,t,2) (2 rows) +-- Check the pg_dist_object on the both nodes +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +DROP EXTENSION seg CASCADE; +-- Recheck the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- +(0 rows) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,0) + (localhost,57638,t,0) +(2 rows) + +-- Distribute an extension-function where extension is not in pg_dist_object +SET citus.enable_ddl_propagation TO false; +CREATE EXTENSION seg; +SET citus.enable_ddl_propagation TO true; +-- Check the 
extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,0) + (localhost,57638,t,0) +(2 rows) + +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- Recheck the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- + seg_in +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +DROP EXTENSION seg; +DROP TABLE test_extension_function; +-- Test extension function altering distribution argument +BEGIN; +SET 
citus.shard_replication_factor = 1; +CREATE TABLE test_extension_function(col1 float8[], col2 float8[]); +SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE EXTENSION cube; +SELECT create_distributed_function('cube(float8[], float8[])', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + distribution_argument_index +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT create_distributed_function('cube(float8[], float8[])', '$2', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + distribution_argument_index +--------------------------------------------------------------------- + 1 +(1 row) + +ROLLBACK; -- drop the schema and all the objects DROP SCHEMA "extension'test" CASCADE; diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out index 987c52bdc..948adb050 100644 --- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out @@ -43,11 +43,13 @@ SELECT create_reference_table('numbers'); INSERT INTO numbers VALUES (20), (21); NOTICE: executing the command locally: INSERT INTO 
replicate_ref_to_coordinator.numbers_8000001 AS citus_table_alias (a) VALUES (20), (21) +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION my_volatile_fn() RETURNS INT AS $$ BEGIN RETURN 1; END; $$ language plpgsql VOLATILE; +RESET citus.enable_metadata_sync; -- INSERT ... SELECT between reference tables BEGIN; EXPLAIN (COSTS OFF) INSERT INTO squares SELECT a, a*a FROM numbers; diff --git a/src/test/regress/expected/shard_move_deferred_delete.out b/src/test/regress/expected/shard_move_deferred_delete.out index ed0d46500..d878c18a4 100644 --- a/src/test/regress/expected/shard_move_deferred_delete.out +++ b/src/test/regress/expected/shard_move_deferred_delete.out @@ -142,22 +142,35 @@ SELECT master_move_shard_placement(20000000, 'localhost', :worker_2_port, 'local (1 row) -SELECT run_command_on_workers($cmd$ - -- override the function for testing purpose - create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) - as $BODY$ - begin - select 20 into available_disk_size; - select 8500 into total_disk_size; - end - $BODY$ language plpgsql; -$cmd$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") - (localhost,57638,t,"CREATE FUNCTION") -(2 rows) - +-- override the function for testing purpose +-- since it is extension owned function, propagate it to workers manually +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end 
+$BODY$ language plpgsql; +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :master_port +SET search_path TO shard_move_deferred_delete; SELECT citus_shard_cost_by_disk_size(20000001); citus_shard_cost_by_disk_size --------------------------------------------------------------------- @@ -187,22 +200,35 @@ $cmd$); (localhost,57638,t,1) (2 rows) -SELECT run_command_on_workers($cmd$ - -- override the function for testing purpose - create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) - as $BODY$ - begin - select 8300 into available_disk_size; - select 8500 into total_disk_size; - end - $BODY$ language plpgsql; -$cmd$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") - (localhost,57638,t,"CREATE FUNCTION") -(2 rows) - +-- override the function for testing purpose +-- since it is extension owned function, propagate it to workers manually +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT 
available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :master_port +SET search_path TO shard_move_deferred_delete; -- When there would not be enough free space left after the move, the move should fail SELECT master_move_shard_placement(20000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port); ERROR: not enough empty space on node if the shard is moved, actual available space after move will be 108 bytes, desired available space after move is 850 bytes,estimated size increase on node after move is 8192 bytes. diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index bbf537000..c63b50842 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -241,6 +241,7 @@ RETURNS boolean AS 'citus' LANGUAGE C STRICT VOLATILE; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -257,6 +258,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; SET citus.next_shard_id TO 123000; SELECT worker_node_responsive(node_name, node_port::int) FROM master_get_active_worker_nodes() diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out index 0f94e6e7b..3b9f9f2c7 100644 --- a/src/test/regress/expected/single_node.out +++ b/src/test/regress/expected/single_node.out @@ -2011,8 +2011,28 @@ RESET citus.enable_manual_changes_to_shards ; -- these should work as expected TRUNCATE TABLE test_disabling_drop_and_truncate_102040; DROP TABLE 
test_disabling_drop_and_truncate_102040; -RESET citus.shard_replication_factor; DROP TABLE test_disabling_drop_and_truncate; +-- test creating distributed or reference tables from shards +CREATE TABLE test_creating_distributed_relation_table_from_shard (a int); +SELECT create_distributed_table('test_creating_distributed_relation_table_from_shard', 'a'); +NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (102044, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) ');SELECT worker_apply_shard_ddl_command (102044, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') +NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (102045, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) ');SELECT worker_apply_shard_ddl_command (102045, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') +NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (102046, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) ');SELECT worker_apply_shard_ddl_command (102046, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') +NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (102047, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) ');SELECT worker_apply_shard_ddl_command (102047, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- these should error because shards cannot be used to: +-- create distributed table +SELECT 
create_distributed_table('test_creating_distributed_relation_table_from_shard_102044', 'a'); +ERROR: relation "test_creating_distributed_relation_table_from_shard_102044" is a shard relation +-- create reference table +SELECT create_reference_table('test_creating_distributed_relation_table_from_shard_102044'); +ERROR: relation "test_creating_distributed_relation_table_from_shard_102044" is a shard relation +RESET citus.shard_replication_factor; +DROP TABLE test_creating_distributed_relation_table_from_shard; -- lets flush the copy often to make sure everyhing is fine SET citus.local_copy_flush_threshold TO 1; TRUNCATE another_schema_table; diff --git a/src/test/regress/expected/single_node_truncate.out b/src/test/regress/expected/single_node_truncate.out index bf1c99d69..7d94dc744 100644 --- a/src/test/regress/expected/single_node_truncate.out +++ b/src/test/regress/expected/single_node_truncate.out @@ -31,9 +31,9 @@ SELECT * FROM table_sizes; name | has_data --------------------------------------------------------------------- citus_local | f - citus_local_102045 | t + citus_local_102049 | t ref | t - ref_102044 | t + ref_102048 | t (4 rows) -- verify that this UDF is noop on Citus local tables @@ -47,9 +47,9 @@ SELECT * FROM table_sizes; name | has_data --------------------------------------------------------------------- citus_local | f - citus_local_102045 | t + citus_local_102049 | t ref | t - ref_102044 | t + ref_102048 | t (4 rows) -- test that we allow cascading truncates to citus local tables @@ -65,9 +65,9 @@ SELECT * FROM table_sizes; name | has_data --------------------------------------------------------------------- citus_local | f - citus_local_102045 | t + citus_local_102049 | t ref | f - ref_102044 | t + ref_102048 | t (4 rows) ROLLBACK; @@ -98,14 +98,14 @@ SELECT * FROM table_sizes; name | has_data --------------------------------------------------------------------- citus_local | f - citus_local_102045 | t + citus_local_102049 | t dist | f - 
dist_102047 | t - dist_102048 | t - dist_102049 | t - dist_102050 | t + dist_102051 | t + dist_102052 | t + dist_102053 | t + dist_102054 | t ref | f - ref_102044 | t + ref_102048 | t (9 rows) ROLLBACK; @@ -121,14 +121,14 @@ SELECT * FROM table_sizes; name | has_data --------------------------------------------------------------------- citus_local | f - citus_local_102045 | t + citus_local_102049 | t dist | f - dist_102047 | t - dist_102048 | t - dist_102049 | t - dist_102050 | t + dist_102051 | t + dist_102052 | t + dist_102053 | t + dist_102054 | t ref | t - ref_102044 | t + ref_102048 | t (9 rows) ROLLBACK; diff --git a/src/test/regress/expected/subquery_and_cte.out b/src/test/regress/expected/subquery_and_cte.out index 4360bb69e..f3f12b975 100644 --- a/src/test/regress/expected/subquery_and_cte.out +++ b/src/test/regress/expected/subquery_and_cte.out @@ -126,6 +126,7 @@ WITH cte1 AS MATERIALIZED (SELECT id, value FROM func()) UPDATE dist_table dt SET value = cte1.value FROM cte1 WHERE dt.id = 1; DEBUG: generating subplan XXX_1 for CTE cte1: SELECT id, value FROM subquery_and_ctes.func() func(id, value) +DEBUG: function does not have co-located tables DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE subquery_and_ctes.dist_table dt SET value = cte1.value FROM (SELECT intermediate_result.id, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value integer)) cte1 WHERE (dt.id OPERATOR(pg_catalog.=) 1) -- CTEs are recursively planned, and subquery foo is also recursively planned -- final plan becomes a real-time plan since we also have events_table in the diff --git a/src/test/regress/expected/subquery_prepared_statements.out b/src/test/regress/expected/subquery_prepared_statements.out index 91e5d6642..29e76db03 100644 --- a/src/test/regress/expected/subquery_prepared_statements.out +++ b/src/test/regress/expected/subquery_prepared_statements.out @@ -2,13 +2,6 @@ -- 
test recursive planning functionality on prepared statements -- =================================================================== CREATE SCHEMA subquery_prepared_statements; -SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - SET search_path TO subquery_prepared_statements, public; CREATE TYPE subquery_prepared_statements.xy AS (x int, y int); SET client_min_messages TO DEBUG1; diff --git a/src/test/regress/expected/text_search.out b/src/test/regress/expected/text_search.out new file mode 100644 index 000000000..1b4f652c1 --- /dev/null +++ b/src/test/regress/expected/text_search.out @@ -0,0 +1,489 @@ +CREATE SCHEMA text_search; +CREATE SCHEMA text_search2; +SET search_path TO text_search; +-- create a new configruation from scratch +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE t1; +DROP TEXT SEARCH CONFIGURATION my_text_search_config; +-- try to create table and index in 1 transaction +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ABORT; +-- try again, should not fail with 
my_text_search_config being retained on the worker +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'on demand propagation of text search object with a comment'; +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.my_text_search_config'::regconfig); +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | on demand propagation of text search object with a comment + localhost | 57638 | t | on demand propagation of text search object with a comment +(2 rows) + +-- verify that changing anything on a managed TEXT SEARCH CONFIGURATION fails after parallel execution +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'this comment can''t be set right now'; +ERROR: cannot run text search configuration command because there was a parallel operation on a distributed table in the transaction +DETAIL: When running command on/for a distributed text search configuration, Citus needs to perform all operations over a single connection per node to ensure consistency. 
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ABORT; +-- create an index on an already distributed table +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config2 ( parser = default ); +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config2 IS 'on demand propagation of text search object with a comment 2'; +CREATE TABLE t1(id int, name text); +SELECT create_distributed_table('t1', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config2'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.my_text_search_config2'::regconfig); +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | on demand propagation of text search object with a comment 2 + localhost | 57638 | t | on demand propagation of text search object with a comment 2 +(2 rows) + +ABORT; +-- should be able to create a configuration based on a copy of an existing configuration +CREATE TEXT SEARCH CONFIGURATION french_noaccent ( COPY = french ); +CREATE TABLE t2(id int, name text); +CREATE INDEX t2_search_name ON t2 USING gin (to_tsvector('text_search.french_noaccent'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t2', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- spot check that french_noaccent copied settings from french +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'comment tu t''appelle') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result 
+--------------------------------------------------------------------- + localhost | 57637 | t | (asciiword,french_stem) + localhost | 57638 | t | (asciiword,french_stem) +(2 rows) + +-- makes no sense, however we expect that the dictionary for the first token changes accordingly +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING FOR asciiword WITH dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'comment tu t''appelle') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciiword,dutch_stem) + localhost | 57638 | t | (asciiword,dutch_stem) +(2 rows) + +-- do the same but we will replace all french dictionaries +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,french_stem) + localhost | 57638 | t | (asciihword,french_stem) +(2 rows) + +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING REPLACE french_stem WITH dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,dutch_stem) + localhost | 57638 | t | (asciihword,dutch_stem) +(2 rows) + +-- once more but now back via yet a different DDL command +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING FOR asciihword REPLACE dutch_stem WITH french_stem; +SELECT * FROM run_command_on_workers($$ + 
SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,french_stem) + localhost | 57638 | t | (asciihword,french_stem) +(2 rows) + +-- drop a mapping +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING FOR asciihword; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,) + localhost | 57638 | t | (asciihword,) +(2 rows) + +-- also with exists, doesn't change anything, but should not error +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciihword; +NOTICE: mapping for token type "asciihword" does not exist, skipping +-- Comment on a text search configuration +COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS 'a text configuration that is butcherd to test all edge cases'; +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.french_noaccent'::regconfig); +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | a text configuration that is butcherd to test all edge cases + localhost | 57638 | t | a text configuration that is butcherd to test all edge cases +(2 rows) + +-- Remove a comment +COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS NULL; +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.french_noaccent'::regconfig); +$$) ORDER BY 1,2; + nodename | nodeport | success | result 
+--------------------------------------------------------------------- + localhost | 57637 | t | + localhost | 57638 | t | +(2 rows) + +-- verify adding 2 dictionaries for two tokes at once +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciiword, asciihword; +NOTICE: mapping for token type "asciihword" does not exist, skipping +ALTER TEXT SEARCH CONFIGURATION french_noaccent ADD MAPPING FOR asciiword, asciihword WITH french_stem, dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionaries) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciiword,"{french_stem,dutch_stem}") + localhost | 57638 | t | (asciiword,"{french_stem,dutch_stem}") +(2 rows) + +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionaries) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,"{french_stem,dutch_stem}") + localhost | 57638 | t | (asciihword,"{french_stem,dutch_stem}") +(2 rows) + +--verify we can drop cascade a configuration that is in use +-- verify it is in use +DROP TEXT SEARCH CONFIGURATION text_search.french_noaccent; +ERROR: cannot drop text search configuration french_noaccent because other objects depend on it +DETAIL: index t2_search_name depends on text search configuration french_noaccent +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+-- drop cascade +DROP TEXT SEARCH CONFIGURATION text_search.french_noaccent CASCADE; +NOTICE: drop cascades to index t2_search_name +-- verify the configuration is dropped from the workers +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.french_noaccent'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.french_noaccent" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.french_noaccent" does not exist +(2 rows) + +SET client_min_messages TO 'warning'; +SELECT * FROM run_command_on_workers($$CREATE ROLE text_search_owner;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | CREATE ROLE + localhost | 57638 | t | CREATE ROLE +(2 rows) + +CREATE ROLE text_search_owner; +RESET client_min_messages; +CREATE TEXT SEARCH CONFIGURATION changed_owner ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | postgres + localhost | 57638 | t | postgres +(2 rows) + +ALTER TEXT SEARCH CONFIGURATION changed_owner OWNER TO text_search_owner; +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search_owner + localhost | 57638 | t | text_search_owner +(2 rows) + +-- redo test with propagating object after it was created and changed of owner +SET 
citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION changed_owner2 ( PARSER = default ); +ALTER TEXT SEARCH CONFIGURATION changed_owner2 OWNER TO text_search_owner; +RESET citus.enable_ddl_propagation; +-- verify object doesn't exist before propagating +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.changed_owner2'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.changed_owner2" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.changed_owner2" does not exist +(2 rows) + +-- distribute configuration +CREATE TABLE t3(id int, name text); +CREATE INDEX t3_search_name ON t3 USING gin (to_tsvector('text_search.changed_owner2'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t3', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- verify config owner +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner2'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search_owner + localhost | 57638 | t | text_search_owner +(2 rows) + +-- rename tests +CREATE TEXT SEARCH CONFIGURATION change_name ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.change_name'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.change_name + localhost | 57638 | t | text_search.change_name +(2 rows) + +ALTER TEXT SEARCH CONFIGURATION change_name RENAME TO 
changed_name; +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.changed_name'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.changed_name + localhost | 57638 | t | text_search.changed_name +(2 rows) + +-- test move of schema +CREATE TEXT SEARCH CONFIGURATION change_schema ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.change_schema'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.change_schema + localhost | 57638 | t | text_search.change_schema +(2 rows) + +ALTER TEXT SEARCH CONFIGURATION change_schema SET SCHEMA text_search2; +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search2.change_schema'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search2.change_schema + localhost | 57638 | t | text_search2.change_schema +(2 rows) + +-- verify we get an error that the configuration change_schema is not found, even though the object address will be +-- found in its new schema, and is distributed +ALTER TEXT SEARCH CONFIGURATION change_schema SET SCHEMA text_search2; +ERROR: text search configuration "change_schema" does not exist +-- should tell us that text_search.does_not_exist does not exist, covers a complex edgecase +-- in resolving the object address +ALTER TEXT SEARCH CONFIGURATION text_search.does_not_exist SET SCHEMA text_search2; +ERROR: text search configuration "text_search.does_not_exist" does not exist +-- verify edgecases in deparsers +CREATE TEXT SEARCH CONFIGURATION config1 ( PARSER = default 
); +CREATE TEXT SEARCH CONFIGURATION config2 ( PARSER = default ); +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION config3 ( PARSER = default ); +RESET citus.enable_ddl_propagation; +-- verify config1, config2 exist on workers, config3 not +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.config1 + localhost | 57638 | t | text_search.config1 +(2 rows) + +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.config2 + localhost | 57638 | t | text_search.config2 +(2 rows) + +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.config3" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.config3" does not exist +(2 rows) + +-- DROP all config's, only 1&2 are distributed, they should propagate well to remotes +DROP TEXT SEARCH CONFIGURATION config1, config2, config3; +-- verify all existing ones have been removed (checking config3 for consistency) +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.config1" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.config1" does not exist +(2 rows) + +SELECT * FROM 
run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.config2" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.config2" does not exist +(2 rows) + +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.config3" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.config3" does not exist +(2 rows) + +-- verify they are all removed locally +SELECT 'text_search.config1'::regconfig; +ERROR: text search configuration "text_search.config1" does not exist +SELECT 'text_search.config2'::regconfig; +ERROR: text search configuration "text_search.config2" does not exist +SELECT 'text_search.config3'::regconfig; +ERROR: text search configuration "text_search.config3" does not exist +-- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION concurrent_index_config ( PARSER = default ); +RESET citus.enable_ddl_propagation; +-- verify it doesn't exist on the workers +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.concurrent_index_config'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.concurrent_index_config" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.concurrent_index_config" does not exist +(2 rows) + +-- create distributed table that 
then concurrently would have an index created. +CREATE TABLE t4(id int, name text); +SELECT create_distributed_table('t4', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE INDEX CONCURRENTLY t4_search_name ON t4 USING gin (to_tsvector('text_search.concurrent_index_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +-- now the configuration should be on the worker, and the above index creation shouldn't have failed. +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.concurrent_index_config'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.concurrent_index_config + localhost | 57638 | t | text_search.concurrent_index_config +(2 rows) + +-- verify the objid is correctly committed locally due to the somewhat convoluted commit and new transaction starting when creating an index concurrently +SELECT pg_catalog.pg_identify_object_as_address(classid, objid, objsubid) + FROM citus.pg_dist_object + WHERE classid = 3602 AND objid = 'text_search.concurrent_index_config'::regconfig::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + ("text search configuration","{text_search,concurrent_index_config}",{}) +(1 row) + +-- verify old text search configurations get renamed if they are not the same as the newly propagated configuration. +-- We do this by creating configurations on the workers as a copy from a different existing catalog. 
+SELECT * FROM run_command_on_workers($$ + set citus.enable_metadata_sync TO off; + CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = dutch ); + reset citus.enable_metadata_sync; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | SET + localhost | 57638 | t | SET +(2 rows) + +CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = french ); +-- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations +SELECT * FROM run_command_on_workers($$ + SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} + localhost | 57638 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} +(2 rows) + +-- verify the objects get reused appropriately when the specification is the same +SELECT * FROM run_command_on_workers($$ + set citus.enable_metadata_sync TO off; + CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = french ); + reset citus.enable_metadata_sync; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | SET + localhost | 57638 | t | SET +(2 rows) + +CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = french ); +-- now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the +-- original one is reused +SELECT * FROM run_command_on_workers($$ + SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; +$$) ORDER BY 1,2; + nodename | nodeport | success | result 
+--------------------------------------------------------------------- + localhost | 57637 | t | {manually_created_correct} + localhost | 57638 | t | {manually_created_correct} +(2 rows) + +CREATE SCHEMA "Text Search Requiring Quote's"; +CREATE TEXT SEARCH CONFIGURATION "Text Search Requiring Quote's"."Quoted Config Name" ( parser = default ); +CREATE TABLE t5(id int, name text); +CREATE INDEX t5_search_name ON t5 USING gin (to_tsvector('"Text Search Requiring Quote''s"."Quoted Config Name"'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t5', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SET client_min_messages TO 'warning'; +DROP SCHEMA text_search, text_search2, "Text Search Requiring Quote's" CASCADE; +DROP ROLE text_search_owner; diff --git a/src/test/regress/expected/upgrade_basic_before.out b/src/test/regress/expected/upgrade_basic_before.out index 28a358428..2e63447df 100644 --- a/src/test/regress/expected/upgrade_basic_before.out +++ b/src/test/regress/expected/upgrade_basic_before.out @@ -53,6 +53,7 @@ SELECT create_distributed_table('tr', 'pk'); INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -69,6 +70,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE t_range(id int, value_1 int); SELECT create_distributed_table('t_range', 'id', 'range'); create_distributed_table diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 
86c121568..7ae524a3b 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -203,6 +203,7 @@ ORDER BY 1; function worker_cleanup_job_schema_cache() function worker_create_or_alter_role(text,text,text) function worker_create_or_replace_object(text) + function worker_create_or_replace_object(text[]) function worker_create_schema(bigint,text) function worker_create_truncate_trigger(regclass) function worker_drop_distributed_table(text) @@ -267,5 +268,5 @@ ORDER BY 1; view citus_worker_stat_activity view pg_dist_shard_placement view time_partitions -(251 rows) +(252 rows) diff --git a/src/test/regress/expected/values.out b/src/test/regress/expected/values.out index 8509cd971..cc26f7432 100644 --- a/src/test/regress/expected/values.out +++ b/src/test/regress/expected/values.out @@ -587,6 +587,8 @@ CREATE OR REPLACE FUNCTION fixed_volatile_value() RETURNS integer VOLATILE AS $$ RETURN 1; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands EXECUTE test_values_pushdown(fixed_volatile_value(),2,3); NOTICE: evaluated on the coordinator CONTEXT: PL/pgSQL function fixed_volatile_value() line XX at RAISE diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index 659000306..fa828bd7b 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -294,11 +294,13 @@ ALTER TABLE lineitem_alter_220000 DROP COLUMN first; CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED); -- Use an event trigger to log all DDL event tags in it +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION log_ddl_tag() RETURNS event_trigger AS $ldt$ BEGIN INSERT INTO ddl_commands VALUES (tg_tag); END; $ldt$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag(); @@ -499,13 +501,14 @@ CREATE TABLE trigger_table ( SELECT create_distributed_table('trigger_table', 'id'); -- first set a trigger on a shard \c - - - :worker_1_port - +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION update_value() RETURNS trigger AS $up$ BEGIN NEW.value := 'trigger enabled'; RETURN NEW; END; $up$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE TRIGGER update_value BEFORE INSERT ON trigger_table_220017 @@ -645,10 +648,6 @@ SELECT create_distributed_table('table_without_sequence', 'a'); CREATE SCHEMA test_schema_for_sequence_propagation; CREATE SEQUENCE test_schema_for_sequence_propagation.seq_10; --- Both should have zero rows -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass); -SELECT pg_identify_object_as_address(classid, objid, objsubid) 
from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace); - ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_schema_for_sequence_propagation.seq_10'); -- Should be distributed along with the sequence diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index f0700c734..e5602618f 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -19,6 +19,7 @@ test: isolation_cluster_management # the following tests depend on the distributed # transactionId, so should not be parallelized # and no tests should be added before these +test: isolation_metadata_sync_vs_all test: isolation_distributed_transaction_id test: isolation_dump_global_wait_edges test: isolation_citus_dist_activity @@ -63,6 +64,7 @@ test: shared_connection_waits test: isolation_cancellation test: isolation_undistribute_table test: isolation_fix_partition_shard_index_names +test: isolation_global_pid # Rebalancer test: isolation_blocking_move_single_shard_commands diff --git a/src/test/regress/mitmscripts/fluent.py b/src/test/regress/mitmscripts/fluent.py index eb392ab7c..2fc408e03 100644 --- a/src/test/regress/mitmscripts/fluent.py +++ b/src/test/regress/mitmscripts/fluent.py @@ -20,9 +20,6 @@ logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s", level=loggin # I. 
Command Strings -class Stop(Exception): - pass - class Handler: ''' This class hierarchy serves two purposes: @@ -46,19 +43,13 @@ class Handler: if not self.next: raise Exception("we don't know what to do!") - try: - self.next._accept(flow, message) - except Stop: + if self.next._accept(flow, message) == 'stop': if self.root is not self: - raise + return 'stop' self.next = KillHandler(self) flow.kill() - elif result == 'done': - # stop processing this packet, move on to the next one - return - elif result == 'stop': - # from now on kill all connections - raise Stop() + else: + return result def _handle(self, flow, message): ''' diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index db44baa61..4d7f68e73 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -268,7 +268,6 @@ test: multi_foreign_key_relation_graph # and rerun some of the tests. # -------- test: add_coordinator -test: foreign_key_to_reference_table test: replicate_reference_tables_to_coordinator test: citus_local_tables test: mixed_relkind_tests @@ -313,13 +312,16 @@ test: ssl_by_default # --------- # object distribution tests +# TODO: After deprecating parameterless create_distributed_function combine +# distributed_functions and function_propagation tests # --------- -test: distributed_types distributed_types_conflict disable_object_propagation distributed_types_xact_add_enum_value +test: distributed_types distributed_types_conflict disable_object_propagation distributed_types_xact_add_enum_value text_search test: check_mx test: distributed_functions distributed_functions_conflict test: distributed_collations test: distributed_procedure test: distributed_collations_conflict +test: function_propagation test: check_mx # --------- diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index 6c2b04289..2e26ea425 100644 --- 
a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -642,11 +642,13 @@ ALTER TABLE lineitem_alter_220000 DROP COLUMN first; -- Create table to trigger at-xact-end (deferred) failure CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED); -- Use an event trigger to log all DDL event tags in it +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION log_ddl_tag() RETURNS event_trigger AS $ldt$ BEGIN INSERT INTO ddl_commands VALUES (tg_tag); END; $ldt$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag(); \c - - - :master_port -- The above trigger will cause failure at transaction end on one placement. @@ -985,12 +987,14 @@ SELECT create_distributed_table('trigger_table', 'id'); -- first set a trigger on a shard \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION update_value() RETURNS trigger AS $up$ BEGIN NEW.value := 'trigger enabled'; RETURN NEW; END; $up$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE TRIGGER update_value BEFORE INSERT ON trigger_table_220017 FOR EACH ROW EXECUTE PROCEDURE update_value(); @@ -1269,17 +1273,6 @@ SELECT create_distributed_table('table_without_sequence', 'a'); CREATE SCHEMA test_schema_for_sequence_propagation; CREATE SEQUENCE test_schema_for_sequence_propagation.seq_10; --- Both should have zero rows -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass); - pg_identify_object_as_address -------------------------------- -(0 rows) - -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace); - pg_identify_object_as_address -------------------------------- -(0 rows) - ALTER TABLE table_without_sequence ADD 
COLUMN x BIGINT DEFAULT nextval('test_schema_for_sequence_propagation.seq_10'); -- Should be distributed along with the sequence SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass); diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index 2bd0c7b77..d6d72d963 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -635,6 +635,8 @@ INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::re INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0); INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'second_dustbunnies'::regclass::oid, 0); SELECT 1 FROM master_activate_node('localhost', :worker_1_port); +NOTICE: Replicating postgres objects to node localhost:57637 +DETAIL: There are 114 objects to replicate, depending on your environment this might take a while ?column? 
--------------------------------------------------------------------- 1 diff --git a/src/test/regress/spec/isolation_citus_dist_activity.spec b/src/test/regress/spec/isolation_citus_dist_activity.spec index 5d8558a84..c41c671f0 100644 --- a/src/test/regress/spec/isolation_citus_dist_activity.spec +++ b/src/test/regress/spec/isolation_citus_dist_activity.spec @@ -71,7 +71,7 @@ step "s2-sleep" step "s2-view-dist" { - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC; } diff --git a/src/test/regress/spec/isolation_distributed_transaction_id.spec b/src/test/regress/spec/isolation_distributed_transaction_id.spec index 372cd8f78..f928918ed 100644 --- a/src/test/regress/spec/isolation_distributed_transaction_id.spec +++ b/src/test/regress/spec/isolation_distributed_transaction_id.spec @@ -54,7 +54,8 @@ step "s1-verify-current-xact-is-on-worker" get_current_transaction_id() as xact, run_command_on_workers($$ SELECT row(initiator_node_identifier, transaction_number) - FROM get_all_active_transactions(); + FROM get_all_active_transactions() + WHERE transaction_number != 0; $$) as remote ORDER BY remote.nodeport ASC; } diff --git a/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec b/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec index c8aad9a79..f46f80753 100644 --- 
a/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec +++ b/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec @@ -188,40 +188,34 @@ step "s3-drop-coordinator-schemas" DROP SCHEMA IF EXISTS myschema2 CASCADE; } -step "s3-drop-worker-schemas" -{ - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); -} - // schema only tests -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation 
"s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // concurrency tests with multi schema distribution -permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s3-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" 
"s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s3-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // type and schema tests -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // distributed function tests // isolation tests are not very simple psql, so trigger NOTIFY reliably for // s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by // 
"s2-commit", because "COMMIT" syncs the messages -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // we cannot run the following operations concurrently // the problem is that NOTIFY event doesn't (reliably) happen before COMMIT // so we have to commit s2 before s1 starts -permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" diff --git a/src/test/regress/spec/isolation_get_all_active_transactions.spec b/src/test/regress/spec/isolation_get_all_active_transactions.spec index 
da0c4553c..fd69c0ac4 100644 --- a/src/test/regress/spec/isolation_get_all_active_transactions.spec +++ b/src/test/regress/spec/isolation_get_all_active_transactions.spec @@ -71,32 +71,32 @@ session "s3" step "s3-as-admin" { -- Admin should be able to see all transactions - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } step "s3-as-user-1" { -- User should only be able to see its own transactions SET ROLE test_user_1; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } step "s3-as-readonly" { -- Other user should not see transactions SET ROLE test_readonly; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } step "s3-as-monitor" { -- Monitor should see all transactions SET ROLE test_monitor; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } permutation "s1-grant" "s1-begin-insert" "s2-begin-insert" "s3-as-admin" "s3-as-user-1" "s3-as-readonly" "s3-as-monitor" "s1-commit" "s2-commit" diff --git a/src/test/regress/spec/isolation_global_pid.spec b/src/test/regress/spec/isolation_global_pid.spec new file mode 100644 index 000000000..62f45d1e9 --- 
/dev/null +++ b/src/test/regress/spec/isolation_global_pid.spec @@ -0,0 +1,96 @@ +#include "isolation_mx_common.include.spec" + +setup +{ + SET citus.next_shard_id TO 12345000; + CREATE TABLE dist_table (a INT, b INT); + SELECT create_distributed_table('dist_table', 'a', shard_count:=4); +} + +teardown +{ + DROP TABLE dist_table; + SELECT citus_internal.restore_isolation_tester_func(); +} + +session "s1" + +step "s1-coordinator-begin" +{ + BEGIN; +} + +step "s1-coordinator-select" +{ + SET citus.enable_local_execution TO off; + SET citus.force_max_query_parallelization TO ON; + SELECT * FROM dist_table; +} + +step "s1-coordinator-commit" +{ + COMMIT; +} + +step "s1-start-session-level-connection" +{ + + SELECT start_session_level_connection_to_node('localhost', 57637); +} + +step "s1-worker-begin" +{ + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +} + +step "s1-worker-select" +{ + SELECT run_commands_on_session_level_connection_to_node('SET citus.enable_local_execution TO off; SET citus.force_max_query_parallelization TO ON; SELECT * FROM dist_table'); +} + +step "s1-worker-commit" +{ + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +} + +step "s1-stop-session-level-connection" +{ + SELECT stop_session_level_connection_to_node(); +} + +session "s2" + +step "s2-coordinator-citus_dist_stat_activity" +{ + SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' and query NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; +} + +step "s2-coordinator-citus_worker_stat_activity" +{ + SELECT query FROM citus_worker_stat_activity() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + ORDER BY 1; +} + +step "s2-coordinator-get_all_active_transactions" +{ + SELECT count(*) FROM get_all_active_transactions() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT 
* FROM dist\_table%' + ); +} + +step "s2-coordinator-get_global_active_transactions" +{ + SELECT count(*) FROM get_global_active_transactions() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + AND transaction_number != 0; +} + + +// worker - coordinator +permutation "s1-start-session-level-connection" "s1-worker-begin" "s1-worker-select" "s2-coordinator-citus_dist_stat_activity" "s2-coordinator-citus_worker_stat_activity" "s1-worker-commit" "s1-stop-session-level-connection" + +// coordinator - coordinator +permutation "s1-coordinator-begin" "s1-coordinator-select" "s2-coordinator-citus_dist_stat_activity" "s2-coordinator-citus_worker_stat_activity" "s2-coordinator-get_all_active_transactions" "s2-coordinator-get_global_active_transactions" "s1-coordinator-commit" diff --git a/src/test/regress/spec/isolation_metadata_sync_vs_all.spec b/src/test/regress/spec/isolation_metadata_sync_vs_all.spec new file mode 100644 index 000000000..7ff246365 --- /dev/null +++ b/src/test/regress/spec/isolation_metadata_sync_vs_all.spec @@ -0,0 +1,199 @@ +setup +{ + SET citus.shard_replication_factor to 1; + + ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 123000; + ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 123000; + ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 123000; + ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 123000; + + -- Create the necessary test utility function + CREATE OR REPLACE FUNCTION activate_node_snapshot() + RETURNS text[] + LANGUAGE C STRICT + AS 'citus'; + SELECT create_distributed_function('activate_node_snapshot()'); + + -- Create distributed tables + CREATE TABLE ref_table (test_id integer, y int unique); + SELECT create_reference_table('ref_table'); + + CREATE TABLE dist_table (x int, y int); + SELECT create_distributed_table('dist_table', 'x'); + + CREATE TABLE dist_partitioned_table (x int, y int) PARTITION BY RANGE(y); + SELECT 
create_distributed_table('dist_partitioned_table', 'x'); + + CREATE TABLE dist_partitioned_table_p1(x int, y int); +} + +teardown +{ + // drop all distributed tables + DROP TABLE IF EXISTS ref_table, + dist_table, + dist_partitioned_table, + dist_partitioned_table_p1, + dist_partitioned_table_p2, + new_dist_table, + new_ref_table; + + + // drop all distributed objects + DROP FUNCTION activate_node_snapshot(); + DROP FUNCTION IF EXISTS squares(int); + DROP TYPE IF EXISTS my_type; +} + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-commit" +{ + COMMIT; +} + +step "s1-start-metadata-sync" +{ + SELECT start_metadata_sync_to_node('localhost', 57638); +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-commit" +{ + COMMIT; +} + +step "s2-start-metadata-sync-to-same-node" +{ + SELECT start_metadata_sync_to_node('localhost', 57638); +} + +step "s2-start-metadata-sync-to-another-node" +{ + SELECT start_metadata_sync_to_node('localhost', 57637); +} + +step "s2-alter-table" +{ + ALTER TABLE dist_table ADD COLUMN z int; +} + +step "s2-add-fk" +{ + ALTER TABLE dist_table ADD CONSTRAINT y_fk FOREIGN KEY (y) REFERENCES ref_table(y); +} + +step "s2-drop-fk" +{ + ALTER TABLE dist_table DROP CONSTRAINT y_fk; +} + +step "s2-drop-table" +{ + DROP TABLE dist_table; +} + +step "s2-create-dist-table" +{ + CREATE TABLE new_dist_table(id int, data int); + SELECT create_distributed_table('new_dist_table', 'id'); +} + +step "s2-create-ref-table" +{ + CREATE TABLE new_ref_table(id int, data int); + SELECT create_reference_table('new_ref_table'); +} + +step "s2-attach-partition" +{ + ALTER TABLE dist_partitioned_table ATTACH PARTITION dist_partitioned_table_p1 FOR VALUES FROM (1) TO (9); +} + +step "s2-detach-partition" +{ + ALTER TABLE dist_partitioned_table DETACH PARTITION dist_partitioned_table_p1; +} + +step "s2-create-partition-of" +{ + CREATE TABLE dist_partitioned_table_p2 PARTITION OF dist_partitioned_table FOR VALUES FROM (10) TO (20); +} + +step 
"s2-create-type" +{ + CREATE TYPE my_type AS (a int, b int); +} + +step "s2-create-dist-func" +{ + CREATE FUNCTION squares(int) RETURNS SETOF RECORD + AS $$ SELECT i, i * i FROM generate_series(1, $1) i $$ + LANGUAGE SQL; + + SELECT create_distributed_function('squares(int)'); +} + +session "s3" + +step "s3-compare-snapshot" +{ + SELECT count(*) = 0 AS same_metadata_in_workers + FROM + ( + ( + SELECT unnest(activate_node_snapshot()) + EXCEPT + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + ) + UNION + ( + SELECT unnest(result::text[]) AS unnested_result + FROM run_command_on_workers($$SELECT activate_node_snapshot()$$) + EXCEPT + SELECT unnest(activate_node_snapshot()) + ) + ) AS foo; +} + +step "s3-debug" +{ + SELECT unnest(activate_node_snapshot()); + + SELECT unnest(result::text[]) + FROM run_command_on_workers('SELECT activate_node_snapshot()'); +} + +// before running any updates to metadata, make sure all nodes have same metadata in the cluster +permutation "s3-compare-snapshot" + +// concurrent metadata syncing operations get blocked +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-start-metadata-sync-to-same-node" "s1-commit" "s2-commit" "s3-compare-snapshot" +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-start-metadata-sync-to-another-node" "s1-commit" "s2-commit" "s3-compare-snapshot" + +// the following operations get blocked when a concurrent metadata sync is in progress +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-alter-table" "s1-commit" "s2-commit" "s3-compare-snapshot" +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-drop-table" "s1-commit" "s2-commit" "s3-compare-snapshot" +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-create-dist-table" "s1-commit" "s2-commit" "s3-compare-snapshot" +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-create-ref-table" "s1-commit" "s2-commit" 
"s3-compare-snapshot" +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-attach-partition" "s1-commit" "s2-commit" "s3-compare-snapshot" +permutation "s2-attach-partition" "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-detach-partition" "s1-commit" "s2-commit" "s3-compare-snapshot" +permutation "s2-attach-partition" "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-create-partition-of" "s1-commit" "s2-commit" "s3-compare-snapshot" +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-add-fk" "s1-commit" "s2-commit" "s3-compare-snapshot" +permutation "s2-add-fk" "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-drop-fk" "s1-commit" "s2-commit" "s3-compare-snapshot" + +// the following operations do not get blocked +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-create-type" "s1-commit" "s2-commit" "s3-compare-snapshot" +permutation "s1-begin" "s2-begin" "s1-start-metadata-sync" "s2-create-dist-func" "s1-commit" "s2-commit" "s3-compare-snapshot" diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index 752f26399..0defcf549 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -83,7 +83,7 @@ step "s2-lock-ref-table-placement-on-coordinator" step "s2-view-dist" { - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query 
NOT ILIKE '%COMMIT%' AND query NOT ILIKE '%pg_isolation_test_session_is_blocked%' AND query NOT ILIKE '%BEGIN%' ORDER BY query DESC; } step "s2-view-worker" @@ -106,14 +106,15 @@ step "s2-sleep" step "s2-active-transactions" { -- Admin should be able to see all transactions - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } // we disable the daemon during the regression tests in order to get consistent results // thus we manually issue the deadlock detection session "deadlock-checker" + // we issue the checker not only when there are deadlocks to ensure that we never cancel // backend inappropriately step "deadlock-checker-call" diff --git a/src/test/regress/sql/aggregate_support.sql b/src/test/regress/sql/aggregate_support.sql index dc7215f79..7c82418a7 100644 --- a/src/test/regress/sql/aggregate_support.sql +++ b/src/test/regress/sql/aggregate_support.sql @@ -459,6 +459,7 @@ RETURN $1 * $1; END; $function$; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION square_func(int) RETURNS int LANGUAGE plpgsql @@ -467,6 +468,7 @@ BEGIN RETURN $1 * $1; END; $function$; +RESET citus.enable_metadata_sync; SELECT const_function(1), string_agg(a::character, ',') FROM t1; SELECT const_function(1), count(b) FROM t1; diff --git a/src/test/regress/sql/citus_local_table_triggers.sql b/src/test/regress/sql/citus_local_table_triggers.sql index 76b192388..9bb1ee81d 100644 --- a/src/test/regress/sql/citus_local_table_triggers.sql +++ b/src/test/regress/sql/citus_local_table_triggers.sql @@ -160,8 +160,9 @@ CREATE TRIGGER "trigger\'name" BEFORE INSERT ON "interesting!schema"."citus_local!_table" FOR EACH STATEMENT EXECUTE FUNCTION dummy_function(); +CREATE EXTENSION seg; + BEGIN; - CREATE EXTENSION seg; -- ALTER TRIGGER DEPENDS ON ALTER 
TRIGGER "trigger\'name" ON "interesting!schema"."citus_local!_table" DEPENDS ON EXTENSION seg; @@ -384,4 +385,5 @@ BEGIN; ROLLBACK; -- cleanup at exit +SET client_min_messages TO ERROR; DROP SCHEMA citus_local_table_triggers, "interesting!schema" CASCADE; diff --git a/src/test/regress/sql/citus_local_tables_queries_mx.sql b/src/test/regress/sql/citus_local_tables_queries_mx.sql index f4a53bffc..ddcc95d84 100644 --- a/src/test/regress/sql/citus_local_tables_queries_mx.sql +++ b/src/test/regress/sql/citus_local_tables_queries_mx.sql @@ -43,6 +43,7 @@ CREATE TABLE postgres_local_table(a int, b int); -- We shouldn't use LIMIT in INSERT SELECT queries to make the test faster as -- LIMIT would force planner to wrap SELECT query in an intermediate result and -- this might reduce the coverage of the test cases. +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$ BEGIN SET client_min_messages to ERROR; @@ -59,6 +60,7 @@ CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$ RESET client_min_messages; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; ---------------- ---- SELECT ---- diff --git a/src/test/regress/sql/columnar_truncate.sql b/src/test/regress/sql/columnar_truncate.sql index f9532c00d..9cdc44d55 100644 --- a/src/test/regress/sql/columnar_truncate.sql +++ b/src/test/regress/sql/columnar_truncate.sql @@ -128,6 +128,7 @@ set columnar.compression = 'pglz'; INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100); set columnar.compression to default; -- create a user that can not truncate +SELECT run_command_on_workers($$CREATE USER truncate_user;$$); CREATE USER truncate_user; GRANT USAGE ON SCHEMA truncate_schema TO truncate_user; GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user; diff --git a/src/test/regress/sql/coordinator_evaluation.sql b/src/test/regress/sql/coordinator_evaluation.sql index e7479fd1a..1f313a76a 100644 --- 
a/src/test/regress/sql/coordinator_evaluation.sql +++ b/src/test/regress/sql/coordinator_evaluation.sql @@ -165,6 +165,7 @@ BEGIN END; $$ LANGUAGE 'plpgsql' STRICT; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE coordinator_evaluation.test_procedure(int) LANGUAGE plpgsql AS $procedure$ @@ -174,6 +175,7 @@ BEGIN PERFORM DISTINCT value FROM coordinator_evaluation_table_2 WHERE key = filterKey; END; $procedure$; +RESET citus.enable_metadata_sync; -- we couldn't find a meaningful query to write for this -- however this query fails before https://github.com/citusdata/citus/pull/3454 diff --git a/src/test/regress/sql/coordinator_evaluation_modify.sql b/src/test/regress/sql/coordinator_evaluation_modify.sql index e4f104ad9..a4c9f75b5 100644 --- a/src/test/regress/sql/coordinator_evaluation_modify.sql +++ b/src/test/regress/sql/coordinator_evaluation_modify.sql @@ -275,11 +275,13 @@ SET citus.log_local_commands TO ON; SET search_path TO coordinator_evaluation_combinations_modify; -- returns 2 on the worker +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_constant_stable() RETURNS INT AS $$ BEGIN RETURN 2; END; $$ language plpgsql STABLE; +RESET citus.enable_metadata_sync; -- all local values diff --git a/src/test/regress/sql/coordinator_shouldhaveshards.sql b/src/test/regress/sql/coordinator_shouldhaveshards.sql index 159a42d6c..bc79513c3 100644 --- a/src/test/regress/sql/coordinator_shouldhaveshards.sql +++ b/src/test/regress/sql/coordinator_shouldhaveshards.sql @@ -373,9 +373,11 @@ inserts AS ( -- a helper function which return true if the coordinated -- trannsaction uses 2PC +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC() RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus', $$coordinated_transaction_should_use_2PC$$; +RESET citus.enable_metadata_sync; -- a local SELECT followed by remote SELECTs -- does not trigger 2PC diff --git 
a/src/test/regress/sql/distributed_collations.sql b/src/test/regress/sql/distributed_collations.sql index 2bf85448a..669577a09 100644 --- a/src/test/regress/sql/distributed_collations.sql +++ b/src/test/regress/sql/distributed_collations.sql @@ -91,20 +91,21 @@ ORDER BY 1,2,3; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA collation_tests CASCADE; DROP SCHEMA collation_tests2 CASCADE; - --- This is hacky, but we should clean-up the resources as below - -\c - - - :worker_1_port -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA collation_tests CASCADE; -DROP SCHEMA collation_tests2 CASCADE; - -\c - - - :worker_2_port -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA collation_tests CASCADE; -DROP SCHEMA collation_tests2 CASCADE; - -\c - - - :master_port - DROP USER collationuser; SELECT run_command_on_workers($$DROP USER collationuser;$$); + +\c - - - :worker_1_port +-- test creating a collation on a worker +CREATE COLLATION another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); + +-- test if creating a collation on a worker on a local +-- schema raises the right error +SET citus.enable_ddl_propagation TO off; +CREATE SCHEMA collation_creation_on_worker; +SET citus.enable_ddl_propagation TO on; + +CREATE COLLATION collation_creation_on_worker.another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); + +SET citus.enable_ddl_propagation TO off; +DROP SCHEMA collation_creation_on_worker; +SET citus.enable_ddl_propagation TO on; diff --git a/src/test/regress/sql/distributed_collations_conflict.sql b/src/test/regress/sql/distributed_collations_conflict.sql index c84e241a4..eceee4dd6 100644 --- a/src/test/regress/sql/distributed_collations_conflict.sql +++ b/src/test/regress/sql/distributed_collations_conflict.sql @@ -1,5 +1,4 @@ CREATE SCHEMA collation_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$); 
\c - - - :worker_1_port SET search_path TO collation_conflict; diff --git a/src/test/regress/sql/distributed_functions.sql b/src/test/regress/sql/distributed_functions.sql index ce39d50c6..97a97fd9d 100644 --- a/src/test/regress/sql/distributed_functions.sql +++ b/src/test/regress/sql/distributed_functions.sql @@ -670,16 +670,9 @@ SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isa \c - - - :worker_1_port UPDATE pg_dist_local_group SET groupid = 0; TRUNCATE pg_dist_node; -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA function_tests CASCADE; -DROP SCHEMA function_tests2 CASCADE; -SET search_path TO function_tests, function_tests2; \c - - - :worker_2_port UPDATE pg_dist_local_group SET groupid = 0; TRUNCATE pg_dist_node; -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA function_tests CASCADE; -DROP SCHEMA function_tests2 CASCADE; \c - - - :master_port SET client_min_messages TO ERROR; diff --git a/src/test/regress/sql/distributed_functions_conflict.sql b/src/test/regress/sql/distributed_functions_conflict.sql index ebbb6c6aa..cee80548b 100644 --- a/src/test/regress/sql/distributed_functions_conflict.sql +++ b/src/test/regress/sql/distributed_functions_conflict.sql @@ -2,10 +2,10 @@ -- Note in PG12 we use CREATE OR REPLACE AGGREGATE, thus the renaming does not occur CREATE SCHEMA proc_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$); \c - - - :worker_1_port SET search_path TO proc_conflict; +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ BEGIN RETURN state * 2 + i; @@ -15,6 +15,7 @@ CREATE AGGREGATE existing_agg(int) ( SFUNC = existing_func, STYPE = int ); +RESET citus.enable_metadata_sync; \c - - - :master_port SET search_path TO proc_conflict; @@ -59,7 +60,7 @@ DROP FUNCTION existing_func(int, int) CASCADE; \c - - - :worker_1_port SET search_path TO proc_conflict; - +SET 
citus.enable_metadata_sync TO OFF; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ BEGIN RETURN state * 3 + i; @@ -69,6 +70,7 @@ CREATE AGGREGATE existing_agg(int) ( SFUNC = existing_func, STYPE = int ); +RESET citus.enable_metadata_sync; \c - - - :master_port SET search_path TO proc_conflict; diff --git a/src/test/regress/sql/distributed_procedure.sql b/src/test/regress/sql/distributed_procedure.sql index f8c2ac7b5..e8819a900 100644 --- a/src/test/regress/sql/distributed_procedure.sql +++ b/src/test/regress/sql/distributed_procedure.sql @@ -79,9 +79,7 @@ SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello'); SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$); DROP SCHEMA procedure_tests2 CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$); DROP USER procedureuser; SELECT 1 FROM run_command_on_workers($$DROP USER procedureuser;$$); diff --git a/src/test/regress/sql/distributed_types.sql b/src/test/regress/sql/distributed_types.sql index 5448fb181..a136e2fa8 100644 --- a/src/test/regress/sql/distributed_types.sql +++ b/src/test/regress/sql/distributed_types.sql @@ -284,8 +284,6 @@ SELECT * FROM field_indirection_test_2 ORDER BY 1,2,3; -- clear objects SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA type_tests CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$); DROP SCHEMA type_tests2 CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$); DROP USER typeuser; SELECT run_command_on_workers($$DROP USER typeuser;$$); diff --git a/src/test/regress/sql/distributed_types_conflict.sql b/src/test/regress/sql/distributed_types_conflict.sql index d5f5da761..4b220b088 100644 --- a/src/test/regress/sql/distributed_types_conflict.sql +++ b/src/test/regress/sql/distributed_types_conflict.sql 
@@ -1,7 +1,6 @@ SET citus.next_shard_id TO 20020000; CREATE SCHEMA type_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$); -- create a type on a worker that should not cause data loss once overwritten with a type -- from the coordinator diff --git a/src/test/regress/sql/distributed_types_xact_add_enum_value.sql b/src/test/regress/sql/distributed_types_xact_add_enum_value.sql index e9020f140..c3eb19beb 100644 --- a/src/test/regress/sql/distributed_types_xact_add_enum_value.sql +++ b/src/test/regress/sql/distributed_types_xact_add_enum_value.sql @@ -32,4 +32,3 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso -- clear objects SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA xact_enum_type CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$); diff --git a/src/test/regress/sql/drop_partitioned_table.sql b/src/test/regress/sql/drop_partitioned_table.sql index a3d5e9752..fc6e3ac4d 100644 --- a/src/test/regress/sql/drop_partitioned_table.sql +++ b/src/test/regress/sql/drop_partitioned_table.sql @@ -53,7 +53,6 @@ WHERE n.nspname IN ('drop_partitioned_table', 'schema1') ORDER BY 1, 2; \c - - - :worker_1_port -CREATE SCHEMA drop_partitioned_table; SET search_path = drop_partitioned_table; CREATE VIEW tables_info AS SELECT n.nspname as "Schema", @@ -240,5 +239,4 @@ DROP TABLE parent, child1; ROLLBACK; DROP SCHEMA drop_partitioned_table CASCADE; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS drop_partitioned_table CASCADE'); SET search_path TO public; diff --git a/src/test/regress/sql/failure_add_disable_node.sql b/src/test/regress/sql/failure_add_disable_node.sql index 1aa8f9526..42d6e1adb 100644 --- a/src/test/regress/sql/failure_add_disable_node.sql +++ b/src/test/regress/sql/failure_add_disable_node.sql @@ -124,5 +124,3 @@ ORDER BY placementid; RESET SEARCH_PATH; DROP SCHEMA add_remove_node CASCADE; -SELECT * FROM run_command_on_workers('DROP 
SCHEMA IF EXISTS add_remove_node CASCADE') -ORDER BY nodeport; diff --git a/src/test/regress/sql/failure_connection_establishment.sql b/src/test/regress/sql/failure_connection_establishment.sql index 43cb97b86..5029d40b7 100644 --- a/src/test/regress/sql/failure_connection_establishment.sql +++ b/src/test/regress/sql/failure_connection_establishment.sql @@ -124,7 +124,7 @@ SELECT count(*) FROM single_replicatated WHERE key = 100; RESET client_min_messages; -- verify get_global_active_transactions works when a timeout happens on a connection -SELECT get_global_active_transactions(); +SELECT * FROM get_global_active_transactions() WHERE transaction_number != 0; -- tests for connectivity checks SET client_min_messages TO ERROR; diff --git a/src/test/regress/sql/failure_create_distributed_table_non_empty.sql b/src/test/regress/sql/failure_create_distributed_table_non_empty.sql index 29dc7a2d7..946903cd0 100644 --- a/src/test/regress/sql/failure_create_distributed_table_non_empty.sql +++ b/src/test/regress/sql/failure_create_distributed_table_non_empty.sql @@ -6,7 +6,9 @@ -- because if the shards are created via the executor -- cancellations are processed, otherwise they are not +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA create_distributed_table_non_empty_failure; +SET citus.enable_ddl_propagation TO ON; SET search_path TO 'create_distributed_table_non_empty_failure'; SET citus.next_shard_id TO 11000000; @@ -44,7 +46,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").cancel(' || pg_bac SELECT create_distributed_table('test_table', 'id'); SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); -SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$); -- this triggers a schema creation 
which prevents further transactions around dependency propagation SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/sql/failure_create_reference_table.sql b/src/test/regress/sql/failure_create_reference_table.sql index 36fa10a93..21552538b 100644 --- a/src/test/regress/sql/failure_create_reference_table.sql +++ b/src/test/regress/sql/failure_create_reference_table.sql @@ -81,7 +81,9 @@ SET client_min_messages TO NOTICE; SELECT citus.mitmproxy('conn.allow()'); DROP TABLE ref_table; DROP SCHEMA failure_reference_table; +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA failure_reference_table; +SET citus.enable_ddl_propagation TO ON; CREATE TABLE ref_table(id int); INSERT INTO ref_table VALUES(1),(2),(3); diff --git a/src/test/regress/sql/failure_create_table.sql b/src/test/regress/sql/failure_create_table.sql index a4035b431..968d44bc4 100644 --- a/src/test/regress/sql/failure_create_table.sql +++ b/src/test/regress/sql/failure_create_table.sql @@ -2,7 +2,9 @@ -- failure_create_table adds failure tests for creating table without data. 
-- +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA failure_create_table; +SET citus.enable_ddl_propagation TO ON; SET search_path TO 'failure_create_table'; SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/sql/fkeys_between_local_ref.sql b/src/test/regress/sql/fkeys_between_local_ref.sql index 6afce7f3f..a04040474 100644 --- a/src/test/regress/sql/fkeys_between_local_ref.sql +++ b/src/test/regress/sql/fkeys_between_local_ref.sql @@ -247,6 +247,7 @@ BEGIN; ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA another_schema_fkeys_between_local_ref; CREATE TABLE another_schema_fkeys_between_local_ref.local_table_6 (col_1 INT PRIMARY KEY); @@ -276,6 +277,7 @@ BEGIN; ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE local_table_6 (col_1 INT PRIMARY KEY); -- first convert local tables to citus local tables in graph ALTER TABLE local_table_2 ADD CONSTRAINT fkey_11 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_1) ON DELETE CASCADE; diff --git a/src/test/regress/sql/forcedelegation_functions.sql b/src/test/regress/sql/forcedelegation_functions.sql index 77b171fc1..d95cfc75a 100644 --- a/src/test/regress/sql/forcedelegation_functions.sql +++ b/src/test/regress/sql/forcedelegation_functions.sql @@ -189,12 +189,16 @@ COMMIT; SELECT func_calls_forcepush_func(); +-- Block distributing that function as distributing it causes +-- different test output on PG 14. 
+SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_val() RETURNS INT AS $$ BEGIN RETURN 100::INT; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- -- UDF calling another UDF in a FROM clause @@ -606,8 +610,9 @@ SELECT * FROM forcepushdown_schema.test_subquery ORDER BY 1; -- Query with targetList greater than 1 --- Function from FROM clause is not delegated outside of a BEGIN (for now) +-- Function from FROM clause is delegated outside of a BEGIN SELECT 1,2,3 FROM select_data(100); + BEGIN; -- Function from FROM clause is delegated SELECT 1,2,3 FROM select_data(100); @@ -682,6 +687,250 @@ BEGIN END; $$ LANGUAGE plpgsql; +CREATE TABLE testnested_table (x int, y int); +SELECT create_distributed_table('testnested_table','x'); + +CREATE OR REPLACE FUNCTION inner_fn(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; + +-- Non-force function calling force-delegation function +CREATE OR REPLACE FUNCTION outer_local_fn() +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM 1 FROM inner_fn(1); + INSERT INTO forcepushdown_schema.testnested_table VALUES (2,3); + PERFORM 1 FROM inner_fn(4); + INSERT INTO forcepushdown_schema.testnested_table VALUES (5,6); +END; +$$ LANGUAGE plpgsql; + +SELECT create_distributed_function('inner_fn(int)','x', + colocate_with:='testnested_table', force_delegation := true); + +SELECT outer_local_fn(); +-- Rows from 1-6 should appear +SELECT * FROM testnested_table ORDER BY 1; + +BEGIN; +SELECT outer_local_fn(); +END; +SELECT * FROM testnested_table ORDER BY 1; + +DROP FUNCTION inner_fn(int); +DROP FUNCTION outer_local_fn(); +TRUNCATE TABLE testnested_table; + +CREATE OR REPLACE FUNCTION inner_fn(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; + +-- Force-delegation function calling non-force function +CREATE OR REPLACE FUNCTION outer_fn(y int, z 
int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM 1 FROM forcepushdown_schema.inner_fn(y); + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); + PERFORM 1 FROM forcepushdown_schema.inner_fn(z); + INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z); +END; +$$ LANGUAGE plpgsql; + +SELECT create_distributed_function('inner_fn(int)','x', + colocate_with:='testnested_table', force_delegation := false); +SELECT create_distributed_function('outer_fn(int, int)','y', + colocate_with:='testnested_table', force_delegation := true); + +SELECT outer_fn(1, 2); +BEGIN; +SELECT outer_fn(1, 2); +END; + +-- No rows +SELECT * FROM testnested_table ORDER BY 1; + +-- Force-delegation function calling force-delegation function +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); + PERFORM forcepushdown_schema.force_push_inner(x+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; + +SELECT create_distributed_function( + 'force_push_outer(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +SELECT create_distributed_function( + 'force_push_inner(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); + +-- Keys 7,8,9,14 fall on one node and 15 on a different node + +-- Function gets delegated to node with shard-key = 7 and inner function +-- will not be delegated but inserts shard-key = 8 locally +SELECT force_push_outer(7); + +BEGIN; +-- Function gets delegated to node with shard-key = 8 and inner function +-- will not be delegated but inserts shard-key = 9 locally +SELECT force_push_outer(8); +END; + +BEGIN; +-- Function gets delegated to node with shard-key = 14 and inner function +-- will not be delegated but fails to insert 
shard-key = 15 remotely +SELECT force_push_outer(14); +END; +SELECT * FROM testnested_table ORDER BY 1; + +-- +-- Function-1() --> function-2() --> function-3() +-- +CREATE OR REPLACE FUNCTION force_push_1(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); + PERFORM forcepushdown_schema.force_push_2(x+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION force_push_2(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); + PERFORM forcepushdown_schema.force_push_3(y+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION force_push_3(z int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z); +END; +$$ LANGUAGE plpgsql; + +SELECT create_distributed_function( + 'force_push_1(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +SELECT create_distributed_function( + 'force_push_2(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +SELECT create_distributed_function( + 'force_push_3(int)', 'z', + colocate_with := 'testnested_table', + force_delegation := true +); + +TRUNCATE TABLE testnested_table; +BEGIN; +-- All local inserts +SELECT force_push_1(7); +END; + +BEGIN; +-- Local(shard-keys 13, 15) + remote insert (shard-key 14) +SELECT force_push_1(13); +END; + +SELECT * FROM testnested_table ORDER BY 1; + +TRUNCATE TABLE testnested_table; +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM FROM forcepushdown_schema.force_push_inner(x); + INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1); +END; +$$ LANGUAGE plpgsql; +SELECT create_distributed_function( + 
'force_push_inner(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +SELECT create_distributed_function( + 'force_push_outer(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); + +BEGIN; +SELECT force_push_outer(7); +END; +TABLE testnested_table ORDER BY 1; + +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + RAISE NOTICE '%', y; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM FROM forcepushdown_schema.force_push_inner(x+1); + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; + +BEGIN; +SELECT force_push_outer(9); +END; +TABLE testnested_table ORDER BY 1; + RESET client_min_messages; SET citus.log_remote_commands TO off; DROP SCHEMA forcepushdown_schema CASCADE; diff --git a/src/test/regress/sql/function_propagation.sql b/src/test/regress/sql/function_propagation.sql new file mode 100644 index 000000000..79168497b --- /dev/null +++ b/src/test/regress/sql/function_propagation.sql @@ -0,0 +1,193 @@ +CREATE SCHEMA function_propagation_schema; +SET search_path TO 'function_propagation_schema'; + +-- Check whether supported dependencies can be distributed while propagating functions + +-- Check types +SET citus.enable_metadata_sync TO OFF; + CREATE TYPE function_prop_type AS (a int, b int); +RESET citus.enable_metadata_sync; + +CREATE OR REPLACE FUNCTION func_1(param_1 function_prop_type) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +-- Check all dependent objects and function depends on all nodes +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid; +SELECT 
pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid;$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid;$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid;$$) ORDER BY 1,2; + +SET citus.enable_metadata_sync TO OFF; + CREATE TYPE function_prop_type_2 AS (a int, b int); +RESET citus.enable_metadata_sync; + +CREATE OR REPLACE FUNCTION func_2(param_1 int) +RETURNS function_prop_type_2 +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid;$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid;$$) ORDER BY 1,2; + +-- Have a separate check for type created in transaction +BEGIN; + CREATE TYPE function_prop_type_3 AS (a int, b int); +COMMIT; + +-- Objects in the body part is not 
found as dependency +CREATE OR REPLACE FUNCTION func_3(param_1 int) +RETURNS int +LANGUAGE plpgsql AS +$$ +DECLARE + internal_param1 function_prop_type_3; +BEGIN + return 1; +END; +$$; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_3'::regtype::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid;$$) ORDER BY 1,2; + +-- Check table +CREATE TABLE function_prop_table(a int, b int); + +-- Non-distributed table is not distributed as dependency +CREATE OR REPLACE FUNCTION func_4(param_1 function_prop_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +CREATE OR REPLACE FUNCTION func_5(param_1 int) +RETURNS function_prop_table +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +-- Functions can be created with distributed table dependency +SELECT create_distributed_table('function_prop_table', 'a'); +CREATE OR REPLACE FUNCTION func_6(param_1 function_prop_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;$$) ORDER BY 1,2; + +-- Views are not supported +CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table; +CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +CREATE OR REPLACE 
FUNCTION func_8(param_1 int) +RETURNS function_prop_view +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +-- Check within transaction +BEGIN; + CREATE TYPE type_in_transaction AS (a int, b int); + CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction) + RETURNS int + LANGUAGE plpgsql AS + $$ + BEGIN + return 1; + END; + $$; + + -- Within transaction functions are not distributed + SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; + SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; +COMMIT; + +-- Show that recreating it outside transaction distributes the function and dependencies +CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;$$) ORDER BY 1,2; + +-- Test for SQL function with unsupported object in function body +CREATE TABLE table_in_sql_body(id int); + +CREATE FUNCTION max_of_table() +RETURNS int +LANGUAGE SQL AS +$$ + SELECT max(id) FROM 
table_in_sql_body +$$; + +-- Show that only function has propagated, since the table is not resolved as dependency +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regclass::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid;$$) ORDER BY 1,2; + +-- Check extension owned table +CREATE TABLE extension_owned_table(a int); +SELECT run_command_on_workers($$ +CREATE TABLE function_propagation_schema.extension_owned_table(a int); +$$ +); +CREATE EXTENSION seg; +ALTER EXTENSION seg ADD TABLE extension_owned_table; +SELECT run_command_on_workers($$ +ALTER EXTENSION seg ADD TABLE function_propagation_schema.extension_owned_table; +$$); + +CREATE OR REPLACE FUNCTION func_for_ext_check(param_1 extension_owned_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +RESET search_path; +SET client_min_messages TO WARNING; +DROP SCHEMA function_propagation_schema CASCADE; diff --git a/src/test/regress/sql/grant_on_schema_propagation.sql b/src/test/regress/sql/grant_on_schema_propagation.sql index 85a92c2b6..cf8832309 100644 --- a/src/test/regress/sql/grant_on_schema_propagation.sql +++ b/src/test/regress/sql/grant_on_schema_propagation.sql @@ -7,7 +7,9 @@ CREATE SCHEMA dist_schema; CREATE TABLE dist_schema.dist_table (id int); CREATE SCHEMA another_dist_schema; CREATE TABLE another_dist_schema.dist_table (id int); +SET citus.enable_ddl_propagation TO off; CREATE SCHEMA non_dist_schema; +SET citus.enable_ddl_propagation TO on; -- create roles on all nodes SELECT run_command_on_coordinator_and_workers('CREATE USER role_1'); @@ -89,9 +91,9 @@ ALTER 
ROLE role_1 NOSUPERUSER; SET citus.enable_alter_role_propagation TO OFF; DROP TABLE dist_schema.dist_table, another_dist_schema.dist_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema'); -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA another_dist_schema'); -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA non_dist_schema'); +DROP SCHEMA dist_schema; +DROP SCHEMA another_dist_schema; +DROP SCHEMA non_dist_schema; -- test if the grantors are propagated correctly -- first remove one of the worker nodes @@ -157,7 +159,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER \c - - - :master_port DROP TABLE grantor_schema.grantor_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA grantor_schema CASCADE'); +DROP SCHEMA grantor_schema CASCADE; -- test distributing the schema with another user CREATE SCHEMA dist_schema; @@ -175,7 +177,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY \c - - - :master_port DROP TABLE dist_schema.dist_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema CASCADE'); +DROP SCHEMA dist_schema CASCADE; -- test grants on public schema -- first remove one of the worker nodes diff --git a/src/test/regress/sql/local_shard_copy.sql b/src/test/regress/sql/local_shard_copy.sql index 688ba8b4b..094ba8fb4 100644 --- a/src/test/regress/sql/local_shard_copy.sql +++ b/src/test/regress/sql/local_shard_copy.sql @@ -48,6 +48,7 @@ SET citus.log_local_commands TO ON; -- returns true of the distribution key filter -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- placement which is local to this not +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ DECLARE shard_is_local BOOLEAN := FALSE; @@ -70,6 +71,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R RETURN 
shard_is_local; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- pick some example values that reside on the shards locally and remote diff --git a/src/test/regress/sql/local_shard_execution.sql b/src/test/regress/sql/local_shard_execution.sql index 6bda63d8d..c7dad6f00 100644 --- a/src/test/regress/sql/local_shard_execution.sql +++ b/src/test/regress/sql/local_shard_execution.sql @@ -71,6 +71,7 @@ SET search_path TO local_shard_execution; -- returns true of the distribution key filter -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- placement which is local to this not +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ DECLARE shard_is_local BOOLEAN := FALSE; @@ -93,6 +94,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R RETURN shard_is_local; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- test case for issue #3556 SET citus.log_intermediate_results TO TRUE; @@ -418,6 +420,7 @@ BEGIN; ROLLBACK; -- make sure that functions can use local execution +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ DECLARE cnt INT; BEGIN @@ -491,6 +494,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$ SELECT count(*) INTO cnt FROM distributed_table; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CALL local_execution_followed_by_dist(); diff --git a/src/test/regress/sql/local_shard_execution_replicated.sql b/src/test/regress/sql/local_shard_execution_replicated.sql index a341ac619..01af172a3 100644 --- a/src/test/regress/sql/local_shard_execution_replicated.sql +++ b/src/test/regress/sql/local_shard_execution_replicated.sql @@ -384,6 +384,7 @@ BEGIN; ROLLBACK; -- make sure that functions can use local execution +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ DECLARE cnt INT; 
BEGIN @@ -457,6 +458,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$ SELECT count(*) INTO cnt FROM distributed_table; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CALL local_execution_followed_by_dist(); diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index 22e337443..cf123ff49 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -6,6 +6,7 @@ SET citus.next_placement_id TO 1500000; -- supress notice messages to make sure that the tests -- do not diverge with enterprise SET client_min_messages TO WARNING; +SELECT run_command_on_workers($$CREATE ROLE metadata_sync_helper_role WITH LOGIN;$$); CREATE ROLE metadata_sync_helper_role WITH LOGIN; GRANT ALL ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; RESET client_min_messages; @@ -28,36 +29,16 @@ ROLLBACK; -- but we are on the coordinator, so still not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; - --- connect back as super user, and then connect to the worker --- with the superuser to make sure we can ingest metadata with --- a regular user under the certain conditions \c - postgres - - --- we don't need the table/schema anymore -SET client_min_messages TO ERROR; -DROP SCHEMA metadata_sync_helpers CASCADE; -DROP ROLE metadata_sync_helper_role; - \c - - - :worker_1_port -CREATE SCHEMA metadata_sync_helpers; SET search_path TO metadata_sync_helpers; CREATE TABLE test(col_1 int, col_2 int); --- supress notice messages to make sure that the tests --- do not diverge with enterprise -SET client_min_messages TO WARNING; -SET citus.enable_ddl_propagation TO 
OFF; -CREATE ROLE metadata_sync_helper_role WITH LOGIN; -GRANT ALL ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; -RESET client_min_messages; -RESET citus.enable_ddl_propagation; - -- connect back with the regular user \c - metadata_sync_helper_role - :worker_1_port SET search_path TO metadata_sync_helpers; @@ -67,14 +48,14 @@ SET search_path TO metadata_sync_helpers; -- owner of the table test BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- we do not own the relation BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); ROLLBACK; @@ -83,50 +64,78 @@ CREATE TABLE test_2(col_1 int, col_2 int); CREATE TABLE test_3(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT count(*) FROM pg_dist_partition WHERE logicalrelid = 'metadata_sync_helpers.test_2'::regclass; ROLLBACK; +-- application_name with incorrect gpid +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SET application_name to 'citus_internal gpid=not a correct gpid'; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ROLLBACK; + +-- 
application_name with empty gpid +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SET application_name to 'citus_internal gpid='; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ROLLBACK; + +-- empty application_name +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SET application_name to ''; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ROLLBACK; + +-- application_name with incorrect prefix +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SET application_name to 'citus gpid=10000000001'; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ROLLBACK; + -- fails because there is no X distribution method BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- fails because there is the column does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ROLLBACK; --- fails because we do not allow NULL parameters BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal 
gpid=10000000001'; SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ROLLBACK; -- fails because colocationId cannot be negative BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ROLLBACK; -- fails because there is no X replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); @@ -135,7 +144,7 @@ ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key even if distribution key changes BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); @@ -144,7 +153,7 @@ ROLLBACK; -- 
hash distributed table cannot have NULL distribution key BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ROLLBACK; @@ -165,14 +174,14 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- should throw error even if we skip the checks, there are no such nodes BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); ROLLBACK; @@ -189,7 +198,7 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; @@ -207,21 +216,21 @@ SET search_path TO metadata_sync_helpers; CREATE TABLE test_ref(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT 
citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); ROLLBACK; -- non-valid replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ROLLBACK; -- not-matching replication model for reference table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ROLLBACK; @@ -231,7 +240,7 @@ SET search_path TO metadata_sync_helpers; CREATE TABLE super_user_table(col_1 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); COMMIT; @@ -244,7 +253,7 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -254,7 +263,7 @@ ROLLBACK; -- the user is only allowed to add a shard for add a table which is in pg_dist_partition BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT 
assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -264,7 +273,7 @@ ROLLBACK; -- ok, now add the table to the pg_dist_partition BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); SELECT citus_internal_add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's'); SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't'); @@ -273,14 +282,14 @@ COMMIT; -- we can update to a non-existing colocation group (e.g., colocate_with:=none) BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); ROLLBACK; -- invalid shard ids are not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -290,7 +299,7 @@ ROLLBACK; -- invalid storage types are not allowed BEGIN TRANSACTION ISOLATION LEVEL READ 
COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -300,7 +309,7 @@ ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) @@ -310,7 +319,7 @@ ROLLBACK; -- non-integer shard ranges are not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) @@ -320,7 +329,7 @@ ROLLBACK; -- shardMinValue should be smaller than shardMaxValue BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) @@ -330,7 +339,7 @@ ROLLBACK; -- we do not 
allow overlapping shards for the same table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), @@ -344,7 +353,7 @@ ROLLBACK; -- check with non-existing object type BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -354,7 +363,7 @@ ROLLBACK; -- check the sanity of distributionArgumentIndex and colocationId BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false)) @@ -363,7 +372,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', 
ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false)) @@ -373,7 +382,7 @@ ROLLBACK; -- check with non-existing object BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -384,7 +393,7 @@ ROLLBACK; -- if any parameter is NULL BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) @@ -397,7 +406,7 @@ ROLLBACK; -- which is known how to distribute BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE TABLE publication_test_table(id int); @@ -412,7 +421,7 @@ ROLLBACK; -- Show that citus_internal_add_object_metadata checks the priviliges BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE FUNCTION distribution_test_function(int) RETURNS int @@ -427,7 +436,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT 
assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE TYPE distributed_test_type AS (a int, b int); @@ -443,7 +452,7 @@ ROLLBACK; SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse UPDATE pg_dist_partition SET partmethod = 'X'; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) @@ -457,7 +466,7 @@ ROLLBACK; SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) @@ -475,7 +484,7 @@ SET search_path TO metadata_sync_helpers; -- now, add few shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '11'::text, '20'::text), @@ -491,14 +500,14 @@ COMMIT; -- we cannot mark these two tables colocated because they are not colocated BEGIN; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal 
gpid=10000000001'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- now, add few more shards for test_3 to make it colocated with test_2 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_3'::regclass, 1420009::bigint, 't'::"char", '21'::text, '30'::text), @@ -512,7 +521,7 @@ COMMIT; -- shardMin/MaxValues should be NULL for reference tables BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) @@ -522,7 +531,7 @@ ROLLBACK; -- reference tables cannot have multiple shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), @@ -533,7 +542,7 @@ ROLLBACK; -- finally, add a shard for reference tables BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, 
shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) @@ -546,7 +555,7 @@ SET search_path TO metadata_sync_helpers; -- and a shard for the superuser table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) @@ -561,7 +570,7 @@ SET search_path TO metadata_sync_helpers; -- shard does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) @@ -571,7 +580,7 @@ ROLLBACK; -- invalid placementid BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, 1::int, -10)) @@ -581,7 +590,7 @@ ROLLBACK; -- non-existing shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1430100, 1, 0::bigint, 1::int, 10)) @@ -591,7 
+600,7 @@ ROLLBACK; -- invalid shard state BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 10, 0::bigint, 1::int, 1500000)) @@ -601,7 +610,7 @@ ROLLBACK; -- non-existing node with non-existing node-id 123123123 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES ( 1420000, 1, 0::bigint, 123123123::int, 1500000)) @@ -609,6 +618,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ROLLBACK; -- create a volatile function that returns the local node id +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_node_id() RETURNS INT AS $$ DECLARE localGroupId int; @@ -621,11 +631,12 @@ BEGIN nodeport = 57637 AND nodename = 'localhost' AND isactive AND nodecluster = 'default'; RETURN localGroupId; END; $$ language plpgsql; +RESET citus.enable_metadata_sync; -- fails because we ingest more placements for the same shards to the same worker node BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -636,7 +647,7 @@ ROLLBACK; -- shard is not owned by us BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT 
assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420007, 1, 0::bigint, get_node_id(), 1500000)) @@ -646,7 +657,7 @@ ROLLBACK; -- sucessfully add placements BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -667,7 +678,7 @@ COMMIT; -- we should be able to colocate both tables now BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); ROLLBACK; @@ -676,7 +687,7 @@ ROLLBACK; -- fails because we are trying to update it to non-existing node BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); COMMIT; @@ -684,7 +695,7 @@ COMMIT; -- fails because the source node doesn't contain the shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT 
citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); COMMIT; @@ -692,7 +703,7 @@ COMMIT; -- fails because shard does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); COMMIT; @@ -700,7 +711,7 @@ COMMIT; -- fails because none-existing shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); COMMIT; @@ -708,7 +719,7 @@ COMMIT; -- fails because we do not own the shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); COMMIT; @@ -716,7 +727,7 @@ COMMIT; -- the user only allowed to delete their own shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -725,7 +736,7 @@ ROLLBACK; -- the user only allowed to delete shards in a distributed transaction BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; 
\set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -735,7 +746,7 @@ ROLLBACK; -- the user cannot delete non-existing shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) @@ -750,7 +761,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT count(*) FROM pg_dist_placement WHERE shardid = 1420000; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) @@ -767,7 +778,7 @@ ROLLBACK; SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the repmodel -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' @@ -778,7 +789,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the vartype of table from int to bigint -- so that making two tables colocated fails UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}' @@ -788,7 +799,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET 
application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' @@ -798,7 +809,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' @@ -812,7 +823,7 @@ CREATE TABLE test_6(int_col int, text_col text); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); SELECT citus_internal_add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's'); @@ -828,7 +839,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; CREATE TABLE test_8(int_col int, text_col text COLLATE "caseinsensitive"); SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); SELECT citus_internal_add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's'); @@ -847,4 +858,8 @@ SET client_min_messages TO ERROR; SET citus.enable_ddl_propagation TO OFF; DROP OWNED BY metadata_sync_helper_role; DROP ROLE metadata_sync_helper_role; + +\c - - - :master_port +-- cleanup +SET client_min_messages TO ERROR; DROP SCHEMA 
metadata_sync_helpers CASCADE; diff --git a/src/test/regress/sql/multi_create_table.sql b/src/test/regress/sql/multi_create_table.sql index 4c318e73a..4a1070514 100644 --- a/src/test/regress/sql/multi_create_table.sql +++ b/src/test/regress/sql/multi_create_table.sql @@ -10,6 +10,7 @@ SET citus.next_shard_id TO 360000; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -27,6 +28,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE lineitem ( l_orderkey bigint not null, diff --git a/src/test/regress/sql/multi_deparse_function.sql b/src/test/regress/sql/multi_deparse_function.sql index ba823f669..2b8450b4f 100644 --- a/src/test/regress/sql/multi_deparse_function.sql +++ b/src/test/regress/sql/multi_deparse_function.sql @@ -258,14 +258,11 @@ SELECT deparse_and_run_on_workers($cmd$ DROP FUNCTION IF EXISTS missing_func_without_args; $cmd$); +set citus.enable_ddl_propagation to on; -- create schema with weird names CREATE SCHEMA "CiTuS.TeeN"; CREATE SCHEMA "CiTUS.TEEN2"; - -SELECT run_command_on_workers($$ - CREATE SCHEMA IF NOT EXISTS "CiTuS.TeeN"; - CREATE SCHEMA IF NOT EXISTS "CiTUS.TEEN2"; -$$); +set citus.enable_ddl_propagation to off; -- create table with weird names CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() RETURNS TEXT @@ -362,10 +359,7 @@ SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_custom_param(intpair)'); RESET citus.enable_metadata_sync; -SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param; -$cmd$); - -- a function that returns TABLE CREATE FUNCTION func_returns_table(IN count INT) @@ -376,9 +370,7 
@@ SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_returns_table(INT)'); RESET citus.enable_metadata_sync; -SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_returns_table ROWS 100; -$cmd$); -- clear objects SET client_min_messages TO WARNING; -- suppress cascading objects dropping @@ -387,10 +379,4 @@ DROP SCHEMA "CiTuS.TeeN" CASCADE; DROP SCHEMA "CiTUS.TEEN2" CASCADE; DROP SCHEMA function_tests CASCADE; -SELECT run_command_on_workers($$ - DROP SCHEMA "CiTuS.TeeN" CASCADE; - DROP SCHEMA "CiTUS.TEEN2" CASCADE; - DROP SCHEMA function_tests CASCADE; -$$); - DROP ROLE function_role; diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index 43f24fb10..aaed45803 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -105,6 +105,8 @@ SET citus.enable_metadata_sync TO 'false'; SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; + CREATE EXTENSION citus VERSION '8.0-1'; ALTER EXTENSION citus UPDATE TO '8.0-2'; ALTER EXTENSION citus UPDATE TO '8.0-3'; @@ -303,6 +305,7 @@ SELECT * FROM multi_extension.print_extension_changes(); \set VERBOSITY terse CREATE TABLE columnar_table(a INT, b INT) USING columnar; SET citus.enable_version_checks TO ON; +SET columnar.enable_version_checks TO ON; -- all should throw an error due to version mismatch VACUUM FULL columnar_table; @@ -324,6 +327,7 @@ CREATE TABLE new_columnar_table (a int) USING columnar; -- do cleanup for the rest of the tests SET citus.enable_version_checks TO OFF; +SET columnar.enable_version_checks TO OFF; DROP TABLE columnar_table; RESET columnar.enable_custom_scan; \set VERBOSITY default @@ -472,13 +476,16 @@ ORDER BY 1, 2; -- see incompatible version errors out RESET citus.enable_version_checks; +RESET columnar.enable_version_checks; DROP EXTENSION citus; CREATE EXTENSION citus VERSION '8.0-1'; -- Test non-distributed queries work even in 
version mismatch SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.1-1'; SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; -- Test CREATE TABLE CREATE TABLE version_mismatch_table(column1 int); @@ -517,14 +524,17 @@ END; $function$; SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; -- This will fail because of previous function declaration ALTER EXTENSION citus UPDATE TO '8.1-1'; -- We can DROP problematic function and continue ALTER EXTENSION even when version checks are on SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; DROP FUNCTION pg_catalog.relation_is_a_known_shard(regclass); SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; ALTER EXTENSION citus UPDATE TO '8.1-1'; -- Test updating to the latest version without specifying the version number @@ -540,8 +550,10 @@ CREATE EXTENSION citus; DROP EXTENSION citus; SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.0-1'; SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; -- during ALTER EXTENSION, we should invalidate the cache ALTER EXTENSION citus UPDATE; diff --git a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql index 8604480a0..3ee453074 100644 --- a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql +++ b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql @@ -342,4 +342,3 @@ ALTER TABLE parent_table DROP CONSTRAINT unique_cst CASCADE; DROP SCHEMA fix_idx_names CASCADE; SELECT citus_remove_node('localhost', :master_port); -SELECT run_command_on_workers($$ DROP SCHEMA IF EXISTS fix_idx_names CASCADE $$); diff --git 
a/src/test/regress/sql/multi_foreign_key_relation_graph.sql b/src/test/regress/sql/multi_foreign_key_relation_graph.sql index 818b5b34f..4adf9bae4 100644 --- a/src/test/regress/sql/multi_foreign_key_relation_graph.sql +++ b/src/test/regress/sql/multi_foreign_key_relation_graph.sql @@ -161,6 +161,7 @@ ROLLBACK; -- Test schemas BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA fkey_intermediate_schema_1; CREATE SCHEMA fkey_intermediate_schema_2; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; @@ -192,6 +193,7 @@ BEGIN; ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA fkey_intermediate_schema_1; CREATE SCHEMA fkey_intermediate_schema_2; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; diff --git a/src/test/regress/sql/multi_function_in_join.sql b/src/test/regress/sql/multi_function_in_join.sql index 6abdafdad..1e2844f4e 100644 --- a/src/test/regress/sql/multi_function_in_join.sql +++ b/src/test/regress/sql/multi_function_in_join.sql @@ -33,7 +33,6 @@ SELECT * FROM table1 JOIN nextval('numbers') n ON (id = n) ORDER BY id ASC; CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL; -SELECT create_distributed_function('add(integer,integer)'); SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC; -- Check join of plpgsql functions @@ -46,6 +45,8 @@ $$ LANGUAGE plpgsql; SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC; -- a function that returns a set of integers +-- Block distributing function as we have tests below to test it locally +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION next_k_integers(IN first_value INTEGER, IN k INTEGER DEFAULT 3, OUT result INTEGER) @@ -54,6 +55,7 @@ BEGIN RETURN QUERY SELECT x FROM generate_series(first_value, first_value+k-1) f(x); END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT * 
FROM table1 JOIN next_k_integers(3,2) next_integers ON (id = next_integers.result) ORDER BY id ASC; @@ -124,6 +126,7 @@ SET client_min_messages TO ERROR; -- function joins in CTE results can create lateral joins that are not supported -- we execute the query within a function to consolidate the error messages -- between different executors +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION raise_failed_execution_func_join(query text) RETURNS void AS $$ BEGIN EXECUTE query; @@ -135,6 +138,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ WITH one_row AS ( @@ -146,8 +150,10 @@ SELECT raise_failed_execution_func_join($$ $$); -- a user-defined immutable function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION the_answer_to_life() RETURNS INTEGER IMMUTABLE AS 'SELECT 42' LANGUAGE SQL; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ SELECT * FROM table1 JOIN the_answer_to_life() the_answer ON (id = the_answer); diff --git a/src/test/regress/sql/multi_insert_select.sql b/src/test/regress/sql/multi_insert_select.sql index 7de9f9763..92c802fe1 100644 --- a/src/test/regress/sql/multi_insert_select.sql +++ b/src/test/regress/sql/multi_insert_select.sql @@ -106,6 +106,7 @@ WHERE user_id < 0; -- make sure we don't evaluate stable functions with column arguments +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION evaluate_on_master(x int) RETURNS int LANGUAGE plpgsql STABLE AS $function$ @@ -114,6 +115,7 @@ BEGIN RETURN x; END; $function$; +RESET citus.enable_metadata_sync; INSERT INTO raw_events_second (user_id, value_1) SELECT diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql index d85bede5f..13c984396 100644 --- a/src/test/regress/sql/multi_metadata_sync.sql +++ b/src/test/regress/sql/multi_metadata_sync.sql @@ -20,10 +20,12 @@ SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS 
last_group_id \gset SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset -- Create the necessary test utility function +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION activate_node_snapshot() RETURNS text[] LANGUAGE C STRICT AS 'citus'; +RESET citus.enable_metadata_sync; COMMENT ON FUNCTION activate_node_snapshot() IS 'commands to activate node snapshot'; @@ -43,12 +45,14 @@ ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword'; SELECT unnest(activate_node_snapshot()) order by 1; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) RETURNS void LANGUAGE C STRICT AS 'citus', $$master_create_distributed_table$$; +RESET citus.enable_metadata_sync; COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) diff --git a/src/test/regress/sql/multi_modifications.sql b/src/test/regress/sql/multi_modifications.sql index 55453e78b..7977325ea 100644 --- a/src/test/regress/sql/multi_modifications.sql +++ b/src/test/regress/sql/multi_modifications.sql @@ -363,16 +363,6 @@ UPDATE limit_orders SET array_of_values = 1 || array_of_values WHERE id = 246; CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -\c - - - :worker_1_port -CREATE FUNCTION immutable_append(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; - -\c - - - :worker_2_port -CREATE FUNCTION immutable_append(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; - -\c - - - :master_port - -- immutable function calls with vars are also allowed UPDATE limit_orders SET array_of_values 
= immutable_append(array_of_values, 2) WHERE id = 246; diff --git a/src/test/regress/sql/multi_modifying_xacts.sql b/src/test/regress/sql/multi_modifying_xacts.sql index a3b004546..a53153b9b 100644 --- a/src/test/regress/sql/multi_modifying_xacts.sql +++ b/src/test/regress/sql/multi_modifying_xacts.sql @@ -282,18 +282,16 @@ SELECT * FROM researchers WHERE lab_id = 6; -- verify 2pc SELECT count(*) FROM pg_dist_transaction; - -- create a check function -SELECT * from run_command_on_workers('CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ +CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ BEGIN IF (NEW.id > 30) THEN - RAISE ''illegal value''; + RAISE 'illegal value'; END IF; RETURN NEW; END; -$rli$ LANGUAGE plpgsql;') -ORDER BY nodeport; +$rli$ LANGUAGE plpgsql; -- register after insert trigger SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGER reject_large_researcher_id AFTER INSERT ON %s DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_large_id()') @@ -344,9 +342,7 @@ SELECT * FROM researchers WHERE lab_id = 6; SELECT * from run_command_on_placements('researchers', 'drop trigger reject_large_researcher_id on %s') ORDER BY nodeport, shardid; -SELECT * FROM run_command_on_workers('drop function reject_large_id()') -ORDER BY nodeport; - +DROP FUNCTION reject_large_id(); -- ALTER and copy are compatible BEGIN; ALTER TABLE labs ADD COLUMN motto text; @@ -416,6 +412,7 @@ AND s.logicalrelid = 'objects'::regclass; -- create trigger on one worker to reject certain values \c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -425,6 +422,7 @@ CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON objects_1200003 @@ -460,6 +458,7 @@ DELETE FROM objects; -- there cannot be errors on 
different shards at different times -- because the first failure will fail the whole transaction \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -469,6 +468,7 @@ CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON labs_1200002 @@ -668,7 +668,7 @@ SELECT * FROM reference_modifying_xacts; -- lets fail on of the workers at before the commit time \c - - - :worker_1_port - +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 999) THEN @@ -678,6 +678,7 @@ CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 @@ -762,7 +763,7 @@ ABORT; -- lets fail one of the workers before COMMIT time for the hash table \c - - - :worker_1_port - +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 997) THEN @@ -772,6 +773,7 @@ CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_hash AFTER INSERT ON hash_modifying_xacts_1200007 diff --git a/src/test/regress/sql/multi_multiuser.sql b/src/test/regress/sql/multi_multiuser.sql index aed7fe20b..f379c75f1 100644 --- a/src/test/regress/sql/multi_multiuser.sql +++ b/src/test/regress/sql/multi_multiuser.sql @@ -54,12 +54,6 @@ GRANT SELECT ON TABLE test_1420000 TO read_access; GRANT ALL ON TABLE test_1420002 TO full_access; GRANT SELECT ON TABLE test_1420002 TO read_access; -CREATE SCHEMA full_access_user_schema; -REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC; -GRANT USAGE ON SCHEMA 
full_access_user_schema TO full_access; -GRANT ALL ON SCHEMA full_access_user_schema TO full_access; -GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access; - \c - - - :worker_2_port CREATE USER full_access; CREATE USER usage_access; @@ -75,12 +69,6 @@ GRANT SELECT ON TABLE test_1420001 TO read_access; GRANT ALL ON TABLE test_1420003 TO full_access; GRANT SELECT ON TABLE test_1420003 TO read_access; -CREATE SCHEMA full_access_user_schema; -REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC; -GRANT USAGE ON SCHEMA full_access_user_schema TO full_access; -GRANT ALL ON SCHEMA full_access_user_schema TO full_access; -GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access; - \c - - - :master_port SET citus.shard_replication_factor TO 1; @@ -381,21 +369,24 @@ RESET ROLE; -- to test access to files created during repartition we will create some on worker 1 \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION citus_rm_job_directory(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; SET ROLE full_access; SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS a', 'a', 23, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); RESET ROLE; -- all attempts for transfer are initiated from other workers \c - - - :worker_2_port - +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION citus_rm_job_directory(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; -- super user should not be able to copy files created by a user SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); diff --git a/src/test/regress/sql/multi_mx_call.sql b/src/test/regress/sql/multi_mx_call.sql index 4728b8948..ccdffd9aa 100644 --- a/src/test/regress/sql/multi_mx_call.sql +++ b/src/test/regress/sql/multi_mx_call.sql @@ -180,6 +180,7 @@ select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass -- 
Test that we handle transactional constructs correctly inside a procedure -- that is routed to the workers. +SET citus.enable_metadata_sync TO OFF; CREATE PROCEDURE mx_call_proc_tx(x int) LANGUAGE plpgsql AS $$ BEGIN INSERT INTO multi_mx_call.mx_call_dist_table_1 VALUES (x, -1), (x+1, 4); @@ -189,6 +190,7 @@ BEGIN -- Now do the final update! UPDATE multi_mx_call.mx_call_dist_table_1 SET val = val-1 WHERE id >= x; END;$$; +RESET citus.enable_metadata_sync; -- before distribution ... CALL multi_mx_call.mx_call_proc_tx(10); diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql index be391318a..0a685e5ce 100644 --- a/src/test/regress/sql/multi_mx_create_table.sql +++ b/src/test/regress/sql/multi_mx_create_table.sql @@ -63,37 +63,7 @@ CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); --- create schema to test schema support -CREATE SCHEMA citus_mx_test_schema_join_1; -CREATE SCHEMA citus_mx_test_schema_join_2; - --- create UDFs in worker node -CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - SET search_path TO citus_mx_test_schema; -CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; - -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, @@ -107,37 +77,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( -- now create required stuff in the worker 2 \c - - - :worker_2_port --- create schema to test schema support -CREATE SCHEMA citus_mx_test_schema_join_1; 
-CREATE SCHEMA citus_mx_test_schema_join_2; - - --- create UDF -CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - SET search_path TO citus_mx_test_schema; -CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( diff --git a/src/test/regress/sql/multi_mx_ddl.sql b/src/test/regress/sql/multi_mx_ddl.sql index 5d1622d35..d0452c3d2 100644 --- a/src/test/regress/sql/multi_mx_ddl.sql +++ b/src/test/regress/sql/multi_mx_ddl.sql @@ -173,7 +173,6 @@ SELECT * FROM seg_test; \c - - - :master_port CREATE SCHEMA ext_owned_tables; -SELECT run_command_on_workers($$CREATE SCHEMA ext_owned_tables;$$); SET search_path TO ext_owned_tables; diff --git a/src/test/regress/sql/multi_mx_explain.sql b/src/test/regress/sql/multi_mx_explain.sql index 9417b94f3..347d79d67 100644 --- a/src/test/regress/sql/multi_mx_explain.sql +++ b/src/test/regress/sql/multi_mx_explain.sql @@ -18,6 +18,7 @@ VACUUM ANALYZE supplier_mx; \c - - - :worker_1_port -- Function that parses explain output as JSON +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ @@ -43,6 +44,7 @@ $BODY$ LANGUAGE plpgsql; \c - - - :worker_2_port -- Function that parses explain output as JSON +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ @@ -65,6 +67,7 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- Test Text format diff --git a/src/test/regress/sql/multi_mx_function_call_delegation.sql 
b/src/test/regress/sql/multi_mx_function_call_delegation.sql index 206969456..0efcea922 100644 --- a/src/test/regress/sql/multi_mx_function_call_delegation.sql +++ b/src/test/regress/sql/multi_mx_function_call_delegation.sql @@ -274,10 +274,21 @@ select mx_call_func((select x + 1 from mx_call_add(3, 4) x), 2); select mx_call_func(floor(random())::int, 2); -- test forms we don't distribute -select * from mx_call_func(2, 0); select mx_call_func(2, 0) where mx_call_func(0, 2) = 0; select mx_call_func(2, 0), mx_call_func(0, 2); +-- regular call in FROM can be pushed down +select * from mx_call_func(2, 0); + +-- prepared statement with 6 invocations to trigger generic plan +prepare call_func(int, int) as select $1 from mx_call_func($1, $2); +execute call_func(2, 0); +execute call_func(2, 0); +execute call_func(2, 0); +execute call_func(2, 0); +execute call_func(2, 0); +execute call_func(2, 0); + -- we do not delegate the call, but do push down the query -- that result in remote execution from workers select mx_call_func(id, 0) from mx_call_dist_table_1; diff --git a/src/test/regress/sql/multi_mx_metadata.sql b/src/test/regress/sql/multi_mx_metadata.sql index 7bd09b900..169f3aa8b 100644 --- a/src/test/regress/sql/multi_mx_metadata.sql +++ b/src/test/regress/sql/multi_mx_metadata.sql @@ -218,17 +218,6 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx \c - no_access_mx - :worker_1_port --- see the comment in the top of the file -CREATE OR REPLACE FUNCTION raise_failed_aclcheck(query text) RETURNS void AS $$ -BEGIN - EXECUTE query; - EXCEPTION WHEN OTHERS THEN - IF SQLERRM LIKE 'must be owner of%' THEN - RAISE 'must be owner of the object'; - END IF; -END; -$$LANGUAGE plpgsql; - SELECT raise_failed_aclcheck($$ DROP TABLE distributed_mx_table; $$); diff --git a/src/test/regress/sql/multi_mx_modifications.sql b/src/test/regress/sql/multi_mx_modifications.sql index 72ad17b65..852bf3a42 100644 --- 
a/src/test/regress/sql/multi_mx_modifications.sql +++ b/src/test/regress/sql/multi_mx_modifications.sql @@ -202,6 +202,7 @@ UPDATE limit_orders_mx SET array_of_values = 1 || array_of_values WHERE id = 246 UPDATE limit_orders_mx SET array_of_values = immutable_append_mx(array_of_values, 2) WHERE id = 246; +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION stable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$ LANGUAGE plpgsql STABLE; @@ -215,6 +216,8 @@ SELECT array_of_values FROM limit_orders_mx WHERE id = 246; -- STRICT functions work as expected CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS 'SELECT COALESCE($1, 2) + COALESCE($1, 3);' LANGUAGE SQL STABLE STRICT; +RESET citus.enable_metadata_sync; + UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246; SELECT array_of_values FROM limit_orders_mx WHERE id = 246; diff --git a/src/test/regress/sql/multi_mx_modifying_xacts.sql b/src/test/regress/sql/multi_mx_modifying_xacts.sql index 2c7d65b7c..cf60f023d 100644 --- a/src/test/regress/sql/multi_mx_modifying_xacts.sql +++ b/src/test/regress/sql/multi_mx_modifying_xacts.sql @@ -195,6 +195,7 @@ SELECT * FROM objects_mx WHERE id = 1; -- create trigger on one worker to reject certain values \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -204,6 +205,7 @@ CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON objects_mx_1220103 diff --git a/src/test/regress/sql/multi_mx_node_metadata.sql b/src/test/regress/sql/multi_mx_node_metadata.sql index 09e8e49f1..a54917f87 100644 --- a/src/test/regress/sql/multi_mx_node_metadata.sql +++ b/src/test/regress/sql/multi_mx_node_metadata.sql @@ -367,11 +367,12 @@ UPDATE pg_dist_node SET 
hasmetadata = true; SELECT master_update_node(nodeid, 'localhost', 12345) FROM pg_dist_node; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION trigger_metadata_sync() RETURNS void LANGUAGE C STRICT AS 'citus'; - +RESET citus.enable_metadata_sync; SELECT trigger_metadata_sync(); \c :datname - - :master_port diff --git a/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql b/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql index a2663c7a4..779f4c24e 100644 --- a/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql +++ b/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql @@ -68,14 +68,7 @@ CREATE TABLE repartition_udt_other ( \c - - - :worker_1_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -87,15 +80,6 @@ CREATE OPERATOR = ( -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - - -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 @@ -112,14 +96,7 @@ FUNCTION 1 test_udt_hash(test_udt); \c - - - :worker_2_port -- START type creation --- ... as well as a function to use as its comparator... 
-CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -131,15 +108,6 @@ CREATE OPERATOR = ( -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - - -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 diff --git a/src/test/regress/sql/multi_mx_router_planner.sql b/src/test/regress/sql/multi_mx_router_planner.sql index e8efbe244..fdfd81b07 100644 --- a/src/test/regress/sql/multi_mx_router_planner.sql +++ b/src/test/regress/sql/multi_mx_router_planner.sql @@ -609,6 +609,7 @@ PREPARE author_articles(int) as EXECUTE author_articles(1); -- queries inside plpgsql functions could be router plannable +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$ DECLARE max_id integer; @@ -635,6 +636,7 @@ END; $$ LANGUAGE plpgsql; SELECT * FROM author_articles_id_word_count(); +RESET citus.enable_metadata_sync; -- materialized views can be created for router plannable queries CREATE MATERIALIZED VIEW mv_articles_hash_mx AS diff --git a/src/test/regress/sql/multi_mx_schema_support.sql b/src/test/regress/sql/multi_mx_schema_support.sql index c6be56db8..555794f92 100644 --- a/src/test/regress/sql/multi_mx_schema_support.sql +++ b/src/test/regress/sql/multi_mx_schema_support.sql @@ -295,7 +295,9 @@ ALTER TABLE 
"CiTuS.TeAeN"."TeeNTabLE.1!?!" ADD COLUMN new_col INT; SET search_path TO not_existing_schema; ALTER TABLE "CiTuS.TeAeN"."TeeNTabLE.1!?!" DROP COLUMN new_col; +SET client_min_messages TO ERROR; DROP SCHEMA mx_ddl_schema_1, mx_ddl_schema_2, "CiTuS.TeAeN" CASCADE; +RESET client_min_messages; -- test if ALTER TABLE SET SCHEMA sets the original table in the worker SET search_path TO public; @@ -307,7 +309,8 @@ CREATE SCHEMA mx_new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM citus.pg_dist_object - WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema'); + WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema') + ORDER BY "Distributed Schemas"; \c - - - :worker_1_port SELECT table_schema AS "Table's Schema" FROM information_schema.tables WHERE table_name='table_set_schema'; SELECT table_schema AS "Shards' Schema" diff --git a/src/test/regress/sql/multi_name_lengths.sql b/src/test/regress/sql/multi_name_lengths.sql index df0a68f33..34a509fad 100644 --- a/src/test/regress/sql/multi_name_lengths.sql +++ b/src/test/regress/sql/multi_name_lengths.sql @@ -7,6 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000; SET citus.shard_count TO 2; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -24,6 +25,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; -- Verify that a table name > 56 characters gets hashed properly. 
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( diff --git a/src/test/regress/sql/multi_prepare_sql.sql b/src/test/regress/sql/multi_prepare_sql.sql index 47fad0021..54893d025 100644 --- a/src/test/regress/sql/multi_prepare_sql.sql +++ b/src/test/regress/sql/multi_prepare_sql.sql @@ -610,10 +610,6 @@ DROP TABLE http_request; -- first create helper function CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; -\c - - - :worker_1_port -CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; -\c - - - :worker_2_port -CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :master_port -- test table diff --git a/src/test/regress/sql/multi_repartition_udt.sql b/src/test/regress/sql/multi_repartition_udt.sql index c244a2b61..2e5c1de24 100644 --- a/src/test/regress/sql/multi_repartition_udt.sql +++ b/src/test/regress/sql/multi_repartition_udt.sql @@ -69,14 +69,7 @@ CREATE TABLE repartition_udt_other ( \c - - :public_worker_1_host :worker_1_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -88,15 +81,6 @@ CREATE OPERATOR = ( -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. 
Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - - -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 @@ -113,14 +97,7 @@ FUNCTION 1 test_udt_hash(test_udt); \c - - :public_worker_2_host :worker_2_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -132,15 +109,6 @@ CREATE OPERATOR = ( -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. 
Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - - -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 diff --git a/src/test/regress/sql/multi_repartitioned_subquery_udf.sql b/src/test/regress/sql/multi_repartitioned_subquery_udf.sql index ed432b797..6708c0ae1 100644 --- a/src/test/regress/sql/multi_repartitioned_subquery_udf.sql +++ b/src/test/regress/sql/multi_repartitioned_subquery_udf.sql @@ -18,32 +18,6 @@ LANGUAGE sql IMMUTABLE AS $_$ OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; -\c - - :public_worker_1_host :worker_1_port -DROP FUNCTION IF EXISTS median(double precision[]); - -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; -$_$; - -\c - - :public_worker_2_host :worker_2_port -DROP FUNCTION IF EXISTS median(double precision[]); - -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; -$_$; - --- Run query on master -\c - - :master_host :master_port - - SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*) FROM lineitem GROUP BY l_partkey) AS a WHERE median > 2; diff --git a/src/test/regress/sql/multi_router_planner_fast_path.sql b/src/test/regress/sql/multi_router_planner_fast_path.sql index 1bf416826..01c9035d5 100644 --- a/src/test/regress/sql/multi_router_planner_fast_path.sql +++ b/src/test/regress/sql/multi_router_planner_fast_path.sql @@ -531,6 
+531,7 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) WHERE author_id = 1 and 1=0; -- create a dummy function to be used in filtering +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION someDummyFunction(regclass) RETURNS text AS $$ @@ -554,6 +555,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SET client_min_messages TO ERROR; \set VERBOSITY terse diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql index b472f0083..02ca41d01 100644 --- a/src/test/regress/sql/multi_schema_support.sql +++ b/src/test/regress/sql/multi_schema_support.sql @@ -203,34 +203,6 @@ END; $$ LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 1 -\c - - - :worker_1_port -CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - --- create UDF in worker node 2 -\c - - - :worker_2_port -CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - -\c - - - :master_port - -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; @@ -251,36 +223,6 @@ END; $$ LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 1 in schema -\c - - - :worker_1_port -SET search_path TO test_schema_support; -CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - --- create UDF in worker node 2 in schema -\c - - - :worker_2_port -SET search_path TO test_schema_support; -CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; 
-BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - -\c - - - :master_port - -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; @@ -1026,10 +968,10 @@ ROLLBACK; -- Clean up the created schema SET client_min_messages TO WARNING; -DROP SCHEMA run_test_schema CASCADE; -DROP SCHEMA test_schema_support_join_1 CASCADE; -DROP SCHEMA test_schema_support_join_2 CASCADE; -DROP SCHEMA "Citus'Teen123" CASCADE; -DROP SCHEMA "CiTUS.TEEN2" CASCADE; -DROP SCHEMA bar CASCADE; -DROP SCHEMA test_schema_support CASCADE; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object + WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); +DROP SCHEMA run_test_schema, test_schema_support_join_1, test_schema_support_join_2, "Citus'Teen123", "CiTUS.TEEN2", bar, test_schema_support CASCADE; +-- verify that the dropped schema is removed from worker's pg_dist_object +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object + WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); diff --git a/src/test/regress/sql/multi_sequence_default.sql b/src/test/regress/sql/multi_sequence_default.sql index c8d09738f..8c664f393 100644 --- a/src/test/regress/sql/multi_sequence_default.sql +++ b/src/test/regress/sql/multi_sequence_default.sql @@ -187,34 +187,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); DROP SEQUENCE sequence_default_0.sequence_3 CASCADE; DROP SCHEMA sequence_default_0; - --- DROP SCHEMA problem: expected since we don't propagate DROP SCHEMA -CREATE TABLE seq_test_5 (x int, y int); -SELECT create_distributed_table('seq_test_5','x'); -CREATE SCHEMA sequence_default_1; -CREATE SEQUENCE sequence_default_1.seq_5; -ALTER TABLE 
seq_test_5 ADD COLUMN a bigint DEFAULT nextval('sequence_default_1.seq_5'); -DROP SCHEMA sequence_default_1 CASCADE; --- sequence is gone from coordinator -INSERT INTO seq_test_5 VALUES (1, 2) RETURNING *; --- but is still present on worker -\c - - - :worker_1_port -INSERT INTO sequence_default.seq_test_5 VALUES (1, 2) RETURNING *; -\c - - - :master_port -SET citus.shard_replication_factor TO 1; -SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); --- apply workaround -SELECT run_command_on_workers('DROP SCHEMA sequence_default_1 CASCADE'); --- now the sequence is gone from the worker as well -\c - - - :worker_1_port -INSERT INTO sequence_default.seq_test_5 VALUES (1, 2) RETURNING *; -\c - - - :master_port -SET citus.shard_replication_factor TO 1; -SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - - -- check some more complex cases CREATE SEQUENCE seq_6; CREATE TABLE seq_test_6 (x int, t timestamptz DEFAULT now(), s int DEFAULT nextval('seq_6'), m int) PARTITION BY RANGE (t); @@ -287,8 +259,7 @@ ALTER TABLE sequence_default_8.seq_8 SET SCHEMA sequence_default; SET citus.shard_replication_factor TO 1; SET search_path = sequence_default, public; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -DROP SCHEMA sequence_default_8; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default_8 CASCADE'); +DROP SCHEMA sequence_default_8 CASCADE; -- cannot use more than one sequence in a column default @@ -442,10 +413,6 @@ SELECT nextval('seq_14'); CREATE SCHEMA test_schema_for_sequence_default_propagation; CREATE SEQUENCE test_schema_for_sequence_default_propagation.seq_10; --- Both should return 0 rows -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation.seq_10'::regclass); -SELECT pg_identify_object_as_address(classid, objid, 
objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation'::regnamespace); - -- Create distributed table with default column to propagate dependencies CREATE TABLE test_seq_dist(a int, x BIGINT DEFAULT nextval('test_schema_for_sequence_default_propagation.seq_10')); SELECT create_distributed_table('test_seq_dist', 'a'); @@ -487,6 +454,5 @@ DROP TABLE test_seq_dist; DROP TABLE sequence_default.seq_test_7_par; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA sequence_default CASCADE; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default CASCADE'); SELECT master_remove_node('localhost', :master_port); SET search_path TO public; diff --git a/src/test/regress/sql/multi_subquery_behavioral_analytics.sql b/src/test/regress/sql/multi_subquery_behavioral_analytics.sql index 3e8953ead..f7860ce58 100644 --- a/src/test/regress/sql/multi_subquery_behavioral_analytics.sql +++ b/src/test/regress/sql/multi_subquery_behavioral_analytics.sql @@ -1278,17 +1278,8 @@ ORDER BY LIMIT 10; -- this is one complex join query derived from a user's production query --- first declare the function on workers on master +-- declare the function on workers and master -- With array_index: -SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) - RETURNS INT AS $$ - SELECT i - FROM (SELECT generate_series(array_lower($1, 1), array_upper($1, 1))) g(i) - WHERE $1 [i] = $2 - LIMIT 1; - $$ LANGUAGE sql') -ORDER BY 1,2; - CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) RETURNS INT AS $$ SELECT i @@ -1431,8 +1422,6 @@ LIMIT 10; -- drop created functions -SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, ANYELEMENT)') -ORDER BY 1,2; DROP FUNCTION array_index(ANYARRAY, ANYELEMENT); -- a query with a constant subquery @@ -1629,16 +1618,6 @@ CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool IMMUTABLE RETURNS NULL ON NULL 
INPUT; -SELECT run_command_on_workers($f$ - -CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool - AS 'select $1 > $2;' - LANGUAGE SQL - IMMUTABLE - RETURNS NULL ON NULL INPUT; - -$f$); - -- we don't support joins via functions SELECT user_id, array_length(events_table, 1) FROM ( @@ -1869,11 +1848,5 @@ LIMIT 1; DROP FUNCTION test_join_function_2(integer, integer); -SELECT run_command_on_workers($f$ - - DROP FUNCTION test_join_function_2(integer, integer); - -$f$); - SET citus.enable_router_execution TO TRUE; SET citus.subquery_pushdown to OFF; diff --git a/src/test/regress/sql/multi_test_helpers_superuser.sql b/src/test/regress/sql/multi_test_helpers_superuser.sql index 2df7b1724..85900ee00 100644 --- a/src/test/regress/sql/multi_test_helpers_superuser.sql +++ b/src/test/regress/sql/multi_test_helpers_superuser.sql @@ -37,6 +37,7 @@ $$; -- partition_task_list_results tests the internal PartitionTasklistResults function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix text, query text, target_table regclass, @@ -49,7 +50,6 @@ CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix LANGUAGE C STRICT VOLATILE AS 'citus', $$partition_task_list_results$$; - -- get size of TopTransactionContext CREATE OR REPLACE FUNCTION top_transaction_context_size() RETURNS BIGINT LANGUAGE C STRICT VOLATILE @@ -65,3 +65,4 @@ BEGIN PERFORM public.wait_until_metadata_sync(30000); END; $function$; +RESET citus.enable_metadata_sync; diff --git a/src/test/regress/sql/multi_transactional_drop_shards.sql b/src/test/regress/sql/multi_transactional_drop_shards.sql index f88839d77..25c69031e 100644 --- a/src/test/regress/sql/multi_transactional_drop_shards.sql +++ b/src/test/regress/sql/multi_transactional_drop_shards.sql @@ -168,11 +168,13 @@ SET citus.override_table_visibility TO false; -- test DROP table with failing worker +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION 
fail_drop_table() RETURNS event_trigger AS $fdt$ BEGIN RAISE 'illegal value'; END; $fdt$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER fail_drop_table ON sql_drop EXECUTE PROCEDURE fail_drop_table(); diff --git a/src/test/regress/sql/multi_unsupported_worker_operations.sql b/src/test/regress/sql/multi_unsupported_worker_operations.sql index 02e5397cc..5fac64e3d 100644 --- a/src/test/regress/sql/multi_unsupported_worker_operations.sql +++ b/src/test/regress/sql/multi_unsupported_worker_operations.sql @@ -51,6 +51,7 @@ SELECT * FROM mx_table ORDER BY col_1; \c - - - :worker_1_port -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -68,6 +69,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE mx_table_worker(col_1 text); diff --git a/src/test/regress/sql/mx_regular_user.sql b/src/test/regress/sql/mx_regular_user.sql index 1c52ebc80..2dbd85c28 100644 --- a/src/test/regress/sql/mx_regular_user.sql +++ b/src/test/regress/sql/mx_regular_user.sql @@ -100,18 +100,6 @@ SELECT lock_shard_resources(8, ARRAY[2980000]); SET client_min_messages TO ERROR; DROP SCHEMA "Mx Super User" CASCADE; -\c - postgres - :worker_1_port; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO OFF; -CREATE SCHEMA "Mx Regular User"; -GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user; - -\c - postgres - :worker_2_port; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO OFF; -CREATE SCHEMA "Mx Regular User"; -GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user; - -- now connect with that user \c - regular_mx_user - :master_port SET search_path TO "Mx 
Regular User"; diff --git a/src/test/regress/sql/node_conninfo_reload.sql b/src/test/regress/sql/node_conninfo_reload.sql index 5a4f3c75b..3790f2c98 100644 --- a/src/test/regress/sql/node_conninfo_reload.sql +++ b/src/test/regress/sql/node_conninfo_reload.sql @@ -32,6 +32,9 @@ show citus.node_conninfo; select count(*) from test where a = 0; ALTER SYSTEM SET citus.node_conninfo = 'sslmode=doesnotexist'; +-- we cannot set application name +ALTER SYSTEM SET citus.node_conninfo = 'application_name=XXX'; + BEGIN; -- Should still work (no SIGHUP yet); select count(*) from test where a = 0; diff --git a/src/test/regress/sql/non_super_user_object_metadata.sql b/src/test/regress/sql/non_super_user_object_metadata.sql index be965288f..63d16800b 100644 --- a/src/test/regress/sql/non_super_user_object_metadata.sql +++ b/src/test/regress/sql/non_super_user_object_metadata.sql @@ -179,7 +179,7 @@ SELECT create_distributed_function('metadata_dist_test_proc(integer, integer)', SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'metadata_dist_test_proc'::regproc; SELECT * FROM run_command_on_workers($$ SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'local_schema.metadata_dist_test_proc'::regproc;$$) ORDER BY 1,2; --- Show that dropping schema doesn't affect the worker node +-- Show that the schema is dropped on worker node as well DROP SCHEMA local_schema CASCADE; SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%'; diff --git a/src/test/regress/sql/object_propagation_debug.sql b/src/test/regress/sql/object_propagation_debug.sql index 0b676bd38..566b80501 100644 --- a/src/test/regress/sql/object_propagation_debug.sql +++ b/src/test/regress/sql/object_propagation_debug.sql @@ -60,18 +60,5 @@ JOIN LATERAL ON TRUE ORDER BY 1; --- find non-distributed dependencies of the local table test2 -CREATE 
SCHEMA objectprop2; -create table objectprop2.test2(a int, b t1); -SET search_path TO objectprop2; -SELECT - pg_identify_object(t.classid, t.objid, t.objsubid) -FROM - (SELECT * FROM pg_get_object_address('table', '{test2}', '{}')) as addr -JOIN LATERAL - "object prop".citus_get_dependencies_for_object(addr.classid, addr.objid, addr.objsubid) as t(classid oid, objid oid, objsubid int) -ON TRUE - ORDER BY 1; - SET client_min_messages TO ERROR; -DROP SCHEMA "object prop", objectprop2 CASCADE; +DROP SCHEMA "object prop" CASCADE; diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql index 8bc422f5d..df3ba4e6c 100644 --- a/src/test/regress/sql/pg14.sql +++ b/src/test/regress/sql/pg14.sql @@ -642,3 +642,46 @@ SELECT count(*) FROM set client_min_messages to error; drop schema pg14 cascade; + +create schema pg14; +set search_path to pg14; + +select 1 from citus_add_node('localhost',:master_port,groupid=>0); + +-- test adding foreign table to metadata with the guc +-- will test truncating foreign tables later +CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial); +INSERT INTO foreign_table_test VALUES (1, 'text_test'); +SELECT citus_add_local_table_to_metadata('foreign_table_test'); +CREATE EXTENSION postgres_fdw; +CREATE SERVER foreign_server + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'localhost', port :'master_port', dbname 'regression'); +CREATE USER MAPPING FOR CURRENT_USER + SERVER foreign_server + OPTIONS (user 'postgres'); +CREATE FOREIGN TABLE foreign_table ( + id integer NOT NULL, + data text, + a bigserial +) + SERVER foreign_server + OPTIONS (schema_name 'pg14', table_name 'foreign_table_test'); +SELECT citus_add_local_table_to_metadata('foreign_table'); + +SELECT count(*) FROM foreign_table; +TRUNCATE foreign_table; +\c - - - :worker_1_port +set search_path to pg14; +-- verify the foreign table is truncated +SELECT count(*) FROM pg14.foreign_table; + +-- should error out +TRUNCATE foreign_table; +\c - - - 
:master_port +-- cleanup +set client_min_messages to error; +drop extension postgres_fdw cascade; +drop schema pg14 cascade; +reset client_min_messages; +select 1 from citus_remove_node('localhost',:master_port); diff --git a/src/test/regress/sql/propagate_extension_commands.sql b/src/test/regress/sql/propagate_extension_commands.sql index 500dc00b8..3640e2f17 100644 --- a/src/test/regress/sql/propagate_extension_commands.sql +++ b/src/test/regress/sql/propagate_extension_commands.sql @@ -116,13 +116,6 @@ CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); SELECT run_command_on_workers($$ CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); @@ -227,10 +220,25 @@ SET search_path TO "extension'test"; -- remove the node, we'll add back again SELECT 1 from master_remove_node('localhost', :worker_2_port); +-- Test extension function incorrect distribution argument +CREATE TABLE test_extension_function(col varchar); +CREATE EXTENSION seg; +-- Missing distribution argument +SELECT create_distributed_function('seg_in(cstring)'); +-- Missing colocation argument +SELECT create_distributed_function('seg_in(cstring)', '$1'); +-- Incorrect distribution argument +SELECT create_distributed_function('seg_in(cstring)', '$2', colocate_with:='test_extension_function'); +-- Colocated table is not distributed +SELECT create_distributed_function('seg_in(cstring)', '$1', 
'test_extension_function'); +DROP EXTENSION seg; + +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('test_extension_function', 'col', colocate_with := 'none'); + -- now, create a type that depends on another type, which -- finally depends on an extension BEGIN; - SET citus.shard_replication_factor TO 1; CREATE EXTENSION seg; CREATE EXTENSION isn; CREATE TYPE test_type AS (a int, b seg); @@ -243,8 +251,25 @@ BEGIN; CREATE TABLE t3 (a int, b test_type_3); SELECT create_reference_table('t3'); + -- Distribute an extension-function + SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); COMMIT; +-- Check the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + -- add the node back SELECT 1 from master_add_node('localhost', :worker_2_port); @@ -252,5 +277,86 @@ SELECT 1 from master_add_node('localhost', :worker_2_port); SELECT count(*) FROM citus.pg_dist_object WHERE objid IN (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn')); SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$); +-- Check the pg_dist_object on the both nodes +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + +DROP EXTENSION seg CASCADE; + +-- Recheck the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 
'pg_proc'::regclass; + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + +-- Distribute an extension-function where extension is not in pg_dist_object +SET citus.enable_ddl_propagation TO false; +CREATE EXTENSION seg; +SET citus.enable_ddl_propagation TO true; + +-- Check the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + +-- Recheck the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); +DROP EXTENSION seg; +DROP TABLE test_extension_function; + + +-- Test extension function altering distribution argument +BEGIN; +SET citus.shard_replication_factor = 1; +CREATE TABLE test_extension_function(col1 float8[], col2 float8[]); +SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none'); +CREATE EXTENSION cube; + +SELECT create_distributed_function('cube(float8[], float8[])', '$1', 
'test_extension_function'); +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + +SELECT create_distributed_function('cube(float8[], float8[])', '$2', 'test_extension_function'); +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); +ROLLBACK; + -- drop the schema and all the objects DROP SCHEMA "extension'test" CASCADE; diff --git a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql index 0587ebfaa..a3a5c94c8 100644 --- a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql +++ b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql @@ -32,12 +32,13 @@ CREATE TABLE numbers(a int); SELECT create_reference_table('numbers'); INSERT INTO numbers VALUES (20), (21); +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION my_volatile_fn() RETURNS INT AS $$ BEGIN RETURN 1; END; $$ language plpgsql VOLATILE; - +RESET citus.enable_metadata_sync; -- INSERT ... 
SELECT between reference tables BEGIN; diff --git a/src/test/regress/sql/shard_move_deferred_delete.sql b/src/test/regress/sql/shard_move_deferred_delete.sql index a052590d5..9dc4f82e3 100644 --- a/src/test/regress/sql/shard_move_deferred_delete.sql +++ b/src/test/regress/sql/shard_move_deferred_delete.sql @@ -80,17 +80,39 @@ $cmd$); SELECT master_move_shard_placement(20000000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); -SELECT run_command_on_workers($cmd$ - -- override the function for testing purpose - create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) - as $BODY$ - begin - select 20 into available_disk_size; - select 8500 into total_disk_size; - end - $BODY$ language plpgsql; -$cmd$); +-- override the function for testing purpose +-- since it is extension owned function, propagate it to workers manually +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :master_port + +SET search_path TO shard_move_deferred_delete; SELECT citus_shard_cost_by_disk_size(20000001); @@ -108,16 +130,39 @@ SELECT run_command_on_workers($cmd$ SELECT count(*) FROM 
pg_class WHERE relname = 't1_20000000'; $cmd$); -SELECT run_command_on_workers($cmd$ - -- override the function for testing purpose - create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) - as $BODY$ - begin - select 8300 into available_disk_size; - select 8500 into total_disk_size; - end - $BODY$ language plpgsql; -$cmd$); +-- override the function for testing purpose +-- since it is extension owned function, propagate it to workers manually +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :master_port + +SET search_path TO shard_move_deferred_delete; -- When there would not be enough free space left after the move, the move should fail SELECT master_move_shard_placement(20000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port); diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql index c9bad1983..8a9ae16d9 100644 --- a/src/test/regress/sql/shard_rebalancer.sql +++ b/src/test/regress/sql/shard_rebalancer.sql @@ -137,6 +137,7 @@ AS 'citus' LANGUAGE C STRICT VOLATILE; -- this function is dropped in 
Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -154,6 +155,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; SET citus.next_shard_id TO 123000; diff --git a/src/test/regress/sql/single_node.sql b/src/test/regress/sql/single_node.sql index 13659f2eb..3ca456108 100644 --- a/src/test/regress/sql/single_node.sql +++ b/src/test/regress/sql/single_node.sql @@ -1017,9 +1017,22 @@ RESET citus.enable_manual_changes_to_shards ; TRUNCATE TABLE test_disabling_drop_and_truncate_102040; DROP TABLE test_disabling_drop_and_truncate_102040; -RESET citus.shard_replication_factor; DROP TABLE test_disabling_drop_and_truncate; +-- test creating distributed or reference tables from shards +CREATE TABLE test_creating_distributed_relation_table_from_shard (a int); +SELECT create_distributed_table('test_creating_distributed_relation_table_from_shard', 'a'); + +-- these should error because shards cannot be used to: +-- create distributed table +SELECT create_distributed_table('test_creating_distributed_relation_table_from_shard_102044', 'a'); + +-- create reference table +SELECT create_reference_table('test_creating_distributed_relation_table_from_shard_102044'); + +RESET citus.shard_replication_factor; +DROP TABLE test_creating_distributed_relation_table_from_shard; + -- lets flush the copy often to make sure everyhing is fine SET citus.local_copy_flush_threshold TO 1; TRUNCATE another_schema_table; diff --git a/src/test/regress/sql/subquery_prepared_statements.sql b/src/test/regress/sql/subquery_prepared_statements.sql index 1152f7212..4f20e6905 100644 --- a/src/test/regress/sql/subquery_prepared_statements.sql +++ 
b/src/test/regress/sql/subquery_prepared_statements.sql @@ -2,7 +2,6 @@ -- test recursive planning functionality on prepared statements -- =================================================================== CREATE SCHEMA subquery_prepared_statements; -SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;'); SET search_path TO subquery_prepared_statements, public; diff --git a/src/test/regress/sql/text_search.sql b/src/test/regress/sql/text_search.sql new file mode 100644 index 000000000..916644fd6 --- /dev/null +++ b/src/test/regress/sql/text_search.sql @@ -0,0 +1,263 @@ +CREATE SCHEMA text_search; +CREATE SCHEMA text_search2; +SET search_path TO text_search; + +-- create a new configruation from scratch +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); + +DROP TABLE t1; +DROP TEXT SEARCH CONFIGURATION my_text_search_config; + +-- try to create table and index in 1 transaction +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); +ABORT; + +-- try again, should not fail with my_text_search_config being retained on the worker +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'on demand propagation of text search object with a comment'; +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character 
varying))::text)); +SELECT create_distributed_table('t1', 'name'); +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.my_text_search_config'::regconfig); +$$) ORDER BY 1,2; + +-- verify that changing anything on a managed TEXT SEARCH CONFIGURATION fails after parallel execution +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'this comment can''t be set right now'; +ABORT; + +-- create an index on an already distributed table +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config2 ( parser = default ); +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config2 IS 'on demand propagation of text search object with a comment 2'; +CREATE TABLE t1(id int, name text); +SELECT create_distributed_table('t1', 'name'); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config2'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.my_text_search_config2'::regconfig); +$$) ORDER BY 1,2; +ABORT; + +-- should be able to create a configuration based on a copy of an existing configuration +CREATE TEXT SEARCH CONFIGURATION french_noaccent ( COPY = french ); +CREATE TABLE t2(id int, name text); +CREATE INDEX t2_search_name ON t2 USING gin (to_tsvector('text_search.french_noaccent'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t2', 'id'); + +-- spot check that french_noaccent copied settings from french +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'comment tu t''appelle') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; +-- makes no sense, however we expect that the dictionary for the first token changes accordingly +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING FOR asciiword WITH dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM 
ts_debug('text_search.french_noaccent', 'comment tu t''appelle') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; +-- do the same but we will replace all french dictionaries +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING REPLACE french_stem WITH dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; +-- once more but now back via yet a different DDL command +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING FOR asciihword REPLACE dutch_stem WITH french_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; +-- drop a mapping +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING FOR asciihword; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; +-- also with exists, doesn't change anything, but should not error +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciihword; + +-- Comment on a text search configuration +COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS 'a text configuration that is butcherd to test all edge cases'; +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.french_noaccent'::regconfig); +$$) ORDER BY 1,2; + +-- Remove a comment +COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS NULL; +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.french_noaccent'::regconfig); +$$) ORDER BY 1,2; + +-- verify adding 2 
dictionaries for two tokes at once +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciiword, asciihword; +ALTER TEXT SEARCH CONFIGURATION french_noaccent ADD MAPPING FOR asciiword, asciihword WITH french_stem, dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionaries) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionaries) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + +--verify we can drop cascade a configuration that is in use +-- verify it is in use +DROP TEXT SEARCH CONFIGURATION text_search.french_noaccent; +-- drop cascade +DROP TEXT SEARCH CONFIGURATION text_search.french_noaccent CASCADE; +-- verify the configuration is dropped from the workers +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.french_noaccent'::regconfig; $$) ORDER BY 1,2; + +SET client_min_messages TO 'warning'; +SELECT * FROM run_command_on_workers($$CREATE ROLE text_search_owner;$$) ORDER BY 1,2; +CREATE ROLE text_search_owner; +RESET client_min_messages; + +CREATE TEXT SEARCH CONFIGURATION changed_owner ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner'::regconfig; +$$) ORDER BY 1,2; +ALTER TEXT SEARCH CONFIGURATION changed_owner OWNER TO text_search_owner; +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner'::regconfig; +$$) ORDER BY 1,2; + +-- redo test with propagating object after it was created and changed of owner +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION changed_owner2 ( PARSER = default ); +ALTER TEXT SEARCH CONFIGURATION changed_owner2 OWNER TO text_search_owner; +RESET citus.enable_ddl_propagation; 
+-- verify object doesn't exist before propagating +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.changed_owner2'::regconfig; $$) ORDER BY 1,2; + +-- distribute configuration +CREATE TABLE t3(id int, name text); +CREATE INDEX t3_search_name ON t3 USING gin (to_tsvector('text_search.changed_owner2'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t3', 'name'); + +-- verify config owner +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner2'::regconfig; +$$) ORDER BY 1,2; + + +-- rename tests +CREATE TEXT SEARCH CONFIGURATION change_name ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.change_name'::regconfig; +$$) ORDER BY 1,2; +ALTER TEXT SEARCH CONFIGURATION change_name RENAME TO changed_name; +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.changed_name'::regconfig; +$$) ORDER BY 1,2; + +-- test move of schema +CREATE TEXT SEARCH CONFIGURATION change_schema ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.change_schema'::regconfig; +$$) ORDER BY 1,2; +ALTER TEXT SEARCH CONFIGURATION change_schema SET SCHEMA text_search2; +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search2.change_schema'::regconfig; +$$) ORDER BY 1,2; + +-- verify we get an error that the configuration change_schema is not found, even though the object address will be +-- found in its new schema, and is distributed +ALTER TEXT SEARCH CONFIGURATION change_schema SET SCHEMA text_search2; +-- should tell us that text_search.does_not_exist does not exist, covers a complex edgecase +-- in resolving the object address +ALTER TEXT SEARCH CONFIGURATION text_search.does_not_exist SET SCHEMA text_search2; + + +-- verify 
edgecases in deparsers +CREATE TEXT SEARCH CONFIGURATION config1 ( PARSER = default ); +CREATE TEXT SEARCH CONFIGURATION config2 ( PARSER = default ); +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION config3 ( PARSER = default ); +RESET citus.enable_ddl_propagation; + +-- verify config1, config2 exist on workers, config3 not +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; $$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; + +-- DROP all config's, only 1&2 are distributed, they should propagate well to remotes +DROP TEXT SEARCH CONFIGURATION config1, config2, config3; + +-- verify all existing ones have been removed (checking config3 for consistency) +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; $$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; +-- verify they are all removed locally +SELECT 'text_search.config1'::regconfig; +SELECT 'text_search.config2'::regconfig; +SELECT 'text_search.config3'::regconfig; + +-- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION concurrent_index_config ( PARSER = default ); +RESET citus.enable_ddl_propagation; + +-- verify it doesn't exist on the workers +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.concurrent_index_config'::regconfig; $$) ORDER BY 1,2; + +-- create distributed table that then concurrently would have an index created. 
+CREATE TABLE t4(id int, name text); +SELECT create_distributed_table('t4', 'name'); +CREATE INDEX CONCURRENTLY t4_search_name ON t4 USING gin (to_tsvector('text_search.concurrent_index_config'::regconfig, (COALESCE(name, ''::character varying))::text)); + +-- now the configuration should be on the worker, and the above index creation shouldn't have failed. +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.concurrent_index_config'::regconfig; $$) ORDER BY 1,2; + +-- verify the objid is correctly committed locally due to the somewhat convoluted commit and new transaction starting when creating an index concurrently +SELECT pg_catalog.pg_identify_object_as_address(classid, objid, objsubid) + FROM citus.pg_dist_object + WHERE classid = 3602 AND objid = 'text_search.concurrent_index_config'::regconfig::oid; + +-- verify old text search configurations get renamed if they are not the same as the newly propagated configuration. +-- We do this by creating configurations on the workers as a copy from a different existing catalog. 
+SELECT * FROM run_command_on_workers($$ + set citus.enable_metadata_sync TO off; + CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = dutch ); + reset citus.enable_metadata_sync; +$$) ORDER BY 1,2; +CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = french ); + +-- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations +SELECT * FROM run_command_on_workers($$ + SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; +$$) ORDER BY 1,2; + +-- verify the objects get reused appropriately when the specification is the same +SELECT * FROM run_command_on_workers($$ + set citus.enable_metadata_sync TO off; + CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = french ); + reset citus.enable_metadata_sync; +$$) ORDER BY 1,2; +CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = french ); + +-- now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the +-- original one is reused +SELECT * FROM run_command_on_workers($$ + SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; +$$) ORDER BY 1,2; + +CREATE SCHEMA "Text Search Requiring Quote's"; +CREATE TEXT SEARCH CONFIGURATION "Text Search Requiring Quote's"."Quoted Config Name" ( parser = default ); +CREATE TABLE t5(id int, name text); +CREATE INDEX t5_search_name ON t5 USING gin (to_tsvector('"Text Search Requiring Quote''s"."Quoted Config Name"'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t5', 'name'); + +SET client_min_messages TO 'warning'; +DROP SCHEMA text_search, text_search2, "Text Search Requiring Quote's" CASCADE; +DROP ROLE text_search_owner; diff --git a/src/test/regress/sql/upgrade_basic_before.sql b/src/test/regress/sql/upgrade_basic_before.sql index 3b236cca0..7d518e510 100644 --- 
a/src/test/regress/sql/upgrade_basic_before.sql +++ b/src/test/regress/sql/upgrade_basic_before.sql @@ -34,6 +34,7 @@ CREATE TABLE tr(pk int, a int REFERENCES r(a) ON DELETE CASCADE ON UPDATE CASCAD SELECT create_distributed_table('tr', 'pk'); INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -50,6 +51,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE t_range(id int, value_1 int); SELECT create_distributed_table('t_range', 'id', 'range'); SELECT master_create_empty_shard('t_range') as shardid1 \gset