diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index 6664b6c1c..fa6519ffb 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -215,6 +215,9 @@ PG_FUNCTION_INFO_V1(alter_distributed_table); PG_FUNCTION_INFO_V1(alter_table_set_access_method); PG_FUNCTION_INFO_V1(worker_change_sequence_dependency); +/* global variable keeping track of whether we are in a table type conversion function */ +bool InTableTypeConversionFunctionCall = false; + /* * undistribute_table gets a distributed table name and @@ -503,10 +506,16 @@ AlterTableSetAccessMethod(TableConversionParameters *params) * * The function returns a TableConversionReturn object that can stores variables that * can be used at the caller operations. + * + * To be able to provide more meaningful messages while converting a table type, + * Citus keeps InTableTypeConversionFunctionCall flag. Don't forget to set it properly + * in case you add a new way to return from this function. */ TableConversionReturn * ConvertTable(TableConversionState *con) { + InTableTypeConversionFunctionCall = true; + /* * We undistribute citus local tables that are not chained with any reference * tables via foreign keys at the end of the utility hook. @@ -535,6 +544,7 @@ ConvertTable(TableConversionState *con) * subgraph including itself, so return here. */ SetLocalEnableLocalReferenceForeignKeys(oldEnableLocalReferenceForeignKeys); + InTableTypeConversionFunctionCall = false; return NULL; } char *newAccessMethod = con->accessMethod ? 
con->accessMethod : @@ -819,6 +829,7 @@ ConvertTable(TableConversionState *con) SetLocalEnableLocalReferenceForeignKeys(oldEnableLocalReferenceForeignKeys); + InTableTypeConversionFunctionCall = false; return ret; } diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index 72b49fc5a..13bbf34fa 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -348,6 +348,14 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) return DDLCommandsForSequence(dependency->objectId, sequenceOwnerName); } + if (relKind == RELKIND_VIEW) + { + char *createViewCommand = CreateViewDDLCommand(dependency->objectId); + char *alterViewOwnerCommand = AlterViewOwnerCommand(dependency->objectId); + + return list_make2(createViewCommand, alterViewOwnerCommand); + } + /* if this relation is not supported, break to the error at the end */ break; } diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index aaecb3e98..7f8f74aef 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -197,6 +197,14 @@ static DistributeObjectOps Any_CreateFunction = { .address = CreateFunctionStmtObjectAddress, .markDistributed = true, }; +static DistributeObjectOps Any_View = { + .deparse = NULL, + .qualify = NULL, + .preprocess = PreprocessViewStmt, + .postprocess = PostprocessViewStmt, + .address = ViewStmtObjectAddress, + .markDistributed = true, +}; static DistributeObjectOps Any_CreatePolicy = { .deparse = NULL, .qualify = NULL, @@ -470,6 +478,14 @@ static DistributeObjectOps Function_Drop = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps View_Drop = { + .deparse = DeparseDropViewStmt, + .qualify = QualifyDropViewStmt, + .preprocess = PreprocessDropViewStmt, + .postprocess = NULL, + .address = 
NULL, + .markDistributed = false, +}; static DistributeObjectOps Function_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, @@ -1419,6 +1435,11 @@ GetDistributeObjectOps(Node *node) return &Trigger_Drop; } + case OBJECT_VIEW: + { + return &View_Drop; + } + default: { return &NoDistributeOps; @@ -1448,6 +1469,11 @@ GetDistributeObjectOps(Node *node) return &Any_Index; } + case T_ViewStmt: + { + return &Any_View; + } + case T_ReindexStmt: { return &Any_Reindex; diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c new file mode 100644 index 000000000..2f1ffc7c6 --- /dev/null +++ b/src/backend/distributed/commands/view.c @@ -0,0 +1,462 @@ +/*------------------------------------------------------------------------- + * + * view.c + * Commands for distributing CREATE OR REPLACE VIEW statements. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "fmgr.h" + +#include "access/genam.h" +#include "catalog/objectaddress.h" +#include "commands/extension.h" +#include "distributed/commands.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/deparser.h" +#include "distributed/errormessage.h" +#include "distributed/listutils.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata/dependency.h" +#include "distributed/metadata/distobject.h" +#include "distributed/multi_executor.h" +#include "distributed/namespace_utils.h" +#include "distributed/worker_transaction.h" +#include "executor/spi.h" +#include "nodes/nodes.h" +#include "nodes/pg_list.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +static List * FilterNameListForDistributedViews(List *viewNamesList, bool missing_ok); +static void 
AppendQualifiedViewNameToCreateViewCommand(StringInfo buf, Oid viewOid); +static void AppendAliasesToCreateViewCommand(StringInfo createViewCommand, Oid viewOid); +static void AppendOptionsToCreateViewCommand(StringInfo createViewCommand, Oid viewOid); + +/* + * PreprocessViewStmt is called during the planning phase for CREATE OR REPLACE VIEW + * before it is created on the local node internally. + */ +List * +PreprocessViewStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + if (!ShouldPropagate()) + { + return NIL; + } + + /* check creation against multi-statement transaction policy */ + if (!ShouldPropagateCreateInCoordinatedTransction()) + { + return NIL; + } + + EnsureCoordinator(); + + return NIL; +} + + +/* + * PostprocessViewStmt actually creates the commmands we need to run on workers to + * propagate views. + * + * If view depends on any undistributable object, Citus can not distribute it. In order to + * not to prevent users from creating local views on the coordinator WARNING message will + * be sent to the customer about the case instead of erroring out. If no worker nodes exist + * at all, view will be created locally without any WARNING message. + * + * Besides creating the plan we also make sure all (new) dependencies of the view are + * created on all nodes. 
+ */ +List * +PostprocessViewStmt(Node *node, const char *queryString) +{ + ViewStmt *stmt = castNode(ViewStmt, node); + + if (!ShouldPropagate()) + { + return NIL; + } + + /* check creation against multi-statement transaction policy */ + if (!ShouldPropagateCreateInCoordinatedTransction()) + { + return NIL; + } + + ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + + if (IsObjectAddressOwnedByExtension(&viewAddress, NULL)) + { + return NIL; + } + + /* If the view has any unsupported dependency, create it locally */ + DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&viewAddress); + + if (errMsg != NULL) + { + /* + * Don't need to give any warning/error messages if there is no worker nodes in + * the cluster as user's experience won't be affected on the single node even + * if the view won't be distributed. + */ + if (!HasAnyNodes()) + { + return NIL; + } + + /* + * Since Citus drops and recreates views while converting a table type, giving a + * NOTICE message is enough if the process in table type conversion function call + */ + if (InTableTypeConversionFunctionCall) + { + RaiseDeferredError(errMsg, DEBUG1); + return NIL; + } + + /* + * If the view is already distributed, we should provide an error to not have + * different definition of view on coordinator and worker nodes. If the view + * is not distributed yet, we can create it locally to not affect user's local + * usage experience. + */ + if (IsObjectDistributed(&viewAddress)) + { + RaiseDeferredError(errMsg, ERROR); + } + else + { + RaiseDeferredError(errMsg, WARNING); + return NIL; + } + } + + EnsureDependenciesExistOnAllNodes(&viewAddress); + + char *command = CreateViewDDLCommand(viewAddress.objectId); + + /* + * We'd typically use NodeDDLTaskList() for generating node-level DDL commands, + * such as when creating a type. However, views are different in a sense that + * views do not depend on citus tables. Instead, they are `depending` on citus tables. 
+ * + * When NodeDDLTaskList() used, it should be accompanied with sequential execution. + * Here, we do something equivalent to NodeDDLTaskList(), but using metadataSyncCommand + * field. This hack allows us to use the metadata connection + * (see `REQUIRE_METADATA_CONNECTION` flag). Meaning that, view creation is treated as + * a metadata operation. + * + * We do this mostly for performance reasons, because we cannot afford to switch to + * sequential execution, for instance when we are altering or creating distributed + * tables -- which may require significant resources. + * + * The downside of using this hack is that if a view is re-used in the same transaction + * that creates the view on the workers, we might get errors such as the below which + * we consider a decent trade-off currently: + * + * BEGIN; + * CREATE VIEW dist_view .. + * CRETAE TABLE t2(id int, val dist_view); + * + * -- shard creation fails on one of the connections + * SELECT create_distributed_table('t2', 'id'); + * ERROR: type "public.dist_view" does not exist + * + */ + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); + ddlJob->targetObjectAddress = viewAddress; + ddlJob->metadataSyncCommand = command; + ddlJob->taskList = NIL; + + return list_make1(ddlJob); +} + + +/* + * ViewStmtObjectAddress returns the ObjectAddress for the subject of the + * CREATE [OR REPLACE] VIEW statement. + */ +ObjectAddress +ViewStmtObjectAddress(Node *node, bool missing_ok) +{ + ViewStmt *stmt = castNode(ViewStmt, node); + + Oid viewOid = RangeVarGetRelid(stmt->view, NoLock, missing_ok); + + ObjectAddress viewAddress = { 0 }; + ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + + return viewAddress; +} + + +/* + * PreprocessDropViewStmt gets called during the planning phase of a DROP VIEW statement + * and returns a list of DDLJob's that will drop any distributed view from the + * workers. 
+ * + * The DropStmt could have multiple objects to drop, the list of objects will be filtered + * to only keep the distributed views for deletion on the workers. Non-distributed + * views will still be dropped locally but not on the workers. + */ +List * +PreprocessDropViewStmt(Node *node, const char *queryString, ProcessUtilityContext + processUtilityContext) +{ + DropStmt *stmt = castNode(DropStmt, node); + + if (!ShouldPropagate()) + { + return NIL; + } + + List *distributedViewNames = FilterNameListForDistributedViews(stmt->objects, + stmt->missing_ok); + + if (list_length(distributedViewNames) < 1) + { + /* no distributed view to drop */ + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_VIEW); + + /* + * Swap the list of objects before deparsing and restore the old list after. This + * ensures we only have distributed views in the deparsed drop statement. + */ + DropStmt *stmtCopy = copyObject(stmt); + stmtCopy->objects = distributedViewNames; + + QualifyTreeNode((Node *) stmtCopy); + const char *dropStmtSql = DeparseTreeNode((Node *) stmtCopy); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) dropStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * FilterNameListForDistributedViews takes a list of view names and filters against the + * views that are distributed. + * + * The original list will not be touched, a new list will be created with only the objects + * in there. 
+ */ +static List * +FilterNameListForDistributedViews(List *viewNamesList, bool missing_ok) +{ + List *distributedViewNames = NIL; + + List *possiblyQualifiedViewName = NULL; + foreach_ptr(possiblyQualifiedViewName, viewNamesList) + { + char *viewName = NULL; + char *schemaName = NULL; + DeconstructQualifiedName(possiblyQualifiedViewName, &schemaName, &viewName); + + if (schemaName == NULL) + { + char *objName = NULL; + Oid schemaOid = QualifiedNameGetCreationNamespace(possiblyQualifiedViewName, + &objName); + schemaName = get_namespace_name(schemaOid); + } + + Oid schemaId = get_namespace_oid(schemaName, missing_ok); + Oid viewOid = get_relname_relid(viewName, schemaId); + + if (!OidIsValid(viewOid)) + { + continue; + } + + ObjectAddress viewAddress = { 0 }; + ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + + if (IsObjectDistributed(&viewAddress)) + { + distributedViewNames = lappend(distributedViewNames, + possiblyQualifiedViewName); + } + } + + return distributedViewNames; +} + + +/* + * CreateViewDDLCommand returns the DDL command to create the view addressed by + * the viewAddress. + */ +char * +CreateViewDDLCommand(Oid viewOid) +{ + StringInfo createViewCommand = makeStringInfo(); + + appendStringInfoString(createViewCommand, "CREATE OR REPLACE VIEW "); + + AppendQualifiedViewNameToCreateViewCommand(createViewCommand, viewOid); + AppendAliasesToCreateViewCommand(createViewCommand, viewOid); + AppendOptionsToCreateViewCommand(createViewCommand, viewOid); + AppendViewDefinitionToCreateViewCommand(createViewCommand, viewOid); + + return createViewCommand->data; +} + + +/* + * AppendQualifiedViewNameToCreateViewCommand adds the qualified view of the given view + * oid to the given create view command. 
+ */ +static void +AppendQualifiedViewNameToCreateViewCommand(StringInfo buf, Oid viewOid) +{ + char *viewName = get_rel_name(viewOid); + char *schemaName = get_namespace_name(get_rel_namespace(viewOid)); + char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); + + appendStringInfo(buf, "%s ", qualifiedViewName); +} + + +/* + * AppendAliasesToCreateViewCommand appends aliases to the create view + * command for the existing view. + */ +static void +AppendAliasesToCreateViewCommand(StringInfo createViewCommand, Oid viewOid) +{ + /* Get column name aliases from pg_attribute */ + ScanKeyData key[1]; + ScanKeyInit(&key[0], + Anum_pg_attribute_attrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(viewOid)); + + Relation maprel = table_open(AttributeRelationId, AccessShareLock); + Relation mapidx = index_open(AttributeRelidNumIndexId, AccessShareLock); + SysScanDesc pgAttributeScan = systable_beginscan_ordered(maprel, mapidx, NULL, 1, + key); + + bool isInitialAlias = true; + bool hasAlias = false; + HeapTuple attributeTuple; + while (HeapTupleIsValid(attributeTuple = systable_getnext_ordered(pgAttributeScan, + ForwardScanDirection))) + { + Form_pg_attribute att = (Form_pg_attribute) GETSTRUCT(attributeTuple); + const char *aliasName = quote_identifier(NameStr(att->attname)); + + if (isInitialAlias) + { + appendStringInfoString(createViewCommand, "("); + } + else + { + appendStringInfoString(createViewCommand, ","); + } + + appendStringInfoString(createViewCommand, aliasName); + + hasAlias = true; + isInitialAlias = false; + } + + if (hasAlias) + { + appendStringInfoString(createViewCommand, ") "); + } + + systable_endscan_ordered(pgAttributeScan); + index_close(mapidx, AccessShareLock); + table_close(maprel, AccessShareLock); +} + + +/* + * AppendOptionsToCreateViewCommand add relation options to create view command + * for an existing view + */ +static void +AppendOptionsToCreateViewCommand(StringInfo createViewCommand, Oid viewOid) +{ + /* 
Add rel options to create view command */ + char *relOptions = flatten_reloptions(viewOid); + if (relOptions != NULL) + { + appendStringInfo(createViewCommand, "WITH (%s) ", relOptions); + } +} + + +/* + * AppendViewDefinitionToCreateViewCommand adds the definition of the given view to the + * given create view command. + */ +void +AppendViewDefinitionToCreateViewCommand(StringInfo buf, Oid viewOid) +{ + /* + * Set search_path to NIL so that all objects outside of pg_catalog will be + * schema-prefixed. + */ + OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext); + overridePath->schemas = NIL; + overridePath->addCatalog = true; + PushOverrideSearchPath(overridePath); + + /* + * Push the transaction snapshot to be able to get vief definition with pg_get_viewdef + */ + PushActiveSnapshot(GetTransactionSnapshot()); + + Datum viewDefinitionDatum = DirectFunctionCall1(pg_get_viewdef, + ObjectIdGetDatum(viewOid)); + char *viewDefinition = TextDatumGetCString(viewDefinitionDatum); + + PopActiveSnapshot(); + PopOverrideSearchPath(); + + appendStringInfo(buf, "AS %s ", viewDefinition); +} + + +/* + * AlterViewOwnerCommand returns the command to alter view owner command for the + * given view oid. 
+ */ +char * +AlterViewOwnerCommand(Oid viewOid) +{ + /* Add alter owner commmand */ + StringInfo alterOwnerCommand = makeStringInfo(); + + char *viewName = get_rel_name(viewOid); + Oid schemaOid = get_rel_namespace(viewOid); + char *schemaName = get_namespace_name(schemaOid); + + char *viewOwnerName = TableOwner(viewOid); + char *qualifiedViewName = NameListToQuotedString(list_make2(makeString(schemaName), + makeString(viewName))); + appendStringInfo(alterOwnerCommand, + "ALTER VIEW %s OWNER TO %s", qualifiedViewName, + quote_identifier(viewOwnerName)); + + return alterOwnerCommand->data; +} diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index 3da845d3b..ff8da5991 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -80,7 +80,6 @@ static void deparse_index_columns(StringInfo buffer, List *indexParameterList, static void AppendStorageParametersToString(StringInfo stringBuffer, List *optionList); static void simple_quote_literal(StringInfo buf, const char *val); -static char * flatten_reloptions(Oid relid); static void AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer); @@ -1231,7 +1230,7 @@ pg_get_replica_identity_command(Oid tableRelationId) * This function comes from PostgreSQL source code in * src/backend/utils/adt/ruleutils.c */ -static char * +char * flatten_reloptions(Oid relid) { char *result = NULL; diff --git a/src/backend/distributed/deparser/deparse_view_stmts.c b/src/backend/distributed/deparser/deparse_view_stmts.c new file mode 100644 index 000000000..b669a92c3 --- /dev/null +++ b/src/backend/distributed/deparser/deparse_view_stmts.c @@ -0,0 +1,94 @@ +/*------------------------------------------------------------------------- + * + * deparse_view_stmts.c + * + * All routines to deparse view statements. + * + * Copyright (c), Citus Data, Inc. 
+ * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/namespace.h" +#include "commands/defrem.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" + +static void AppendDropViewStmt(StringInfo buf, DropStmt *stmt); +static void AppendViewNameList(StringInfo buf, List *objects); + +/* + * DeparseDropViewStmt deparses the given DROP VIEW statement. + */ +char * +DeparseDropViewStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + StringInfoData str = { 0 }; + initStringInfo(&str); + + Assert(stmt->removeType == OBJECT_VIEW); + + AppendDropViewStmt(&str, stmt); + + return str.data; +} + + +/* + * AppendDropViewStmt appends the deparsed representation of given drop stmt + * to the given string info buffer. + */ +static void +AppendDropViewStmt(StringInfo buf, DropStmt *stmt) +{ + /* + * already tested at call site, but for future it might be collapsed in a + * DeparseDropStmt so be safe and check again + */ + Assert(stmt->removeType == OBJECT_VIEW); + + appendStringInfo(buf, "DROP VIEW "); + if (stmt->missing_ok) + { + appendStringInfoString(buf, "IF EXISTS "); + } + AppendViewNameList(buf, stmt->objects); + if (stmt->behavior == DROP_CASCADE) + { + appendStringInfoString(buf, " CASCADE"); + } + appendStringInfoString(buf, ";"); +} + + +/* + * AppendViewNameList appends the qualified view names by constructing them from the given + * objects list to the given string info buffer. Note that, objects must hold schema + * qualified view names as its' members. 
+ */ +static void +AppendViewNameList(StringInfo buf, List *viewNamesList) +{ + bool isFirstView = true; + List *qualifiedViewName = NULL; + foreach_ptr(qualifiedViewName, viewNamesList) + { + char *quotedQualifiedVieName = NameListToQuotedString(qualifiedViewName); + if (!isFirstView) + { + appendStringInfo(buf, ", "); + } + + appendStringInfoString(buf, quotedQualifiedVieName); + isFirstView = false; + } +} diff --git a/src/backend/distributed/deparser/qualify_view_stmt.c b/src/backend/distributed/deparser/qualify_view_stmt.c new file mode 100644 index 000000000..c2bd7efc1 --- /dev/null +++ b/src/backend/distributed/deparser/qualify_view_stmt.c @@ -0,0 +1,54 @@ +/*------------------------------------------------------------------------- + * + * qualify_view_stmt.c + * Functions specialized in fully qualifying all view statements. These + * functions are dispatched from qualify.c + * + * Copyright (c), Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/namespace.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "nodes/nodes.h" +#include "utils/guc.h" +#include "utils/lsyscache.h" + +/* + * QualifyDropViewStmt quailifies the view names of the DROP VIEW statement. 
+ */ +void +QualifyDropViewStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + List *qualifiedViewNames = NIL; + + List *possiblyQualifiedViewName = NULL; + foreach_ptr(possiblyQualifiedViewName, stmt->objects) + { + char *viewName = NULL; + char *schemaName = NULL; + DeconstructQualifiedName(possiblyQualifiedViewName, &schemaName, &viewName); + + if (schemaName == NULL) + { + char *objname = NULL; + Oid schemaOid = QualifiedNameGetCreationNamespace(possiblyQualifiedViewName, + &objname); + schemaName = get_namespace_name(schemaOid); + List *qualifiedViewName = list_make2(makeString(schemaName), + makeString(viewName)); + qualifiedViewNames = lappend(qualifiedViewNames, qualifiedViewName); + } + else + { + qualifiedViewNames = lappend(qualifiedViewNames, possiblyQualifiedViewName); + } + } + + stmt->objects = qualifiedViewNames; +} diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index f03e96b7c..435ab1ea5 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -803,6 +803,11 @@ GetObjectTypeString(ObjectType objType) return "type"; } + case OBJECT_VIEW: + { + return "view"; + } + default: { ereport(DEBUG1, (errmsg("unsupported object type"), diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 7545bb6cd..823552788 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -165,6 +165,7 @@ static bool FollowAllDependencies(ObjectAddressCollector *collector, DependencyDefinition *definition); static void ApplyAddToDependencyList(ObjectAddressCollector *collector, DependencyDefinition *definition); +static List * GetViewRuleReferenceDependencyList(Oid relationId); static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress target); static ViewDependencyNode * 
BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap); @@ -425,7 +426,7 @@ DependencyDefinitionFromPgDepend(ObjectAddress target) /* - * DependencyDefinitionFromPgDepend loads all pg_shdepend records describing the + * DependencyDefinitionFromPgShDepend loads all pg_shdepend records describing the * dependencies of target. */ static List * @@ -747,7 +748,8 @@ SupportedDependencyByCitus(const ObjectAddress *address) relKind == RELKIND_FOREIGN_TABLE || relKind == RELKIND_SEQUENCE || relKind == RELKIND_INDEX || - relKind == RELKIND_PARTITIONED_INDEX) + relKind == RELKIND_PARTITIONED_INDEX || + relKind == RELKIND_VIEW) { return true; } @@ -801,8 +803,11 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) * Otherwise, callers are expected to throw the error returned from this * function as a hard one by ignoring the detail part. */ - appendStringInfo(detailInfo, "\"%s\" will be created only locally", - objectDescription); + if (!IsObjectDistributed(objectAddress)) + { + appendStringInfo(detailInfo, "\"%s\" will be created only locally", + objectDescription); + } if (SupportedDependencyByCitus(undistributableDependency)) { @@ -813,9 +818,19 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) objectDescription, dependencyDescription); - appendStringInfo(hintInfo, "Distribute \"%s\" first to distribute \"%s\"", - dependencyDescription, - objectDescription); + if (IsObjectDistributed(objectAddress)) + { + appendStringInfo(hintInfo, + "Distribute \"%s\" first to modify \"%s\" on worker nodes", + dependencyDescription, + objectDescription); + } + else + { + appendStringInfo(hintInfo, "Distribute \"%s\" first to distribute \"%s\"", + dependencyDescription, + objectDescription); + } return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, errorInfo->data, detailInfo->data, hintInfo->data); @@ -893,7 +908,9 @@ GetUndistributableDependency(const ObjectAddress *objectAddress) { char relKind = get_rel_relkind(dependency->objectId); - if 
(relKind == RELKIND_SEQUENCE || relKind == RELKIND_COMPOSITE_TYPE) + if (relKind == RELKIND_SEQUENCE || + relKind == RELKIND_COMPOSITE_TYPE || + relKind == RELKIND_VIEW) { /* citus knows how to auto-distribute these dependencies */ continue; @@ -1307,9 +1324,26 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe * create all objects required by the indices before we create the table * including indices. */ - List *indexDependencyList = GetRelationIndicesDependencyList(relationId); result = list_concat(result, indexDependencyList); + + /* + * Get the dependencies of the rule for the given view. PG keeps internal + * dependency between view and rule. As it is stated on the PG doc, if + * there is an internal dependency, dependencies of the dependent object + * behave much like they were dependencies of the referenced object. + * + * We need to expand dependencies by including dependencies of the rule + * internally dependent to the view. PG doesn't keep any dependencies + * from view to any object, but it keeps an internal dependency to the + * rule and that rule has dependencies to other objects. + */ + char relKind = get_rel_relkind(relationId); + if (relKind == RELKIND_VIEW) + { + List *ruleRefDepList = GetViewRuleReferenceDependencyList(relationId); + result = list_concat(result, ruleRefDepList); + } } default: @@ -1322,6 +1356,64 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe } +/* + * GetViewRuleReferenceDependencyList returns the dependencies of the view's + * internal rule dependencies. 
+ */ +static List * +GetViewRuleReferenceDependencyList(Oid viewId) +{ + List *dependencyTupleList = GetPgDependTuplesForDependingObjects(RelationRelationId, + viewId); + List *nonInternalDependenciesOfDependingRules = NIL; + + HeapTuple depTup = NULL; + foreach_ptr(depTup, dependencyTupleList) + { + Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup); + + /* + * Dependencies of the internal rule dependency should be handled as the dependency + * of referenced view object. + * + * PG doesn't keep dependency relation between views and dependent objects directly + * but it keeps an internal dependency relation between the view and the rule, then + * keeps the dependent objects of the view as non-internal dependencies of the + * internally dependent rule object. + */ + if (pg_depend->deptype == DEPENDENCY_INTERNAL && pg_depend->classid == + RewriteRelationId) + { + ObjectAddress ruleAddress = { 0 }; + ObjectAddressSet(ruleAddress, RewriteRelationId, pg_depend->objid); + + /* Expand results with the noninternal dependencies of it */ + List *ruleDependencies = DependencyDefinitionFromPgDepend(ruleAddress); + + DependencyDefinition *dependencyDef = NULL; + foreach_ptr(dependencyDef, ruleDependencies) + { + /* + * Follow all dependencies of the internally dependent rule dependencies + * except it is an internal dependency of view itself. + */ + if (dependencyDef->data.pg_depend.deptype == DEPENDENCY_INTERNAL || + (dependencyDef->data.pg_depend.refclassid == RelationRelationId && + dependencyDef->data.pg_depend.refobjid == viewId)) + { + continue; + } + + nonInternalDependenciesOfDependingRules = + lappend(nonInternalDependenciesOfDependingRules, dependencyDef); + } + } + } + + return nonInternalDependenciesOfDependingRules; +} + + /* * GetRelationSequenceDependencyList returns the sequence dependency definition * list for the given relation. 
diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index acf031008..010df8d51 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -433,7 +433,9 @@ ShouldSyncUserCommandForObject(ObjectAddress objectAddress) { if (objectAddress.classId == RelationRelationId) { - return ShouldSyncTableMetadata(objectAddress.objectId); + Oid relOid = objectAddress.objectId; + return ShouldSyncTableMetadata(relOid) || + get_rel_relkind(relOid) == RELKIND_VIEW; } return false; diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c index 26248f025..47e401499 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c +++ b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c @@ -419,6 +419,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, case OBJECT_TABLE: case OBJECT_EXTENSION: case OBJECT_COLLATION: + case OBJECT_VIEW: { check_object_ownership(userId, type, *addr, node, *relation); break; diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 5b6c8d116..f4472ba95 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -565,6 +565,7 @@ ResetGlobalVariables() TransactionModifiedNodeMetadata = false; NodeMetadataSyncOnCommit = false; InTopLevelDelegatedFunctionCall = false; + InTableTypeConversionFunctionCall = false; ResetWorkerErrorIndication(); memset(&AllowedDistributionColumnValue, 0, sizeof(AllowedDistributionColumn)); diff --git a/src/include/distributed/citus_ruleutils.h b/src/include/distributed/citus_ruleutils.h index 03d58d031..f84307fc8 100644 --- a/src/include/distributed/citus_ruleutils.h +++ 
b/src/include/distributed/citus_ruleutils.h @@ -46,6 +46,7 @@ extern char * pg_get_indexclusterdef_string(Oid indexRelationId); extern bool contain_nextval_expression_walker(Node *node, void *context); extern char * pg_get_replica_identity_command(Oid tableRelationId); extern const char * RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier); +extern char * flatten_reloptions(Oid relid); /* Function declarations for version dependent PostgreSQL ruleutils functions */ extern void pg_get_query_def(Query *query, StringInfo buffer); diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index e5125aba0..2d5095a30 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -524,6 +524,18 @@ extern void UpdateFunctionDistributionInfo(const ObjectAddress *distAddress, /* vacuum.c - forward declarations */ extern void PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand); +/* view.c - forward declarations */ +extern List * PreprocessViewStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PostprocessViewStmt(Node *node, const char *queryString); +extern ObjectAddress ViewStmtObjectAddress(Node *node, bool missing_ok); +extern List * PreprocessDropViewStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern char * CreateViewDDLCommand(Oid viewOid); +extern char * AlterViewOwnerCommand(Oid viewOid); +extern char * DeparseViewStmt(Node *node); +extern char * DeparseDropViewStmt(Node *node); + /* trigger.c - forward declarations */ extern List * GetExplicitTriggerCommandList(Oid relationId); extern HeapTuple GetTriggerTupleById(Oid triggerId, bool missingOk); diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 8c58f56cc..29e14d116 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -144,6 +144,10 @@ extern Oid 
TypeOidGetNamespaceOid(Oid typeOid); extern ObjectAddress GetObjectAddressFromParseTree(Node *parseTree, bool missing_ok); extern ObjectAddress RenameAttributeStmtObjectAddress(Node *stmt, bool missing_ok); +/* forward declarations for deparse_view_stmts.c */ +extern void QualifyDropViewStmt(Node *node); +extern void AppendViewDefinitionToCreateViewCommand(StringInfo buf, Oid viewOid); + /* forward declarations for deparse_function_stmts.c */ extern char * DeparseDropFunctionStmt(Node *stmt); extern char * DeparseAlterFunctionStmt(Node *stmt); diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index 5eab34cd8..ff732c1e0 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -41,6 +41,15 @@ #define SHARD_SIZES_COLUMN_COUNT (3) +/* + * Flag to keep track of whether the process is currently in a function converting the + * type of the table. Since it only affects the level of the log shown while dropping/ + * recreating table within the table type conversion, rolling back to the savepoint hasn't + * been implemented for the sake of simplicity. If you are planning to use that flag for + * any other purpose, please consider implementing that. + */ +extern bool InTableTypeConversionFunctionCall; + /* In-memory representation of a typed tuple in pg_dist_shard.
*/ typedef struct ShardInterval { diff --git a/src/test/regress/expected/alter_table_set_access_method.out b/src/test/regress/expected/alter_table_set_access_method.out index 75ddac37e..da6cbd868 100644 --- a/src/test/regress/expected/alter_table_set_access_method.out +++ b/src/test/regress/expected/alter_table_set_access_method.out @@ -575,6 +575,9 @@ CREATE TABLE local(a int, b bigserial, c int default nextval('c_seq')); INSERT INTO local VALUES (3); create materialized view m_local as select * from local; create view v_local as select * from local; +WARNING: "view v_local" has dependency to "table local" that is not in Citus' metadata +DETAIL: "view v_local" will be created only locally +HINT: Distribute "table local" first to distribute "view v_local" CREATE TABLE ref(a int); SELECT create_Reference_table('ref'); create_reference_table diff --git a/src/test/regress/expected/citus_local_tables_queries_mx.out b/src/test/regress/expected/citus_local_tables_queries_mx.out index 52b73e7df..2eff2cd1d 100644 --- a/src/test/regress/expected/citus_local_tables_queries_mx.out +++ b/src/test/regress/expected/citus_local_tables_queries_mx.out @@ -683,11 +683,13 @@ SELECT count(*) FROM distributed_table WHERE b in 0 (1 row) +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW view_2 AS SELECT count(*) FROM citus_local_table JOIN citus_local_table_2 USING (a) JOIN distributed_table USING (a); +RESET citus.enable_ddl_propagation; -- should fail as view contains direct local dist join SELECT count(*) FROM view_2; count @@ -695,11 +697,13 @@ SELECT count(*) FROM view_2; 1 (1 row) +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW view_3 AS SELECT count(*) FROM citus_local_table_2 JOIN reference_table USING (a); +RESET citus.enable_ddl_propagation; -- ok SELECT count(*) FROM view_3; count diff --git a/src/test/regress/expected/distributed_planning.out b/src/test/regress/expected/distributed_planning.out index f05e46d3a..5f8eb09e0 100644 --- 
a/src/test/regress/expected/distributed_planning.out +++ b/src/test/regress/expected/distributed_planning.out @@ -158,8 +158,10 @@ BEGIN; INSERT INTO test VALUES (1, 2); COMMIT; -- basic view queries +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW simple_view AS SELECT count(*) as cnt FROM test t1 JOIN test t2 USING (x); +RESET citus.enable_ddl_propagation; SELECT * FROM simple_view; cnt --------------------------------------------------------------------- diff --git a/src/test/regress/expected/drop_partitioned_table.out b/src/test/regress/expected/drop_partitioned_table.out index 846656aaf..2cfd6a7b7 100644 --- a/src/test/regress/expected/drop_partitioned_table.out +++ b/src/test/regress/expected/drop_partitioned_table.out @@ -46,21 +46,6 @@ FROM pg_catalog.pg_class c WHERE n.nspname IN ('drop_partitioned_table', 'schema1') AND c.relkind IN ('r','p') ORDER BY 1, 2; -\c - - - :worker_1_port -SET search_path = drop_partitioned_table; -CREATE VIEW tables_info AS -SELECT n.nspname as "Schema", - c.relname as "Name", - CASE c.relkind WHEN 'r' THEN 'table' WHEN 'p' THEN 'partitioned table' END as "Type", - pg_catalog.pg_get_userbyid(c.relowner) as "Owner" -FROM pg_catalog.pg_class c - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - LEFT JOIN pg_user u ON u.usesysid = c.relowner -WHERE n.nspname IN ('drop_partitioned_table', 'schema1') - AND c.relkind IN ('r','p') -ORDER BY 1, 2; -\c - - - :master_port -SET search_path = drop_partitioned_table; SET citus.next_shard_id TO 721000; -- CASE 1 -- Dropping the parent table diff --git a/src/test/regress/expected/function_propagation.out b/src/test/regress/expected/function_propagation.out index 1ec5736f5..5c61761fb 100644 --- a/src/test/regress/expected/function_propagation.out +++ b/src/test/regress/expected/function_propagation.out @@ -177,7 +177,7 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas localhost | 57638 | t | 
(function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table}) (2 rows) --- Views are not supported +-- Views are supported CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table; CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view) RETURNS int @@ -187,8 +187,6 @@ BEGIN return 1; END; $$; -WARNING: "function func_7(function_prop_view)" has dependency on unsupported object "view function_prop_view" -DETAIL: "function func_7(function_prop_view)" will be created only locally CREATE OR REPLACE FUNCTION func_8(param_1 int) RETURNS function_prop_view LANGUAGE plpgsql AS @@ -197,8 +195,6 @@ BEGIN return 1; END; $$; -WARNING: "function func_8(integer)" has dependency on unsupported object "view function_prop_view" -DETAIL: "function func_8(integer)" will be created only locally -- Check within transaction BEGIN; CREATE TYPE type_in_transaction AS (a int, b int); @@ -504,7 +500,7 @@ BEGIN; ALTER TABLE table_to_dist ADD COLUMN col_1 int default function_propagation_schema.non_dist_func(NULL::non_dist_table_for_function); ERROR: "table table_to_dist" has dependency to "table non_dist_table_for_function" that is not in Citus' metadata -HINT: Distribute "table non_dist_table_for_function" first to distribute "table table_to_dist" +HINT: Distribute "table non_dist_table_for_function" first to modify "table table_to_dist" on worker nodes ROLLBACK; -- Adding multiple columns with default values should propagate the function BEGIN; diff --git a/src/test/regress/expected/local_dist_join_mixed.out b/src/test/regress/expected/local_dist_join_mixed.out index 5566186b5..cc709b982 100644 --- a/src/test/regress/expected/local_dist_join_mixed.out +++ b/src/test/regress/expected/local_dist_join_mixed.out @@ -358,6 +358,9 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c (1 row) CREATE VIEW local_regular_view AS SELECT * FROM local; +WARNING: "view local_regular_view" has dependency to "table local" 
that is not in Citus' metadata +DETAIL: "view local_regular_view" will be created only locally +HINT: Distribute "table local" first to distribute "view local_regular_view" CREATE VIEW dist_regular_view AS SELECT * FROM distributed; SELECT count(*) FROM distributed JOIN local_regular_view USING (id); DEBUG: generating subplan XXX_1 for subquery SELECT local.id, local.title FROM local_dist_join_mixed.local @@ -1601,14 +1604,5 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c 101 (1 row) +SET client_min_messages TO ERROR; DROP SCHEMA local_dist_join_mixed CASCADE; -DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed schema is run. To make sure subsequent commands see the schema correctly we need to make sure to use only one connection for all future commands -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table distributed -drop cascades to table reference -drop cascades to table local -drop cascades to table unlogged_local -drop cascades to materialized view mat_view -drop cascades to view local_regular_view -drop cascades to view dist_regular_view diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index 98639a953..6c3407bf4 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -398,7 +398,9 @@ INSERT INTO distributed_table VALUES (1, '22', 20); NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 (key, value, age) VALUES (1, '22'::text, 20) INSERT INTO second_distributed_table VALUES (1, '1'); NOTICE: executing the command locally: INSERT INTO local_shard_execution.second_distributed_table_1470005 (key, value) VALUES (1, '1'::text) +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW abcd_view AS SELECT * FROM abcd; +RESET citus.enable_ddl_propagation; SELECT * FROM abcd first join abcd second 
on first.b = second.b ORDER BY 1,2,3,4; b | c | d | b | c | d --------------------------------------------------------------------- @@ -1656,8 +1658,10 @@ NOTICE: executing the command locally: DELETE FROM local_shard_execution.distri ROLLBACK; -- probably not a realistic case since views are not very -- well supported with MX +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution; NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (distributed_table.key OPERATOR(pg_catalog.=) 500)) v_local_query_execution key | value | age @@ -1667,8 +1671,10 @@ NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT dist -- similar test, but this time the view itself is a non-local -- query, but the query on the view is local +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution_2 WHERE key = 500; NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table) v_local_query_execution_2 WHERE (key OPERATOR(pg_catalog.=) 500) key | value | age diff --git a/src/test/regress/expected/local_shard_execution_replicated.out b/src/test/regress/expected/local_shard_execution_replicated.out index c297f0a99..200b99872 100644 --- a/src/test/regress/expected/local_shard_execution_replicated.out +++ b/src/test/regress/expected/local_shard_execution_replicated.out @@ -334,7 +334,9 @@ NOTICE: executing the command locally: SELECT key, value FROM local_shard_execu -- Put row back 
for other tests INSERT INTO distributed_table VALUES (1, '22', 20); NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 (key, value, age) VALUES (1, '22'::text, 20) +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW abcd_view AS SELECT * FROM abcd; +RESET citus.enable_ddl_propagation; SELECT * FROM abcd first join abcd second on first.b = second.b ORDER BY 1,2,3,4; b | c | d | b | c | d --------------------------------------------------------------------- @@ -1650,8 +1652,10 @@ NOTICE: executing the command locally: DELETE FROM local_shard_execution_replic ROLLBACK; -- probably not a realistic case since views are not very -- well supported with MX +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution; NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE (distributed_table.key OPERATOR(pg_catalog.=) 500)) v_local_query_execution key | value | age @@ -1661,8 +1665,10 @@ NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT dist -- similar test, but this time the view itself is a non-local -- query, but the query on the view is local +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution_2 WHERE key = 500; NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table) v_local_query_execution_2 WHERE (key OPERATOR(pg_catalog.=) 500) key | value | age diff --git 
a/src/test/regress/expected/local_table_join.out b/src/test/regress/expected/local_table_join.out index effe23b0d..90737a2ed 100644 --- a/src/test/regress/expected/local_table_join.out +++ b/src/test/regress/expected/local_table_join.out @@ -991,6 +991,9 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c (1 row) CREATE view loc_view AS SELECT * FROM postgres_table WHERE key > 0; +WARNING: "view loc_view" has dependency to "table postgres_table" that is not in Citus' metadata +DETAIL: "view loc_view" will be created only locally +HINT: Distribute "table postgres_table" first to distribute "view loc_view" UPDATE loc_view SET key = (SELECT COUNT(*) FROM distributed_table); DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS count FROM local_table_join.distributed_table DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE local_table_join.postgres_table SET key = (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) FROM local_table_join.postgres_table WHERE (postgres_table.key OPERATOR(pg_catalog.>) 0) diff --git a/src/test/regress/expected/multi_generate_ddl_commands.out b/src/test/regress/expected/multi_generate_ddl_commands.out index 6aae20f9b..db211eb49 100644 --- a/src/test/regress/expected/multi_generate_ddl_commands.out +++ b/src/test/regress/expected/multi_generate_ddl_commands.out @@ -166,8 +166,11 @@ SELECT master_get_table_ddl_events('fiddly_table'); ALTER TABLE public.fiddly_table OWNER TO postgres (3 rows) --- propagating views is not supported +-- propagating views is not supported if local table dependency exists CREATE VIEW local_view AS SELECT * FROM simple_table; +WARNING: "view local_view" has dependency to "table simple_table" that is not in Citus' metadata +DETAIL: "view local_view" will be created only locally +HINT: Distribute "table simple_table" first to distribute "view local_view" SELECT 
master_get_table_ddl_events('local_view'); ERROR: local_view is not a regular, foreign or partitioned table -- clean up diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out index a2c03ef7a..618c563de 100644 --- a/src/test/regress/expected/multi_partitioning.out +++ b/src/test/regress/expected/multi_partitioning.out @@ -4301,12 +4301,14 @@ WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_% (2 rows) -- should work properly - no names clashes +SET client_min_messages TO WARNING; SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); ?column? --------------------------------------------------------------------- - 1 + 1 (1 row) +RESET client_min_messages; \c - - - :worker_1_port -- check that indexes are named properly SELECT tablename, indexname FROM pg_indexes diff --git a/src/test/regress/expected/non_colocated_subquery_joins.out b/src/test/regress/expected/non_colocated_subquery_joins.out index c14502569..1b7b1b1e7 100644 --- a/src/test/regress/expected/non_colocated_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_subquery_joins.out @@ -1076,7 +1076,6 @@ ERROR: cannot pushdown the subquery -- make sure that non-colocated subquery joins work fine in -- modifications CREATE TABLE table1 (id int, tenant_id int); -CREATE VIEW table1_view AS SELECT * from table1 where id < 100; CREATE TABLE table2 (id int, tenant_id int) partition by range(tenant_id); CREATE TABLE table2_p1 PARTITION OF table2 FOR VALUES FROM (1) TO (10); -- modifications on the partitons are only allowed with rep=1 @@ -1093,6 +1092,7 @@ SELECT create_distributed_table('table1','tenant_id'); (1 row) +CREATE VIEW table1_view AS SELECT * from table1 where id < 100; -- all of the above queries are non-colocated subquery joins -- because the views are replaced with subqueries UPDATE table2 SET id=20 FROM table1_view WHERE table1_view.id=table2.id; diff --git 
a/src/test/regress/expected/object_propagation_debug.out b/src/test/regress/expected/object_propagation_debug.out index 020fa2629..8cecb1c85 100644 --- a/src/test/regress/expected/object_propagation_debug.out +++ b/src/test/regress/expected/object_propagation_debug.out @@ -50,8 +50,9 @@ ON TRUE --------------------------------------------------------------------- ("composite type","""object prop""",t1,"""object prop"".t1") (schema,,"""object prop""","""object prop""") + (table,"""object prop""",test,"""object prop"".test") (type,"""object prop""",t1,"""object prop"".t1") -(3 rows) +(4 rows) -- find all the dependencies of type t1 SELECT diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out index 41c012641..e7feb5221 100644 --- a/src/test/regress/expected/propagate_extension_commands.out +++ b/src/test/regress/expected/propagate_extension_commands.out @@ -305,6 +305,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname -- propagated to the workers. 
the user should run it manually on the workers CREATE TABLE t1 (A int); CREATE VIEW v1 AS select * from t1; +WARNING: "view v1" has dependency to "table t1" that is not in Citus' metadata +DETAIL: "view v1" will be created only locally +HINT: Distribute "table t1" first to distribute "view v1" ALTER EXTENSION seg ADD VIEW v1; ALTER EXTENSION seg DROP VIEW v1; DROP VIEW v1; diff --git a/src/test/regress/expected/recursive_dml_queries_mx.out b/src/test/regress/expected/recursive_dml_queries_mx.out index fed88e1f2..b1e29ffb3 100644 --- a/src/test/regress/expected/recursive_dml_queries_mx.out +++ b/src/test/regress/expected/recursive_dml_queries_mx.out @@ -110,6 +110,7 @@ WHERE SET search_path TO recursive_dml_queries_mx, public; CREATE TABLE recursive_dml_queries_mx.local_table (id text, name text); INSERT INTO local_table SELECT i::text, 'user_' || i FROM generate_series (0, 100) i; +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW tenant_ids AS SELECT tenant_id, name @@ -118,6 +119,7 @@ CREATE VIEW tenant_ids AS WHERE distributed_table.dept::text = reference_table.id ORDER BY 2 DESC, 1 DESC; +RESET citus.enable_ddl_propagation; -- we currently do not allow local tables in modification queries UPDATE distributed_table diff --git a/src/test/regress/expected/recursive_view_local_table.out b/src/test/regress/expected/recursive_view_local_table.out index a2306a2e9..b4ef802b4 100644 --- a/src/test/regress/expected/recursive_view_local_table.out +++ b/src/test/regress/expected/recursive_view_local_table.out @@ -12,6 +12,9 @@ CREATE RECURSIVE VIEW recursive_view(val_1, val_2) AS WHERE val_2 < 50 ); CREATE RECURSIVE VIEW recursive_defined_non_recursive_view(c) AS (SELECT 1 FROM local_table); +WARNING: "view recursive_defined_non_recursive_view" has dependency to "table local_table" that is not in Citus' metadata +DETAIL: "view recursive_defined_non_recursive_view" will be created only locally +HINT: Distribute "table local_table" first to distribute "view 
recursive_defined_non_recursive_view" CREATE TABLE ref_table(a int, b INT); SELECT create_reference_table('ref_table'); create_reference_table diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out index 948adb050..a23b44ffa 100644 --- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out @@ -382,6 +382,9 @@ $Q$); (2 rows) CREATE VIEW local_table_v AS SELECT * FROM local_table WHERE a BETWEEN 1 AND 10; +WARNING: "view local_table_v" has dependency to "table local_table" that is not in Citus' metadata +DETAIL: "view local_table_v" will be created only locally +HINT: Distribute "table local_table" first to distribute "view local_table_v" SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN local_table_v ON squares.a = local_table_v.a; diff --git a/src/test/regress/expected/resync_metadata_with_sequences.out b/src/test/regress/expected/resync_metadata_with_sequences.out index f96651a14..930cf33d4 100644 --- a/src/test/regress/expected/resync_metadata_with_sequences.out +++ b/src/test/regress/expected/resync_metadata_with_sequences.out @@ -140,6 +140,7 @@ INSERT INTO sensors_news VALUES (DEFAULT, DEFAULT, '2021-01-01') RETURNING *; (1 row) \c - - - :master_port +SET client_min_messages TO ERROR; SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); ?column? 
--------------------------------------------------------------------- diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index b443d99e9..918f65147 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -438,6 +438,9 @@ CREATE VIEW replication_test_table_placements_per_node AS AND shardstate != 4 GROUP BY nodename, nodeport ORDER BY nodename, nodeport; +WARNING: "view replication_test_table_placements_per_node" has dependency to "table replication_test_table" that is not in Citus' metadata +DETAIL: "view replication_test_table_placements_per_node" will be created only locally +HINT: Distribute "table replication_test_table" first to distribute "view replication_test_table_placements_per_node" -- Create four shards with replication factor 2, and delete the placements -- with smaller port number to simulate under-replicated shards. SELECT count(master_create_empty_shard('replication_test_table')) diff --git a/src/test/regress/expected/sqlsmith_failures.out b/src/test/regress/expected/sqlsmith_failures.out index d276f04a9..6440cf75f 100644 --- a/src/test/regress/expected/sqlsmith_failures.out +++ b/src/test/regress/expected/sqlsmith_failures.out @@ -142,8 +142,9 @@ where (select pg_catalog.array_agg(id) from sqlsmith_failures.countries) -- cleanup DROP SCHEMA sqlsmith_failures CASCADE; -NOTICE: drop cascades to 6 other objects +NOTICE: drop cascades to 7 other objects DETAIL: drop cascades to table countries +drop cascades to table countries_1280000 drop cascades to table orgs drop cascades to table users drop cascades to table orders diff --git a/src/test/regress/expected/start_stop_metadata_sync.out b/src/test/regress/expected/start_stop_metadata_sync.out index 1f82c60cb..d3f961124 100644 --- a/src/test/regress/expected/start_stop_metadata_sync.out +++ b/src/test/regress/expected/start_stop_metadata_sync.out @@ -141,7 +141,6 @@ SELECT * FROM 
distributed_table_1; --------------------------------------------------------------------- (0 rows) -CREATE VIEW test_view AS SELECT COUNT(*) FROM distributed_table_3; CREATE MATERIALIZED VIEW test_matview AS SELECT COUNT(*) FROM distributed_table_3; SELECT * FROM test_view; count diff --git a/src/test/regress/expected/subquery_in_targetlist.out b/src/test/regress/expected/subquery_in_targetlist.out index a32b480f1..79989b409 100644 --- a/src/test/regress/expected/subquery_in_targetlist.out +++ b/src/test/regress/expected/subquery_in_targetlist.out @@ -278,6 +278,8 @@ ORDER BY 1 LIMIT 3; ERROR: correlated subqueries are not supported when the FROM clause contains a subquery without FROM -- sublink on view CREATE TEMP VIEW view_1 AS (SELECT user_id, value_2 FROM users_table WHERE user_id = 1 AND value_1 = 1 ORDER BY 1,2); +WARNING: "view view_1" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view view_1" will be created only locally -- with distribution column group by SELECT (SELECT value_2 FROM view_1 WHERE user_id = e.user_id GROUP BY user_id, value_2) FROM events_table e @@ -323,6 +325,8 @@ ORDER BY 1 LIMIT 3; -- sublink on reference table view CREATE TEMP VIEW view_2 AS (SELECT user_id, value_2 FROM users_reference_table WHERE user_id = 1 AND value_1 = 1); +WARNING: "view view_2" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view view_2" will be created only locally SELECT (SELECT value_2 FROM view_2 WHERE user_id = e.user_id GROUP BY user_id, value_2) FROM events_table e GROUP BY 1 diff --git a/src/test/regress/expected/subquery_in_where.out b/src/test/regress/expected/subquery_in_where.out index c5aedd31d..c5ffc8d93 100644 --- a/src/test/regress/expected/subquery_in_where.out +++ b/src/test/regress/expected/subquery_in_where.out @@ -1096,6 +1096,8 @@ CREATE TEMPORARY VIEW correlated_subquery_view AS FROM events_table e1 WHERE e1.user_id = u1.user_id ) > 0; +WARNING: "view correlated_subquery_view" has dependency on 
unsupported object "schema pg_temp_xxx" +DETAIL: "view correlated_subquery_view" will be created only locally SELECT sum(user_id) FROM correlated_subquery_view; sum --------------------------------------------------------------------- diff --git a/src/test/regress/expected/subquery_partitioning.out b/src/test/regress/expected/subquery_partitioning.out index 80ea4478a..57a589600 100644 --- a/src/test/regress/expected/subquery_partitioning.out +++ b/src/test/regress/expected/subquery_partitioning.out @@ -209,6 +209,9 @@ FROM ) as foo WHERE foo.user_id = cte.user_id ) as foo, users_table WHERE foo.cnt > users_table.value_2; +WARNING: "view subquery_and_ctes" has dependency to "table users_table_local" that is not in Citus' metadata +DETAIL: "view subquery_and_ctes" will be created only locally +HINT: Distribute "table users_table_local" first to distribute "view subquery_and_ctes" SELECT * FROM subquery_and_ctes ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC LIMIT 5; diff --git a/src/test/regress/expected/subquery_view.out b/src/test/regress/expected/subquery_view.out index 5f50e6e6d..535e356d5 100644 --- a/src/test/regress/expected/subquery_view.out +++ b/src/test/regress/expected/subquery_view.out @@ -281,6 +281,9 @@ FROM ) as baz WHERE baz.user_id = users_table.user_id ) as sub1; +WARNING: "view subquery_from_from_where_local_table" has dependency to "table events_table_local" that is not in Citus' metadata +DETAIL: "view subquery_from_from_where_local_table" will be created only locally +HINT: Distribute "table events_table_local" first to distribute "view subquery_from_from_where_local_table" SELECT * FROM @@ -337,6 +340,9 @@ FROM SELECT user_id FROM users_table_local WHERE user_id = 2 ) baw WHERE foo.value_2 = bar.user_id AND baz.value_2 = bar.user_id AND bar.user_id = baw.user_id; +WARNING: "view all_executors_view" has dependency to "table users_table_local" that is not in Citus' metadata +DETAIL: "view all_executors_view" will be created only locally +HINT: 
Distribute "table users_table_local" first to distribute "view all_executors_view" SELECT * FROM @@ -390,6 +396,9 @@ FROM ) as foo WHERE foo.user_id = cte.user_id ) as foo, users_table WHERE foo.cnt > users_table.value_2; +WARNING: "view subquery_and_ctes" has dependency to "table users_table_local" that is not in Citus' metadata +DETAIL: "view subquery_and_ctes" will be created only locally +HINT: Distribute "table users_table_local" first to distribute "view subquery_and_ctes" SELECT * FROM subquery_and_ctes ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC LIMIT 5; @@ -437,6 +446,9 @@ SELECT time, event_type, value_2, value_3 FROM events_table WHERE foo.user_id = events_table.value_2; +WARNING: "view subquery_and_ctes_second" has dependency to "table users_table_local" that is not in Citus' metadata +DETAIL: "view subquery_and_ctes_second" will be created only locally +HINT: Distribute "table users_table_local" first to distribute "view subquery_and_ctes_second" SELECT * FROM subquery_and_ctes_second ORDER BY 3 DESC, 2 DESC, 1 DESC LIMIT 5; diff --git a/src/test/regress/expected/union_pushdown.out b/src/test/regress/expected/union_pushdown.out index 3d68bd8a5..cbee11f8e 100644 --- a/src/test/regress/expected/union_pushdown.out +++ b/src/test/regress/expected/union_pushdown.out @@ -899,6 +899,9 @@ INSERT INTO range_dist_table_2 VALUES ((10, 91)); INSERT INTO range_dist_table_2 VALUES ((20, 100)); -- the following can be pushed down CREATE OR REPLACE VIEW v2 AS SELECT * from range_dist_table_2 UNION ALL SELECT * from range_dist_table_2; +WARNING: "view v2" has dependency to "table range_dist_table_2" that is not in Citus' metadata +DETAIL: "view v2" will be created only locally +HINT: Distribute "table range_dist_table_2" first to distribute "view v2" SELECT public.explain_has_distributed_subplan($$ EXPLAIN SELECT COUNT(dist_col) FROM v2; diff --git a/src/test/regress/expected/view_propagation.out b/src/test/regress/expected/view_propagation.out new file mode 100644 index 
000000000..22c287b6e --- /dev/null +++ b/src/test/regress/expected/view_propagation.out @@ -0,0 +1,440 @@ +-- Tests to check propagation of all view commands +CREATE SCHEMA view_prop_schema; +SET search_path to view_prop_schema; +-- Check creating views depending on different types of tables +-- and from multiple schemas +-- Check the most basic one +CREATE VIEW prop_view_basic AS SELECT 1; +-- Try to create view depending local table, then try to recreate it after distributing the table +CREATE TABLE view_table_1(id int, val_1 text); +CREATE VIEW prop_view_1 AS + SELECT * FROM view_table_1; +WARNING: "view prop_view_1" has dependency to "table view_table_1" that is not in Citus' metadata +DETAIL: "view prop_view_1" will be created only locally +HINT: Distribute "table view_table_1" first to distribute "view prop_view_1" +SELECT create_distributed_table('view_table_1', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE VIEW prop_view_1 AS + SELECT * FROM view_table_1; +-- Try to create view depending local table, then try to recreate it after making the table reference table +CREATE TABLE view_table_2(id int PRIMARY KEY, val_1 text); +CREATE VIEW prop_view_2 AS + SELECT view_table_1.id, view_table_2.val_1 FROM view_table_1 INNER JOIN view_table_2 + ON view_table_1.id = view_table_2.id; +WARNING: "view prop_view_2" has dependency to "table view_table_2" that is not in Citus' metadata +DETAIL: "view prop_view_2" will be created only locally +HINT: Distribute "table view_table_2" first to distribute "view prop_view_2" +SELECT create_reference_table('view_table_2'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE VIEW prop_view_2 AS + SELECT view_table_1.id, view_table_2.val_1 FROM view_table_1 INNER JOIN view_table_2 + ON view_table_1.id = view_table_2.id; +-- Try to create view depending local table, 
then try to recreate it after making the table citus local table +CREATE TABLE view_table_3(id int, val_1 text); +CREATE VIEW prop_view_3 AS + SELECT * FROM view_table_1 WHERE id IN + (SELECT view_table_2.id FROM view_table_2 INNER JOIN view_table_3 ON view_table_2.id = view_table_3.id); +WARNING: "view prop_view_3" has dependency to "table view_table_3" that is not in Citus' metadata +DETAIL: "view prop_view_3" will be created only locally +HINT: Distribute "table view_table_3" first to distribute "view prop_view_3" +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid=>0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +ALTER TABLE view_table_3 +ADD CONSTRAINT f_key_for_local_table +FOREIGN KEY(id) +REFERENCES view_table_2(id); +CREATE OR REPLACE VIEW prop_view_3 AS + SELECT * FROM view_table_1 WHERE id IN + (SELECT view_table_2.id FROM view_table_2 INNER JOIN view_table_3 ON view_table_2.id = view_table_3.id); +-- Try to create view depending on PG metadata table +CREATE VIEW prop_view_4 AS + SELECT * FROM pg_stat_activity; +-- Try to create view depending on Citus metadata table +CREATE VIEW prop_view_5 AS + SELECT * FROM citus_dist_stat_activity; +-- Try to create table depending on a local table from another schema, then try to create it again after distributing the table +CREATE SCHEMA view_prop_schema_inner; +SET search_path TO view_prop_schema_inner; +-- Create local table for tests below +CREATE TABLE view_table_4(id int, val_1 text); +-- Create a distributed table and view to test drop view below +CREATE TABLE inner_view_table(id int); +SELECT create_distributed_table('inner_view_table','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW inner_view_prop AS SELECT * FROM inner_view_table; +SET search_path to view_prop_schema; +CREATE VIEW prop_view_6 AS 
+ SELECT vt1.id, vt4.val_1 FROM view_table_1 AS vt1 + INNER JOIN view_prop_schema_inner.view_table_4 AS vt4 ON vt1.id = vt4.id; +WARNING: "view prop_view_6" has dependency to "table view_prop_schema_inner.view_table_4" that is not in Citus' metadata +DETAIL: "view prop_view_6" will be created only locally +HINT: Distribute "table view_prop_schema_inner.view_table_4" first to distribute "view prop_view_6" +SELECT create_distributed_table('view_prop_schema_inner.view_table_4','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE VIEW prop_view_6 AS + SELECT vt1.id, vt4.val_1 FROM view_table_1 AS vt1 + INNER JOIN view_prop_schema_inner.view_table_4 AS vt4 ON vt1.id = vt4.id; +-- Show that all views are propagated as distributed object +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_%' ORDER BY 1; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_1}",{}) + (view,"{view_prop_schema,prop_view_2}",{}) + (view,"{view_prop_schema,prop_view_3}",{}) + (view,"{view_prop_schema,prop_view_4}",{}) + (view,"{view_prop_schema,prop_view_5}",{}) + (view,"{view_prop_schema,prop_view_6}",{}) + (view,"{view_prop_schema,prop_view_basic}",{}) +(7 rows) + +-- Check creating views depending various kind of objects +-- Tests will also check propagating dependent objects +-- Depending on function +SET citus.enable_ddl_propagation TO OFF; +CREATE OR REPLACE FUNCTION func_1_for_view(param_1 int) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return param_1; +END; +$$; +RESET citus.enable_ddl_propagation; +-- Show that function will be propagated together with the view +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as 
obj_identifiers where obj_identifier::text like '%func_1_for_view%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +CREATE VIEW prop_view_7 AS SELECT func_1_for_view(id) FROM view_table_1; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%func_1_for_view%'; + obj_identifier +--------------------------------------------------------------------- + (function,"{view_prop_schema,func_1_for_view}",{integer}) +(1 row) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_7%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_7}",{}) +(1 row) + +-- Depending on type +SET citus.enable_ddl_propagation TO OFF; +CREATE TYPE type_for_view_prop AS ENUM ('a','b','c'); +RESET citus.enable_ddl_propagation; +-- Show that type will be propagated together with the view +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%type_for_view_prop%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +CREATE VIEW prop_view_8 AS SELECT val_1::type_for_view_prop FROM view_table_1; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%type_for_view_prop%'; + obj_identifier +--------------------------------------------------------------------- + (type,{view_prop_schema.type_for_view_prop},{}) +(1 row) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from 
pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_8%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_8}",{}) +(1 row) + +-- Depending on another view +CREATE TABLE view_table_5(id int); +CREATE VIEW prop_view_9 AS SELECT * FROM view_table_5; +WARNING: "view prop_view_9" has dependency to "table view_table_5" that is not in Citus' metadata +DETAIL: "view prop_view_9" will be created only locally +HINT: Distribute "table view_table_5" first to distribute "view prop_view_9" +CREATE VIEW prop_view_10 AS SELECT * FROM prop_view_9; +WARNING: "view prop_view_10" has dependency to "table view_table_5" that is not in Citus' metadata +DETAIL: "view prop_view_10" will be created only locally +HINT: Distribute "table view_table_5" first to distribute "view prop_view_10" +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_9%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_10%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +SELECT create_distributed_table('view_table_5', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE VIEW prop_view_10 AS SELECT * FROM prop_view_9; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_9%'; + obj_identifier 
+--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_9}",{}) +(1 row) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_10%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_10}",{}) +(1 row) + +-- Check views owned by non-superuser +SET client_min_messages TO ERROR; +CREATE USER view_creation_user; +SELECT 1 FROM run_command_on_workers($$CREATE USER view_creation_user;$$); + ?column? +--------------------------------------------------------------------- + 1 + 1 +(2 rows) + +GRANT ALL PRIVILEGES ON SCHEMA view_prop_schema to view_creation_user; +SET ROLE view_creation_user; +CREATE TABLE user_owned_table_for_view(id int); +SELECT create_distributed_table('user_owned_table_for_view','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW view_owned_by_user AS SELECT * FROM user_owned_table_for_view; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%view_owned_by_user%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,view_owned_by_user}",{}) +(1 row) + +DROP VIEW view_owned_by_user; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%view_owned_by_user%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +DROP TABLE user_owned_table_for_view; +RESET ROLE; +RESET client_min_messages; +-- Create view with different options +CREATE TABLE view_table_6(id 
int, val_1 text); +SELECT create_distributed_table('view_table_6','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- TEMP VIEW is not supported. View will be created locally. +CREATE TEMP VIEW temp_prop_view AS SELECT * FROM view_table_6; +WARNING: "view temp_prop_view" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view temp_prop_view" will be created only locally +-- Recursive views are supported +CREATE RECURSIVE VIEW nums_1_100_prop_view (n) AS + VALUES (1) +UNION ALL + SELECT n+1 FROM nums_1_100_prop_view WHERE n < 100; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%nums_1_100_prop_view%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,nums_1_100_prop_view}",{}) +(1 row) + +-- Sequences are supported as dependency +CREATE SEQUENCE sequence_to_prop; +CREATE VIEW seq_view_prop AS SELECT sequence_to_prop.is_called FROM sequence_to_prop; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%sequence_to_prop%'; + obj_identifier +--------------------------------------------------------------------- + (sequence,"{view_prop_schema,sequence_to_prop}",{}) +(1 row) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%seq_view_prop%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,seq_view_prop}",{}) +(1 row) + +-- Views depend on temp sequences will be created locally +CREATE TEMPORARY SEQUENCE temp_sequence_to_drop; +CREATE VIEW temp_seq_view_prop AS SELECT 
temp_sequence_to_drop.is_called FROM temp_sequence_to_drop; +NOTICE: view "temp_seq_view_prop" will be a temporary view +WARNING: "view temp_seq_view_prop" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view temp_seq_view_prop" will be created only locally +-- Check circular dependencies are detected +CREATE VIEW circular_view_1 AS SELECT * FROM view_table_6; +CREATE VIEW circular_view_2 AS SELECT * FROM view_table_6; +CREATE OR REPLACE VIEW circular_view_1 AS SELECT view_table_6.* FROM view_table_6 JOIN circular_view_2 USING (id); +CREATE OR REPLACE VIEW circular_view_2 AS SELECT view_table_6.* FROM view_table_6 JOIN circular_view_1 USING (id); +ERROR: Citus can not handle circular dependencies between distributed objects +DETAIL: "view circular_view_2" circularly depends itself, resolve circular dependency first +-- Recursive views with distributed tables included +CREATE TABLE employees (employee_id int, manager_id int, full_name text); +SELECT create_distributed_table('employees', 'employee_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE RECURSIVE VIEW reporting_line (employee_id, subordinates) AS +SELECT + employee_id, + full_name AS subordinates +FROM + employees +WHERE + manager_id IS NULL +UNION ALL + SELECT + e.employee_id, + ( + rl.subordinates || ' > ' || e.full_name + ) AS subordinates + FROM + employees e + INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id; +-- Aliases are supported +CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6; +-- View options are supported +CREATE VIEW opt_prop_view + WITH(check_option=CASCADED, security_barrier=true) + AS SELECT * FROM view_table_6; +CREATE VIEW sep_opt_prop_view + AS SELECT * FROM view_table_6 + WITH LOCAL CHECK OPTION; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as 
obj_identifiers where obj_identifier::text like '%opt_prop_view%' ORDER BY 1; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,aliased_opt_prop_view}",{}) + (view,"{view_prop_schema,opt_prop_view}",{}) + (view,"{view_prop_schema,sep_opt_prop_view}",{}) +(3 rows) + +-- Check definitions and reltoptions of views are correct on workers +\c - - - :worker_1_port +SELECT definition FROM pg_views WHERE viewname = 'aliased_opt_prop_view'; + definition +--------------------------------------------------------------------- + SELECT view_table_6.id AS alias_1, + + view_table_6.val_1 AS alias_2 + + FROM view_prop_schema.view_table_6; +(1 row) + +SELECT definition FROM pg_views WHERE viewname = 'opt_prop_view'; + definition +--------------------------------------------------------------------- + SELECT view_table_6.id, + + view_table_6.val_1 + + FROM view_prop_schema.view_table_6; +(1 row) + +SELECT definition FROM pg_views WHERE viewname = 'sep_opt_prop_view'; + definition +--------------------------------------------------------------------- + SELECT view_table_6.id, + + view_table_6.val_1 + + FROM view_prop_schema.view_table_6; +(1 row) + +SELECT relname, reloptions +FROM pg_class +WHERE + oid = 'view_prop_schema.aliased_opt_prop_view'::regclass::oid OR + oid = 'view_prop_schema.opt_prop_view'::regclass::oid OR + oid = 'view_prop_schema.sep_opt_prop_view'::regclass::oid +ORDER BY 1; + relname | reloptions +--------------------------------------------------------------------- + aliased_opt_prop_view | + opt_prop_view | {check_option=cascaded,security_barrier=true} + sep_opt_prop_view | {check_option=local} +(3 rows) + +\c - - - :master_port +SET search_path to view_prop_schema; +-- Sync metadata to check it works properly after adding a view +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 
row) + +-- Drop views and check metadata afterwards +DROP VIEW prop_view_9 CASCADE; +NOTICE: drop cascades to view prop_view_10 +DROP VIEW opt_prop_view, aliased_opt_prop_view, view_prop_schema_inner.inner_view_prop, sep_opt_prop_view; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%inner_view_prop%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +-- Drop a column that view depends on +ALTER TABLE view_table_1 DROP COLUMN val_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to view prop_view_1 +drop cascades to view prop_view_3 +drop cascades to view prop_view_8 +-- Since prop_view_3 depends on the view_table_1's val_1 column, it should be dropped +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_3%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +-- Drop a table that view depends on +DROP TABLE view_table_2 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view prop_view_2 +drop cascades to constraint f_key_for_local_table on table view_table_3 +NOTICE: drop cascades to constraint f_key_for_local_table_1410200 on table view_prop_schema.view_table_3_1410200 +CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)" +PL/pgSQL function citus_drop_trigger() line 
XX at PERFORM +NOTICE: removing table view_prop_schema.view_table_3 from metadata as it is not connected to any reference tables via foreign keys +-- Since prop_view_2 depends on the view_table_2, it should be dropped +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_2%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +-- Show that unsupported CREATE OR REPLACE VIEW commands are catched by PG on the coordinator +CREATE TABLE table_to_test_unsup_view(id int, val1 text); +SELECT create_distributed_table('table_to_test_unsup_view', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW view_for_unsup_commands AS SELECT * FROM table_to_test_unsup_view; +CREATE OR REPLACE VIEW view_for_unsup_commands(a,b) AS SELECT * FROM table_to_test_unsup_view; +ERROR: cannot change name of view column "id" to "a" +HINT: Use ALTER VIEW ... RENAME COLUMN ... to change name of view column instead. 
+CREATE OR REPLACE VIEW view_for_unsup_commands AS SELECT id FROM table_to_test_unsup_view; +ERROR: cannot drop columns from view +SET client_min_messages TO ERROR; +DROP SCHEMA view_prop_schema_inner CASCADE; +DROP SCHEMA view_prop_schema CASCADE; diff --git a/src/test/regress/expected/views_create.out b/src/test/regress/expected/views_create.out index acc8f002f..ddd787c7e 100644 --- a/src/test/regress/expected/views_create.out +++ b/src/test/regress/expected/views_create.out @@ -1,6 +1,15 @@ CREATE SCHEMA views_create; SET search_path TO views_create; CREATE TABLE view_test_table(a INT NOT NULL PRIMARY KEY, b BIGINT, c text); +SELECT create_distributed_table('view_test_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Since creating view distributed or locally depends on the arbitrary config +-- set client_min_messages to ERROR to get consistent result. +SET client_min_messages TO ERROR; CREATE OR REPLACE VIEW select_filtered_view AS SELECT * FROM view_test_table WHERE c = 'testing' WITH CASCADED CHECK OPTION; @@ -9,12 +18,7 @@ CREATE OR REPLACE VIEW select_all_view AS WITH LOCAL CHECK OPTION; CREATE OR REPLACE VIEW count_view AS SELECT COUNT(*) FROM view_test_table; -SELECT create_distributed_table('view_test_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - +RESET client_min_messages; INSERT INTO view_test_table VALUES (1,1,'testing'), (2,1,'views'); SELECT * FROM count_view; count @@ -42,6 +46,8 @@ SELECT * FROM select_filtered_view; -- dummy temp recursive view CREATE TEMP RECURSIVE VIEW recursive_defined_non_recursive_view(c) AS (SELECT 1); +WARNING: "view recursive_defined_non_recursive_view" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view recursive_defined_non_recursive_view" will be created only locally CREATE MATERIALIZED VIEW select_all_matview AS SELECT * FROM view_test_table 
WITH DATA; diff --git a/src/test/regress/input/multi_copy.source b/src/test/regress/input/multi_copy.source index a017fe342..ba6ad675f 100644 --- a/src/test/regress/input/multi_copy.source +++ b/src/test/regress/input/multi_copy.source @@ -508,6 +508,7 @@ INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'second_dustbunnies'::regclass::oid, 0); +SET client_min_messages TO ERROR; SELECT 1 FROM master_activate_node('localhost', :worker_1_port); RESET client_min_messages; RESET citus.shard_replication_factor; diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index e95676e2c..1897f7d6c 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -324,6 +324,7 @@ test: distributed_collations test: distributed_procedure test: distributed_collations_conflict test: function_propagation +test: view_propagation test: check_mx # --------- diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index f709263fd..5692ed347 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -634,9 +634,8 @@ INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'super_packed_numbers_hash'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'second_dustbunnies'::regclass::oid, 0); +SET client_min_messages TO ERROR; SELECT 1 FROM 
master_activate_node('localhost', :worker_1_port); -NOTICE: Replicating postgres objects to node localhost:57637 -DETAIL: There are 115 objects to replicate, depending on your environment this might take a while ?column? --------------------------------------------------------------------- 1 diff --git a/src/test/regress/spec/isolation_select_for_update.spec b/src/test/regress/spec/isolation_select_for_update.spec index 3eb16a94e..4dcd2f42c 100644 --- a/src/test/regress/spec/isolation_select_for_update.spec +++ b/src/test/regress/spec/isolation_select_for_update.spec @@ -10,7 +10,9 @@ setup SELECT create_distributed_table('test_table_1_rf1','id'); INSERT INTO test_table_1_rf1 values(1,2),(2,3),(3,4); + SET citus.enable_ddl_propagation TO OFF; CREATE VIEW test_1 AS SELECT * FROM test_table_1_rf1 WHERE val_1 = 2; + RESET citus.enable_ddl_propagation; CREATE TABLE test_table_2_rf1(id int, val_1 int); SELECT create_distributed_table('test_table_2_rf1','id'); diff --git a/src/test/regress/sql/alter_table_set_access_method.sql b/src/test/regress/sql/alter_table_set_access_method.sql index 7ddadc531..0ffabf664 100644 --- a/src/test/regress/sql/alter_table_set_access_method.sql +++ b/src/test/regress/sql/alter_table_set_access_method.sql @@ -224,7 +224,6 @@ select alter_table_set_access_method('local','columnar'); select alter_table_set_access_method('ref','columnar'); select alter_table_set_access_method('dist','columnar'); - SELECT alter_distributed_table('dist', shard_count:=1, cascade_to_colocated:=false); select alter_table_set_access_method('local','heap'); diff --git a/src/test/regress/sql/citus_local_tables_queries_mx.sql b/src/test/regress/sql/citus_local_tables_queries_mx.sql index ddcc95d84..cad6a0386 100644 --- a/src/test/regress/sql/citus_local_tables_queries_mx.sql +++ b/src/test/regress/sql/citus_local_tables_queries_mx.sql @@ -445,20 +445,24 @@ SELECT count(*) FROM mat_view_4; SELECT count(*) FROM distributed_table WHERE b in (SELECT count FROM mat_view_4); 
+SET citus.enable_ddl_propagation TO OFF; CREATE VIEW view_2 AS SELECT count(*) FROM citus_local_table JOIN citus_local_table_2 USING (a) JOIN distributed_table USING (a); +RESET citus.enable_ddl_propagation; -- should fail as view contains direct local dist join SELECT count(*) FROM view_2; +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW view_3 AS SELECT count(*) FROM citus_local_table_2 JOIN reference_table USING (a); +RESET citus.enable_ddl_propagation; -- ok SELECT count(*) FROM view_3; diff --git a/src/test/regress/sql/distributed_planning.sql b/src/test/regress/sql/distributed_planning.sql index b19654ff4..c6a2bf29c 100644 --- a/src/test/regress/sql/distributed_planning.sql +++ b/src/test/regress/sql/distributed_planning.sql @@ -73,8 +73,10 @@ COMMIT; -- basic view queries +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW simple_view AS SELECT count(*) as cnt FROM test t1 JOIN test t2 USING (x); +RESET citus.enable_ddl_propagation; SELECT * FROM simple_view; SELECT * FROM simple_view, test WHERE test.x = simple_view.cnt; diff --git a/src/test/regress/sql/drop_partitioned_table.sql b/src/test/regress/sql/drop_partitioned_table.sql index a9842b10a..b1c64d5cb 100644 --- a/src/test/regress/sql/drop_partitioned_table.sql +++ b/src/test/regress/sql/drop_partitioned_table.sql @@ -52,22 +52,6 @@ WHERE n.nspname IN ('drop_partitioned_table', 'schema1') AND c.relkind IN ('r','p') ORDER BY 1, 2; -\c - - - :worker_1_port -SET search_path = drop_partitioned_table; -CREATE VIEW tables_info AS -SELECT n.nspname as "Schema", - c.relname as "Name", - CASE c.relkind WHEN 'r' THEN 'table' WHEN 'p' THEN 'partitioned table' END as "Type", - pg_catalog.pg_get_userbyid(c.relowner) as "Owner" -FROM pg_catalog.pg_class c - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - LEFT JOIN pg_user u ON u.usesysid = c.relowner -WHERE n.nspname IN ('drop_partitioned_table', 'schema1') - AND c.relkind IN ('r','p') -ORDER BY 1, 2; - -\c - - - :master_port -SET search_path 
= drop_partitioned_table; SET citus.next_shard_id TO 721000; -- CASE 1 diff --git a/src/test/regress/sql/function_propagation.sql b/src/test/regress/sql/function_propagation.sql index d4d3a7322..579a1aa9f 100644 --- a/src/test/regress/sql/function_propagation.sql +++ b/src/test/regress/sql/function_propagation.sql @@ -101,7 +101,7 @@ $$; SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid; SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;$$) ORDER BY 1,2; --- Views are not supported +-- Views are supported CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table; CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view) RETURNS int diff --git a/src/test/regress/sql/local_dist_join_mixed.sql b/src/test/regress/sql/local_dist_join_mixed.sql index ebc475de1..b07da2fc8 100644 --- a/src/test/regress/sql/local_dist_join_mixed.sql +++ b/src/test/regress/sql/local_dist_join_mixed.sql @@ -408,4 +408,5 @@ JOIN USING (id); +SET client_min_messages TO ERROR; DROP SCHEMA local_dist_join_mixed CASCADE; diff --git a/src/test/regress/sql/local_shard_execution.sql b/src/test/regress/sql/local_shard_execution.sql index 7a0fc8b8a..76ed8b555 100644 --- a/src/test/regress/sql/local_shard_execution.sql +++ b/src/test/regress/sql/local_shard_execution.sql @@ -230,7 +230,9 @@ SELECT * FROM second_distributed_table WHERE key = 1 ORDER BY 1,2; INSERT INTO distributed_table VALUES (1, '22', 20); INSERT INTO second_distributed_table VALUES (1, '1'); +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW abcd_view AS SELECT * FROM abcd; +RESET citus.enable_ddl_propagation; SELECT * FROM abcd first join abcd second on first.b = second.b ORDER BY 1,2,3,4; @@ -765,15 +767,19 @@ ROLLBACK; -- probably not a realistic case since views 
are not very -- well supported with MX +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution; -- similar test, but this time the view itself is a non-local -- query, but the query on the view is local +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution_2 WHERE key = 500; diff --git a/src/test/regress/sql/local_shard_execution_replicated.sql b/src/test/regress/sql/local_shard_execution_replicated.sql index 81b47cfc8..a8fe72b98 100644 --- a/src/test/regress/sql/local_shard_execution_replicated.sql +++ b/src/test/regress/sql/local_shard_execution_replicated.sql @@ -194,7 +194,9 @@ SELECT * FROM second_distributed_table WHERE key = 1 ORDER BY 1,2; -- Put row back for other tests INSERT INTO distributed_table VALUES (1, '22', 20); +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW abcd_view AS SELECT * FROM abcd; +RESET citus.enable_ddl_propagation; SELECT * FROM abcd first join abcd second on first.b = second.b ORDER BY 1,2,3,4; @@ -729,15 +731,19 @@ ROLLBACK; -- probably not a realistic case since views are not very -- well supported with MX +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution; -- similar test, but this time the view itself is a non-local -- query, but the query on the view is local +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution_2 WHERE key = 500; diff --git a/src/test/regress/sql/multi_generate_ddl_commands.sql b/src/test/regress/sql/multi_generate_ddl_commands.sql index 
4237d62b0..b4d04931a 100644 --- a/src/test/regress/sql/multi_generate_ddl_commands.sql +++ b/src/test/regress/sql/multi_generate_ddl_commands.sql @@ -116,7 +116,7 @@ ALTER TABLE fiddly_table SELECT master_get_table_ddl_events('fiddly_table'); --- propagating views is not supported +-- propagating views is not supported if local table dependency exists CREATE VIEW local_view AS SELECT * FROM simple_table; SELECT master_get_table_ddl_events('local_view'); diff --git a/src/test/regress/sql/multi_partitioning.sql b/src/test/regress/sql/multi_partitioning.sql index 91bbc180b..cbfa20440 100644 --- a/src/test/regress/sql/multi_partitioning.sql +++ b/src/test/regress/sql/multi_partitioning.sql @@ -1992,7 +1992,9 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%' ORDER BY 1, 2; -- should work properly - no names clashes +SET client_min_messages TO WARNING; SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); +RESET client_min_messages; \c - - - :worker_1_port -- check that indexes are named properly diff --git a/src/test/regress/sql/non_colocated_subquery_joins.sql b/src/test/regress/sql/non_colocated_subquery_joins.sql index d8a2aaea1..0c8953d2c 100644 --- a/src/test/regress/sql/non_colocated_subquery_joins.sql +++ b/src/test/regress/sql/non_colocated_subquery_joins.sql @@ -785,7 +785,6 @@ SELECT count(*) FROM events_table WHERE user_id NOT IN -- make sure that non-colocated subquery joins work fine in -- modifications CREATE TABLE table1 (id int, tenant_id int); -CREATE VIEW table1_view AS SELECT * from table1 where id < 100; CREATE TABLE table2 (id int, tenant_id int) partition by range(tenant_id); CREATE TABLE table2_p1 PARTITION OF table2 FOR VALUES FROM (1) TO (10); @@ -795,6 +794,8 @@ SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('table2','tenant_id'); SELECT create_distributed_table('table1','tenant_id'); +CREATE VIEW table1_view AS SELECT * from 
table1 where id < 100; + -- all of the above queries are non-colocated subquery joins -- because the views are replaced with subqueries UPDATE table2 SET id=20 FROM table1_view WHERE table1_view.id=table2.id; diff --git a/src/test/regress/sql/recursive_dml_queries_mx.sql b/src/test/regress/sql/recursive_dml_queries_mx.sql index 88b21e0b4..426fbc8ae 100644 --- a/src/test/regress/sql/recursive_dml_queries_mx.sql +++ b/src/test/regress/sql/recursive_dml_queries_mx.sql @@ -104,6 +104,7 @@ SET search_path TO recursive_dml_queries_mx, public; CREATE TABLE recursive_dml_queries_mx.local_table (id text, name text); INSERT INTO local_table SELECT i::text, 'user_' || i FROM generate_series (0, 100) i; +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW tenant_ids AS SELECT tenant_id, name @@ -112,6 +113,7 @@ CREATE VIEW tenant_ids AS WHERE distributed_table.dept::text = reference_table.id ORDER BY 2 DESC, 1 DESC; +RESET citus.enable_ddl_propagation; -- we currently do not allow local tables in modification queries UPDATE diff --git a/src/test/regress/sql/resync_metadata_with_sequences.sql b/src/test/regress/sql/resync_metadata_with_sequences.sql index e0b263f52..446fa7e2d 100644 --- a/src/test/regress/sql/resync_metadata_with_sequences.sql +++ b/src/test/regress/sql/resync_metadata_with_sequences.sql @@ -55,6 +55,7 @@ INSERT INTO sensors VALUES (DEFAULT, DEFAULT, '2010-01-01') RETURNING *; INSERT INTO sensors_news VALUES (DEFAULT, DEFAULT, '2021-01-01') RETURNING *; \c - - - :master_port +SET client_min_messages TO ERROR; SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); diff --git a/src/test/regress/sql/start_stop_metadata_sync.sql b/src/test/regress/sql/start_stop_metadata_sync.sql index a23eba415..4e30cef1c 100644 --- a/src/test/regress/sql/start_stop_metadata_sync.sql +++ b/src/test/regress/sql/start_stop_metadata_sync.sql @@ -103,7 +103,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); \c - - - :worker_1_port SET search_path TO 
"start_stop_metadata_sync"; SELECT * FROM distributed_table_1; -CREATE VIEW test_view AS SELECT COUNT(*) FROM distributed_table_3; CREATE MATERIALIZED VIEW test_matview AS SELECT COUNT(*) FROM distributed_table_3; SELECT * FROM test_view; SELECT * FROM test_matview; diff --git a/src/test/regress/sql/view_propagation.sql b/src/test/regress/sql/view_propagation.sql new file mode 100644 index 000000000..deb9f050c --- /dev/null +++ b/src/test/regress/sql/view_propagation.sql @@ -0,0 +1,273 @@ +-- Tests to check propagation of all view commands +CREATE SCHEMA view_prop_schema; +SET search_path to view_prop_schema; + +-- Check creating views depending on different types of tables +-- and from multiple schemas + +-- Check the most basic one +CREATE VIEW prop_view_basic AS SELECT 1; + +-- Try to create view depending local table, then try to recreate it after distributing the table +CREATE TABLE view_table_1(id int, val_1 text); +CREATE VIEW prop_view_1 AS + SELECT * FROM view_table_1; + +SELECT create_distributed_table('view_table_1', 'id'); +CREATE OR REPLACE VIEW prop_view_1 AS + SELECT * FROM view_table_1; + +-- Try to create view depending local table, then try to recreate it after making the table reference table +CREATE TABLE view_table_2(id int PRIMARY KEY, val_1 text); +CREATE VIEW prop_view_2 AS + SELECT view_table_1.id, view_table_2.val_1 FROM view_table_1 INNER JOIN view_table_2 + ON view_table_1.id = view_table_2.id; + +SELECT create_reference_table('view_table_2'); +CREATE OR REPLACE VIEW prop_view_2 AS + SELECT view_table_1.id, view_table_2.val_1 FROM view_table_1 INNER JOIN view_table_2 + ON view_table_1.id = view_table_2.id; + +-- Try to create view depending local table, then try to recreate it after making the table citus local table +CREATE TABLE view_table_3(id int, val_1 text); +CREATE VIEW prop_view_3 AS + SELECT * FROM view_table_1 WHERE id IN + (SELECT view_table_2.id FROM view_table_2 INNER JOIN view_table_3 ON view_table_2.id = view_table_3.id); 
+ +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid=>0); +RESET client_min_messages; + +ALTER TABLE view_table_3 +ADD CONSTRAINT f_key_for_local_table +FOREIGN KEY(id) +REFERENCES view_table_2(id); + +CREATE OR REPLACE VIEW prop_view_3 AS + SELECT * FROM view_table_1 WHERE id IN + (SELECT view_table_2.id FROM view_table_2 INNER JOIN view_table_3 ON view_table_2.id = view_table_3.id); + +-- Try to create view depending on PG metadata table +CREATE VIEW prop_view_4 AS + SELECT * FROM pg_stat_activity; + +-- Try to create view depending on Citus metadata table +CREATE VIEW prop_view_5 AS + SELECT * FROM citus_dist_stat_activity; + +-- Try to create a view depending on a local table from another schema, then try to create it again after distributing the table +CREATE SCHEMA view_prop_schema_inner; +SET search_path TO view_prop_schema_inner; + +-- Create local table for tests below +CREATE TABLE view_table_4(id int, val_1 text); + +-- Create a distributed table and view to test drop view below +CREATE TABLE inner_view_table(id int); +SELECT create_distributed_table('inner_view_table','id'); +CREATE VIEW inner_view_prop AS SELECT * FROM inner_view_table; + +SET search_path to view_prop_schema; + +CREATE VIEW prop_view_6 AS + SELECT vt1.id, vt4.val_1 FROM view_table_1 AS vt1 + INNER JOIN view_prop_schema_inner.view_table_4 AS vt4 ON vt1.id = vt4.id; + +SELECT create_distributed_table('view_prop_schema_inner.view_table_4','id'); +CREATE OR REPLACE VIEW prop_view_6 AS + SELECT vt1.id, vt4.val_1 FROM view_table_1 AS vt1 + INNER JOIN view_prop_schema_inner.view_table_4 AS vt4 ON vt1.id = vt4.id; + +-- Show that all views are propagated as distributed object +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_%' ORDER BY 1; + +-- Check creating views depending on various kinds of objects +-- 
Tests will also check propagating dependent objects + +-- Depending on function +SET citus.enable_ddl_propagation TO OFF; +CREATE OR REPLACE FUNCTION func_1_for_view(param_1 int) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return param_1; +END; +$$; +RESET citus.enable_ddl_propagation; + +-- Show that function will be propagated together with the view +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%func_1_for_view%'; + +CREATE VIEW prop_view_7 AS SELECT func_1_for_view(id) FROM view_table_1; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%func_1_for_view%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_7%'; + +-- Depending on type +SET citus.enable_ddl_propagation TO OFF; +CREATE TYPE type_for_view_prop AS ENUM ('a','b','c'); +RESET citus.enable_ddl_propagation; + +-- Show that type will be propagated together with the view +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%type_for_view_prop%'; + +CREATE VIEW prop_view_8 AS SELECT val_1::type_for_view_prop FROM view_table_1; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%type_for_view_prop%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_8%'; + +-- Depending on another view +CREATE TABLE 
view_table_5(id int); +CREATE VIEW prop_view_9 AS SELECT * FROM view_table_5; +CREATE VIEW prop_view_10 AS SELECT * FROM prop_view_9; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_9%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_10%'; + +SELECT create_distributed_table('view_table_5', 'id'); +CREATE OR REPLACE VIEW prop_view_10 AS SELECT * FROM prop_view_9; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_9%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_10%'; + +-- Check views owned by non-superuser +SET client_min_messages TO ERROR; +CREATE USER view_creation_user; +SELECT 1 FROM run_command_on_workers($$CREATE USER view_creation_user;$$); +GRANT ALL PRIVILEGES ON SCHEMA view_prop_schema to view_creation_user; + +SET ROLE view_creation_user; + +CREATE TABLE user_owned_table_for_view(id int); +SELECT create_distributed_table('user_owned_table_for_view','id'); +CREATE VIEW view_owned_by_user AS SELECT * FROM user_owned_table_for_view; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%view_owned_by_user%'; +DROP VIEW view_owned_by_user; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%view_owned_by_user%'; +DROP TABLE 
user_owned_table_for_view; + +RESET ROLE; +RESET client_min_messages; + +-- Create view with different options + +CREATE TABLE view_table_6(id int, val_1 text); +SELECT create_distributed_table('view_table_6','id'); + +-- TEMP VIEW is not supported. View will be created locally. +CREATE TEMP VIEW temp_prop_view AS SELECT * FROM view_table_6; + +-- Recursive views are supported +CREATE RECURSIVE VIEW nums_1_100_prop_view (n) AS + VALUES (1) +UNION ALL + SELECT n+1 FROM nums_1_100_prop_view WHERE n < 100; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%nums_1_100_prop_view%'; + +-- Sequences are supported as dependency +CREATE SEQUENCE sequence_to_prop; +CREATE VIEW seq_view_prop AS SELECT sequence_to_prop.is_called FROM sequence_to_prop; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%sequence_to_prop%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%seq_view_prop%'; + +-- Views depending on temp sequences will be created locally +CREATE TEMPORARY SEQUENCE temp_sequence_to_drop; +CREATE VIEW temp_seq_view_prop AS SELECT temp_sequence_to_drop.is_called FROM temp_sequence_to_drop; + +-- Check circular dependencies are detected +CREATE VIEW circular_view_1 AS SELECT * FROM view_table_6; +CREATE VIEW circular_view_2 AS SELECT * FROM view_table_6; +CREATE OR REPLACE VIEW circular_view_1 AS SELECT view_table_6.* FROM view_table_6 JOIN circular_view_2 USING (id); +CREATE OR REPLACE VIEW circular_view_2 AS SELECT view_table_6.* FROM view_table_6 JOIN circular_view_1 USING (id); + +-- Recursive views with distributed tables included +CREATE TABLE employees (employee_id int, 
manager_id int, full_name text); +SELECT create_distributed_table('employees', 'employee_id'); + +CREATE OR REPLACE RECURSIVE VIEW reporting_line (employee_id, subordinates) AS +SELECT + employee_id, + full_name AS subordinates +FROM + employees +WHERE + manager_id IS NULL +UNION ALL + SELECT + e.employee_id, + ( + rl.subordinates || ' > ' || e.full_name + ) AS subordinates + FROM + employees e + INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id; + +-- Aliases are supported +CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6; + +-- View options are supported +CREATE VIEW opt_prop_view + WITH(check_option=CASCADED, security_barrier=true) + AS SELECT * FROM view_table_6; + +CREATE VIEW sep_opt_prop_view + AS SELECT * FROM view_table_6 + WITH LOCAL CHECK OPTION; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%' ORDER BY 1; + +-- Check definitions and reltoptions of views are correct on workers +\c - - - :worker_1_port + +SELECT definition FROM pg_views WHERE viewname = 'aliased_opt_prop_view'; +SELECT definition FROM pg_views WHERE viewname = 'opt_prop_view'; +SELECT definition FROM pg_views WHERE viewname = 'sep_opt_prop_view'; + +SELECT relname, reloptions +FROM pg_class +WHERE + oid = 'view_prop_schema.aliased_opt_prop_view'::regclass::oid OR + oid = 'view_prop_schema.opt_prop_view'::regclass::oid OR + oid = 'view_prop_schema.sep_opt_prop_view'::regclass::oid +ORDER BY 1; + +\c - - - :master_port +SET search_path to view_prop_schema; + +-- Sync metadata to check it works properly after adding a view +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- Drop views and check metadata afterwards +DROP VIEW prop_view_9 CASCADE; +DROP VIEW opt_prop_view, aliased_opt_prop_view, view_prop_schema_inner.inner_view_prop, sep_opt_prop_view; + +SELECT * FROM (SELECT 
pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%inner_view_prop%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%'; + +-- Drop a column that view depends on +ALTER TABLE view_table_1 DROP COLUMN val_1 CASCADE; + +-- Since prop_view_3 depends on the view_table_1's val_1 column, it should be dropped +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_3%'; + +-- Drop a table that view depends on +DROP TABLE view_table_2 CASCADE; + +-- Since prop_view_2 depends on the view_table_2, it should be dropped +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_2%'; + +-- Show that unsupported CREATE OR REPLACE VIEW commands are caught by PG on the coordinator +CREATE TABLE table_to_test_unsup_view(id int, val1 text); +SELECT create_distributed_table('table_to_test_unsup_view', 'id'); + +CREATE VIEW view_for_unsup_commands AS SELECT * FROM table_to_test_unsup_view; + +CREATE OR REPLACE VIEW view_for_unsup_commands(a,b) AS SELECT * FROM table_to_test_unsup_view; +CREATE OR REPLACE VIEW view_for_unsup_commands AS SELECT id FROM table_to_test_unsup_view; + +SET client_min_messages TO ERROR; +DROP SCHEMA view_prop_schema_inner CASCADE; +DROP SCHEMA view_prop_schema CASCADE; diff --git a/src/test/regress/sql/views_create.sql b/src/test/regress/sql/views_create.sql index d30676c42..0c9b2acc1 100644 --- a/src/test/regress/sql/views_create.sql +++ b/src/test/regress/sql/views_create.sql @@ -2,6 +2,10 @@ CREATE SCHEMA views_create; SET search_path TO 
views_create; CREATE TABLE view_test_table(a INT NOT NULL PRIMARY KEY, b BIGINT, c text); +SELECT create_distributed_table('view_test_table', 'a'); +-- Since creating view distributed or locally depends on the arbitrary config +-- set client_min_messages to ERROR to get consistent result. +SET client_min_messages TO ERROR; CREATE OR REPLACE VIEW select_filtered_view AS SELECT * FROM view_test_table WHERE c = 'testing' WITH CASCADED CHECK OPTION; @@ -10,7 +14,7 @@ CREATE OR REPLACE VIEW select_all_view AS WITH LOCAL CHECK OPTION; CREATE OR REPLACE VIEW count_view AS SELECT COUNT(*) FROM view_test_table; -SELECT create_distributed_table('view_test_table', 'a'); +RESET client_min_messages; INSERT INTO view_test_table VALUES (1,1,'testing'), (2,1,'views'); SELECT * FROM count_view;