From 67ac3da2b016bf36e08dd7b13e03ebb21984207e Mon Sep 17 00:00:00 2001 From: aykut-bozkurt <51649454+aykut-bozkurt@users.noreply.github.com> Date: Mon, 25 Jul 2022 16:43:34 +0300 Subject: [PATCH 01/38] added citus_depended_objects udf and HideCitusDependentObjects GUC to hide citus depended objects from pg meta queries (#6055) use RecurseObjectDependencies api to find if an object is citus depended make vanilla tests runnable to see if citus_depended function is working correctly --- src/backend/distributed/metadata/dependency.c | 150 +++++++++ .../distributed/metadata/metadata_cache.c | 26 ++ .../distributed/planner/distributed_planner.c | 20 ++ .../distributed/planner/multi_explain.c | 15 + src/backend/distributed/shared_library_init.c | 13 + .../distributed/test/citus_depended_object.c | 151 +++++++++ .../distributed/utils/citus_depended_object.c | 287 ++++++++++++++++++ .../distributed/citus_depended_object.h | 24 ++ src/include/distributed/metadata/dependency.h | 1 + src/include/distributed/metadata/distobject.h | 1 + src/include/distributed/metadata_cache.h | 1 + src/test/regress/.gitignore | 21 ++ src/test/regress/Makefile | 9 +- .../expected/citus_depended_object.out | 195 ++++++++++++ src/test/regress/multi_1_schedule | 2 +- src/test/regress/pg_regress_multi.pl | 96 +++++- .../regress/sql/citus_depended_object.sql | 151 +++++++++ 17 files changed, 1146 insertions(+), 17 deletions(-) create mode 100644 src/backend/distributed/test/citus_depended_object.c create mode 100644 src/backend/distributed/utils/citus_depended_object.c create mode 100644 src/include/distributed/citus_depended_object.h create mode 100644 src/test/regress/expected/citus_depended_object.out create mode 100644 src/test/regress/sql/citus_depended_object.sql diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 67747dee6..e03edadda 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -34,7 +34,9 @@ #include "catalog/pg_rewrite_d.h" #include "catalog/pg_shdepend.h" #include "catalog/pg_type.h" +#include "commands/extension.h" #include "common/hashfn.h" +#include "distributed/citus_depended_object.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/listutils.h" @@ -168,11 +170,18 @@ static bool FollowNewSupportedDependencies(ObjectAddressCollector *collector, DependencyDefinition *definition); static bool FollowAllDependencies(ObjectAddressCollector *collector, DependencyDefinition *definition); +static bool FollowExtAndInternalDependencies(ObjectAddressCollector *collector, + DependencyDefinition *definition); static void ApplyAddToDependencyList(ObjectAddressCollector *collector, DependencyDefinition *definition); +static void ApplyAddCitusDependedObjectsToDependencyList( + ObjectAddressCollector *collector, + DependencyDefinition *definition); static List * GetViewRuleReferenceDependencyList(Oid relationId); static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress target); +static List * ExpandForPgVanilla(ObjectAddressCollector *collector, + ObjectAddress target); static List * GetDependentRoleIdsFDW(Oid FDWOid); static List * ExpandRolesToGroups(Oid roleid); static ViewDependencyNode * BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap); @@ -280,6 +289,26 @@ GetAllDependenciesForObject(const ObjectAddress *target) } +/* + * GetAllCitusDependedDependenciesForObject returns all the dependencies + * which 
 are owned by the citus extension for the target.
+ */
+List *
+GetAllCitusDependedDependenciesForObject(const ObjectAddress *target)
+{
+	ObjectAddressCollector collector = { 0 };
+	InitObjectAddressCollector(&collector);
+
+	RecurseObjectDependencies(*target,
+							  &ExpandForPgVanilla,
+							  &FollowExtAndInternalDependencies,
+							  &ApplyAddCitusDependedObjectsToDependencyList,
+							  &collector);
+
+	return collector.dependencyList;
+}
+
+
 /*
  * OrderObjectAddressListInDependencyOrder given a list of ObjectAddresses return a new
  * list of the same ObjectAddresses ordered on dependency order where dependencies
@@ -1121,6 +1150,37 @@ IsAnyObjectAddressOwnedByExtension(const List *targets,
 }
 
 
+/*
+ * IsObjectAddressOwnedByCitus returns true if the given object address
+ * is owned by the citus or citus_columnar extensions.
+ */
+bool
+IsObjectAddressOwnedByCitus(const ObjectAddress *objectAddress)
+{
+	Oid citusId = get_extension_oid("citus", true);
+	Oid citusColumnarId = get_extension_oid("citus_columnar", true);
+
+	/* return false because we could not find any citus extension */
+	if (!OidIsValid(citusId) && !OidIsValid(citusColumnarId))
+	{
+		return false;
+	}
+
+	ObjectAddress extObjectAddress = InvalidObjectAddress;
+	bool ownedByExt = IsObjectAddressOwnedByExtension(objectAddress,
+													  &extObjectAddress);
+	if (!ownedByExt)
+	{
+		return false;
+	}
+
+	bool ownedByCitus = extObjectAddress.objectId == citusId;
+	bool ownedByCitusColumnar = extObjectAddress.objectId == citusColumnarId;
+
+	return ownedByCitus || ownedByCitusColumnar;
+}
+
+
 /*
  * FollowNewSupportedDependencies applies filters on pg_depend entries to follow all
  * objects which should be distributed before the root object can safely be created.
@@ -1302,6 +1362,39 @@ FollowAllDependencies(ObjectAddressCollector *collector,
 }
 
 
+/*
+ * FollowExtAndInternalDependencies applies filters on pg_depend entries to follow
+ * the dependency tree of objects in depth first order. We visit all objects,
+ * irrespective of whether they are supported by Citus and whether they are internal.
+ */
+static bool
+FollowExtAndInternalDependencies(ObjectAddressCollector *collector,
+								 DependencyDefinition *definition)
+{
+	ObjectAddress address = DependencyDefinitionObjectAddress(definition);
+
+	/*
+	 * If the object is already in our dependency list we do not have to follow any
+	 * further
+	 */
+	if (IsObjectAddressCollected(address, collector))
+	{
+		return false;
+	}
+
+	if (CitusExtensionObject(&address))
+	{
+		/*
+		 * We do not need to follow citus extension because the purpose
+		 * of our walk is to find if an object is owned by citus.
+		 */
+		return false;
+	}
+
+	return true;
+}
+
+
 /*
  * ApplyAddToDependencyList is an apply function for RecurseObjectDependencies that will
  * collect all the ObjectAddresses for pg_depend entries to the context, except it is
@@ -1332,6 +1425,30 @@ ApplyAddToDependencyList(ObjectAddressCollector *collector,
 }
 
 
+/*
+ * ApplyAddCitusDependedObjectsToDependencyList is an apply function for
+ * RecurseObjectDependencies that collects into the context the ObjectAddresses
+ * of pg_depend entries that are owned by the citus extension.
+ *
+ * The context here is assumed to be a (ObjectAddressCollector *) to the location where
+ * all ObjectAddresses will be collected.
+ */
+static void
+ApplyAddCitusDependedObjectsToDependencyList(ObjectAddressCollector *collector,
+											 DependencyDefinition *definition)
+{
+	ObjectAddress address = DependencyDefinitionObjectAddress(definition);
+
+	/*
+	 * We only collect the object if it is owned by the citus extension.
+	 */
+	if (IsObjectAddressOwnedByCitus(&address))
+	{
+		CollectObjectAddress(collector, &address);
+	}
+}
+
+
 /*
  * ExpandCitusSupportedTypes base on supported types by citus we might want to expand
  * the list of objects to visit in pg_depend.
@@ -1515,6 +1632,39 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
 }
 
 
+/*
+ * ExpandForPgVanilla expands only composite types, because other types
+ * will find their dependencies in pg_depend. The method should only be called by
+ * the is_citus_depended_object udf.
+ */
+static List *
+ExpandForPgVanilla(ObjectAddressCollector *collector,
+				   ObjectAddress target)
+{
+	/* should only be called if GUC is enabled */
+	Assert(HideCitusDependentObjects == true);
+
+	List *result = NIL;
+
+	if (target.classId == TypeRelationId && get_typtype(target.objectId) ==
+		TYPTYPE_COMPOSITE)
+	{
+		/*
+		 * types depending on other types are not captured in pg_depend, instead
+		 * they are described with their dependencies by the relation that
+		 * describes the composite type.
+		 */
+		Oid typeRelationId = get_typ_typrelid(target.objectId);
+		DependencyDefinition *dependency =
+			CreateObjectAddressDependencyDef(RelationRelationId,
+											 typeRelationId);
+		result = lappend(result, dependency);
+	}
+
+	return result;
+}
+
+
 /*
  * GetDependentRoleIdsFDW returns a list of role oids that has privileges on the
  * FDW with the given object id.
diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c
index fb6efdae6..7373162aa 100644
--- a/src/backend/distributed/metadata/metadata_cache.c
+++ b/src/backend/distributed/metadata/metadata_cache.c
@@ -35,6 +35,7 @@
 #include "commands/extension.h"
 #include "commands/trigger.h"
 #include "distributed/backend_data.h"
+#include "distributed/citus_depended_object.h"
 #include "distributed/colocation_utils.h"
 #include "distributed/connection_management.h"
 #include "distributed/citus_ruleutils.h"
@@ -182,6 +183,7 @@ typedef struct MetadataCacheData
 	Oid relationIsAKnownShardFuncId;
 	Oid jsonbExtractPathFuncId;
 	Oid jsonbExtractPathTextFuncId;
+	Oid CitusDependentObjectFuncId;
 	bool databaseNameValid;
 	char databaseName[NAMEDATALEN];
 } MetadataCacheData;
@@ -2896,6 +2898,30 @@ JsonbExtractPathTextFuncId(void)
 }
 
 
+/*
+ * CitusDependentObjectFuncId returns the oid of the is_citus_depended_object function.
+ */
+Oid
+CitusDependentObjectFuncId(void)
+{
+	if (!HideCitusDependentObjects)
+	{
+		ereport(ERROR, (errmsg(
+							"is_citus_depended_object can only be used while running the regression tests")));
+	}
+
+	if (MetadataCache.CitusDependentObjectFuncId == InvalidOid)
+	{
+		const int argCount = 2;
+
+		MetadataCache.CitusDependentObjectFuncId =
+			FunctionOid("pg_catalog", "is_citus_depended_object", argCount);
+	}
+
+	return MetadataCache.CitusDependentObjectFuncId;
+}
+
+
 /*
  * CurrentDatabaseName gets the name of the current database and caches
  * the result.
diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 7cc87dc9a..5b677fb77 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -17,9 +17,11 @@ #include #include "access/htup_details.h" +#include "access/xact.h" #include "catalog/pg_class.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" +#include "distributed/citus_depended_object.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_nodes.h" #include "distributed/citus_ruleutils.h" @@ -204,6 +206,13 @@ distributed_planner(Query *parse, */ HideShardsFromSomeApplications(parse); + /* + * If GUC is set, we prevent queries, which contain pg meta relations, from + * showing any citus dependent object. The flag is expected to be set only before + * postgres vanilla tests. + */ + HideCitusDependentObjectsOnQueriesOfPgMetaTables((Node *) parse, NULL); + /* create a restriction context and put it at the end if context list */ planContext.plannerRestrictionContext = CreateAndPushPlannerRestrictionContext(); @@ -345,6 +354,17 @@ ListContainsDistributedTableRTE(List *rangeTableList, continue; } + if (HideCitusDependentObjects && IsolationIsSerializable() && IsPgLocksTable( + rangeTableEntry)) + { + /* + * Postgres tidscan.sql test fails if we do not filter pg_locks table because + * test results, which show taken locks in serializable isolation mode, + * fails by showing extra lock taken by IsCitusTable below. + */ + continue; + } + if (IsCitusTable(rangeTableEntry->relid)) { if (maybeHasForeignDistributedTable != NULL && diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index b627ecbfa..b9ee05aec 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -25,6 +25,7 @@ #include "commands/explain.h" #include "commands/tablecmds.h" #include "optimizer/cost.h" +#include "distributed/citus_depended_object.h" #include "distributed/citus_nodefuncs.h" #include "distributed/connection_management.h" #include "distributed/deparse_shard_query.h" @@ -1185,6 +1186,20 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, INSTR_TIME_SET_CURRENT(planstart); + /* + * We should not hide any objects while explaining some query to not break + * postgres vanilla tests. + * + * The filter 'is_citus_depended_object' is added to explain result + * and causes some tests to fail if HideCitusDependentObjects is true. + * Therefore, we disable HideCitusDependentObjects until the current transaction + * ends. + * + * We do not use security quals because a postgres vanilla test fails + * with a change of order for its result. 
+ */ + SetLocalHideCitusDependentObjectsDisabledWhenAlreadyEnabled(); + /* plan the query */ PlannedStmt *plan = pg_plan_query_compat(query, NULL, cursorOptions, params); INSTR_TIME_SET_CURRENT(planduration); diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 6c44d2127..6a5f229c9 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -32,6 +32,7 @@ #include "common/string.h" #include "executor/executor.h" #include "distributed/backend_data.h" +#include "distributed/citus_depended_object.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_safe_lib.h" #include "distributed/commands.h" @@ -1272,6 +1273,18 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL, NULL, NULL, NULL); + DefineCustomBoolVariable( + "citus.hide_citus_dependent_objects", + gettext_noop( + "Hides some objects, which depends on citus extension, from pg meta class queries." + "It is intended to be used only before postgres vanilla tests to not break them."), + NULL, + &HideCitusDependentObjects, + false, + PGC_USERSET, + GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL, + NULL, NULL, NULL); + /* * This was a GUC we added on Citus 11.0.1, and * replaced with another name on 11.0.2 via #5920. diff --git a/src/backend/distributed/test/citus_depended_object.c b/src/backend/distributed/test/citus_depended_object.c new file mode 100644 index 000000000..f7eb383b4 --- /dev/null +++ b/src/backend/distributed/test/citus_depended_object.c @@ -0,0 +1,151 @@ +/* + * citus_depended_object.c + * + * Implements udf function related to hiding citus depended objects while executing + * postgres vanilla tests. + * + * Copyright (c) Citus Data, Inc. + */ + +#include "postgres.h" + +#include "catalog/pg_aggregate.h" +#include "catalog/pg_am.h" +#include "catalog/pg_attribute.h" +#include "catalog/pg_attrdef.h" +#include "catalog/pg_constraint.h" +#include "catalog/pg_class.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_enum.h" +#include "catalog/pg_event_trigger.h" +#include "catalog/pg_language.h" +#include "catalog/pg_namespace.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_rewrite.h" +#include "catalog/pg_sequence.h" +#include "catalog/pg_statistic.h" +#include "catalog/pg_trigger.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_template.h" +#include "catalog/pg_type.h" +#include "distributed/citus_depended_object.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata/dependency.h" +#include "distributed/metadata/distobject.h" + +static bool IsCitusDependentObject(ObjectAddress objectAddress); + +PG_FUNCTION_INFO_V1(is_citus_depended_object); + +/* + * is_citus_depended_object a wrapper around IsCitusDependentObject, so + * see the details there. + * + * The first parameter expects an oid for + * a pg meta class, and the second parameter expects an oid for + * the object which is found in the pg meta class. + */ +Datum +is_citus_depended_object(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) + { + /* Because we want to return false for null arguments, we donot use strict keyword while creating that function. 
*/ + PG_RETURN_BOOL(false); + } + + Oid metaTableId = PG_GETARG_OID(0); + Oid objectId = PG_GETARG_OID(1); + + if (!OidIsValid(metaTableId) || !OidIsValid(objectId)) + { + /* we cannot continue without valid meta table or object oid */ + PG_RETURN_BOOL(false); + } + + bool dependsOnCitus = false; + + ObjectAddress objectAdress = { metaTableId, objectId, 0 }; + + switch (metaTableId) + { + case ProcedureRelationId: + case AccessMethodRelationId: + case EventTriggerRelationId: + case TriggerRelationId: + case TSConfigRelationId: + case TSTemplateRelationId: + case TSDictionaryRelationId: + case LanguageRelationId: + case RewriteRelationId: + case AttrDefaultRelationId: + case NamespaceRelationId: + case ConstraintRelationId: + case TypeRelationId: + case RelationRelationId: + { + /* meta classes that access their own oid */ + dependsOnCitus = IsCitusDependentObject(objectAdress); + break; + } + + case EnumRelationId: + { + /* + * we do not directly access the oid in pg_enum, + * because it does not exist in pg_depend, but its type does + */ + objectAdress.classId = TypeRelationId; + dependsOnCitus = IsCitusDependentObject(objectAdress); + break; + } + + case IndexRelationId: + case AttributeRelationId: + case SequenceRelationId: + case StatisticRelationId: + { + /* meta classes that access their relation's oid */ + objectAdress.classId = RelationRelationId; + dependsOnCitus = IsCitusDependentObject(objectAdress); + break; + } + + case AggregateRelationId: + { + /* We access procedure oid for aggregates. */ + objectAdress.classId = ProcedureRelationId; + dependsOnCitus = IsCitusDependentObject(objectAdress); + break; + } + + default: + { + break; + } + } + + PG_RETURN_BOOL(dependsOnCitus); +} + + +/* + * IsCitusDependentObject returns true if the given object depends on the citus extension. + */ +static bool +IsCitusDependentObject(ObjectAddress objectAddress) +{ + if (IsObjectAddressOwnedByCitus(&objectAddress)) + { + /* object itself is owned by citus */ + return true; + } + + /* check if object's any dependency is owned by citus. */ + List *citusDependencies = GetAllCitusDependedDependenciesForObject(&objectAddress); + return list_length(citusDependencies) > 0; +} diff --git a/src/backend/distributed/utils/citus_depended_object.c b/src/backend/distributed/utils/citus_depended_object.c new file mode 100644 index 000000000..b844c3515 --- /dev/null +++ b/src/backend/distributed/utils/citus_depended_object.c @@ -0,0 +1,287 @@ +/* + * citus_depended_object.c + * + * Implements exposed functions related to hiding citus depended objects. + * + * Copyright (c) Citus Data, Inc. 
+ */
+
+#include "postgres.h"
+#include "miscadmin.h"
+
+#include "catalog/namespace.h"
+#include "catalog/pg_aggregate.h"
+#include "catalog/pg_am.h"
+#include "catalog/pg_attribute.h"
+#include "catalog/pg_attrdef.h"
+#include "catalog/pg_constraint.h"
+#include "catalog/pg_class.h"
+#include "catalog/pg_depend.h"
+#include "catalog/pg_enum.h"
+#include "catalog/pg_event_trigger.h"
+#include "catalog/pg_language.h"
+#include "catalog/pg_namespace.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_rewrite.h"
+#include "catalog/pg_sequence.h"
+#include "catalog/pg_statistic.h"
+#include "catalog/pg_trigger.h"
+#include "catalog/pg_ts_config.h"
+#include "catalog/pg_ts_dict.h"
+#include "catalog/pg_ts_template.h"
+#include "catalog/pg_type.h"
+#include "distributed/citus_depended_object.h"
+#include "distributed/metadata_cache.h"
+#include "distributed/listutils.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/parsenodes.h"
+#include "utils/lsyscache.h"
+
+/*
+ * This GUC hides any objects that depend on the citus extension from pg meta
+ * class queries; it is intended to be used in vanilla tests to not break
+ * postgres test logs.
+ */
+bool HideCitusDependentObjects = false;
+
+static Node * CreateCitusDependentObjectExpr(int pgMetaTableVarno, int pgMetaTableOid);
+static List * GetCitusDependedObjectArgs(int pgMetaTableVarno, int pgMetaTableOid);
+
+/*
+ * IsPgLocksTable returns true if RTE is the pg_locks table.
+ */
+bool
+IsPgLocksTable(RangeTblEntry *rte)
+{
+	Oid pgLocksId = get_relname_relid("pg_locks", get_namespace_oid("pg_catalog", false));
+	return rte->relid == pgLocksId;
+}
+
+
+/*
+ * SetLocalHideCitusDependentObjectsDisabledWhenAlreadyEnabled disables the GUC
+ * HideCitusDependentObjects for the local transaction, but only if it is
+ * currently enabled.
+ */
+void
+SetLocalHideCitusDependentObjectsDisabledWhenAlreadyEnabled(void)
+{
+	if (!HideCitusDependentObjects)
+	{
+		return;
+	}
+
+	set_config_option("citus.hide_citus_dependent_objects", "false",
+					  (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
+					  GUC_ACTION_LOCAL, true, 0, false);
+}
+
+
+/*
+ * HideCitusDependentObjectsOnQueriesOfPgMetaTables adds a NOT is_citus_depended_object(oid, oid) expr
+ * to the quals of meta class RTEs that we are interested in.
+ */
+bool
+HideCitusDependentObjectsOnQueriesOfPgMetaTables(Node *node, void *context)
+{
+	if (!CitusHasBeenLoaded() || !HideCitusDependentObjects || node == NULL)
+	{
+		return false;
+	}
+
+	if (IsA(node, Query))
+	{
+		Query *query = (Query *) node;
+		MemoryContext queryContext = GetMemoryChunkContext(query);
+
+		/*
+		 * We process the whole rtable rather than visiting individual RangeTblEntry's
+		 * in the walker, since we need to know the varno to generate the right
+		 * filter.
+		 */
+		int varno = 0;
+		RangeTblEntry *rangeTableEntry = NULL;
+
+		foreach_ptr(rangeTableEntry, query->rtable)
+		{
+			varno++;
+
+			if (rangeTableEntry->rtekind == RTE_RELATION)
+			{
+				/* make sure the expression is in the right memory context */
+				MemoryContext originalContext = MemoryContextSwitchTo(queryContext);
+
+				Oid metaTableOid = InvalidOid;
+
+				/*
+				 * add NOT is_citus_depended_object(oid, oid) to the quals
+				 * of the RTE if it is a pg meta table that we are interested in.
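+				 *
+				 * As a rough illustration (a sketch, not captured planner output):
+				 * a query like 'SELECT relname FROM pg_class' is effectively
+				 * planned as 'SELECT relname FROM pg_class WHERE NOT
+				 * pg_catalog.is_citus_depended_object(1259, oid)', where 1259 is
+				 * RelationRelationId, i.e. the oid of pg_class itself.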
+				 */
+				switch (rangeTableEntry->relid)
+				{
+					/* pg_class */
+					case RelationRelationId:
+
+					/* pg_proc */
+					case ProcedureRelationId:
+
+					/* pg_am */
+					case AccessMethodRelationId:
+
+					/* pg_type */
+					case TypeRelationId:
+
+					/* pg_enum */
+					case EnumRelationId:
+
+					/* pg_event_trigger */
+					case EventTriggerRelationId:
+
+					/* pg_trigger */
+					case TriggerRelationId:
+
+					/* pg_rewrite */
+					case RewriteRelationId:
+
+					/* pg_attrdef */
+					case AttrDefaultRelationId:
+
+					/* pg_constraint */
+					case ConstraintRelationId:
+
+					/* pg_ts_config */
+					case TSConfigRelationId:
+
+					/* pg_ts_template */
+					case TSTemplateRelationId:
+
+					/* pg_ts_dict */
+					case TSDictionaryRelationId:
+
+					/* pg_language */
+					case LanguageRelationId:
+
+					/* pg_namespace */
+					case NamespaceRelationId:
+
+					/* pg_sequence */
+					case SequenceRelationId:
+
+					/* pg_statistic */
+					case StatisticRelationId:
+
+					/* pg_attribute */
+					case AttributeRelationId:
+
+					/* pg_index */
+					case IndexRelationId:
+
+					/* pg_aggregate */
+					case AggregateRelationId:
+					{
+						metaTableOid = rangeTableEntry->relid;
+						break;
+					}
+
+					default:
+					{
+						metaTableOid = InvalidOid;
+						break;
+					}
+				}
+
+				if (OidIsValid(metaTableOid))
+				{
+					/*
+					 * We found a valid pg meta class in the query,
+					 * so we assert the conditions below.
+					 */
+					Assert(query->jointree != NULL);
+					Assert(query->jointree->fromlist != NULL);
+
+					Node *citusDependentObjExpr =
+						CreateCitusDependentObjectExpr(varno, metaTableOid);
+
+					/*
+					 * We do not use security quals because a postgres vanilla test fails
+					 * with a change of order for its result.
+					 */
+					query->jointree->quals = make_and_qual(
+						query->jointree->quals, citusDependentObjExpr);
+				}
+
+				MemoryContextSwitchTo(originalContext);
+			}
+		}
+
+		return query_tree_walker((Query *) node,
+								 HideCitusDependentObjectsOnQueriesOfPgMetaTables,
+								 context, 0);
+	}
+
+	return expression_tree_walker(node, HideCitusDependentObjectsOnQueriesOfPgMetaTables,
+								  context);
+}
+
+
+/*
+ * CreateCitusDependentObjectExpr constructs an expression of the form:
+ * NOT pg_catalog.is_citus_depended_object(oid, oid)
+ */
+static Node *
+CreateCitusDependentObjectExpr(int pgMetaTableVarno, int pgMetaTableOid)
+{
+	/* build the call to pg_catalog.is_citus_depended_object */
+	FuncExpr *funcExpr = makeNode(FuncExpr);
+	funcExpr->funcid = CitusDependentObjectFuncId();
+	funcExpr->funcretset = false;
+	funcExpr->funcvariadic = false;
+	funcExpr->funcformat = 0;
+	funcExpr->funccollid = 0;
+	funcExpr->inputcollid = 0;
+	funcExpr->location = -1;
+	funcExpr->args = GetCitusDependedObjectArgs(pgMetaTableVarno, pgMetaTableOid);
+
+	BoolExpr *notExpr = makeNode(BoolExpr);
+	notExpr->boolop = NOT_EXPR;
+	notExpr->args = list_make1(funcExpr);
+	notExpr->location = -1;
+
+	return (Node *) notExpr;
+}
+
+
+/*
+ * GetCitusDependedObjectArgs returns the func arguments for pg_catalog.is_citus_depended_object
+ */
+static List *
+GetCitusDependedObjectArgs(int pgMetaTableVarno, int pgMetaTableOid)
+{
+	/*
+	 * Set the attribute number of the oid column we are interested in inside
+	 * the pg meta tables. For all of the pg meta tables except pg_enum and
+	 * pg_index, we access the first column (their own oid, or their relation's
+	 * oid) to get the related object's oid. For pg_enum, we access its second
+	 * column (its type's oid), because the enum depends on citus exactly when
+	 * its type does. For pg_index, we access its second column (its relation's
+	 * oid), because the index depends on citus exactly when its relation does.
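+	 * (In the catalog definitions those second columns are enumtypid for
+	 * pg_enum and indrelid for pg_index.)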
+	 */
+	AttrNumber oidAttNum = 1;
+	if (pgMetaTableOid == EnumRelationId || pgMetaTableOid == IndexRelationId)
+	{
+		oidAttNum = 2;
+	}
+
+	/* create const for meta table oid */
+	Const *metaTableOidConst = makeConst(OIDOID, -1, InvalidOid, sizeof(Oid),
+										 ObjectIdGetDatum(pgMetaTableOid),
+										 false, true);
+
+	/*
+	 * create a var for the oid that we are interested in,
+	 * col type should be regproc for the pg_aggregate table; else oid
+	 */
+	Oid varType = (pgMetaTableOid == AggregateRelationId) ? REGPROCOID : OIDOID;
+	Var *oidVar = makeVar(pgMetaTableVarno, oidAttNum,
+						  varType, -1, InvalidOid, 0);
+
+	return list_make2((Node *) metaTableOidConst, (Node *) oidVar);
+}
diff --git a/src/include/distributed/citus_depended_object.h b/src/include/distributed/citus_depended_object.h
new file mode 100644
index 000000000..61abfa68a
--- /dev/null
+++ b/src/include/distributed/citus_depended_object.h
@@ -0,0 +1,24 @@
+/*-------------------------------------------------------------------------
+ *
+ * citus_depended_object.h
+ *   Exposes functions related to hiding citus depended objects while executing
+ *   postgres vanilla tests.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef CITUS_DEPENDED_OBJECT_H
+#define CITUS_DEPENDED_OBJECT_H
+
+#include "nodes/nodes.h"
+#include "nodes/parsenodes.h"
+
+extern bool HideCitusDependentObjects;
+
+extern void SetLocalHideCitusDependentObjectsDisabledWhenAlreadyEnabled(void);
+extern bool HideCitusDependentObjectsOnQueriesOfPgMetaTables(Node *node, void *context);
+extern bool IsPgLocksTable(RangeTblEntry *rte);
+
+#endif /* CITUS_DEPENDED_OBJECT_H */
diff --git a/src/include/distributed/metadata/dependency.h b/src/include/distributed/metadata/dependency.h
index f04e3a869..c5a65319e 100644
--- a/src/include/distributed/metadata/dependency.h
+++ b/src/include/distributed/metadata/dependency.h
@@ -26,6 +26,7 @@ extern List * GetAllDependenciesForObject(const ObjectAddress *target);
 extern bool ErrorOrWarnIfAnyObjectHasUnsupportedDependency(List *objectAddresses);
 extern DeferredErrorMessage * DeferErrorIfAnyObjectHasUnsupportedDependency(const List *
 																			objectAddresses);
+extern List * GetAllCitusDependedDependenciesForObject(const ObjectAddress *target);
 extern List * OrderObjectAddressListInDependencyOrder(List *objectAddressList);
 extern bool SupportedDependencyByCitus(const ObjectAddress *address);
 extern List * GetPgDependTuplesForDependingObjects(Oid targetObjectClassId,
diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h
index cb905bbfe..900c15590 100644
--- a/src/include/distributed/metadata/distobject.h
+++ b/src/include/distributed/metadata/distobject.h
@@ -29,6 +29,7 @@ extern void UnmarkObjectDistributed(const ObjectAddress *address);
 extern bool IsTableOwnedByExtension(Oid relationId);
 extern bool IsAnyObjectAddressOwnedByExtension(const List *targets,
 											   ObjectAddress *extensionAddress);
+extern bool IsObjectAddressOwnedByCitus(const ObjectAddress *objectAddress);
 extern ObjectAddress PgGetObjectAddress(char *ttype, ArrayType *namearr,
 										ArrayType *argsarr);
 extern List * GetDistributedObjectAddressList(void);
diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h
index 81cdeee4c..92f8a4514 100644
--- a/src/include/distributed/metadata_cache.h
+++ b/src/include/distributed/metadata_cache.h
@@ -263,6 +263,7 @@ extern Oid CitusTableVisibleFuncId(void);
 extern Oid
RelationIsAKnownShardFuncId(void); extern Oid JsonbExtractPathFuncId(void); extern Oid JsonbExtractPathTextFuncId(void); +extern Oid CitusDependentObjectFuncId(void); /* enum oids */ extern Oid PrimaryNodeRoleId(void); diff --git a/src/test/regress/.gitignore b/src/test/regress/.gitignore index 8bbe973b4..bdc8b8df9 100644 --- a/src/test/regress/.gitignore +++ b/src/test/regress/.gitignore @@ -28,3 +28,24 @@ # core dumps core + +# postgres vanilla test's outputs +constraints.sql +copy.sql +create_function_0.sql +create_function_1.sql +create_function_2.sql +largeobject.sql +misc.sql +security_label.sql +tablespace.sql +constraints.out +copy.out +create_function_0.out +create_function_1.out +create_function_2.out +largeobject.out +largeobject_1.out +misc.out +security_label.out +tablespace.out diff --git a/src/test/regress/Makefile b/src/test/regress/Makefile index 6007d2508..0c3d05b4d 100644 --- a/src/test/regress/Makefile +++ b/src/test/regress/Makefile @@ -188,13 +188,8 @@ check-isolation-base: all $(isolation_test_files) $(pg_regress_multi_check) --load-extension=citus --isolationtester \ -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/base_isolation_schedule $(EXTRA_TESTS) -check-vanilla: all - # it is possible that sometimes vanilla tests will fail, which is related to postgres. - # So we try it once more if it fails to prevent some failures in our CI. - ${MAKE} check-vanilla-internal || ${MAKE} check-vanilla-internal - -check-vanilla-internal: - $(pg_regress_multi_check) --load-extension=citus --vanillatest +check-vanilla: + $(pg_regress_multi_check) --vanillatest --vanilla-dev check-multi-mx: all $(pg_regress_multi_check) --load-extension=citus \ diff --git a/src/test/regress/expected/citus_depended_object.out b/src/test/regress/expected/citus_depended_object.out new file mode 100644 index 000000000..88eca1f5a --- /dev/null +++ b/src/test/regress/expected/citus_depended_object.out @@ -0,0 +1,195 @@ +-- create the udf is_citus_depended_object that is needed for the tests +CREATE OR REPLACE FUNCTION + pg_catalog.is_citus_depended_object(oid,oid) + RETURNS bool + LANGUAGE C + AS 'citus', $$is_citus_depended_object$$; +-- execute tests in a separate namespace +CREATE SCHEMA citus_dependend_object; +SET search_path TO citus_dependend_object; +-- PG_CLASS VISIBILITY +-- check if we correctly determine whether a relation is citus dependent or not. 
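+-- the strategy below: snapshot pg_class once with the GUC off and once with it
+-- on, then take the EXCEPT of the two snapshots to find the hidden relations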
+CREATE TABLE no_hide_pg_class(relname text); +CREATE TABLE hide_pg_class(relname text); +-- create a relation that depends on noderole type which is a citus object +CREATE TABLE citus_depended_class(nrole noderole); +-- create a relation that depends on columnar access method which is a citus object +CREATE TABLE citus_depended_class2(id int); +SELECT alter_table_set_access_method('citus_depended_class2', 'columnar'); +NOTICE: creating a new table for citus_dependend_object.citus_depended_class2 +NOTICE: moving the data of citus_dependend_object.citus_depended_class2 +NOTICE: dropping the old citus_dependend_object.citus_depended_class2 +NOTICE: renaming the new table to citus_dependend_object.citus_depended_class2 + alter_table_set_access_method +--------------------------------------------------------------------- + +(1 row) + +-- create a relation that does not depend on citus +CREATE TABLE citus_independed_class(id int); +-- store all relations +SET citus.hide_citus_dependent_objects TO false; +INSERT INTO no_hide_pg_class SELECT relname FROM pg_class; +-- store all relations except citus relations +SET citus.hide_citus_dependent_objects TO true; +INSERT INTO hide_pg_class SELECT relname FROM pg_class; +-- prove that some relations are hidden or not +SELECT relname, + CASE + WHEN relname IN + ( + SELECT relname FROM no_hide_pg_class + EXCEPT + SELECT relname FROM hide_pg_class + ) THEN true + ELSE false + END AS is_hidden +FROM (VALUES ('pg_dist_shard'), ('pg_dist_placement'), ('pg_type'), ('pg_proc'), +('citus_depended_class'), ('citus_depended_class2'), ('citus_independed_class')) rels(relname); + relname | is_hidden +--------------------------------------------------------------------- + pg_dist_shard | t + pg_dist_placement | t + pg_type | f + pg_proc | f + citus_depended_class | t + citus_depended_class2 | t + citus_independed_class | f +(7 rows) + +-- PG_TYPE VISIBILITY +-- check if we correctly determine whether a type is citus dependent or not. +CREATE TABLE no_hide_pg_type(typname text); +CREATE TABLE hide_pg_type(typname text); +-- create a type that depends on noderole type which is a citus object +CREATE TYPE citus_depended_type AS (nrole noderole); +-- create a relation that does not depend on citus +CREATE TYPE citus_independed_type AS (id int); +-- store all types +SET citus.hide_citus_dependent_objects TO false; +INSERT INTO no_hide_pg_type SELECT typname FROM pg_type; +-- store all types except citus types +SET citus.hide_citus_dependent_objects TO true; +INSERT INTO hide_pg_type SELECT typname FROM pg_type; +-- prove that some types are hidden or not +SELECT typname, + CASE + WHEN typname IN + ( + SELECT typname FROM no_hide_pg_type + EXCEPT + SELECT typname FROM hide_pg_type + ) THEN true + ELSE false + END AS is_hidden +FROM (VALUES ('noderole'), ('_noderole'), ('int'), ('_int'), +('citus_depended_type'), ('citus_independed_type')) types(typname); + typname | is_hidden +--------------------------------------------------------------------- + noderole | t + _noderole | t + int | f + _int | f + citus_depended_type | t + citus_independed_type | f +(6 rows) + +-- PG_AM VISIBILITY +-- check if we correctly determine whether an access method is citus dependent or not. 
+CREATE TABLE no_hide_pg_am(amname text); +CREATE TABLE hide_pg_am(amname text); +-- store all access methods +SET citus.hide_citus_dependent_objects TO false; +INSERT INTO no_hide_pg_am SELECT amname FROM pg_am; +-- store all access methods except citus access methods +SET citus.hide_citus_dependent_objects TO true; +INSERT INTO hide_pg_am SELECT amname FROM pg_am; +-- show all hidden access methods +SELECT amname AS hidden_am FROM no_hide_pg_am +EXCEPT +SELECT amname AS hidden_am FROM hide_pg_am +ORDER BY 1; + hidden_am +--------------------------------------------------------------------- + columnar +(1 row) + +-- show all unhidden access methods +SELECT amname AS unhidden_am FROM no_hide_pg_am +EXCEPT +( + SELECT amname FROM no_hide_pg_am + EXCEPT + SELECT amname FROM hide_pg_am +) +ORDER BY 1; + unhidden_am +--------------------------------------------------------------------- + brin + btree + gin + gist + hash + heap + spgist +(7 rows) + +-- PG_PROC VISIBILITY +-- check if we correctly determine whether a procedure is citus dependent or not. +CREATE TABLE no_hide_pg_proc(proname text); +CREATE TABLE hide_pg_proc(proname text); +-- create a procedure that depends on noderole type which is a citus object +CREATE OR REPLACE PROCEDURE citus_depended_proc(nrole noderole) +LANGUAGE SQL +AS $$ +$$; +-- create a procedure that does not depend on citus +CREATE OR REPLACE PROCEDURE citus_independed_proc(id int) +LANGUAGE SQL +AS $$ +$$; +-- store all access procedures +SET citus.hide_citus_dependent_objects TO false; +INSERT INTO no_hide_pg_proc SELECT proname FROM pg_proc; +-- store all access procedures except citus procedures +SET citus.hide_citus_dependent_objects TO true; +INSERT INTO hide_pg_proc SELECT proname FROM pg_proc; +-- prove that some procedures are hidden or not +SELECT proname, + CASE + WHEN proname IN + ( + SELECT proname FROM no_hide_pg_proc + EXCEPT + SELECT proname FROM hide_pg_proc + ) THEN true + ELSE false + END AS is_hidden +FROM (VALUES ('master_add_node'), ('format'), +('citus_depended_proc'), ('citus_independed_proc')) procs(proname); + proname | is_hidden +--------------------------------------------------------------------- + master_add_node | t + format | f + citus_depended_proc | t + citus_independed_proc | f +(4 rows) + +-- drop the namespace with all its objects +DROP SCHEMA citus_dependend_object CASCADE; +NOTICE: drop cascades to 15 other objects +DETAIL: drop cascades to table no_hide_pg_class +drop cascades to table hide_pg_class +drop cascades to table citus_depended_class +drop cascades to table citus_depended_class2 +drop cascades to table citus_independed_class +drop cascades to table no_hide_pg_type +drop cascades to table hide_pg_type +drop cascades to type citus_depended_type +drop cascades to type citus_independed_type +drop cascades to table no_hide_pg_am +drop cascades to table hide_pg_am +drop cascades to table no_hide_pg_proc +drop cascades to table hide_pg_proc +drop cascades to function citus_depended_proc(noderole) +drop cascades to function citus_independed_proc(integer) diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 74cc196d4..a2bd068ba 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -46,7 +46,7 @@ test: multi_read_from_secondaries # ---------- # multi_citus_tools tests utility functions written for citus tools # ---------- -test: multi_citus_tools +test: multi_citus_tools citus_depended_object # ---------- # multi_replicate_reference_table tests 
replicating reference tables to new nodes after we add new nodes
diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl
index aec7e71d5..403879f1f 100755
--- a/src/test/regress/pg_regress_multi.pl
+++ b/src/test/regress/pg_regress_multi.pl
@@ -60,6 +60,7 @@ my $MASTER_FOLLOWERDIR = 'master-follower';
 # Option parsing
 my $isolationtester = 0;
 my $vanillatest = 0;
+my $vanillaDev = 0;
 my $followercluster = 0;
 my $bindir = "";
 my $libdir = undef;
@@ -96,6 +97,7 @@ if ($Config{osname} eq "MSWin32")
 GetOptions(
 	'isolationtester' => \$isolationtester,
 	'vanillatest' => \$vanillatest,
+	'vanilla-dev' => \$vanillaDev,
 	'follower-cluster' => \$followercluster,
 	'bindir=s' => \$bindir,
 	'libdir=s' => \$libdir,
@@ -486,6 +488,20 @@ push(@pgOptions, "citus.show_shards_for_app_name_prefixes='pg_regress'");
 # we disable slow start by default to encourage parallelism within tests
 push(@pgOptions, "citus.executor_slow_start_interval=0ms");
 
+###
+# we set some GUCs to not break postgres vanilla tests
+#
+# NOTE: we do not want to set the option unconditionally right now because
+# some vanilla tests currently fail. We will remove the vanillaDev flag
+# once the pg vanilla tests are fully supported with the citus
+# extension created.
+###
+if(!$vanillaDev && $vanillatest)
+{
+	# we enable hiding the citus dependent objects from pg meta class queries so as not to break postgres vanilla test behaviour
+	push(@pgOptions, "citus.hide_citus_dependent_objects=true");
+}
+
 if ($useMitmproxy)
 {
   # make tests reproducible by never trying to negotiate ssl
@@ -994,26 +1010,88 @@ my $startTime = time();
 
 my $exitcode = 0;
 
+sub PrepForVanillaTest
+{
+	###
+	# We want to add the is_citus_depended_object function to the default db.
+	# But without the use-existing flag, pg_regress drops the default db if it
+	# exists and recreates it. Thus, we set the use-existing flag and manually
+	# create the default db, the citus extension, and the
+	# is_citus_depended_object function.
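+	# Because pg_regress is invoked below with --use-existing and an explicit
+	# --dbname, it connects to this prepared database instead of creating a
+	# fresh one.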
+	###
+
+	my $dbName = shift;
+
+	# prepare tablespace folder
+	rmdir "./testtablespace";
+	mkdir "./testtablespace";
+
+	# create default db
+	system(catfile($bindir, "psql"),
+	       ('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", "postgres",
+		'-c', "CREATE DATABASE $dbName;")) == 0
+		or die "Could not create $dbName database on master";
+
+	# alter default db's lc_monetary to C
+	system(catfile($bindir, "psql"),
+	       ('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", $dbName,
+		'-c', "ALTER DATABASE $dbName SET lc_monetary TO 'C';")) == 0
+		or die "Could not alter lc_monetary of $dbName database on master";
+
+	if ($vanillaDev)
+	{
+		return;
+	}
+
+	# create extension citus
+	system(catfile($bindir, "psql"),
+	       ('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", $dbName,
+		'-c', "CREATE EXTENSION citus;")) == 0
+		or die "Could not create citus extension on master";
+
+	# we do not want to expose this udf outside of the vanilla tests
+	my $citus_depended_object_def = "CREATE OR REPLACE FUNCTION
+	pg_catalog.is_citus_depended_object(oid,oid)
+	RETURNS bool
+	LANGUAGE C
+	AS 'citus', \$\$is_citus_depended_object\$\$;";
+	system(catfile($bindir, "psql"),
+	       ('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", $dbName,
+		'-c', $citus_depended_object_def)) == 0
+		or die "Could not create FUNCTION is_citus_depended_object on master";
+}
+
 # Finally run the tests
 if ($vanillatest)
 {
-    $ENV{PGHOST} = $host;
-    $ENV{PGPORT} = $masterPort;
-    $ENV{PGUSER} = $user;
     $ENV{VANILLATEST} = "1";
 
+    my $dbName = "regression";
+    PrepForVanillaTest($dbName);
+
     if (-f "$vanillaSchedule")
     {
-        rmdir "./testtablespace";
-        mkdir "./testtablespace";
-
         my $pgregressdir=catfile(dirname("$pgxsdir"), "regress");
-        $exitcode = system("$plainRegress", ("--inputdir", $pgregressdir),
-                           ("--schedule", catfile("$pgregressdir", "parallel_schedule")))
+        $exitcode = system("$plainRegress",
+                           ("--inputdir", $pgregressdir),
+                           ("--schedule", catfile("$pgregressdir", "parallel_schedule")),
+                           ("--use-existing"),
+                           ("--host","$host"),
+                           ("--port","$masterPort"),
+                           ("--user","$user"),
+                           ("--dbname", "$dbName"))
     }
     else
     {
-        $exitcode = system("make", ("-C", catfile("$postgresBuilddir", "src", "test", "regress"), "installcheck-parallel"))
+        my $pgregressdir=catfile("$postgresSrcdir", "src", "test", "regress");
+        $exitcode = system("$plainRegress",
+                           ("--inputdir", $pgregressdir),
+                           ("--schedule", catfile("$pgregressdir", "parallel_schedule")),
+                           ("--use-existing"),
+                           ("--host","$host"),
+                           ("--port","$masterPort"),
+                           ("--user","$user"),
+                           ("--dbname", "$dbName"))
     }
 }
 elsif ($isolationtester)
diff --git a/src/test/regress/sql/citus_depended_object.sql b/src/test/regress/sql/citus_depended_object.sql
new file mode 100644
index 000000000..4f35acb1e
--- /dev/null
+++ b/src/test/regress/sql/citus_depended_object.sql
@@ -0,0 +1,151 @@
+-- create the udf is_citus_depended_object that is needed for the tests
+CREATE OR REPLACE FUNCTION
+	pg_catalog.is_citus_depended_object(oid,oid)
+	RETURNS bool
+	LANGUAGE C
+	AS 'citus', $$is_citus_depended_object$$;
+
+-- execute tests in a separate namespace
+CREATE SCHEMA citus_dependend_object;
+SET search_path TO citus_dependend_object;
+
+-- PG_CLASS VISIBILITY
+-- check if we correctly determine whether a relation is citus dependent or not.
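+-- the strategy below: snapshot pg_class once with the GUC off and once with it
+-- on, then take the EXCEPT of the two snapshots to find the hidden relations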
+CREATE TABLE no_hide_pg_class(relname text); +CREATE TABLE hide_pg_class(relname text); + +-- create a relation that depends on noderole type which is a citus object +CREATE TABLE citus_depended_class(nrole noderole); + +-- create a relation that depends on columnar access method which is a citus object +CREATE TABLE citus_depended_class2(id int); +SELECT alter_table_set_access_method('citus_depended_class2', 'columnar'); + +-- create a relation that does not depend on citus +CREATE TABLE citus_independed_class(id int); + +-- store all relations +SET citus.hide_citus_dependent_objects TO false; +INSERT INTO no_hide_pg_class SELECT relname FROM pg_class; + +-- store all relations except citus relations +SET citus.hide_citus_dependent_objects TO true; +INSERT INTO hide_pg_class SELECT relname FROM pg_class; + +-- prove that some relations are hidden or not +SELECT relname, + CASE + WHEN relname IN + ( + SELECT relname FROM no_hide_pg_class + EXCEPT + SELECT relname FROM hide_pg_class + ) THEN true + ELSE false + END AS is_hidden +FROM (VALUES ('pg_dist_shard'), ('pg_dist_placement'), ('pg_type'), ('pg_proc'), +('citus_depended_class'), ('citus_depended_class2'), ('citus_independed_class')) rels(relname); + +-- PG_TYPE VISIBILITY +-- check if we correctly determine whether a type is citus dependent or not. +CREATE TABLE no_hide_pg_type(typname text); +CREATE TABLE hide_pg_type(typname text); + +-- create a type that depends on noderole type which is a citus object +CREATE TYPE citus_depended_type AS (nrole noderole); + +-- create a relation that does not depend on citus +CREATE TYPE citus_independed_type AS (id int); + +-- store all types +SET citus.hide_citus_dependent_objects TO false; +INSERT INTO no_hide_pg_type SELECT typname FROM pg_type; + +-- store all types except citus types +SET citus.hide_citus_dependent_objects TO true; +INSERT INTO hide_pg_type SELECT typname FROM pg_type; + +-- prove that some types are hidden or not +SELECT typname, + CASE + WHEN typname IN + ( + SELECT typname FROM no_hide_pg_type + EXCEPT + SELECT typname FROM hide_pg_type + ) THEN true + ELSE false + END AS is_hidden +FROM (VALUES ('noderole'), ('_noderole'), ('int'), ('_int'), +('citus_depended_type'), ('citus_independed_type')) types(typname); + +-- PG_AM VISIBILITY +-- check if we correctly determine whether an access method is citus dependent or not. +CREATE TABLE no_hide_pg_am(amname text); +CREATE TABLE hide_pg_am(amname text); + +-- store all access methods +SET citus.hide_citus_dependent_objects TO false; +INSERT INTO no_hide_pg_am SELECT amname FROM pg_am; + +-- store all access methods except citus access methods +SET citus.hide_citus_dependent_objects TO true; +INSERT INTO hide_pg_am SELECT amname FROM pg_am; + +-- show all hidden access methods +SELECT amname AS hidden_am FROM no_hide_pg_am +EXCEPT +SELECT amname AS hidden_am FROM hide_pg_am +ORDER BY 1; + +-- show all unhidden access methods +SELECT amname AS unhidden_am FROM no_hide_pg_am +EXCEPT +( + SELECT amname FROM no_hide_pg_am + EXCEPT + SELECT amname FROM hide_pg_am +) +ORDER BY 1; + +-- PG_PROC VISIBILITY +-- check if we correctly determine whether a procedure is citus dependent or not. 
+CREATE TABLE no_hide_pg_proc(proname text);
+CREATE TABLE hide_pg_proc(proname text);
+
+-- create a procedure that depends on noderole type which is a citus object
+CREATE OR REPLACE PROCEDURE citus_depended_proc(nrole noderole)
+LANGUAGE SQL
+AS $$
+$$;
+
+-- create a procedure that does not depend on citus
+CREATE OR REPLACE PROCEDURE citus_independed_proc(id int)
+LANGUAGE SQL
+AS $$
+$$;
+
+-- store all access procedures
+SET citus.hide_citus_dependent_objects TO false;
+INSERT INTO no_hide_pg_proc SELECT proname FROM pg_proc;
+
+-- store all access procedures except citus procedures
+SET citus.hide_citus_dependent_objects TO true;
+INSERT INTO hide_pg_proc SELECT proname FROM pg_proc;
+
+-- prove that some procedures are hidden or not
+SELECT proname,
+	CASE
+		WHEN proname IN
+		(
+			SELECT proname FROM no_hide_pg_proc
+			EXCEPT
+			SELECT proname FROM hide_pg_proc
+		) THEN true
+		ELSE false
+	END AS is_hidden
+FROM (VALUES ('master_add_node'), ('format'),
+('citus_depended_proc'), ('citus_independed_proc')) procs(proname);
+
+-- drop the namespace with all its objects
+DROP SCHEMA citus_dependend_object CASCADE;

From dc30ee874a0fcb972c32d3cd69b34159e772d8e7 Mon Sep 17 00:00:00 2001
From: Nils Dijk
Date: Wed, 6 Jul 2022 13:18:08 +0200
Subject: [PATCH 02/38] use images that are built with the same libpq version
 as the minor postgres version

---
 .circleci/config.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index f9c056a83..0f2637a48 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -6,7 +6,7 @@ orbs:
 parameters:
   image_suffix:
     type: string
-    default: '-vabaecad'
+    default: '-v5579d00'
   pg13_version:
     type: string
     default: '13.4'

From f944f97d01960ea55d63169a7646bff709fcd580 Mon Sep 17 00:00:00 2001
From: Hanefi Onaldi
Date: Thu, 21 Jul 2022 16:11:11 +0300
Subject: [PATCH 03/38] Normalize messages from different libpq versions

Historically we have been testing with the 'latest' version of libpq
when the CI images were built. This has the downside that rebuilding the
images often breaks our tests due to different errors returned from
libpq.

With this change we will actually test with a stable version of libpq
that is based on the postgres minor version that we test against.

This will make it easier to maintain postgres images over time, as well
as to run _all_ tests locally, where we change libpq in sync with the
postgres server version.
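For example, where a recent libpq reports:

    ERROR:  server closed the connection unexpectedly
            This probably means the server terminated abnormally
            before or while processing the request.

an older libpq reports only:

    ERROR:  connection not open

The normalization rules added below rewrite the former into the latter,
so the expected test output is the same with either libpq version.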
--- src/test/regress/bin/normalize.sed | 11 + .../expected/failure_add_disable_node.out | 4 +- .../regress/expected/failure_copy_on_hash.out | 22 +- .../expected/failure_copy_to_reference.out | 23 +- ...ure_create_distributed_table_non_empty.out | 61 +--- .../failure_create_index_concurrently.out | 12 +- .../failure_create_reference_table.out | 26 +- .../regress/expected/failure_create_table.out | 58 +--- .../regress/expected/failure_cte_subquery.out | 24 +- src/test/regress/expected/failure_ddl.out | 79 +---- .../expected/failure_distributed_results.out | 4 +- .../failure_failover_to_local_execution.out | 4 +- .../failure_insert_select_pushdown.out | 8 +- .../failure_insert_select_repartition.out | 28 +- .../failure_insert_select_via_coordinator.out | 24 +- .../regress/expected/failure_multi_dml.out | 33 +- .../expected/failure_multi_row_insert.out | 20 +- .../failure_multi_shard_update_delete.out | 68 +--- .../expected/failure_mx_metadata_sync.out | 4 +- .../failure_offline_move_shard_placement.out | 16 +- .../failure_on_create_subscription.out | 4 +- .../failure_on_create_subscription_0.out | 4 +- .../failure_online_move_shard_placement.out | 89 ++--- .../regress/expected/failure_ref_tables.out | 12 +- .../failure_replicated_partitions.out | 4 +- .../regress/expected/failure_savepoints.out | 311 +++--------------- .../regress/expected/failure_single_mod.out | 12 +- .../expected/failure_single_select.out | 24 +- .../expected/failure_tenant_isolation.out | 36 +- .../regress/expected/failure_truncate.out | 70 +--- src/test/regress/expected/failure_vacuum.out | 23 +- .../regress/expected/failure_vacuum_1.out | 4 - .../isolation_master_update_node_0.out | 2 - .../isolation_master_update_node_2.out | 2 - 34 files changed, 242 insertions(+), 884 deletions(-) diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 61d66ac37..aab3885ca 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -100,6 +100,17 @@ s/of relation ".*" violates not-null constraint/violates not-null constraint/g s/partition ".*" would be violated by some row/partition would be violated by some row/g s/of relation ".*" contains null values/contains null values/g +#if (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) +# (This is not preprocessor directive, but a reminder for the developer that will drop PG13 support ) +# libpq message changes for minor versions of pg13 +# We ignore multiline error messages, and substitute first line with a single line +# alternative that is used in some older libpq versions. 
+s/(ERROR: |WARNING: |error:) server closed the connection unexpectedly/\1 connection not open/g +/^\s*This probably means the server terminated abnormally$/d +/^\s*before or while processing the request.$/d +/^\s*connection not open$/d +#endif /* (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) */ + # intermediate_results s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g diff --git a/src/test/regress/expected/failure_add_disable_node.out b/src/test/regress/expected/failure_add_disable_node.out index 76952767e..5bd4cc343 100644 --- a/src/test/regress/expected/failure_add_disable_node.out +++ b/src/test/regress/expected/failure_add_disable_node.out @@ -106,9 +106,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA").kill()'); (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx ERROR: failure on connection marked as essential: localhost:xxxxx -- verify node is not activated diff --git a/src/test/regress/expected/failure_copy_on_hash.out b/src/test/regress/expected/failure_copy_on_hash.out index 9707d2d2e..24350f707 100644 --- a/src/test/regress/expected/failure_copy_on_hash.out +++ b/src/test/regress/expected/failure_copy_on_hash.out @@ -36,9 +36,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -175,10 +173,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -238,10 +233,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()'); BEGIN; \COPY test_table FROM stdin delimiter ','; ROLLBACK; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -279,9 +271,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table_2 FROM stdin delimiter ','; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -324,9 +314,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); (1 row) \COPY test_table_2 FROM stdin delimiter ','; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx COPY test_table_2, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/expected/failure_copy_to_reference.out b/src/test/regress/expected/failure_copy_to_reference.out index a26e7290f..8c610572e 100644 --- a/src/test/regress/expected/failure_copy_to_reference.out +++ b/src/test/regress/expected/failure_copy_to_reference.out @@ -116,9 +116,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); @@ -230,9 +228,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()'); (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); @@ -261,10 +257,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); (1 row) \copy test_table FROM STDIN DELIMITER ',' -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -395,10 +388,7 @@ BEGIN; SET LOCAL client_min_messages TO WARNING; \copy test_table FROM STDIN DELIMITER ',' ROLLBACK; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -431,10 +421,7 @@ BEGIN; SET LOCAL client_min_messages TO WARNING; \copy test_table FROM STDIN DELIMITER ',' ROLLBACK; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out index 3a88e0192..ef7322d3b 100644 --- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out +++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out @@ -28,9 +28,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -60,9 +58,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count @@ -119,9 +115,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -183,9 +177,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -200,9 +192,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count @@ -275,10 +265,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count @@ -468,9 +455,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -511,9 +496,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count @@ -555,9 +538,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_comma (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -591,9 +572,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -658,9 +637,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -722,9 +699,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -739,9 +714,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count @@ -947,9 +920,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -983,9 +954,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count diff --git a/src/test/regress/expected/failure_create_index_concurrently.out b/src/test/regress/expected/failure_create_index_concurrently.out index 1a0dc4dec..a198ddc70 100644 --- a/src/test/regress/expected/failure_create_index_concurrently.out +++ b/src/test/regress/expected/failure_create_index_concurrently.out @@ -28,9 +28,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -63,9 +61,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -143,9 +139,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP INDEX CONCURRENTLY").kill()'); DROP INDEX CONCURRENTLY IF EXISTS idx_index_test; WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_create_reference_table.out b/src/test/regress/expected/failure_create_reference_table.out index 432290c4b..a4cca6817 100644 --- a/src/test/regress/expected/failure_create_reference_table.out +++ b/src/test/regress/expected/failure_create_reference_table.out @@ -25,9 +25,7 @@ SELECT citus.mitmproxy('conn.onQuery().kill()'); (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard_placement; count --------------------------------------------------------------------- @@ -42,9 +40,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").kill()'); (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard_placement; count --------------------------------------------------------------------- @@ -74,9 +70,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").kill()'); (1 row) SELECT create_reference_table('ref_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard_placement; count --------------------------------------------------------------------- @@ -147,10 +141,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki (1 row) SELECT create_reference_table('ref_table'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT count(*) FROM pg_dist_shard_placement; count @@ -207,9 +198,7 @@ SELECT citus.mitmproxy('conn.onQuery().kill()'); BEGIN; SELECT create_reference_table('ref_table'); -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx ERROR: failure on connection marked as essential: localhost:xxxxx COMMIT; @@ -232,10 +221,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut (1 row) ROLLBACK; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out index 4a575ed19..af3bf48af 100644 --- a/src/test/regress/expected/failure_create_table.out +++ b/src/test/regress/expected/failure_create_table.out @@ -22,9 +22,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -55,9 +53,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -90,9 +86,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -120,9 +114,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_comman (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -153,9 +145,7 @@ BEGIN; (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -223,9 +213,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -280,10 +268,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -353,9 +338,7 @@ SELECT citus.mitmproxy('conn.kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -389,9 +372,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -465,9 +446,7 @@ SELECT citus.mitmproxy('conn.kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -497,9 +476,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -533,9 +510,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -630,9 +605,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard; count --------------------------------------------------------------------- @@ -666,10 +639,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").k (1 row) SELECT master_create_worker_shards('test_table_2', 4, 2); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_cte_subquery.out b/src/test/regress/expected/failure_cte_subquery.out index 98b1a0b04..19fb11f37 100644 --- a/src/test/regress/expected/failure_cte_subquery.out +++ b/src/test/regress/expected/failure_cte_subquery.out @@ -54,9 +54,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- kill at the second copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()'); @@ -88,9 +86,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- kill at the third copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").kill()'); mitmproxy @@ -121,9 +117,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- cancel at the first copy (push) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy @@ -260,9 +254,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -289,9 +281,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); @@ -375,9 +365,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open END; RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/expected/failure_ddl.out b/src/test/regress/expected/failure_ddl.out index 9a30156c4..9658c9346 100644 --- a/src/test/regress/expected/failure_ddl.out +++ b/src/test/regress/expected/failure_ddl.out @@ -36,9 +36,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -69,10 +67,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx ERROR: failure on connection marked as essential: localhost:xxxxx SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; @@ -104,9 +99,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- show that we've never commited the changes SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg @@ -195,29 +188,15 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: failed to commit transaction on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: failed to commit transaction on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -273,15 +252,9 @@ BEGIN; SET LOCAL client_min_messages TO WARNING; ALTER TABLE test_table DROP COLUMN new_column; ROLLBACK; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -- now cancel just after the worker sends response to -- but Postgres doesn't accepts interrupts during COMMIT and ROLLBACK @@ -338,9 +311,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -401,9 +372,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -433,10 +402,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -706,9 +672,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -769,9 +733,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -801,10 +763,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -1062,9 +1021,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- kill as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).kill()'); mitmproxy @@ -1073,9 +1030,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").aft (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- cancel as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).cancel(' || pg_backend_pid() || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_distributed_results.out b/src/test/regress/expected/failure_distributed_results.out index fa2fa5abc..fc97c9af6 100644 --- a/src/test/regress/expected/failure_distributed_results.out +++ b/src/test/regress/expected/failure_distributed_results.out @@ -86,9 +86,7 @@ CREATE TABLE distributed_result_info AS SELECT resultId, nodeport, rowcount, targetShardId, targetShardIndex FROM partition_task_list_results('test', $$ SELECT * FROM source_table $$, 'target_table') NATURAL JOIN pg_dist_node; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT * FROM distributed_result_info ORDER BY resultId; resultid | nodeport | rowcount | targetshardid | targetshardindex --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_failover_to_local_execution.out b/src/test/regress/expected/failure_failover_to_local_execution.out index 0b5aebccd..0ecc98111 100644 --- a/src/test/regress/expected/failure_failover_to_local_execution.out +++ b/src/test/regress/expected/failure_failover_to_local_execution.out @@ -98,9 +98,7 @@ NOTICE: issuing SELECT count(*) AS count FROM failure_failover_to_local_executi DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT count(*) AS count FROM failure_failover_to_local_execution.failover_to_local_1980003 failover_to_local WHERE true DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open NOTICE: executing the command locally: SELECT count(*) AS count FROM failure_failover_to_local_execution.failover_to_local_1980000 failover_to_local WHERE true NOTICE: executing the command locally: SELECT count(*) AS count FROM failure_failover_to_local_execution.failover_to_local_1980002 failover_to_local WHERE true count diff --git a/src/test/regress/expected/failure_insert_select_pushdown.out b/src/test/regress/expected/failure_insert_select_pushdown.out index 1661b6ff7..ed461d040 100644 --- a/src/test/regress/expected/failure_insert_select_pushdown.out +++ b/src/test/regress/expected/failure_insert_select_pushdown.out @@ -44,9 +44,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown" (1 row) INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -97,9 +95,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown" (1 row) INSERT INTO events_table SELECT * FROM events_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_insert_select_repartition.out b/src/test/regress/expected/failure_insert_select_repartition.out index c634cfff8..0911ae5a8 100644 --- a/src/test/regress/expected/failure_insert_select_repartition.out +++ b/src/test/regress/expected/failure_insert_select_repartition.out @@ -55,9 +55,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill (1 row) INSERT INTO target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -70,9 +68,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill (1 row) INSERT INTO target_table SELECT * FROM replicated_source_table; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -101,9 +97,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="fetch_intermediate_results").kill()' (1 row) INSERT INTO target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -116,9 +110,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="fetch_intermediate_results").kill()' (1 row) INSERT INTO target_table SELECT * FROM replicated_source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -136,9 +128,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()') (1 row) INSERT INTO target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -151,9 +141,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()') (1 row) INSERT INTO target_table SELECT * FROM replicated_source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -170,9 +158,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()') (1 row) INSERT INTO replicated_target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT * FROM replicated_target_table; a | b --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_insert_select_via_coordinator.out b/src/test/regress/expected/failure_insert_select_via_coordinator.out index b46a73c27..d5d2e9b59 100644 --- a/src/test/regress/expected/failure_insert_select_via_coordinator.out +++ b/src/test/regress/expected/failure_insert_select_via_coordinator.out @@ -53,9 +53,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); (1 row) INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); @@ -65,9 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").ki (1 row) INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); @@ -109,9 +105,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); (1 row) INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); @@ -121,9 +115,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").ki (1 row) INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); @@ -167,9 +159,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()'); (1 row) INSERT INTO events_reference_distributed SELECT * FROM events_reference; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- kill data push SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()'); @@ -179,9 +169,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").ki (1 row) INSERT INTO events_reference_distributed SELECT * FROM events_reference; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancel coordinator pull query SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); diff --git a/src/test/regress/expected/failure_multi_dml.out b/src/test/regress/expected/failure_multi_dml.out index 612b441c7..eb336894f 100644 --- a/src/test/regress/expected/failure_multi_dml.out +++ b/src/test/regress/expected/failure_multi_dml.out @@ -33,9 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").kill()'); BEGIN; DELETE FROM dml_test WHERE id = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open DELETE FROM dml_test WHERE id = 2; ERROR: current transaction is aborted, commands ignored until end of transaction block INSERT INTO dml_test VALUES (5, 'Epsilon'); @@ -95,9 +93,7 @@ BEGIN; DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open UPDATE dml_test SET name = 'alpha' WHERE id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block UPDATE dml_test SET name = 'gamma' WHERE id = 3; @@ -152,9 +148,7 @@ DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); UPDATE dml_test SET name = 'alpha' WHERE id = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open UPDATE dml_test SET name = 'gamma' WHERE id = 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; @@ -218,10 +212,7 @@ COMMIT; '], false ); -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx while executing command on localhost:xxxxx master_run_on_worker @@ -394,17 +385,10 @@ INSERT INTO dml_test VALUES (5, 'Epsilon'); UPDATE dml_test SET name = 'alpha' WHERE id = 1; UPDATE dml_test SET name = 'gamma' WHERE id = 3; COMMIT; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: failed to commit transaction on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -- all changes should be committed because we injected -- the failure on the COMMIT time. And, we should not @@ -470,10 +454,7 @@ INSERT INTO dml_test VALUES (5, 'Epsilon'); UPDATE dml_test SET name = 'alpha' WHERE id = 1; UPDATE dml_test SET name = 'gamma' WHERE id = 3; COMMIT; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx --- shouldn't see any changes after failed COMMIT SELECT * FROM dml_test ORDER BY id ASC; diff --git a/src/test/regress/expected/failure_multi_row_insert.out b/src/test/regress/expected/failure_multi_row_insert.out index cdc0d82af..8948be94e 100644 --- a/src/test/regress/expected/failure_multi_row_insert.out +++ b/src/test/regress/expected/failure_multi_row_insert.out @@ -43,9 +43,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- this test is broken, see https://github.com/citusdata/citus/issues/2460 -- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); -- INSERT INTO distributed_table VALUES (1,4), (1,5), (1,6); @@ -57,9 +55,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,7), (5,8); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- this test is broken, see https://github.com/citusdata/citus/issues/2460 -- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); -- INSERT INTO distributed_table VALUES (1,9), (5,10); @@ -71,9 +67,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,11), (6,12); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -90,9 +84,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()'); (1 row) INSERT INTO distributed_table VALUES (1,15), (6,16); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -109,9 +101,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (2,19),(1,20); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_multi_shard_update_delete.out b/src/test/regress/expected/failure_multi_shard_update_delete.out index 5be0980f1..24cb895ea 100644 --- a/src/test/regress/expected/failure_multi_shard_update_delete.out +++ b/src/test/regress/expected/failure_multi_shard_update_delete.out @@ -58,9 +58,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -76,9 +74,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005"). (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -138,9 +134,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -156,9 +150,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill( (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -210,9 +202,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -228,9 +218,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005"). 
(1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -290,9 +278,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -308,9 +294,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill( (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -380,9 +364,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); (1 row) DELETE FROM r1 WHERE a = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b2 @@ -397,9 +379,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b2 @@ -447,9 +427,7 @@ UPDATE t3 SET c = q.c FROM ( SELECT b, max(c) as c FROM t2 GROUP BY b) q WHERE t3.b = q.b RETURNING *; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); @@ -481,9 +459,7 @@ UPDATE t3 SET c = q.c FROM ( SELECT b, max(c) as c FROM t2 GROUP BY b) q WHERE t3.b = q.b RETURNING *; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -539,9 +515,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill( (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 @@ -573,9 +547,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; @@ -591,9 +563,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO (1 row) UPDATE t3 SET b = 1 WHERE b = 2 RETURNING *; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 @@ -608,9 +578,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 @@ -642,9 +610,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index 7d667759d..7b4c04ff8 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -139,9 +139,7 @@ SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadat (1 row) SELECT create_distributed_table('t2', 'id'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_shard_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_offline_move_shard_placement.out b/src/test/regress/expected/failure_offline_move_shard_placement.out index 48aed1169..a6ecee18e 100644 --- a/src/test/regress/expected/failure_offline_move_shard_placement.out +++ b/src/test/regress/expected/failure_offline_move_shard_placement.out @@ -61,9 +61,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard_offl (1 row) SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on sanity checks SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard_offline.t CASCADE").cancel(' || :pid || ')'); @@ -82,9 +80,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard_offline.t"). (1 row) SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on move_shard table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard_offline.t").cancel(' || :pid || ')'); @@ -103,9 +99,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").kill( (1 row) SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on blocking append_table_to_shard operation on target node SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").cancel(' || :pid || ')'); @@ -124,9 +118,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT").kill()'); (1 row) SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on adding constraints on target node SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT").cancel(' || :pid || ')'); diff --git a/src/test/regress/expected/failure_on_create_subscription.out b/src/test/regress/expected/failure_on_create_subscription.out index 5ffd2a6b3..abab38a77 100644 --- a/src/test/regress/expected/failure_on_create_subscription.out +++ b/src/test/regress/expected/failure_on_create_subscription.out @@ -49,9 +49,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- Verify that the shard is not moved and the number of rows are still 100k SELECT * FROM shards_in_workers; diff --git a/src/test/regress/expected/failure_on_create_subscription_0.out b/src/test/regress/expected/failure_on_create_subscription_0.out index 15b894f27..7ea3ee23f 100644 --- a/src/test/regress/expected/failure_on_create_subscription_0.out +++ b/src/test/regress/expected/failure_on_create_subscription_0.out @@ -51,9 +51,7 @@ SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost' WARNING: could not drop the replication slot "citus_shard_move_subscription" on publisher DETAIL: The error was: ERROR: replication slot "citus_shard_move_subscription" does not exist CONTEXT: while executing command on localhost:xxxxx -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- Verify that the shard is not moved and the number of rows are still 100k SELECT * FROM shards_in_workers; diff --git a/src/test/regress/expected/failure_online_move_shard_placement.out b/src/test/regress/expected/failure_online_move_shard_placement.out index 5fa344897..269b3e33e 100644 --- a/src/test/regress/expected/failure_online_move_shard_placement.out +++ b/src/test/regress/expected/failure_online_move_shard_placement.out @@ -49,9 +49,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard.t CA (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on sanity checks SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard.t CASCADE").cancel(' || :pid || ')'); @@ -70,9 +68,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard.t").kill()') (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on move_shard table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard.t").cancel(' || :pid || ')'); @@ -91,9 +87,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT count\(\*\) FROM pg_subscrip (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on polling subscription state SELECT citus.mitmproxy('conn.onQuery(query="^SELECT count\(\*\) FROM pg_subscription_rel").cancel(' || :pid || ')'); @@ -112,9 +106,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT sum").kill()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on getting subscriber state SELECT citus.mitmproxy('conn.onQuery(query="^SELECT sum").cancel(' || :pid || ')'); @@ -133,9 +125,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT min\(latest_end_lsn").kill() (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on polling last write-ahead log location reported to origin WAL sender SELECT citus.mitmproxy('conn.onQuery(query="^SELECT min\(latest_end_lsn").cancel(' || :pid || ')'); @@ -154,28 +144,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DROP SUBSCRIPTION").kill()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- cancellation on dropping subscription SELECT citus.mitmproxy('conn.onQuery(query="^DROP SUBSCRIPTION").cancel(' || :pid || ')'); mitmproxy @@ -193,9 +170,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="t_pkey").kill()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on creating the primary key SELECT citus.mitmproxy('conn.onQuery(query="t_pkey").cancel(' || :pid || ')'); @@ -214,19 +189,11 @@ SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -254,19 +221,11 @@ SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -288,19 +247,11 @@ SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- Verify that the shard is not moved and the number of rows are still 100k SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_ref_tables.out b/src/test/regress/expected/failure_ref_tables.out index 79f79b09d..6485691af 100644 --- a/src/test/regress/expected/failure_ref_tables.out +++ b/src/test/regress/expected/failure_ref_tables.out @@ -33,9 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO ref_table VALUES (5, 6); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM ref_table WHERE key=5; count --------------------------------------------------------------------- @@ -50,9 +48,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); (1 row) UPDATE ref_table SET key=7 RETURNING value; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM ref_table WHERE key=7; count --------------------------------------------------------------------- @@ -69,9 +65,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); BEGIN; DELETE FROM ref_table WHERE key=5; UPDATE ref_table SET key=value; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT COUNT(*) FROM ref_table WHERE key=value; count diff --git a/src/test/regress/expected/failure_replicated_partitions.out b/src/test/regress/expected/failure_replicated_partitions.out index 62f3ba275..4ae2d604c 100644 --- a/src/test/regress/expected/failure_replicated_partitions.out +++ b/src/test/regress/expected/failure_replicated_partitions.out @@ -28,9 +28,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO partitioned_table VALUES (0, 0); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- use both placements SET citus.task_assignment_policy TO "round-robin"; -- the results should be the same diff --git a/src/test/regress/expected/failure_savepoints.out b/src/test/regress/expected/failure_savepoints.out index b51762e08..fb4c870bb 100644 --- a/src/test/regress/expected/failure_savepoints.out +++ b/src/test/regress/expected/failure_savepoints.out @@ -38,40 +38,14 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()'); BEGIN; INSERT INTO artists VALUES (5, 'Asher Lev'); SAVEPOINT s1; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx DELETE FROM artists WHERE id=4; ERROR: current transaction is aborted, commands ignored until end of transaction block @@ -97,42 +71,16 @@ SAVEPOINT s1; DELETE FROM artists WHERE id=4; RELEASE SAVEPOINT s1; WARNING: AbortSubTransaction while in COMMIT state -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: savepoint "savepoint_2" does not exist CONTEXT: while executing command on localhost:xxxxx -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx ROLLBACK; SELECT * FROM artists WHERE id IN (4, 5); @@ -153,16 +101,9 @@ INSERT INTO artists VALUES (5, 'Asher Lev'); SAVEPOINT s1; DELETE FROM artists WHERE id=4; ROLLBACK TO SAVEPOINT s1; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx COMMIT; ERROR: failure on connection marked as essential: localhost:xxxxx @@ -187,40 +128,14 @@ SAVEPOINT s2; INSERT INTO artists VALUES (5, 'Jacob Kahn'); RELEASE SAVEPOINT s2; WARNING: AbortSubTransaction while in COMMIT state -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open -connection not open -connection not open -connection not open +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx COMMIT; SELECT * FROM artists WHERE id IN (4, 5); @@ -243,16 +158,9 @@ ROLLBACK TO SAVEPOINT s1; SAVEPOINT s2; DELETE FROM artists WHERE id=5; ROLLBACK TO SAVEPOINT s2; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx COMMIT; ERROR: failure on connection marked as essential: localhost:xxxxx @@ -299,46 +207,12 @@ INSERT INTO artists VALUES (6, 'John J. Audubon'); INSERT INTO artists VALUES (7, 'Emily Carr'); INSERT INTO artists VALUES (7, 'Emily Carr'); ROLLBACK TO SAVEPOINT s1; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open -connection not open -connection not open -connection not open -connection not open -connection not open +WARNING: connection not open +WARNING: connection not open +WARNING: connection not open +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection not open +WARNING: connection not open COMMIT; ERROR: failure on connection marked as essential: localhost:xxxxx SELECT * FROM artists WHERE id=6; @@ -370,37 +244,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()'); BEGIN; INSERT INTO researchers VALUES (7, 4, 'Jan Plaza'); SAVEPOINT s1; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -connection not open -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection not open +WARNING: connection not open +ERROR: connection not open INSERT INTO researchers VALUES (8, 4, 'Alonzo Church'); ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK TO s1; @@ -435,15 +283,8 @@ INSERT INTO researchers VALUES (7, 4, 'Jan Plaza'); SAVEPOINT s1; INSERT INTO researchers VALUES (8, 4, 'Alonzo Church'); ROLLBACK TO s1; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open +WARNING: connection not open +WARNING: connection not open RELEASE SAVEPOINT s1; COMMIT; ERROR: failure on connection marked as essential: localhost:xxxxx @@ -476,38 +317,12 @@ INSERT INTO researchers VALUES (8, 4, 'Alonzo Church'); ROLLBACK TO s1; RELEASE SAVEPOINT s1; WARNING: AbortSubTransaction while in COMMIT state -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -connection not open +WARNING: connection not open +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection not open +WARNING: connection not open WARNING: savepoint "savepoint_3" does not exist -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open COMMIT; -- should see correct results from healthy placement and one bad placement SELECT * FROM researchers WHERE lab_id = 4; @@ -548,52 +363,14 @@ SELECT * FROM ref; (1 row) ROLLBACK TO SAVEPOINT start; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open +WARNING: connection not open +WARNING: connection not open SELECT * FROM ref; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -connection not open -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -connection not open -connection not open -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open -connection not open +WARNING: connection not open +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection not open +WARNING: connection not open +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open END; -- clean up SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/expected/failure_single_mod.out b/src/test/regress/expected/failure_single_mod.out index 9345225ae..54db33ff6 100644 --- a/src/test/regress/expected/failure_single_mod.out +++ b/src/test/regress/expected/failure_single_mod.out @@ -27,9 +27,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO mod_test VALUES (2, 6); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM mod_test WHERE key=2; count --------------------------------------------------------------------- @@ -61,9 +59,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); (1 row) UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM mod_test WHERE value='ok'; count --------------------------------------------------------------------- @@ -93,9 +89,7 @@ INSERT INTO mod_test VALUES (2, 6); INSERT INTO mod_test VALUES (2, 7); DELETE FROM mod_test WHERE key=2 AND value = '7'; UPDATE mod_test SET value='ok' WHERE key=2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT COUNT(*) FROM mod_test WHERE key=2; count diff --git a/src/test/regress/expected/failure_single_select.out b/src/test/regress/expected/failure_single_select.out index 4cfa1252b..723f5fadc 100644 --- a/src/test/regress/expected/failure_single_select.out +++ b/src/test/regress/expected/failure_single_select.out @@ -30,18 +30,14 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); (1 row) SELECT * FROM select_test WHERE key = 3; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open key | value --------------------------------------------------------------------- 3 | test data (1 row) SELECT * FROM select_test WHERE key = 3; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open key | value --------------------------------------------------------------------- 3 | test data @@ -58,9 +54,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -148,9 +142,7 @@ SELECT * FROM select_test WHERE key = 3; INSERT INTO select_test VALUES (3, 'even more data'); SELECT * FROM select_test WHERE key = 3; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(2).kill()'); mitmproxy @@ -165,9 +157,7 @@ SELECT recover_prepared_transactions(); (1 row) SELECT recover_prepared_transactions(); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- bug from https://github.com/citusdata/citus/issues/1926 SET citus.max_cached_conns_per_worker TO 0; -- purge cache @@ -196,9 +186,7 @@ SELECT * FROM select_test WHERE key = 1; (1 row) SELECT * FROM select_test WHERE key = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open -- now the same test with query cancellation SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_tenant_isolation.out b/src/test/regress/expected/failure_tenant_isolation.out index c07165e9a..a5ec7734d 100644 --- a/src/test/regress/expected/failure_tenant_isolation.out +++ b/src/test/regress/expected/failure_tenant_isolation.out @@ -56,9 +56,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_ (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on colocated table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").after(1).cancel(' || :pid || ')'); @@ -77,9 +75,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_2 (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on colocated table population SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_2").cancel(' || :pid || ')'); @@ -98,9 +94,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on colocated table constraints SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); @@ -119,9 +113,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_ (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on table creation SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").after(1).cancel(' || :pid || ')'); @@ -140,9 +132,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_1 (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on table population SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_1").cancel(' || :pid || ')'); @@ -161,9 +151,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on table constraints SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); @@ -182,9 +170,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolatio (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on dropping old colocated shard SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')'); @@ -203,9 +189,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolatio (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- cancellation on dropping old shard SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')'); @@ -224,9 +208,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey F (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx -- failure on foreign key creation SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").after(2).cancel(' || :pid || ')'); diff --git a/src/test/regress/expected/failure_truncate.out b/src/test/regress/expected/failure_truncate.out index 4d9c0d6d6..b0dda6bda 100644 --- a/src/test/regress/expected/failure_truncate.out +++ b/src/test/regress/expected/failure_truncate.out @@ -43,9 +43,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -154,9 +152,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -281,29 +277,15 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()'); (1 row) TRUNCATE test_table; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: failed to commit transaction on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: failed to commit transaction on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -374,10 +356,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); (1 row) TRUNCATE reference_table CASCADE; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -446,9 +425,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).kill()'); (1 row) TRUNCATE reference_table CASCADE; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -517,10 +494,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki (1 row) TRUNCATE reference_table CASCADE; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -590,9 +564,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -701,9 +673,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.tes (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -757,10 +727,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").k (1 row) TRUNCATE test_table; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -966,9 +933,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -1077,9 +1042,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -1133,10 +1096,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki (1 row) TRUNCATE test_table; -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_vacuum.out b/src/test/regress/expected/failure_vacuum.out index 018249ad2..632562b52 100644 --- a/src/test/regress/expected/failure_vacuum.out +++ b/src/test/regress/expected/failure_vacuum.out @@ -30,9 +30,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); (1 row) VACUUM vacuum_test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); mitmproxy --------------------------------------------------------------------- @@ -40,9 +38,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); (1 row) ANALYZE vacuum_test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
+ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy --------------------------------------------------------------------- @@ -50,17 +46,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); (1 row) ANALYZE vacuum_test; -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: failed to commit transaction on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open +WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -127,9 +116,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); (1 row) VACUUM vacuum_test, other_vacuum_test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. +ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_vacuum_1.out b/src/test/regress/expected/failure_vacuum_1.out index c13096f6d..7e87f1840 100644 --- a/src/test/regress/expected/failure_vacuum_1.out +++ b/src/test/regress/expected/failure_vacuum_1.out @@ -33,8 +33,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); VACUUM vacuum_test; ERROR: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); mitmproxy --------------------------------------------------------------------- @@ -44,8 +42,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); ANALYZE vacuum_test; WARNING: connection error: localhost:xxxxx DETAIL: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_master_update_node_0.out b/src/test/regress/expected/isolation_master_update_node_0.out index cc746278e..756ae222a 100644 --- a/src/test/regress/expected/isolation_master_update_node_0.out +++ b/src/test/regress/expected/isolation_master_update_node_0.out @@ -48,8 +48,6 @@ step s1-abort: ABORT; WARNING: this step had a leftover error message FATAL: terminating connection due to administrator command server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
master_remove_node

diff --git a/src/test/regress/expected/isolation_master_update_node_2.out b/src/test/regress/expected/isolation_master_update_node_2.out
index 46e0d23d5..194299c4d 100644
--- a/src/test/regress/expected/isolation_master_update_node_2.out
+++ b/src/test/regress/expected/isolation_master_update_node_2.out
@@ -57,8 +57,6 @@ step s2-abort: ABORT;
 step s1-abort: ABORT;
 FATAL: terminating connection due to administrator command
 server closed the connection unexpectedly
-	This probably means the server terminated abnormally
-	before or while processing the request.
 
 master_remove_node
 ---------------------------------------------------------------------

From 6c65d29924d42a517ba2a6a4dd20f1878058ba32 Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Thu, 21 Jul 2022 14:46:51 +0200
Subject: [PATCH 04/38] Check the PGPROC's validity properly

We used to only check whether the PID is valid or not. However,
Postgres does not necessarily set the PID of the backend to 0
when it exits.

Instead, we need to be able to check it from procArray. IsBackendPid()
is what pg_stat_activity also relies on for a similar purpose.
---
 src/backend/distributed/shared_library_init.c |  1 +
 .../distributed/transaction/backend_data.c    | 39 ++++++++++++++++++-
 .../distributed/transaction/lock_graph.c      | 12 +++++-
 src/include/distributed/backend_data.h        |  2 +
 .../isolation_get_all_active_transactions.out | 33 +++++++++++++++-
 ...isolation_get_all_active_transactions.spec | 23 +++++++++++
 6 files changed, 106 insertions(+), 4 deletions(-)

diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index 6a5f229c9..fb1259fcd 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -647,6 +647,7 @@ CitusCleanupConnectionsAtExit(int code, Datum arg)
 
 	/* we don't want any monitoring view/udf to show already exited backends */
 	UnSetGlobalPID();
+	SetActiveMyBackend(false);
 }
 
 
diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c
index 60c42f7ac..92ffaae25 100644
--- a/src/backend/distributed/transaction/backend_data.c
+++ b/src/backend/distributed/transaction/backend_data.c
@@ -40,6 +40,7 @@
 #include "storage/ipc.h"
 #include "storage/lmgr.h"
 #include "storage/lwlock.h"
+#include "storage/procarray.h"
 #include "storage/proc.h"
 #include "storage/spin.h"
 #include "storage/s_lock.h"
@@ -392,9 +393,9 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
 
 		SpinLockAcquire(&currentBackend->mutex);
 
-		if (currentProc->pid == 0)
+		if (currentProc->pid == 0 || !currentBackend->activeBackend)
 		{
-			/* unused PGPROC slot */
+			/* unused PGPROC slot or the backend already exited */
 			SpinLockRelease(&currentBackend->mutex);
 			continue;
 		}
@@ -698,6 +699,12 @@ InitializeBackendData(void)
 	UnSetDistributedTransactionId();
 	UnSetGlobalPID();
 
+	/*
+	 * Signal that this backend is active and should show up
+	 * on activity monitors.
+	 */
+	SetActiveMyBackend(true);
+
 	UnlockBackendSharedMemory();
 }
 
@@ -746,6 +753,24 @@ UnSetGlobalPID(void)
 }
 
 
+/*
+ * SetActiveMyBackend is a wrapper around MyBackendData->activeBackend.
+ */
+void
+SetActiveMyBackend(bool value)
+{
+	/* backend does not exist if the extension is not created */
+	if (MyBackendData)
+	{
+		SpinLockAcquire(&MyBackendData->mutex);
+
+		MyBackendData->activeBackend = value;
+
+		SpinLockRelease(&MyBackendData->mutex);
+	}
+}
+
+
 /*
  * LockBackendSharedMemory is a simple wrapper around LWLockAcquire on the
  * shared memory lock.
@@ -1224,6 +1249,16 @@ ActiveDistributedTransactionNumbers(void)
 		}
 
 		GetBackendDataForProc(currentProc, &currentBackendData);
+		if (!currentBackendData.activeBackend)
+		{
+			/*
+			 * Skip if the backend has already exited. We should normally use
+			 * IsBackendPid() to reliably skip all the exited processes.
+			 * However, that is a costly operation. Instead, we keep track of
+			 * activeBackend in Citus code.
+			 */
+			continue;
+		}
 
 		if (!IsInDistributedTransaction(&currentBackendData))
 		{
diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c
index e672dafd8..8c09160b0 100644
--- a/src/backend/distributed/transaction/lock_graph.c
+++ b/src/backend/distributed/transaction/lock_graph.c
@@ -561,13 +561,23 @@ BuildLocalWaitGraph(bool onlyDistributedTx)
 		PGPROC *currentProc = &ProcGlobal->allProcs[curBackend];
 		BackendData currentBackendData;
 
-		/* skip if the PGPROC slot is unused */
 		if (currentProc->pid == 0)
 		{
+			/* skip if the PGPROC slot is unused */
 			continue;
 		}
 
 		GetBackendDataForProc(currentProc, &currentBackendData);
+		if (!currentBackendData.activeBackend)
+		{
+			/*
+			 * Skip if the backend has already exited. We should normally use
+			 * IsBackendPid() to reliably skip all the exited processes.
+			 * However, that is a costly operation. Instead, we keep track of
+			 * activeBackend in Citus code.
+			 */
+			continue;
+		}
 
 		/*
 		 * Only start searching from distributed transactions, since we only
diff --git a/src/include/distributed/backend_data.h b/src/include/distributed/backend_data.h
index 0c3b7ee26..52f2f9c1b 100644
--- a/src/include/distributed/backend_data.h
+++ b/src/include/distributed/backend_data.h
@@ -42,6 +42,7 @@ typedef struct BackendData
 	uint64 globalPID;
 	bool distributedCommandOriginator;
 	DistributedTransactionId transactionId;
+	bool activeBackend; /* set to false when the backend exits */
 } BackendData;
 
 
@@ -54,6 +55,7 @@ extern void LockBackendSharedMemory(LWLockMode lockMode);
 extern void UnlockBackendSharedMemory(void);
 extern void UnSetDistributedTransactionId(void);
 extern void UnSetGlobalPID(void);
+extern void SetActiveMyBackend(bool value);
 extern void AssignDistributedTransactionId(void);
 extern void AssignGlobalPID(void);
 extern uint64 GetGlobalPID(void);
diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out
index d2f526e03..a9739a826 100644
--- a/src/test/regress/expected/isolation_get_all_active_transactions.out
+++ b/src/test/regress/expected/isolation_get_all_active_transactions.out
@@ -1,4 +1,4 @@
-Parsed test spec with 3 sessions
+Parsed test spec with 5 sessions
 
 starting permutation: s1-grant s1-begin-insert s2-begin-insert s3-as-admin s3-as-user-1 s3-as-readonly s3-as-monitor s1-commit s2-commit
 step s1-grant:
@@ -93,3 +93,34 @@ step s1-commit:
 
 step s2-commit:
 	COMMIT;
+
+starting permutation: s4-record-pid s3-show-activity s5-kill s3-show-activity
+step s4-record-pid:
+	SELECT pg_backend_pid() INTO selected_pid;
+
+step s3-show-activity:
+	SET ROLE postgres;
+	select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid);
+
+count
+--------------------------------------------------------------------- + 1 +(1 row) + +step s5-kill: + SELECT pg_terminate_backend(pg_backend_pid) FROM selected_pid; + +pg_terminate_backend +--------------------------------------------------------------------- +t +(1 row) + +step s3-show-activity: + SET ROLE postgres; + select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid); + +count +--------------------------------------------------------------------- + 0 +(1 row) + diff --git a/src/test/regress/spec/isolation_get_all_active_transactions.spec b/src/test/regress/spec/isolation_get_all_active_transactions.spec index 6aa7e1828..497b3a58a 100644 --- a/src/test/regress/spec/isolation_get_all_active_transactions.spec +++ b/src/test/regress/spec/isolation_get_all_active_transactions.spec @@ -21,6 +21,7 @@ teardown { DROP TABLE test_table; DROP USER test_user_1, test_user_2, test_readonly, test_monitor; + DROP TABLE IF EXISTS selected_pid; } session "s1" @@ -100,4 +101,26 @@ step "s3-as-monitor" SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } +step "s3-show-activity" +{ + SET ROLE postgres; + select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid); +} + +session "s4" + +step "s4-record-pid" +{ + SELECT pg_backend_pid() INTO selected_pid; +} + +session "s5" + +step "s5-kill" +{ + SELECT pg_terminate_backend(pg_backend_pid) FROM selected_pid; +} + + permutation "s1-grant" "s1-begin-insert" "s2-begin-insert" "s3-as-admin" "s3-as-user-1" "s3-as-readonly" "s3-as-monitor" "s1-commit" "s2-commit" +permutation "s4-record-pid" "s3-show-activity" "s5-kill" "s3-show-activity" From 5f27445b696893ba818233c83be206ccaa240d62 Mon Sep 17 00:00:00 2001 From: aykut-bozkurt <51649454+aykut-bozkurt@users.noreply.github.com> Date: Wed, 27 Jul 2022 10:34:41 +0300 Subject: [PATCH 05/38] enable propagation warnings before postgres vanilla tests (#6081) --- src/backend/distributed/commands/cluster.c | 9 ++++++--- src/backend/distributed/commands/common.c | 6 +++++- src/backend/distributed/commands/function.c | 6 +++++- src/backend/distributed/commands/table.c | 11 +++++++---- src/backend/distributed/commands/utility_hook.c | 12 ++++++++---- src/backend/distributed/shared_library_init.c | 12 ++++++++++++ src/backend/distributed/utils/log_utils.c | 6 ++++++ src/include/distributed/log_utils.h | 2 ++ src/test/regress/pg_regress_multi.pl | 3 +++ 9 files changed, 54 insertions(+), 13 deletions(-) diff --git a/src/backend/distributed/commands/cluster.c b/src/backend/distributed/commands/cluster.c index a773816de..c539aa066 100644 --- a/src/backend/distributed/commands/cluster.c +++ b/src/backend/distributed/commands/cluster.c @@ -39,9 +39,12 @@ PreprocessClusterStmt(Node *node, const char *clusterCommand, if (clusterStmt->relation == NULL) { - ereport(WARNING, (errmsg("not propagating CLUSTER command to worker nodes"), - errhint("Provide a specific table in order to CLUSTER " - "distributed tables."))); + if (EnableUnsupportedFeatureMessages) + { + ereport(WARNING, (errmsg("not propagating CLUSTER command to worker nodes"), + errhint("Provide a specific table in order to CLUSTER " + "distributed tables."))); + } return NIL; } diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c index 29cb96e9a..63491fbdc 100644 --- a/src/backend/distributed/commands/common.c +++ b/src/backend/distributed/commands/common.c @@ -74,7 +74,11 @@ 
PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryS addresses); if (depError != NULL) { - RaiseDeferredError(depError, WARNING); + if (EnableUnsupportedFeatureMessages) + { + RaiseDeferredError(depError, WARNING); + } + return NIL; } diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 2d8a2e09a..e02f68aa4 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -1387,7 +1387,11 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString) if (errMsg != NULL) { - RaiseDeferredError(errMsg, WARNING); + if (EnableUnsupportedFeatureMessages) + { + RaiseDeferredError(errMsg, WARNING); + } + return NIL; } diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 9fbe1d993..a1f9a685e 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -1754,10 +1754,13 @@ List * PreprocessAlterTableMoveAllStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { - ereport(WARNING, (errmsg("not propagating ALTER TABLE ALL IN TABLESPACE " - "commands to worker nodes"), - errhint("Connect to worker nodes directly to manually " - "move all tables."))); + if (EnableUnsupportedFeatureMessages) + { + ereport(WARNING, (errmsg("not propagating ALTER TABLE ALL IN TABLESPACE " + "commands to worker nodes"), + errhint("Connect to worker nodes directly to manually " + "move all tables."))); + } return NIL; } diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 0205a0ab9..ab1792b31 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -734,10 +734,14 @@ ProcessUtilityInternal(PlannedStmt *pstmt, if (IsA(parsetree, RenameStmt) && ((RenameStmt *) parsetree)->renameType == OBJECT_ROLE && EnableAlterRolePropagation) { - ereport(NOTICE, (errmsg("not propagating ALTER ROLE ... RENAME TO commands " - "to worker nodes"), - errhint("Connect to worker nodes directly to manually " - "rename the role"))); + if (EnableUnsupportedFeatureMessages) + { + ereport(NOTICE, (errmsg( + "not propagating ALTER ROLE ... RENAME TO commands " + "to worker nodes"), + errhint("Connect to worker nodes directly to manually " + "rename the role"))); + } } } diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index fb1259fcd..1140991be 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -48,6 +48,7 @@ #include "distributed/local_executor.h" #include "distributed/local_distributed_join_planner.h" #include "distributed/locally_reserved_shared_connections.h" +#include "distributed/log_utils.h" #include "distributed/maintenanced.h" #include "distributed/shard_cleaner.h" #include "distributed/metadata_utility.h" @@ -1153,6 +1154,17 @@ RegisterCitusConfigVariables(void) GUC_STANDARD, NULL, NULL, NULL); + DefineCustomBoolVariable( + "citus.enable_unsupported_feature_messages", + gettext_noop("Controls showing of some citus related messages. 
It is intended to "
+						 "be used before vanilla tests to stop unwanted citus messages."),
+		NULL,
+		&EnableUnsupportedFeatureMessages,
+		true,
+		PGC_SUSET,
+		GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL,
+		NULL, NULL, NULL);
+
 	DefineCustomBoolVariable(
 		"citus.enable_version_checks",
 		gettext_noop("Enables version checks during CREATE/ALTER EXTENSION commands"),
diff --git a/src/backend/distributed/utils/log_utils.c b/src/backend/distributed/utils/log_utils.c
index a74f210c2..97c8a6976 100644
--- a/src/backend/distributed/utils/log_utils.c
+++ b/src/backend/distributed/utils/log_utils.c
@@ -23,6 +23,12 @@
 #endif
 
 
+/*
+ * This GUC controls the visibility of some unwanted citus messages. It is
+ * intended to be set to false before vanilla tests so that they do not break
+ * the postgres test logs.
+ */
+bool EnableUnsupportedFeatureMessages = true;
+
 /*
  * IsLoggableLevel returns true if either of client or server log guc is configured to
  * log the given log level.
diff --git a/src/include/distributed/log_utils.h b/src/include/distributed/log_utils.h
index 2c84f4471..ceddfc838 100644
--- a/src/include/distributed/log_utils.h
+++ b/src/include/distributed/log_utils.h
@@ -16,6 +16,8 @@
 
 #define CITUS_LOG_LEVEL_OFF 0
 
+extern bool EnableUnsupportedFeatureMessages;
+
 extern bool IsLoggableLevel(int logLevel);
 extern char * HashLogMessage(const char *text);
 
diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl
index 403879f1f..4799ad3c8 100755
--- a/src/test/regress/pg_regress_multi.pl
+++ b/src/test/regress/pg_regress_multi.pl
@@ -500,6 +500,9 @@ if(!$vanillaDev && $vanillatest)
 {
 	# we enable hiding the citus dependent objects from pg meta class queries to not break postgres vanilla test behaviour
 	push(@pgOptions, "citus.hide_citus_dependent_objects=true");
+
+	# we disable unwanted citus related messages to not break postgres vanilla test behaviour.
+	push(@pgOptions, "citus.enable_unsupported_feature_messages=false");
 }
 
 if ($useMitmproxy)

From b8008999dc137f90b3c1565141653b2a42f25f0d Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Tue, 26 Jul 2022 12:09:12 +0200
Subject: [PATCH 06/38] Reduce memory consumption while adjusting partition
 index names

Previously, CreateFixPartitionShardIndexNames() created all the
relevant query strings for all the shards, and executed the large
query string. And, in terms of the memory consumption, this huge
command (and its ExprContext generated while running the command)
is the main bottleneck. With this change, we are reducing the total
amount of memory usage to almost 1/shard_count.

On my local machine, for a distributed partitioned table with 120
partitions, each with 32 shards, the total memory consumption reduced
from ~3GB to ~0.1GB. And, the total execution time increased from
~28 seconds to ~30 seconds. This seems like a good trade-off.
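In other words, the per-shard loop now follows the usual Postgres pattern of
a short-lived memory context that is reset after every iteration. A minimal
sketch of that pattern (the helper names BuildCommandsForShard and
ExecuteCommands are hypothetical placeholders, not the actual Citus
functions):

    MemoryContext loopContext = AllocSetContextCreate(CurrentMemoryContext,
                                                      "PerShardContext",
                                                      ALLOCSET_DEFAULT_SIZES);
    MemoryContext oldContext = MemoryContextSwitchTo(loopContext);

    ShardInterval *shardInterval = NULL;
    foreach_ptr(shardInterval, shardIntervalList)
    {
        /* build and run the command strings for this shard only */
        List *queryStringList = BuildCommandsForShard(shardInterval);
        ExecuteCommands(queryStringList);

        /* release everything this iteration allocated before the next one */
        MemoryContextReset(loopContext);
    }

    MemoryContextSwitchTo(oldContext);

With this structure, peak memory is bounded by the largest single shard's
command strings rather than by the sum over all shards.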
--- .../utils/multi_partitioning_utils.c | 59 ++++++++----------- 1 file changed, 25 insertions(+), 34 deletions(-) diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index 8a046dcd7..59876e34d 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -53,9 +53,9 @@ static Relation try_relation_open_nolock(Oid relationId); static List * CreateFixPartitionConstraintsTaskList(Oid relationId); static List * WorkerFixPartitionConstraintCommandList(Oid relationId, uint64 shardId, List *checkConstraintList); -static List * CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId, - Oid partitionRelationId, - Oid parentIndexOid); +static void CreateFixPartitionShardIndexNames(Oid parentRelationId, + Oid partitionRelationId, + Oid parentIndexOid); static List * WorkerFixPartitionShardIndexNamesCommandList(uint64 parentShardId, List *indexIdList, Oid partitionRelationId); @@ -329,17 +329,9 @@ FixPartitionShardIndexNames(Oid relationId, Oid parentIndexOid) RelationGetRelationName(relation)))); } - List *taskList = - CreateFixPartitionShardIndexNamesTaskList(parentRelationId, - partitionRelationId, - parentIndexOid); - - /* do not do anything if there are no index names to fix */ - if (taskList != NIL) - { - bool localExecutionSupported = true; - ExecuteUtilityTaskList(taskList, localExecutionSupported); - } + CreateFixPartitionShardIndexNames(parentRelationId, + partitionRelationId, + parentIndexOid); relation_close(relation, NoLock); } @@ -494,15 +486,15 @@ WorkerFixPartitionConstraintCommandList(Oid relationId, uint64 shardId, * partition each task will have parent_indexes_count query strings. When we need * to fix a single index, parent_indexes_count becomes 1. 
 */
-static List *
-CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId, Oid partitionRelationId,
-										  Oid parentIndexOid)
+static void
+CreateFixPartitionShardIndexNames(Oid parentRelationId, Oid partitionRelationId,
+								  Oid parentIndexOid)
 {
 	List *partitionList = PartitionList(parentRelationId);
 	if (partitionList == NIL)
 	{
 		/* early exit if the parent relation does not have any partitions */
-		return NIL;
+		return;
 	}
 
 	Relation parentRelation = RelationIdGetRelation(parentRelationId);
@@ -526,7 +518,7 @@
 	{
 		/* early exit if the parent relation does not have any indexes */
 		RelationClose(parentRelation);
-		return NIL;
+		return;
 	}
 
 	/*
@@ -554,8 +546,12 @@
 	/* lock metadata before getting placement lists */
 	LockShardListMetadata(parentShardIntervalList, ShareLock);
 
+	MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext,
+													   "CreateFixPartitionShardIndexNames",
+													   ALLOCSET_DEFAULT_SIZES);
+	MemoryContext oldContext = MemoryContextSwitchTo(localContext);
+
 	int taskId = 1;
-	List *taskList = NIL;
 
 	ShardInterval *parentShardInterval = NULL;
 	foreach_ptr(parentShardInterval, parentShardIntervalList)
@@ -566,24 +562,14 @@ CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId, Oid partitionRel
 			WorkerFixPartitionShardIndexNamesCommandList(parentShardId,
 														 parentIndexIdList,
 														 partitionRelationId);
-
 		if (queryStringList != NIL)
 		{
 			Task *task = CitusMakeNode(Task);
 			task->jobId = INVALID_JOB_ID;
 			task->taskId = taskId++;
-
 			task->taskType = DDL_TASK;
 
-			/*
-			 * There could be O(#partitions * #indexes) queries in
-			 * the queryStringList.
-			 *
-			 * In order to avoid round-trips per query in queryStringList,
-			 * we join the string and send as a single command via the UDF.
-			 * Otherwise, the executor sends each command with one
-			 * round-trip.
-			 */
+
 			char *string = StringJoin(queryStringList, ';');
 			StringInfo commandToRun = makeStringInfo();
 
@@ -591,18 +577,23 @@ CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId, Oid partitionRel
 				"SELECT pg_catalog.citus_run_local_command($$%s$$)", string);
 			SetTaskQueryString(task, commandToRun->data);
+
 			task->dependentTaskList = NULL;
 			task->replicationModel = REPLICATION_MODEL_INVALID;
 			task->anchorShardId = parentShardId;
 			task->taskPlacementList = ActiveShardPlacementList(parentShardId);
-			taskList = lappend(taskList, task);
+
+			bool localExecutionSupported = true;
+			ExecuteUtilityTaskList(list_make1(task), localExecutionSupported);
 		}
+
+		/* after every iteration, clean up all the memory associated with it */
+		MemoryContextReset(localContext);
 	}
 
-	RelationClose(parentRelation);
+	MemoryContextSwitchTo(oldContext);
 
-	return taskList;
+	RelationClose(parentRelation);
 }

From 26fdcb68f007015797bab3070e2d2404bb42bc5c Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Tue, 26 Jul 2022 13:44:42 +0200
Subject: [PATCH 07/38] Optimize StringJoin() for when prefix-postfix is needed

Before this commit, we required multiple copies of the same
stringInfo if we needed to append/prepend data to the stringInfo.

Now, we optionally get prefix/postfix. For large string operations,
this can save up to 10% memory.
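As a usage sketch (mirroring the call site updated below), building the
citus_run_local_command wrapper used to need a join followed by a second copy
into a StringInfo; with the new variant the prefix and postfix are written
into the same buffer as the joined strings:

    List *queryStringList = list_make2("SELECT 1", "SELECT 2");

    /* before: the joined string is copied again while adding the wrapper */
    char *joined = StringJoin(queryStringList, ';');
    StringInfo commandToRun = makeStringInfo();
    appendStringInfo(commandToRun,
                     "SELECT pg_catalog.citus_run_local_command($$%s$$)",
                     joined);

    /* after: one buffer, no extra copy of the joined payload */
    char *prefix = "SELECT pg_catalog.citus_run_local_command($$";
    char *postfix = "$$)";
    char *command = StringJoinParams(queryStringList, ';', prefix, postfix);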
---
 src/backend/distributed/utils/listutils.c     | 25 ++++++++++++++++++-
 .../utils/multi_partitioning_utils.c          | 10 +++-----
 src/include/distributed/listutils.h           |  2 ++
 3 files changed, 30 insertions(+), 7 deletions(-)

diff --git a/src/backend/distributed/utils/listutils.c b/src/backend/distributed/utils/listutils.c
index ce2920748..3279193ef 100644
--- a/src/backend/distributed/utils/listutils.c
+++ b/src/backend/distributed/utils/listutils.c
@@ -161,13 +161,31 @@ GeneratePositiveIntSequenceList(int upTo)
 /*
  * StringJoin gets a list of char * and then simply
  * returns a newly allocated char * joined with the
- * given delimiter.
+ * given delimiter. It is a thin wrapper around
+ * StringJoinParams() with no prefix or postfix.
  */
 char *
 StringJoin(List *stringList, char delimiter)
+{
+	return StringJoinParams(stringList, delimiter, NULL, NULL);
+}
+
+
+/*
+ * StringJoinParams gets a list of char * and then simply
+ * returns a newly allocated char * joined with the
+ * given delimiter, prefix and postfix.
+ */
+char *
+StringJoinParams(List *stringList, char delimiter, char *prefix, char *postfix)
 {
 	StringInfo joinedString = makeStringInfo();
 
+	if (prefix != NULL)
+	{
+		appendStringInfoString(joinedString, prefix);
+	}
+
 	const char *command = NULL;
 	int curIndex = 0;
 	foreach_ptr(command, stringList)
@@ -180,6 +198,11 @@ StringJoin(List *stringList, char delimiter)
 		curIndex++;
 	}
 
+	if (postfix != NULL)
+	{
+		appendStringInfoString(joinedString, postfix);
+	}
+
 	return joinedString->data;
 }
 
diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c
index 59876e34d..9dfa285a2 100644
--- a/src/backend/distributed/utils/multi_partitioning_utils.c
+++ b/src/backend/distributed/utils/multi_partitioning_utils.c
@@ -569,13 +569,11 @@ CreateFixPartitionShardIndexNames(Oid parentRelationId, Oid partitionRelationId,
 			task->taskId = taskId++;
 			task->taskType = DDL_TASK;
 
+			char *prefix = "SELECT pg_catalog.citus_run_local_command($$";
+			char *postfix = "$$)";
+			char *string = StringJoinParams(queryStringList, ';', prefix, postfix);
 
-			char *string = StringJoin(queryStringList, ';');
-			StringInfo commandToRun = makeStringInfo();
-
-			appendStringInfo(commandToRun,
-				"SELECT pg_catalog.citus_run_local_command($$%s$$)", string);
-			SetTaskQueryString(task, commandToRun->data);
+			SetTaskQueryString(task, string);
 
 			task->dependentTaskList = NULL;
 
diff --git a/src/include/distributed/listutils.h b/src/include/distributed/listutils.h
index c3facf76f..aa6a0e96b 100644
--- a/src/include/distributed/listutils.h
+++ b/src/include/distributed/listutils.h
@@ -166,6 +166,8 @@ extern List * SortList(List *pointerList,
 extern void ** PointerArrayFromList(List *pointerList);
 extern HTAB * ListToHashSet(List *pointerList, Size keySize, bool isStringList);
 extern char * StringJoin(List *stringList, char delimiter);
+extern char * StringJoinParams(List *stringList, char delimiter,
+							   char *prefix, char *postfix);
 extern List * ListTake(List *pointerList, int size);
 extern void * safe_list_nth(const List *list, int index);
 extern List * GeneratePositiveIntSequenceList(int upTo);

From f076e811664322ce5171f22ba3251a73c0b79052 Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Tue, 26 Jul 2022 15:15:35 +0200
Subject: [PATCH 08/38] Do not cache all the metadata during
 fix_all_partition_shard_index_names
---
 src/backend/distributed/utils/multi_partitioning_utils.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c 
b/src/backend/distributed/utils/multi_partitioning_utils.c
index 9dfa285a2..3ec02da48 100644
--- a/src/backend/distributed/utils/multi_partitioning_utils.c
+++ b/src/backend/distributed/utils/multi_partitioning_utils.c
@@ -205,6 +205,13 @@ fix_partition_shard_index_names(PG_FUNCTION_ARGS)
 
 	FixPartitionShardIndexNames(relationId, parentIndexOid);
 
+	/*
+	 * This UDF is called from fix_all_partition_shard_index_names() which iterates
+	 * over all the partitioned tables. There is no need to hold all the distributed
+	 * table metadata until the end of the transaction for the input table.
+	 */
+	CitusTableCacheFlushInvalidatedEntries();
+
 	PG_RETURN_VOID();
 }
 

From 12fa3aaf6bf696ec2f17be48a8fa5117d5f1dae2 Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Wed, 20 Jul 2022 11:28:30 +0200
Subject: [PATCH 09/38] Concurrent shard move/copy and colocated table creation
 fix

It turns out that create_distributed_table and citus_move/copy_shard_placement
do not work well concurrently.

To fix that, we need to acquire a lock, which sounds like a good use of the
colocation lock. However, the current usage of the colocation lock is limited
to higher level UDFs like rebalance_table_shards etc. That usage of the lock
is still useful, but we cannot acquire the same lock in
citus_move_shard_placement etc. because the coordinator connects to itself to
acquire the lock. Hence, the high level UDF blocks itself.

To fix that, we use one more colocation lock, with the placements as the main
objects to consider.
---
 .../commands/create_distributed_table.c      |  12 +
 .../distributed/operations/repair_shards.c   |   6 +
 .../distributed/operations/shard_rebalancer.c |  60 ++++-
 src/include/distributed/resource_lock.h      |  14 +-
 src/include/distributed/shard_rebalancer.h   |   3 +-
 ...isolation_concurrent_move_create_table.out | 220 ++++++++++++++++++
 ...ation_copy_placement_vs_copy_placement.out |  38 ++-
 ...ical_replication_single_shard_commands.out | 107 ++++-----
 .../isolation_max_client_connections.out     |  18 +-
 ...ation_move_placement_vs_move_placement.out |  72 +++---
 src/test/regress/isolation_schedule          |   1 +
 ...solation_concurrent_move_create_table.spec | 126 ++++++++++
 .../isolation_max_client_connections.spec    |   1 +
 13 files changed, 541 insertions(+), 137 deletions(-)
 create mode 100644 src/test/regress/expected/isolation_concurrent_move_create_table.out
 create mode 100644 src/test/regress/spec/isolation_concurrent_move_create_table.spec

diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c
index 1416cbb3b..52043ac25 100644
--- a/src/backend/distributed/commands/create_distributed_table.c
+++ b/src/backend/distributed/commands/create_distributed_table.c
@@ -60,6 +60,7 @@
 #include "distributed/relation_access_tracking.h"
 #include "distributed/remote_commands.h"
 #include "distributed/shared_library_init.h"
+#include "distributed/shard_rebalancer.h"
 #include "distributed/worker_protocol.h"
 #include "distributed/worker_shard_visibility.h"
 #include "distributed/worker_transaction.h"
@@ -850,6 +851,17 @@ CreateHashDistributedTableShards(Oid relationId, int shardCount,
 
 	if (colocatedTableId != InvalidOid)
 	{
+		/*
+		 * We currently allow concurrent distribution of colocated tables (which
+		 * we probably should not be allowing because of foreign keys /
+		 * partitioning etc).
+		 *
+		 * We also prevent concurrent shard moves / copies / splits while creating
+		 * a colocated table.
+		 */
+		AcquirePlacementColocationLock(colocatedTableId, ShareLock,
+									   "colocate distributed table");
+
 		CreateColocatedShards(relationId, colocatedTableId, useExclusiveConnection);
 	}
 	else
diff --git a/src/backend/distributed/operations/repair_shards.c b/src/backend/distributed/operations/repair_shards.c
index 7db6d8289..26928fd3a 100644
--- a/src/backend/distributed/operations/repair_shards.c
+++ b/src/backend/distributed/operations/repair_shards.c
@@ -37,6 +37,7 @@
 #include "distributed/reference_table_utils.h"
 #include "distributed/remote_commands.h"
 #include "distributed/resource_lock.h"
+#include "distributed/shard_rebalancer.h"
 #include "distributed/worker_manager.h"
 #include "distributed/worker_protocol.h"
 #include "distributed/worker_transaction.h"
@@ -179,6 +180,9 @@ citus_copy_shard_placement(PG_FUNCTION_ARGS)
 	ShardInterval *shardInterval = LoadShardInterval(shardId);
 	ErrorIfTableCannotBeReplicated(shardInterval->relationId);
 
+	AcquirePlacementColocationLock(shardInterval->relationId, ExclusiveLock,
+								   doRepair ? "repair" : "copy");
+
 	if (doRepair)
 	{
 		RepairShardPlacement(shardId, sourceNodeName, sourceNodePort, targetNodeName,
@@ -332,6 +336,8 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
 	ErrorIfMoveUnsupportedTableType(relationId);
 	ErrorIfTargetNodeIsNotSafeToMove(targetNodeName, targetNodePort);
 
+	AcquirePlacementColocationLock(relationId, ExclusiveLock, "move");
+
 	ShardInterval *shardInterval = LoadShardInterval(shardId);
 	Oid distributedTableId = shardInterval->relationId;
 
diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c
index 14d29c135..b7cb900e3 100644
--- a/src/backend/distributed/operations/shard_rebalancer.c
+++ b/src/backend/distributed/operations/shard_rebalancer.c
@@ -227,7 +227,7 @@ static float4 NodeCapacity(WorkerNode *workerNode, void *context);
 static ShardCost GetShardCost(uint64 shardId, void *context);
 static List * NonColocatedDistRelationIdList(void);
 static void RebalanceTableShards(RebalanceOptions *options, Oid shardReplicationModeOid);
-static void AcquireColocationLock(Oid relationId, const char *operationName);
+static void AcquireRebalanceColocationLock(Oid relationId, const char *operationName);
 static void ExecutePlacementUpdates(List *placementUpdateList, Oid
 									shardReplicationModeOid, char *noticeOperation);
 static float4 CalculateUtilization(float4 totalCost, float4 capacity);
@@ -616,13 +616,13 @@ GetColocatedRebalanceSteps(List *placementUpdateList)
 
 
 /*
- * AcquireColocationLock tries to acquire a lock for rebalance/replication. If
- * this is it not possible it fails instantly because this means another
- * rebalance/replication is currently happening. This would really mess up
- * planning.
+ * AcquireRebalanceColocationLock tries to acquire a lock for
+ * rebalance/replication. If this is not possible, it fails
+ * instantly because this means another rebalance/replication
+ * is currently happening. This would really mess up planning.
 */
 static void
-AcquireColocationLock(Oid relationId, const char *operationName)
+AcquireRebalanceColocationLock(Oid relationId, const char *operationName)
 {
 	uint32 lockId = relationId;
 	LOCKTAG tag;
@@ -639,8 +639,48 @@ AcquireColocationLock(Oid relationId, const char *operationName)
 	if (!lockAcquired)
 	{
 		ereport(ERROR, (errmsg("could not acquire the lock required to %s %s",
-							   operationName, generate_qualified_relation_name(
-								   relationId))));
+							   operationName,
+							   generate_qualified_relation_name(relationId)),
+						errdetail("It means that either a concurrent shard move "
+								  "or shard copy is happening."),
+						errhint("Make sure that the concurrent operation has "
+								"finished and re-run the command")));
+	}
+}
+
+
+/*
+ * AcquirePlacementColocationLock tries to acquire a lock for
+ * rebalance/replication while moving/copying the placement. If this
+ * is not possible, it fails instantly because this means
+ * another move/copy is currently happening. This would really mess up planning.
+ */
+void
+AcquirePlacementColocationLock(Oid relationId, int lockMode,
+							   const char *operationName)
+{
+	uint32 lockId = relationId;
+	LOCKTAG tag;
+
+	CitusTableCacheEntry *citusTableCacheEntry = GetCitusTableCacheEntry(relationId);
+	if (citusTableCacheEntry->colocationId != INVALID_COLOCATION_ID)
+	{
+		lockId = citusTableCacheEntry->colocationId;
+	}
+
+	SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION(tag, (int64) lockId);
+
+	LockAcquireResult lockAcquired = LockAcquire(&tag, lockMode, false, true);
+	if (!lockAcquired)
+	{
+		ereport(ERROR, (errmsg("could not acquire the lock required to %s %s",
+							   operationName,
+							   generate_qualified_relation_name(relationId)),
+						errdetail("It means that either a concurrent shard move "
+								  "or colocated distributed table creation is "
+								  "happening."),
+						errhint("Make sure that the concurrent operation has "
+								"finished and re-run the command")));
 	}
 }
 
@@ -942,7 +982,7 @@ replicate_table_shards(PG_FUNCTION_ARGS)
 	char transferMode = LookupShardTransferMode(shardReplicationModeOid);
 	EnsureReferenceTablesExistOnAllNodesExtended(transferMode);
 
-	AcquireColocationLock(relationId, "replicate");
+	AcquireRebalanceColocationLock(relationId, "replicate");
 
 	List *activeWorkerList = SortedActiveWorkers();
 	List *shardPlacementList = FullShardPlacementList(relationId, excludedShardArray);
@@ -1555,7 +1595,7 @@ RebalanceTableShards(RebalanceOptions *options, Oid shardReplicationModeOid)
 
 	foreach_oid(relationId, options->relationIdList)
 	{
-		AcquireColocationLock(relationId, operationName);
+		AcquireRebalanceColocationLock(relationId, operationName);
 	}
 
 	List *placementUpdateList = GetRebalanceSteps(options);
diff --git a/src/include/distributed/resource_lock.h b/src/include/distributed/resource_lock.h
index caaeea8a7..c808e9157 100644
--- a/src/include/distributed/resource_lock.h
+++ b/src/include/distributed/resource_lock.h
@@ -41,7 +41,8 @@ typedef enum AdvisoryLocktagClass
 	ADV_LOCKTAG_CLASS_CITUS_COLOCATED_SHARDS_METADATA = 8,
 	ADV_LOCKTAG_CLASS_CITUS_OPERATIONS = 9,
 	ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP = 10,
-	ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12
+	ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12,
+	ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION = 13
 } AdvisoryLocktagClass;
 
 /* CitusOperations has constants for citus operations */
@@ -84,6 +85,17 @@ typedef enum CitusOperations
 					 (uint32) (colocationOrTableId), \
 					 ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION)
 
+/* reuse advisory lock, but with different, unused field 4 (13)
+ * Also it has the database hardcoded to MyDatabaseId, 
to ensure the locks + * are local to each database */ +#define SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION(tag, colocationOrTableId) \ + SET_LOCKTAG_ADVISORY(tag, \ + MyDatabaseId, \ + (uint32) ((colocationOrTableId) >> 32), \ + (uint32) (colocationOrTableId), \ + ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION) + + /* advisory lock for citus operations, also it has the database hardcoded to MyDatabaseId, * to ensure the locks are local to each database */ #define SET_LOCKTAG_CITUS_OPERATION(tag, operationId) \ diff --git a/src/include/distributed/shard_rebalancer.h b/src/include/distributed/shard_rebalancer.h index ed13248c3..0e0cf51d1 100644 --- a/src/include/distributed/shard_rebalancer.h +++ b/src/include/distributed/shard_rebalancer.h @@ -194,6 +194,7 @@ extern List * RebalancePlacementUpdates(List *workerNodeList, extern List * ReplicationPlacementUpdates(List *workerNodeList, List *shardPlacementList, int shardReplicationFactor); extern void ExecuteRebalancerCommandInSeparateTransaction(char *command); - +extern void AcquirePlacementColocationLock(Oid relationId, int lockMode, + const char *operationName); #endif /* SHARD_REBALANCER_H */ diff --git a/src/test/regress/expected/isolation_concurrent_move_create_table.out b/src/test/regress/expected/isolation_concurrent_move_create_table.out new file mode 100644 index 000000000..343955968 --- /dev/null +++ b/src/test/regress/expected/isolation_concurrent_move_create_table.out @@ -0,0 +1,220 @@ +Parsed test spec with 5 sessions + +starting permutation: s2-begin s2-create_distributed_table s3-create_distributed_table s2-commit +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-begin: + BEGIN; + +step s2-create_distributed_table: + SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1'); + +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s3-create_distributed_table: + SELECT create_distributed_table('concurrent_table_3', 'id', colocate_with := 'concurrent_table_1'); + +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + + +starting permutation: s2-begin s2-create_distributed_table s1-move-shard-logical s2-commit s3-sanity-check s3-sanity-check-2 +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-begin: + BEGIN; + +step s2-create_distributed_table: + SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1'); + +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-move-shard-logical: + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid; + +ERROR: could not acquire the lock required to move public.concurrent_table_1 +step s2-commit: + COMMIT; + +step s3-sanity-check: + SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s3-sanity-check-2: + SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id); + +count 
+--------------------------------------------------------------------- + 0 +(1 row) + + +starting permutation: s2-begin s2-create_distributed_table s1-move-shard-block s2-commit s3-sanity-check s3-sanity-check-2 +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-begin: + BEGIN; + +step s2-create_distributed_table: + SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1'); + +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-move-shard-block: + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid; + +ERROR: could not acquire the lock required to move public.concurrent_table_1 +step s2-commit: + COMMIT; + +step s3-sanity-check: + SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s3-sanity-check-2: + SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id); + +count +--------------------------------------------------------------------- + 0 +(1 row) + + +starting permutation: s4-begin s4-move-shard-logical s5-setup-rep-factor s5-create_implicit_colocated_distributed_table s4-commit s3-sanity-check s3-sanity-check-3 s3-sanity-check-4 +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s4-begin: + BEGIN; + +step s4-move-shard-logical: + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid; + +citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s5-setup-rep-factor: + SET citus.shard_replication_factor TO 1; + +step s5-create_implicit_colocated_distributed_table: + SELECT create_distributed_table('concurrent_table_5', 'id'); + +ERROR: could not acquire the lock required to colocate distributed table public.concurrent_table_4 +step s4-commit: + commit; + +step s3-sanity-check: + SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s3-sanity-check-3: + SELECT count(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid IN ('concurrent_table_4', 'concurrent_table_5'); + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s3-sanity-check-4: + SELECT count(*) FROM concurrent_table_4 JOIN concurrent_table_5 USING (id); + +count +--------------------------------------------------------------------- + 0 +(1 row) + + +starting permutation: s4-begin s4-move-shard-block s5-setup-rep-factor s5-create_implicit_colocated_distributed_table s4-commit s3-sanity-check s3-sanity-check-3 s3-sanity-check-4 +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s4-begin: + BEGIN; + +step s4-move-shard-block: + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 
'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid; + +citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s5-setup-rep-factor: + SET citus.shard_replication_factor TO 1; + +step s5-create_implicit_colocated_distributed_table: + SELECT create_distributed_table('concurrent_table_5', 'id'); + +ERROR: could not acquire the lock required to colocate distributed table public.concurrent_table_4 +step s4-commit: + commit; + +step s3-sanity-check: + SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s3-sanity-check-3: + SELECT count(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid IN ('concurrent_table_4', 'concurrent_table_5'); + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s3-sanity-check-4: + SELECT count(*) FROM concurrent_table_4 JOIN concurrent_table_5 USING (id); + +count +--------------------------------------------------------------------- + 0 +(1 row) + diff --git a/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out b/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out index eefb896b8..e2cddade7 100644 --- a/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out +++ b/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out @@ -2,19 +2,19 @@ Parsed test spec with 2 sessions starting permutation: s1-load-cache s2-load-cache s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit step s1-load-cache: - COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; + COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; step s2-load-cache: - COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; + COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; step s2-begin: - BEGIN; + BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement --------------------------------------------------------------------- @@ -22,23 +22,22 @@ master_copy_shard_placement (1 row) step s1-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); - -step s2-commit: - COMMIT; + SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); + +ERROR: could not acquire the lock required to repair public.test_hash_table +step s2-commit: + COMMIT; -step s1-repair-placement: <... 
completed> -ERROR: target placement must be in inactive state starting permutation: s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit step s2-set-placement-inactive: - UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; + UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; step s2-begin: - BEGIN; + BEGIN; step s2-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); + SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement --------------------------------------------------------------------- @@ -46,10 +45,9 @@ master_copy_shard_placement (1 row) step s1-repair-placement: - SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); - -step s2-commit: - COMMIT; + SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); + +ERROR: could not acquire the lock required to repair public.test_hash_table +step s2-commit: + COMMIT; -step s1-repair-placement: <... completed> -ERROR: target placement must be in inactive state diff --git a/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out b/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out index 2e8125eb9..586e700c7 100644 --- a/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out +++ b/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out @@ -10,10 +10,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); @@ -33,7 +33,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -62,10 +62,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-upsert: INSERT INTO logical_replicate_placement VALUES (15, 15); @@ -86,7 +86,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -118,10 +118,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-update: UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15; @@ -141,7 +141,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -173,10 +173,10 @@ pg_advisory_lock (1 row) step 
s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-delete: DELETE FROM logical_replicate_placement WHERE x = 15; @@ -196,7 +196,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -227,10 +227,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-select: SELECT * FROM logical_replicate_placement ORDER BY y; @@ -255,7 +255,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -279,10 +279,10 @@ pg_advisory_lock (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-select-for-update: SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE; @@ -307,7 +307,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -320,7 +320,7 @@ nodeport starting permutation: s1-begin s2-begin s2-insert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -329,10 +329,10 @@ step s2-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -341,7 +341,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -362,7 +362,7 @@ nodeport starting permutation: s1-begin s2-begin s2-upsert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -372,10 +372,10 @@ step s2-upsert: INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... 
completed> master_move_shard_placement @@ -384,7 +384,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -408,7 +408,7 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -417,10 +417,10 @@ step s2-update: UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -429,7 +429,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -453,7 +453,7 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -462,10 +462,10 @@ step s2-delete: DELETE FROM logical_replicate_placement WHERE x = 15; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... completed> master_move_shard_placement @@ -474,7 +474,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-select: SELECT * FROM logical_replicate_placement order by y; @@ -497,7 +497,7 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -511,7 +511,7 @@ step s2-select: (1 row) step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_move_shard_placement --------------------------------------------------------------------- @@ -519,10 +519,10 @@ master_move_shard_placement (1 row) step s2-end: - COMMIT; + COMMIT; step s1-end: - COMMIT; + COMMIT; step s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -538,7 +538,7 @@ step s1-insert: INSERT INTO logical_replicate_placement VALUES (15, 15); step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; @@ -552,10 +552,10 @@ step s2-select-for-update: (1 row) step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); step s2-end: - COMMIT; + COMMIT; step s1-move-placement: <... 
completed> master_move_shard_placement @@ -564,7 +564,7 @@ master_move_shard_placement (1 row) step s1-end: - COMMIT; + COMMIT; step s1-get-shard-distribution: select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; @@ -577,13 +577,13 @@ nodeport starting permutation: s1-begin s2-begin s1-move-placement s2-move-placement s1-end s2-end step s1-begin: - BEGIN; + BEGIN; step s2-begin: BEGIN; step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_move_shard_placement --------------------------------------------------------------------- @@ -591,19 +591,14 @@ master_move_shard_placement (1 row) step s2-move-placement: - SELECT master_move_shard_placement( - get_shard_id_for_distribution_column('logical_replicate_placement', 4), - 'localhost', 57637, 'localhost', 57638); - -step s1-end: - COMMIT; + SELECT master_move_shard_placement( + get_shard_id_for_distribution_column('logical_replicate_placement', 4), + 'localhost', 57637, 'localhost', 57638); -step s2-move-placement: <... completed> -master_move_shard_placement ---------------------------------------------------------------------- - -(1 row) +ERROR: could not acquire the lock required to move public.logical_replicate_placement +step s1-end: + COMMIT; step s2-end: - COMMIT; + COMMIT; diff --git a/src/test/regress/expected/isolation_max_client_connections.out b/src/test/regress/expected/isolation_max_client_connections.out index ada303511..3c46e6def 100644 --- a/src/test/regress/expected/isolation_max_client_connections.out +++ b/src/test/regress/expected/isolation_max_client_connections.out @@ -8,7 +8,7 @@ run_command_on_workers (2 rows) step s1-grant: - SELECT result FROM run_command_on_placements('my_table', 'GRANT SELECT ON TABLE %s TO my_user'); + SELECT result FROM run_command_on_placements('my_table', 'GRANT SELECT ON TABLE %s TO my_user'); result --------------------------------------------------------------------- @@ -19,7 +19,7 @@ GRANT (4 rows) step s1-connect: - SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database()); + SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database()); make_external_connection_to_node --------------------------------------------------------------------- @@ -27,11 +27,11 @@ make_external_connection_to_node (1 row) step s2-connect: - SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database()); + SELECT make_external_connection_to_node('localhost', 57637, 'my_user', current_database()); ERROR: connection failed step s2-connect-superuser: - SELECT make_external_connection_to_node('localhost', 57637, 'postgres', current_database()); + SELECT make_external_connection_to_node('localhost', 57637, 'postgres', current_database()); make_external_connection_to_node --------------------------------------------------------------------- @@ -39,17 +39,11 @@ make_external_connection_to_node (1 row) step s3-select: - SET ROLE my_user; - SELECT count(*) FROM my_table; + SET ROLE my_user; + SELECT count(*) FROM my_table; count --------------------------------------------------------------------- 0 (1 row) -run_command_on_workers 
---------------------------------------------------------------------- -(localhost,57637,t,t) -(localhost,57638,t,t) -(2 rows) - diff --git a/src/test/regress/expected/isolation_move_placement_vs_move_placement.out b/src/test/regress/expected/isolation_move_placement_vs_move_placement.out index 43ca63fcc..307b08b00 100644 --- a/src/test/regress/expected/isolation_move_placement_vs_move_placement.out +++ b/src/test/regress/expected/isolation_move_placement_vs_move_placement.out @@ -2,13 +2,13 @@ Parsed test spec with 2 sessions starting permutation: s1-load-cache s2-begin s2-move-placement s1-move-placement s2-commit s2-print-placements step s1-load-cache: - COPY test_move_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + COPY test_move_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; step s2-begin: - BEGIN; + BEGIN; step s2-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); master_move_shard_placement --------------------------------------------------------------------- @@ -16,24 +16,23 @@ master_move_shard_placement (1 row) step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); - -step s2-commit: - COMMIT; + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +ERROR: could not acquire the lock required to move public.test_move_table +step s2-commit: + COMMIT; -step s1-move-placement: <... completed> -ERROR: source placement must be in active state step s2-print-placements: - SELECT - nodename, nodeport, count(*) - FROM - pg_dist_shard_placement - WHERE - shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass) - AND - shardstate = 1 - GROUP BY - nodename, nodeport; + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; nodename |nodeport|count --------------------------------------------------------------------- @@ -43,10 +42,10 @@ localhost| 57638| 2 starting permutation: s2-begin s2-move-placement s1-move-placement s2-commit s2-print-placements step s2-begin: - BEGIN; + BEGIN; step s2-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); master_move_shard_placement --------------------------------------------------------------------- @@ -54,24 +53,23 @@ master_move_shard_placement (1 row) step s1-move-placement: - SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); - -step s2-commit: - COMMIT; + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +ERROR: could not acquire the lock required to move public.test_move_table +step s2-commit: + COMMIT; -step s1-move-placement: <... 
completed> -ERROR: source placement must be in active state step s2-print-placements: - SELECT - nodename, nodeport, count(*) - FROM - pg_dist_shard_placement - WHERE - shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass) - AND - shardstate = 1 - GROUP BY - nodename, nodeport; + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; nodename |nodeport|count --------------------------------------------------------------------- diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index 0b9738840..aca224db0 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -97,5 +97,6 @@ test: isolation_replicated_dist_on_mx test: isolation_replicate_reference_tables_to_coordinator test: isolation_multiuser_locking test: isolation_acquire_distributed_locks +test: isolation_concurrent_move_create_table test: isolation_check_mx diff --git a/src/test/regress/spec/isolation_concurrent_move_create_table.spec b/src/test/regress/spec/isolation_concurrent_move_create_table.spec new file mode 100644 index 000000000..ae8fd0b95 --- /dev/null +++ b/src/test/regress/spec/isolation_concurrent_move_create_table.spec @@ -0,0 +1,126 @@ +setup +{ + CREATE TABLE concurrent_table_1(id int PRIMARY KEY); + CREATE TABLE concurrent_table_2(id int PRIMARY KEY); + CREATE TABLE concurrent_table_3(id int PRIMARY KEY); + CREATE TABLE concurrent_table_4(id int PRIMARY KEY); + CREATE TABLE concurrent_table_5(id int PRIMARY KEY); + + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('concurrent_table_1', 'id', colocate_with := 'none'); + SELECT create_distributed_table('concurrent_table_4', 'id'); +} + +teardown +{ + DROP TABLE concurrent_table_1, concurrent_table_2, concurrent_table_3, concurrent_table_4, concurrent_table_5 CASCADE; +} + +session "s1" + + +step "s1-move-shard-logical" +{ + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid; +} + +step "s1-move-shard-block" +{ + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-create_distributed_table" +{ + SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1'); +} + +step "s2-commit" +{ + COMMIT; +} + +session "s3" + +step "s3-create_distributed_table" +{ + SELECT create_distributed_table('concurrent_table_3', 'id', colocate_with := 'concurrent_table_1'); +} + +step "s3-sanity-check" +{ + SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL; +} + +step "s3-sanity-check-2" +{ + SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id); +} + +step "s3-sanity-check-3" +{ + SELECT count(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid IN ('concurrent_table_4', 'concurrent_table_5'); +} + +step "s3-sanity-check-4" +{ + SELECT count(*) FROM concurrent_table_4 JOIN concurrent_table_5 USING (id); +} + + 
+session "s4" + +step "s4-begin" +{ + BEGIN; +} + +step "s4-commit" +{ + commit; +} + +step "s4-move-shard-logical" +{ + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid; +} + +step "s4-move-shard-block" +{ + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid; +} + +session "s5" + +step "s5-setup-rep-factor" +{ + SET citus.shard_replication_factor TO 1; +} + +step "s5-create_implicit_colocated_distributed_table" +{ + SELECT create_distributed_table('concurrent_table_5', 'id'); +} + + +//concurrent create_distributed_table with the same colocation should not block each other +permutation "s2-begin" "s2-create_distributed_table" "s3-create_distributed_table" "s2-commit" + +// concurrent create colocated table and shard move properly block each other, and cluster is healthy +permutation "s2-begin" "s2-create_distributed_table" "s1-move-shard-logical" "s2-commit" "s3-sanity-check" "s3-sanity-check-2" +permutation "s2-begin" "s2-create_distributed_table" "s1-move-shard-block" "s2-commit" "s3-sanity-check" "s3-sanity-check-2" + +// same test above, but this time implicitly colocated tables +permutation "s4-begin" "s4-move-shard-logical" "s5-setup-rep-factor" "s5-create_implicit_colocated_distributed_table" "s4-commit" "s3-sanity-check" "s3-sanity-check-3" "s3-sanity-check-4" +permutation "s4-begin" "s4-move-shard-block" "s5-setup-rep-factor" "s5-create_implicit_colocated_distributed_table" "s4-commit" "s3-sanity-check" "s3-sanity-check-3" "s3-sanity-check-4" + diff --git a/src/test/regress/spec/isolation_max_client_connections.spec b/src/test/regress/spec/isolation_max_client_connections.spec index ef801d433..6c02cca52 100644 --- a/src/test/regress/spec/isolation_max_client_connections.spec +++ b/src/test/regress/spec/isolation_max_client_connections.spec @@ -26,6 +26,7 @@ teardown { SELECT run_command_on_workers('ALTER SYSTEM RESET citus.max_client_connections'); SELECT run_command_on_workers('SELECT pg_reload_conf()'); + DROP TABLE my_table; } session "s1" From 5bc8a81aa7001bf7e983d6ada84a1bef21108df0 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Thu, 21 Jul 2022 13:13:02 +0200 Subject: [PATCH 10/38] Add colocation checks for shard splits --- .../distributed/operations/shard_split.c | 4 + .../isolation_blocking_shard_split.out | 580 +++++++++--------- ...isolation_concurrent_move_create_table.out | 55 +- .../spec/isolation_blocking_shard_split.spec | 30 +- ...solation_concurrent_move_create_table.spec | 13 +- 5 files changed, 349 insertions(+), 333 deletions(-) diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c index d39780e0d..76687434e 100644 --- a/src/backend/distributed/operations/shard_split.c +++ b/src/backend/distributed/operations/shard_split.c @@ -35,6 +35,7 @@ #include "distributed/metadata_sync.h" #include "distributed/multi_physical_planner.h" #include "distributed/deparse_shard_query.h" +#include "distributed/shard_rebalancer.h" /* * Entry for map that tracks ShardInterval -> Placement Node @@ -329,6 +330,9 @@ SplitShard(SplitMode splitMode, ShardInterval *shardIntervalToSplit = LoadShardInterval(shardIdToSplit); List *colocatedTableList 
= ColocatedTableList(shardIntervalToSplit->relationId); + Oid relationId = RelationIdForShard(shardIdToSplit); + AcquirePlacementColocationLock(relationId, ExclusiveLock, "split"); + /* sort the tables to avoid deadlocks */ colocatedTableList = SortList(colocatedTableList, CompareOids); Oid colocatedTableId = InvalidOid; diff --git a/src/test/regress/expected/isolation_blocking_shard_split.out b/src/test/regress/expected/isolation_blocking_shard_split.out index 02a23174e..d720f3a32 100644 --- a/src/test/regress/expected/isolation_blocking_shard_split.out +++ b/src/test/regress/expected/isolation_blocking_shard_split.out @@ -7,13 +7,13 @@ create_distributed_table (1 row) step s1-load-cache: - -- Indirect way to load cache. - TRUNCATE to_split_table; + -- Indirect way to load cache. + TRUNCATE to_split_table; step s1-insert: - -- Id '123456789' maps to shard xxxxx. - SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); - INSERT INTO to_split_table VALUES (123456789, 1); + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); get_shard_id_for_distribution_column --------------------------------------------------------------------- @@ -27,7 +27,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -35,14 +35,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -50,26 +50,26 @@ citus_split_shard_by_split_points (1 row) step s1-update: - UPDATE to_split_table SET value = 111 WHERE id = 123456789; + UPDATE to_split_table SET value = 111 WHERE id = 123456789; step s2-commit: - COMMIT; + COMMIT; step s1-update: <... completed> ERROR: could not find valid entry for shard xxxxx step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -91,13 +91,13 @@ create_distributed_table (1 row) step s1-load-cache: - -- Indirect way to load cache. - TRUNCATE to_split_table; + -- Indirect way to load cache. + TRUNCATE to_split_table; step s1-insert: - -- Id '123456789' maps to shard xxxxx. - SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); - INSERT INTO to_split_table VALUES (123456789, 1); + -- Id '123456789' maps to shard xxxxx. 
+ SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); get_shard_id_for_distribution_column --------------------------------------------------------------------- @@ -111,7 +111,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -119,14 +119,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -134,26 +134,26 @@ citus_split_shard_by_split_points (1 row) step s1-delete: - DELETE FROM to_split_table WHERE id = 123456789; + DELETE FROM to_split_table WHERE id = 123456789; step s2-commit: - COMMIT; + COMMIT; step s1-delete: <... completed> ERROR: could not find valid entry for shard xxxxx step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -175,8 +175,8 @@ create_distributed_table (1 row) step s1-load-cache: - -- Indirect way to load cache. - TRUNCATE to_split_table; + -- Indirect way to load cache. + TRUNCATE to_split_table; step s1-begin: BEGIN; @@ -185,7 +185,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -193,14 +193,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -208,12 +208,12 @@ citus_split_shard_by_split_points (1 row) step s1-insert: - -- Id '123456789' maps to shard xxxxx. - SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); - INSERT INTO to_split_table VALUES (123456789, 1); + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); step s2-commit: - COMMIT; + COMMIT; step s1-insert: <... 
completed> get_shard_id_for_distribution_column @@ -223,18 +223,18 @@ get_shard_id_for_distribution_column ERROR: could not find valid entry for shard xxxxx step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -255,8 +255,8 @@ create_distributed_table (1 row) step s1-load-cache: - -- Indirect way to load cache. - TRUNCATE to_split_table; + -- Indirect way to load cache. + TRUNCATE to_split_table; step s1-begin: BEGIN; @@ -265,7 +265,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -273,14 +273,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -288,26 +288,26 @@ citus_split_shard_by_split_points (1 row) step s1-copy: - COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; step s2-commit: - COMMIT; + COMMIT; step s1-copy: <... completed> ERROR: could not find valid entry for shard xxxxx step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -328,9 +328,9 @@ create_distributed_table (1 row) step s1-insert: - -- Id '123456789' maps to shard xxxxx. - SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); - INSERT INTO to_split_table VALUES (123456789, 1); + -- Id '123456789' maps to shard xxxxx. 
+ SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); get_shard_id_for_distribution_column --------------------------------------------------------------------- @@ -344,7 +344,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -352,14 +352,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -367,26 +367,26 @@ citus_split_shard_by_split_points (1 row) step s1-update: - UPDATE to_split_table SET value = 111 WHERE id = 123456789; + UPDATE to_split_table SET value = 111 WHERE id = 123456789; step s2-commit: - COMMIT; + COMMIT; step s1-update: <... completed> ERROR: could not find valid entry for shard xxxxx step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -408,9 +408,9 @@ create_distributed_table (1 row) step s1-insert: - -- Id '123456789' maps to shard xxxxx. - SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); - INSERT INTO to_split_table VALUES (123456789, 1); + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); get_shard_id_for_distribution_column --------------------------------------------------------------------- @@ -424,7 +424,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -432,14 +432,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -447,26 +447,26 @@ citus_split_shard_by_split_points (1 row) step s1-delete: - DELETE FROM to_split_table WHERE id = 123456789; + DELETE FROM to_split_table WHERE id = 123456789; step s2-commit: - COMMIT; + COMMIT; step s1-delete: <... 
completed> ERROR: could not find valid entry for shard xxxxx step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -494,7 +494,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -502,14 +502,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -517,12 +517,12 @@ citus_split_shard_by_split_points (1 row) step s1-insert: - -- Id '123456789' maps to shard xxxxx. - SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); - INSERT INTO to_split_table VALUES (123456789, 1); + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); step s2-commit: - COMMIT; + COMMIT; step s1-insert: <... 
completed> get_shard_id_for_distribution_column @@ -532,18 +532,18 @@ get_shard_id_for_distribution_column ERROR: could not find valid entry for shard xxxxx step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -570,7 +570,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -578,14 +578,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -593,26 +593,26 @@ citus_split_shard_by_split_points (1 row) step s1-copy: - COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; step s2-commit: - COMMIT; + COMMIT; step s1-copy: <... completed> ERROR: could not find valid entry for shard xxxxx step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -633,13 +633,13 @@ create_distributed_table (1 row) step s1-load-cache: - -- Indirect way to load cache. - TRUNCATE to_split_table; + -- Indirect way to load cache. + TRUNCATE to_split_table; step s1-insert: - -- Id '123456789' maps to shard xxxxx. - SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); - INSERT INTO to_split_table VALUES (123456789, 1); + -- Id '123456789' maps to shard xxxxx. 
+ SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); get_shard_id_for_distribution_column --------------------------------------------------------------------- @@ -653,11 +653,11 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500001, - ARRAY['-1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500001, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -665,39 +665,33 @@ citus_split_shard_by_split_points (1 row) step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); - -step s1-commit: - COMMIT; + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); -step s2-blocking-shard-split: <... completed> -citus_split_shard_by_split_points ---------------------------------------------------------------------- - -(1 row) +ERROR: could not acquire the lock required to split public.to_split_table +step s1-commit: + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- 57637|1500003|t | 0 - 57637|1500005|t | 1 + 57638|1500002|t | 1 57638|1500004|t | 0 - 57638|1500006|t | 0 -(4 rows) +(3 rows) id|value --------------------------------------------------------------------- @@ -712,9 +706,9 @@ create_distributed_table (1 row) step s1-insert: - -- Id '123456789' maps to shard xxxxx. - SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); - INSERT INTO to_split_table VALUES (123456789, 1); + -- Id '123456789' maps to shard xxxxx. 
+ SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); get_shard_id_for_distribution_column --------------------------------------------------------------------- @@ -728,11 +722,11 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500001, - ARRAY['-1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500001, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -740,39 +734,33 @@ citus_split_shard_by_split_points (1 row) step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); - -step s1-commit: - COMMIT; + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); -step s2-blocking-shard-split: <... completed> -citus_split_shard_by_split_points ---------------------------------------------------------------------- - -(1 row) +ERROR: could not acquire the lock required to split public.to_split_table +step s1-commit: + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- 57637|1500003|t | 0 - 57637|1500005|t | 1 + 57638|1500002|t | 1 57638|1500004|t | 0 - 57638|1500006|t | 0 -(4 rows) +(3 rows) id|value --------------------------------------------------------------------- @@ -787,8 +775,8 @@ create_distributed_table (1 row) step s1-load-cache: - -- Indirect way to load cache. - TRUNCATE to_split_table; + -- Indirect way to load cache. + TRUNCATE to_split_table; step s1-begin: BEGIN; @@ -797,7 +785,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -805,14 +793,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -820,25 +808,25 @@ citus_split_shard_by_split_points (1 row) step s1-ddl: - CREATE INDEX test_table_index ON to_split_table(id); + CREATE INDEX test_table_index ON to_split_table(id); step s2-commit: - COMMIT; + COMMIT; step s1-ddl: <... 
completed> step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -852,12 +840,12 @@ id|value (0 rows) step s2-print-index-count: - SELECT - nodeport, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -880,7 +868,7 @@ step s1-begin: SET citus.select_opens_transaction_block TO false; step s1-select: - SELECT count(*) FROM to_split_table WHERE id = 123456789; + SELECT count(*) FROM to_split_table WHERE id = 123456789; count --------------------------------------------------------------------- @@ -888,14 +876,14 @@ count (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-blocking-shard-split: - SELECT pg_catalog.citus_split_shard_by_split_points( - 1500002, - ARRAY['1073741824'], - ARRAY[1, 2], - 'block_writes'); + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); citus_split_shard_by_split_points --------------------------------------------------------------------- @@ -903,25 +891,25 @@ citus_split_shard_by_split_points (1 row) step s1-ddl: - CREATE INDEX test_table_index ON to_split_table(id); + CREATE INDEX test_table_index ON to_split_table(id); step s2-commit: - COMMIT; + COMMIT; step s1-ddl: <... 
completed> step s1-commit: - COMMIT; + COMMIT; step s2-print-cluster: - -- row count per shard - SELECT - nodeport, shardid, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from %s') - ORDER BY - nodeport, shardid; - -- rows - SELECT id, value FROM to_split_table ORDER BY id, value; + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; nodeport|shardid|success|result --------------------------------------------------------------------- @@ -935,12 +923,12 @@ id|value (0 rows) step s2-print-index-count: - SELECT - nodeport, success, result - FROM - run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_concurrent_move_create_table.out b/src/test/regress/expected/isolation_concurrent_move_create_table.out index 343955968..4ee46db32 100644 --- a/src/test/regress/expected/isolation_concurrent_move_create_table.out +++ b/src/test/regress/expected/isolation_concurrent_move_create_table.out @@ -1,11 +1,6 @@ Parsed test spec with 5 sessions starting permutation: s2-begin s2-create_distributed_table s3-create_distributed_table s2-commit -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s2-begin: BEGIN; @@ -30,11 +25,6 @@ step s2-commit: starting permutation: s2-begin s2-create_distributed_table s1-move-shard-logical s2-commit s3-sanity-check s3-sanity-check-2 -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s2-begin: BEGIN; @@ -72,11 +62,6 @@ count starting permutation: s2-begin s2-create_distributed_table s1-move-shard-block s2-commit s3-sanity-check s3-sanity-check-2 -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s2-begin: BEGIN; @@ -113,12 +98,45 @@ count (1 row) -starting permutation: s4-begin s4-move-shard-logical s5-setup-rep-factor s5-create_implicit_colocated_distributed_table s4-commit s3-sanity-check s3-sanity-check-3 s3-sanity-check-4 +starting permutation: s2-begin s2-create_distributed_table s1-split-block s2-commit s3-sanity-check s3-sanity-check-2 +step s2-begin: + BEGIN; + +step s2-create_distributed_table: + SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1'); + create_distributed_table --------------------------------------------------------------------- (1 row) +step s1-split-block: + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_split_shard_by_split_points( + shardid.shardid, ARRAY['2113265921'], ARRAY[(SELECT * FROM first_node_id), (SELECT * FROM first_node_id)], 'block_writes') FROM shardid; + +ERROR: could not acquire the lock required to split public.concurrent_table_1 +step s2-commit: + COMMIT; + +step s3-sanity-check: + SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS 
NULL; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s3-sanity-check-2: + SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id); + +count +--------------------------------------------------------------------- + 0 +(1 row) + + +starting permutation: s4-begin s4-move-shard-logical s5-setup-rep-factor s5-create_implicit_colocated_distributed_table s4-commit s3-sanity-check s3-sanity-check-3 s3-sanity-check-4 step s4-begin: BEGIN; @@ -167,11 +185,6 @@ count starting permutation: s4-begin s4-move-shard-block s5-setup-rep-factor s5-create_implicit_colocated_distributed_table s4-commit s3-sanity-check s3-sanity-check-3 s3-sanity-check-4 -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s4-begin: BEGIN; diff --git a/src/test/regress/spec/isolation_blocking_shard_split.spec b/src/test/regress/spec/isolation_blocking_shard_split.spec index ddac66f5b..bb2f93368 100644 --- a/src/test/regress/spec/isolation_blocking_shard_split.spec +++ b/src/test/regress/spec/isolation_blocking_shard_split.spec @@ -125,22 +125,22 @@ step "s2-print-index-count" // Run shard split while concurrently performing DML and index creation // We expect DML,Copy to fail because the shard they are waiting for is destroyed. - permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" - permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" - permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" - permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" - // The same tests without loading the cache at first - permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" - permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" - permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" - permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" +// The same tests without loading the cache at first +permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" 
"s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" // Concurrent shard split blocks on different shards of the same table (or any colocated table) - permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster" - // The same test above without loading the cache at first - permutation "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster" +permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster" +// The same test above without loading the cache at first +permutation "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster" // Concurrent DDL blocks on different shards of the same table (or any colocated table) - permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" - // The same tests without loading the cache at first - permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" +permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" +// The same tests without loading the cache at first +permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" diff --git a/src/test/regress/spec/isolation_concurrent_move_create_table.spec b/src/test/regress/spec/isolation_concurrent_move_create_table.spec index ae8fd0b95..48022425e 100644 --- a/src/test/regress/spec/isolation_concurrent_move_create_table.spec +++ b/src/test/regress/spec/isolation_concurrent_move_create_table.spec @@ -9,11 +9,13 @@ setup SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('concurrent_table_1', 'id', colocate_with := 'none'); SELECT create_distributed_table('concurrent_table_4', 'id'); + + SELECT nodeid INTO first_node_id FROM pg_dist_node WHERE nodeport = 57637; } teardown { - DROP TABLE concurrent_table_1, concurrent_table_2, concurrent_table_3, concurrent_table_4, concurrent_table_5 CASCADE; + DROP TABLE concurrent_table_1, concurrent_table_2, concurrent_table_3, concurrent_table_4, concurrent_table_5, first_node_id CASCADE; } session "s1" @@ -31,6 +33,14 @@ step "s1-move-shard-block" SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid; } +step "s1-split-block" +{ + WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1) + SELECT citus_split_shard_by_split_points( + shardid.shardid, ARRAY['2113265921'], ARRAY[(SELECT * FROM first_node_id), (SELECT * FROM first_node_id)], 'block_writes') FROM shardid; +} + + session "s2" step "s2-begin" @@ -119,6 +129,7 @@ permutation "s2-begin" "s2-create_distributed_table" "s3-create_distributed_tab // concurrent create colocated table and shard move properly block each other, and cluster is healthy permutation "s2-begin" "s2-create_distributed_table" "s1-move-shard-logical" "s2-commit" "s3-sanity-check" "s3-sanity-check-2" permutation "s2-begin" "s2-create_distributed_table" 
"s1-move-shard-block" "s2-commit" "s3-sanity-check" "s3-sanity-check-2" +permutation "s2-begin" "s2-create_distributed_table" "s1-split-block" "s2-commit" "s3-sanity-check" "s3-sanity-check-2" // same test above, but this time implicitly colocated tables permutation "s4-begin" "s4-move-shard-logical" "s5-setup-rep-factor" "s5-create_implicit_colocated_distributed_table" "s4-commit" "s3-sanity-check" "s3-sanity-check-3" "s3-sanity-check-4" From 1259d83511a403adde8a7a28cd453dba78f4af1f Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Wed, 27 Jul 2022 14:33:31 +0300 Subject: [PATCH 11/38] Smallfix in CreateCollationDDL logic (#6089) --- src/backend/distributed/commands/collation.c | 2 +- src/test/regress/expected/distributed_collations_conflict.out | 4 ++-- src/test/regress/sql/distributed_collations_conflict.sql | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index 492e2ace2..834e847a1 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -106,7 +106,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati "CREATE COLLATION %s (provider = '%s'", *quotedCollationName, providerString); - if (strcmp(collcollate, collctype)) + if (strcmp(collcollate, collctype) == 0) { appendStringInfo(&collationNameDef, ", locale = %s", diff --git a/src/test/regress/expected/distributed_collations_conflict.out b/src/test/regress/expected/distributed_collations_conflict.out index 818de2697..8c6c741c5 100644 --- a/src/test/regress/expected/distributed_collations_conflict.out +++ b/src/test/regress/expected/distributed_collations_conflict.out @@ -74,13 +74,13 @@ ORDER BY 1,2,3; \c - - - :master_port SET search_path TO collation_conflict; -- now test worker_create_or_replace_object directly -SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$); +SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', locale = 'und-u-ks-level2')$$); worker_create_or_replace_object --------------------------------------------------------------------- f (1 row) -SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$); +SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', locale = 'und-u-ks-level2')$$); worker_create_or_replace_object --------------------------------------------------------------------- f diff --git a/src/test/regress/sql/distributed_collations_conflict.sql b/src/test/regress/sql/distributed_collations_conflict.sql index eceee4dd6..6c44449fe 100644 --- a/src/test/regress/sql/distributed_collations_conflict.sql +++ b/src/test/regress/sql/distributed_collations_conflict.sql @@ -67,8 +67,8 @@ ORDER BY 1,2,3; SET search_path TO collation_conflict; -- now test worker_create_or_replace_object directly -SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$); -SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 
'und-u-ks-level2')$$);
+SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', locale = 'und-u-ks-level2')$$);
+SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', locale = 'und-u-ks-level2')$$);
 
 -- hide cascades
 SET client_min_messages TO error;

From b08e5ec29d703bc462e8cd470429116eca36f5bf Mon Sep 17 00:00:00 2001
From: aykut-bozkurt <51649454+aykut-bozkurt@users.noreply.github.com>
Date: Wed, 27 Jul 2022 17:36:04 +0300
Subject: [PATCH 12/38] added some missing object address callbacks (#6056)

---
 src/backend/distributed/commands/common.c     | 52 +++++++++++
 .../commands/distribute_object_ops.c          | 12 +--
 src/backend/distributed/commands/index.c      | 87 ++++++++++++++-----
 src/backend/distributed/commands/sequence.c   | 28 ++++++
 src/backend/distributed/commands/statistics.c | 27 ++++++
 src/backend/distributed/commands/view.c       | 27 ++++++
 src/include/distributed/commands.h            |  6 ++
 7 files changed, 210 insertions(+), 29 deletions(-)

diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c
index 63491fbdc..0441abe05 100644
--- a/src/backend/distributed/commands/common.c
+++ b/src/backend/distributed/commands/common.c
@@ -14,6 +14,8 @@
 #include "postgres.h"
 
 #include "catalog/objectaddress.h"
+#include "catalog/pg_ts_config.h"
+#include "catalog/pg_ts_dict.h"
 #include "nodes/parsenodes.h"
 #include "tcop/utility.h"
 
@@ -287,3 +289,53 @@ PreprocessDropDistributedObjectStmt(Node *node, const char *queryString,
 
 	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
 }
+
+
+/*
+ * DropTextSearchDictObjectAddress returns a list of object addresses in
+ * the DROP TEXT SEARCH DICTIONARY statement.
+ */
+List *
+DropTextSearchDictObjectAddress(Node *node, bool missing_ok)
+{
+	DropStmt *stmt = castNode(DropStmt, node);
+
+	List *objectAddresses = NIL;
+
+	List *objNameList = NIL;
+	foreach_ptr(objNameList, stmt->objects)
+	{
+		Oid tsdictOid = get_ts_dict_oid(objNameList, missing_ok);
+
+		ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress));
+		ObjectAddressSet(*objectAddress, TSDictionaryRelationId, tsdictOid);
+		objectAddresses = lappend(objectAddresses, objectAddress);
+	}
+
+	return objectAddresses;
+}
+
+
+/*
+ * DropTextSearchConfigObjectAddress returns a list of object addresses in
+ * the DROP TEXT SEARCH CONFIGURATION statement. 
+ */ +List * +DropTextSearchConfigObjectAddress(Node *node, bool missing_ok) +{ + DropStmt *stmt = castNode(DropStmt, node); + + List *objectAddresses = NIL; + + List *objNameList = NIL; + foreach_ptr(objNameList, stmt->objects) + { + Oid tsconfigOid = get_ts_config_oid(objNameList, missing_ok); + + ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*objectAddress, TSConfigRelationId, tsconfigOid); + objectAddresses = lappend(objectAddresses, objectAddress); + } + + return objectAddresses; +} diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 9c9fdb6e5..78f72d828 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -291,7 +291,7 @@ static DistributeObjectOps Any_Reindex = { .qualify = NULL, .preprocess = PreprocessReindexStmt, .postprocess = NULL, - .address = NULL, + .address = ReindexStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Any_Rename = { @@ -531,7 +531,7 @@ static DistributeObjectOps View_Drop = { .qualify = QualifyDropViewStmt, .preprocess = PreprocessDropViewStmt, .postprocess = NULL, - .address = NULL, + .address = DropViewStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Function_Rename = { @@ -655,7 +655,7 @@ static DistributeObjectOps Sequence_Drop = { .qualify = QualifyDropSequenceStmt, .preprocess = PreprocessDropSequenceStmt, .postprocess = NULL, - .address = NULL, + .address = SequenceDropStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Sequence_Grant = { @@ -724,7 +724,7 @@ static DistributeObjectOps TextSearchConfig_Drop = { .qualify = QualifyDropTextSearchConfigurationStmt, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, - .address = NULL, + .address = DropTextSearchConfigObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchConfig_Rename = { @@ -786,7 +786,7 @@ static DistributeObjectOps TextSearchDict_Drop = { .qualify = QualifyDropTextSearchDictionaryStmt, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, - .address = NULL, + .address = DropTextSearchDictObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchDict_Rename = { @@ -903,7 +903,7 @@ static DistributeObjectOps Statistics_Drop = { .qualify = QualifyDropStatisticsStmt, .preprocess = PreprocessDropStatisticsStmt, .postprocess = NULL, - .address = NULL, + .address = DropStatisticsObjectAddress, .markDistributed = false, }; static DistributeObjectOps Statistics_Rename = { diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index ee7098289..d424d6a3c 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -75,7 +75,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *rel, Oid relOid, Oid static void ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement); static void ErrorIfUnsupportedDropIndexStmt(DropStmt *dropIndexStatement); static List * DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt); - +static Oid ReindexStmtFindRelationOid(ReindexStmt *reindexStmt, bool missingOk); /* * This struct defines the state for the callback for drop statements. 
@@ -522,6 +522,49 @@ GetCreateIndexRelationLockMode(IndexStmt *createIndexStatement) } +/* + * ReindexStmtFindRelationOid returns the oid of the relation on which the index exist + * if the object is an index in the reindex stmt. It returns the oid of the relation + * if the object is a table in the reindex stmt. It also acquires the relevant lock + * for the statement. + */ +static Oid +ReindexStmtFindRelationOid(ReindexStmt *reindexStmt, bool missingOk) +{ + Assert(reindexStmt->relation != NULL); + + Assert(reindexStmt->kind == REINDEX_OBJECT_INDEX || + reindexStmt->kind == REINDEX_OBJECT_TABLE); + + Oid relationId = InvalidOid; + + LOCKMODE lockmode = IsReindexWithParam_compat(reindexStmt, "concurrently") ? + ShareUpdateExclusiveLock : AccessExclusiveLock; + + if (reindexStmt->kind == REINDEX_OBJECT_INDEX) + { + struct ReindexIndexCallbackState state; + state.concurrent = IsReindexWithParam_compat(reindexStmt, + "concurrently"); + state.locked_table_oid = InvalidOid; + + Oid indOid = RangeVarGetRelidExtended(reindexStmt->relation, lockmode, + (missingOk) ? RVR_MISSING_OK : 0, + RangeVarCallbackForReindexIndex, + &state); + relationId = IndexGetRelation(indOid, missingOk); + } + else + { + relationId = RangeVarGetRelidExtended(reindexStmt->relation, lockmode, + (missingOk) ? RVR_MISSING_OK : 0, + RangeVarCallbackOwnsTable, NULL); + } + + return relationId; +} + + /* * PreprocessReindexStmt determines whether a given REINDEX statement involves * a distributed table. If so (and if the statement does not use unsupported @@ -544,36 +587,17 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand, */ if (reindexStatement->relation != NULL) { - Relation relation = NULL; - Oid relationId = InvalidOid; - LOCKMODE lockmode = IsReindexWithParam_compat(reindexStatement, "concurrently") ? - ShareUpdateExclusiveLock : AccessExclusiveLock; + Oid relationId = ReindexStmtFindRelationOid(reindexStatement, false); MemoryContext relationContext = NULL; - - Assert(reindexStatement->kind == REINDEX_OBJECT_INDEX || - reindexStatement->kind == REINDEX_OBJECT_TABLE); - + Relation relation = NULL; if (reindexStatement->kind == REINDEX_OBJECT_INDEX) { - struct ReindexIndexCallbackState state; - state.concurrent = IsReindexWithParam_compat(reindexStatement, - "concurrently"); - state.locked_table_oid = InvalidOid; - - Oid indOid = RangeVarGetRelidExtended(reindexStatement->relation, - lockmode, 0, - RangeVarCallbackForReindexIndex, - &state); + Oid indOid = RangeVarGetRelid(reindexStatement->relation, NoLock, 0); relation = index_open(indOid, NoLock); - relationId = IndexGetRelation(indOid, false); } else { - RangeVarGetRelidExtended(reindexStatement->relation, lockmode, 0, - RangeVarCallbackOwnsTable, NULL); - relation = table_openrv(reindexStatement->relation, NoLock); - relationId = RelationGetRelid(relation); } bool isCitusRelation = IsCitusTable(relationId); @@ -628,6 +652,23 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand, } +/* + * ReindexStmtObjectAddress returns list of object addresses in the reindex + * statement. 
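+ *
+ * For REINDEX INDEX the returned address points at the table that owns
+ * the index rather than the index itself, since ReindexStmtFindRelationOid
+ * resolves an index to its parent relation via IndexGetRelation.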
+ */ +List * +ReindexStmtObjectAddress(Node *stmt, bool missing_ok) +{ + ReindexStmt *reindexStatement = castNode(ReindexStmt, stmt); + + Oid relationId = ReindexStmtFindRelationOid(reindexStatement, missing_ok); + ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*objectAddress, RelationRelationId, relationId); + + return list_make1(objectAddress); +} + + /* * PreprocessDropIndexStmt determines whether a given DROP INDEX statement involves * a distributed table. If so (and if the statement does not use unsupported diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 6a51516b8..0850a0222 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -313,6 +313,34 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString, } +/* + * SequenceDropStmtObjectAddress returns list of object addresses in the drop sequence + * statement. + */ +List * +SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok) +{ + DropStmt *dropSeqStmt = castNode(DropStmt, stmt); + + List *objectAddresses = NIL; + + List *droppingSequencesList = dropSeqStmt->objects; + List *objectNameList = NULL; + foreach_ptr(objectNameList, droppingSequencesList) + { + RangeVar *seq = makeRangeVarFromNameList(objectNameList); + + Oid seqOid = RangeVarGetRelid(seq, AccessShareLock, missing_ok); + + ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*objectAddress, SequenceRelationId, seqOid); + objectAddresses = lappend(objectAddresses, objectAddress); + } + + return objectAddresses; +} + + /* * PreprocessRenameSequenceStmt is called when the user is renaming a sequence. The invocation * happens before the statement is applied locally. diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index 5592c1df8..a85d2db48 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -210,6 +210,33 @@ PreprocessDropStatisticsStmt(Node *node, const char *queryString, } +/* + * DropStatisticsObjectAddress returns list of object addresses in the drop statistics + * statement. + */ +List * +DropStatisticsObjectAddress(Node *node, bool missing_ok) +{ + DropStmt *dropStatisticsStmt = castNode(DropStmt, node); + Assert(dropStatisticsStmt->removeType == OBJECT_STATISTIC_EXT); + + List *objectAddresses = NIL; + + List *objectNameList = NULL; + foreach_ptr(objectNameList, dropStatisticsStmt->objects) + { + Oid statsOid = get_statistics_object_oid(objectNameList, + dropStatisticsStmt->missing_ok); + + ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*objectAddress, StatisticExtRelationId, statsOid); + objectAddresses = lappend(objectAddresses, objectAddress); + } + + return objectAddresses; +} + + /* * PreprocessAlterStatisticsRenameStmt is called during the planning phase for * ALTER STATISTICS RENAME. diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c index f8900a800..ce7119875 100644 --- a/src/backend/distributed/commands/view.c +++ b/src/backend/distributed/commands/view.c @@ -221,6 +221,33 @@ PreprocessDropViewStmt(Node *node, const char *queryString, ProcessUtilityContex } +/* + * DropViewStmtObjectAddress returns list of object addresses in the drop view + * statement. 
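+ *
+ * Each (possibly schema-qualified) view name is resolved with
+ * RangeVarGetRelid under AccessShareLock; missing_ok is passed through,
+ * so DROP VIEW IF EXISTS does not error out here.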
+ */ +List * +DropViewStmtObjectAddress(Node *stmt, bool missing_ok) +{ + DropStmt *dropStmt = castNode(DropStmt, stmt); + + List *objectAddresses = NIL; + + List *possiblyQualifiedViewName = NULL; + foreach_ptr(possiblyQualifiedViewName, dropStmt->objects) + { + RangeVar *viewRangeVar = makeRangeVarFromNameList(possiblyQualifiedViewName); + Oid viewOid = RangeVarGetRelid(viewRangeVar, AccessShareLock, + missing_ok); + + ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*objectAddress, RelationRelationId, viewOid); + objectAddresses = lappend(objectAddresses, objectAddress); + } + + return objectAddresses; +} + + /* * FilterNameListForDistributedViews takes a list of view names and filters against the * views that are distributed. diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 0bc565aff..f421a1255 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -147,6 +147,8 @@ extern List * PostprocessAlterDistributedObjectStmt(Node *stmt, const char *quer extern List * PreprocessDropDistributedObjectStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * DropTextSearchConfigObjectAddress(Node *node, bool missing_ok); +extern List * DropTextSearchDictObjectAddress(Node *node, bool missing_ok); /* index.c */ typedef void (*PGIndexProcessor)(Form_pg_index, List **, int); @@ -328,6 +330,7 @@ extern LOCKMODE GetCreateIndexRelationLockMode(IndexStmt *createIndexStatement); extern List * PreprocessReindexStmt(Node *ReindexStatement, const char *ReindexCommand, ProcessUtilityContext processUtilityContext); +extern List * ReindexStmtObjectAddress(Node *stmt, bool missing_ok); extern List * PreprocessDropIndexStmt(Node *dropIndexStatement, const char *dropIndexCommand, ProcessUtilityContext processUtilityContext); @@ -417,6 +420,7 @@ extern List * PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryStri extern List * PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString); extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok); extern List * PreprocessRenameSequenceStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PreprocessGrantOnSequenceStmt(Node *node, const char *queryString, @@ -439,6 +443,7 @@ extern List * PostprocessCreateStatisticsStmt(Node *node, const char *queryStrin extern List * CreateStatisticsStmtObjectAddress(Node *node, bool missingOk); extern List * PreprocessDropStatisticsStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * DropStatisticsObjectAddress(Node *node, bool missing_ok); extern List * PreprocessAlterStatisticsRenameStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); @@ -569,6 +574,7 @@ extern List * ViewStmtObjectAddress(Node *node, bool missing_ok); extern List * AlterViewStmtObjectAddress(Node *node, bool missing_ok); extern List * PreprocessDropViewStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * DropViewStmtObjectAddress(Node *node, bool missing_ok); extern char * CreateViewDDLCommand(Oid viewOid); extern List * GetViewCreationCommandsOfTable(Oid relationId); extern List * GetViewCreationTableDDLCommandsOfTable(Oid relationId); From 
2b2a5296536bece0e80b56416c9f44db607cd0cd Mon Sep 17 00:00:00 2001 From: Ahmet Gedemenli Date: Wed, 27 Jul 2022 17:57:45 +0300 Subject: [PATCH 13/38] Error out for views with circular dependencies (#6051) Adds error check for views with circular dependencies --- .../distributed/commands/dependencies.c | 4 +-- src/backend/distributed/metadata/dependency.c | 19 +++++++++++- .../expected/citus_local_tables_mx.out | 31 ++++++++++++++++++- .../regress/expected/view_propagation.out | 7 ++--- .../regress/sql/citus_local_tables_mx.sql | 21 ++++++++++++- 5 files changed, 72 insertions(+), 10 deletions(-) diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index 6329cf6f4..87491a4f5 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -202,12 +202,12 @@ ErrorIfCircularDependencyExists(const ObjectAddress *objectAddress) /* * DeferErrorIfCircularDependencyExists checks whether given object has - * circular dependency with itself via existing objects of pg_dist_object. + * circular dependency with itself. If so, returns a deferred error. */ DeferredErrorMessage * DeferErrorIfCircularDependencyExists(const ObjectAddress *objectAddress) { - List *dependencies = GetAllSupportedDependenciesForObject(objectAddress); + List *dependencies = GetAllDependenciesForObject(objectAddress); ObjectAddress *dependency = NULL; foreach_ptr(dependency, dependencies) diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index e03edadda..a9900bd87 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -1615,7 +1615,7 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe * rule and that rule has dependencies to other objects. */ char relKind = get_rel_relkind(relationId); - if (relKind == RELKIND_VIEW) + if (relKind == RELKIND_VIEW || relKind == RELKIND_MATVIEW) { List *ruleRefDepList = GetViewRuleReferenceDependencyList(relationId); result = list_concat(result, ruleRefDepList); @@ -2109,6 +2109,23 @@ GetDependingViews(Oid relationId) ViewDependencyNode *dependingNode = NULL; foreach_ptr(dependingNode, node->dependingNodes) { + ObjectAddress relationAddress = { 0 }; + ObjectAddressSet(relationAddress, RelationRelationId, dependingNode->id); + + /* + * This function does not catch views with circular dependencies, + * because of the remaining dependency count check below. + * Here we check if the view has a circular dependency or not. + * If yes, we error out with a message that tells the user that + * Citus does not handle circular dependencies. 
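+ * Without this check, a view that is part of a dependency cycle
+ * would never reach a remaining dependency count of zero and would
+ * silently be left out of the result.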
+ */ + DeferredErrorMessage *depError = + DeferErrorIfCircularDependencyExists(&relationAddress); + if (depError != NULL) + { + RaiseDeferredError(depError, ERROR); + } + dependingNode->remainingDependencyCount--; if (dependingNode->remainingDependencyCount == 0) { diff --git a/src/test/regress/expected/citus_local_tables_mx.out b/src/test/regress/expected/citus_local_tables_mx.out index 27424c7d8..6fcad3612 100644 --- a/src/test/regress/expected/citus_local_tables_mx.out +++ b/src/test/regress/expected/citus_local_tables_mx.out @@ -882,8 +882,13 @@ CREATE MATERIALIZED VIEW matview_101 AS SELECT * from loc_tb; CREATE VIEW v103 AS SELECT * from loc_tb; CREATE MATERIALIZED VIEW matview_102 AS SELECT * from loc_tb JOIN v103 USING (a); CREATE OR REPLACE VIEW v103 AS SELECT * from loc_tb JOIN matview_102 USING (a); +-- fails to add local table to metadata, because of the circular dependency +ALTER TABLE loc_tb ADD CONSTRAINT fkey FOREIGN KEY (a) references ref_tb(a); +ERROR: Citus can not handle circular dependencies between distributed objects +-- drop the view&matview with circular dependency +DROP VIEW v103 CASCADE; SET client_min_messages TO DEBUG1; --- auto undistribute +-- now it should successfully add to metadata and create the views on workers ALTER TABLE loc_tb ADD CONSTRAINT fkey FOREIGN KEY (a) references ref_tb(a); DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v100 (a) AS SELECT loc_tb.a FROM citus_local_tables_mx.loc_tb; ALTER VIEW citus_local_tables_mx.v100 OWNER TO postgres" @@ -907,6 +912,7 @@ select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100, (localhost,57638,t,0) (2 rows) +-- auto undistribute ALTER TABLE loc_tb DROP CONSTRAINT fkey; -- fails because fkey is dropped and table is converted to local table select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100$$); @@ -1075,6 +1081,29 @@ SELECT count(*) FROM citus_local_tables_mx.mv4; 0 (1 row) +-- test circular dependency detection among views +create table root_tbl (a int); +create materialized view chain_v1 as select * from root_tbl; +create view chain_v2 as select * from chain_v1; +WARNING: "view chain_v2" has dependency to "table root_tbl" that is not in Citus' metadata +create materialized view chain_v3 as select * from chain_v2; +create or replace view chain_v2 as select * from chain_v1 join chain_v3 using (a); +WARNING: "view chain_v2" has dependency on unsupported object "materialized view chain_v3" +-- catch circular dependency and error out +select citus_add_local_table_to_metadata('root_tbl'); +ERROR: Citus can not handle circular dependencies between distributed objects +-- same for create_distributed_table +select create_distributed_table('root_tbl','a'); +ERROR: Citus can not handle circular dependencies between distributed objects +-- fix the circular dependency and add to metadata +create or replace view chain_v2 as select * from chain_v1; +WARNING: "view chain_v2" has dependency to "table root_tbl" that is not in Citus' metadata +select citus_add_local_table_to_metadata('root_tbl'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + -- todo: add more matview tests once 5968 and 6028 are fixed -- cleanup at exit set client_min_messages to error; diff --git a/src/test/regress/expected/view_propagation.out b/src/test/regress/expected/view_propagation.out index 290c45943..245c9a155 100644 --- a/src/test/regress/expected/view_propagation.out +++ 
b/src/test/regress/expected/view_propagation.out @@ -811,11 +811,8 @@ WARNING: "view v_test_2" has dependency to "table employees" that is not in Cit DETAIL: "view v_test_2" will be created only locally HINT: Distribute "table employees" first to distribute "view v_test_2" SELECT create_distributed_table('employees','employee_id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - +ERROR: Citus can not handle circular dependencies between distributed objects +DETAIL: "view v_test_1" circularly depends itself, resolve circular dependency first -- verify not distributed SELECT run_command_on_workers($$SELECT count(*) FROM v_test_1$$); run_command_on_workers diff --git a/src/test/regress/sql/citus_local_tables_mx.sql b/src/test/regress/sql/citus_local_tables_mx.sql index c81cac2e6..2a2fb70d3 100644 --- a/src/test/regress/sql/citus_local_tables_mx.sql +++ b/src/test/regress/sql/citus_local_tables_mx.sql @@ -460,14 +460,20 @@ CREATE VIEW v103 AS SELECT * from loc_tb; CREATE MATERIALIZED VIEW matview_102 AS SELECT * from loc_tb JOIN v103 USING (a); CREATE OR REPLACE VIEW v103 AS SELECT * from loc_tb JOIN matview_102 USING (a); +-- fails to add local table to metadata, because of the circular dependency +ALTER TABLE loc_tb ADD CONSTRAINT fkey FOREIGN KEY (a) references ref_tb(a); +-- drop the view&matview with circular dependency +DROP VIEW v103 CASCADE; + SET client_min_messages TO DEBUG1; --- auto undistribute +-- now it should successfully add to metadata and create the views on workers ALTER TABLE loc_tb ADD CONSTRAINT fkey FOREIGN KEY (a) references ref_tb(a); SET client_min_messages TO WARNING; -- works fine select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100, citus_local_tables_mx.v101, citus_local_tables_mx.v102$$); +-- auto undistribute ALTER TABLE loc_tb DROP CONSTRAINT fkey; -- fails because fkey is dropped and table is converted to local table select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100$$); @@ -537,6 +543,19 @@ SELECT count(*) FROM citus_local_tables_mx.mv2; SELECT count(*) FROM citus_local_tables_mx.mv3; SELECT count(*) FROM citus_local_tables_mx.mv4; +-- test circular dependency detection among views +create table root_tbl (a int); +create materialized view chain_v1 as select * from root_tbl; +create view chain_v2 as select * from chain_v1; +create materialized view chain_v3 as select * from chain_v2; +create or replace view chain_v2 as select * from chain_v1 join chain_v3 using (a); +-- catch circular dependency and error out +select citus_add_local_table_to_metadata('root_tbl'); +-- same for create_distributed_table +select create_distributed_table('root_tbl','a'); +-- fix the circular dependency and add to metadata +create or replace view chain_v2 as select * from chain_v1; +select citus_add_local_table_to_metadata('root_tbl'); -- todo: add more matview tests once 5968 and 6028 are fixed -- cleanup at exit From 0f50bef696e8925374a7423f89b7513bf6f31d44 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Wed, 27 Jul 2022 17:46:32 +0200 Subject: [PATCH 14/38] Avoid possible information leakage about existing users (#6090) --- src/backend/distributed/shared_library_init.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 1140991be..8ea60fe74 100644 --- a/src/backend/distributed/shared_library_init.c +++ 
b/src/backend/distributed/shared_library_init.c @@ -2498,7 +2498,11 @@ CitusAuthHook(Port *port, int status) /* - * IsSuperuser returns whether the role with the given name is superuser. + * IsSuperuser returns whether the role with the given name is superuser. If + * the user doesn't exist, this simply returns false instead of throwing an + * error. This is done to not leak information about users existing or not, in + * some cases postgres is vague about this on purpose. So, by returning false + * we let postgres return this possibly vague error message. */ static bool IsSuperuser(char *roleName) @@ -2511,9 +2515,7 @@ IsSuperuser(char *roleName) HeapTuple roleTuple = SearchSysCache1(AUTHNAME, CStringGetDatum(roleName)); if (!HeapTupleIsValid(roleTuple)) { - ereport(FATAL, - (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), - errmsg("role \"%s\" does not exist", roleName))); + return false; } Form_pg_authid rform = (Form_pg_authid) GETSTRUCT(roleTuple); From fdf090758b2ddb8baf3a84a98b919e869f273bc8 Mon Sep 17 00:00:00 2001 From: Ying Xu <32597660+yxu2162@users.noreply.github.com> Date: Wed, 27 Jul 2022 11:06:49 -0700 Subject: [PATCH 15/38] Bugfix for IN clause to be considered during planner phase in Columnar (#6030) Reported bug #5803 shows that we are currently not sending the IN clause to our planner for columnar. This PR fixes it by checking for ScalarArrayOpExpr in ExtractPushdownClause so that we do not skip it. Also added a test case for this new addition. --- src/backend/columnar/columnar_customscan.c | 12 ++++ .../expected/columnar_chunk_filtering.out | 64 +++++++++++++++++++ .../regress/sql/columnar_chunk_filtering.sql | 41 ++++++++++++ 3 files changed, 117 insertions(+) diff --git a/src/backend/columnar/columnar_customscan.c b/src/backend/columnar/columnar_customscan.c index 4d4ba63f0..98c13e2a7 100644 --- a/src/backend/columnar/columnar_customscan.c +++ b/src/backend/columnar/columnar_customscan.c @@ -824,6 +824,18 @@ ExtractPushdownClause(PlannerInfo *root, RelOptInfo *rel, Node *node) } } + if (IsA(node, ScalarArrayOpExpr)) + { + if (!contain_volatile_functions(node)) + { + return (Expr *) node; + } + else + { + return NULL; + } + } + if (!IsA(node, OpExpr) || list_length(((OpExpr *) node)->args) != 2) { ereport(ColumnarPlannerDebugLevel, diff --git a/src/test/regress/expected/columnar_chunk_filtering.out b/src/test/regress/expected/columnar_chunk_filtering.out index 980b2454f..09688d7aa 100644 --- a/src/test/regress/expected/columnar_chunk_filtering.out +++ b/src/test/regress/expected/columnar_chunk_filtering.out @@ -1066,3 +1066,67 @@ RESET columnar.max_custom_scan_paths; RESET columnar.qual_pushdown_correlation_threshold; RESET columnar.planner_debug_level; DROP TABLE pushdown_test; +-- https://github.com/citusdata/citus/issues/5803 +CREATE TABLE pushdown_test(id int, country text) using columnar; +BEGIN; + INSERT INTO pushdown_test VALUES(1, 'AL'); + INSERT INTO pushdown_test VALUES(2, 'AU'); +END; +BEGIN; + INSERT INTO pushdown_test VALUES(3, 'BR'); + INSERT INTO pushdown_test VALUES(4, 'BT'); +END; +BEGIN; + INSERT INTO pushdown_test VALUES(5, 'PK'); + INSERT INTO pushdown_test VALUES(6, 'PA'); +END; +BEGIN; + INSERT INTO pushdown_test VALUES(7, 'USA'); + INSERT INTO pushdown_test VALUES(8, 'ZW'); +END; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on pushdown_test (actual 
rows=3 loops=1) + Filter: (country = ANY ('{USA,BR,ZW}'::text[])) + Rows Removed by Filter: 1 + Columnar Projected Columns: id, country + Columnar Chunk Group Filters: (country = ANY ('{USA,BR,ZW}'::text[])) + Columnar Chunk Groups Removed by Filter: 2 +(6 rows) + +SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); + id +--------------------------------------------------------------------- + 3 + 7 + 8 +(3 rows) + +-- test for volatile functions with IN +CREATE FUNCTION volatileFunction() returns TEXT language plpgsql AS +$$ +BEGIN + return 'AL'; +END; +$$; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ColumnarScan) on pushdown_test (actual rows=3 loops=1) + Filter: (country = ANY (ARRAY['USA'::text, 'ZW'::text, volatilefunction()])) + Rows Removed by Filter: 5 + Columnar Projected Columns: id, country +(4 rows) + +SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); + id | country +--------------------------------------------------------------------- + 1 | AL + 7 | USA + 8 | ZW +(3 rows) + +DROP TABLE pushdown_test; diff --git a/src/test/regress/sql/columnar_chunk_filtering.sql b/src/test/regress/sql/columnar_chunk_filtering.sql index a2d2d628e..9e7d43363 100644 --- a/src/test/regress/sql/columnar_chunk_filtering.sql +++ b/src/test/regress/sql/columnar_chunk_filtering.sql @@ -445,3 +445,44 @@ RESET columnar.max_custom_scan_paths; RESET columnar.qual_pushdown_correlation_threshold; RESET columnar.planner_debug_level; DROP TABLE pushdown_test; + +-- https://github.com/citusdata/citus/issues/5803 + +CREATE TABLE pushdown_test(id int, country text) using columnar; + +BEGIN; + INSERT INTO pushdown_test VALUES(1, 'AL'); + INSERT INTO pushdown_test VALUES(2, 'AU'); +END; + +BEGIN; + INSERT INTO pushdown_test VALUES(3, 'BR'); + INSERT INTO pushdown_test VALUES(4, 'BT'); +END; + +BEGIN; + INSERT INTO pushdown_test VALUES(5, 'PK'); + INSERT INTO pushdown_test VALUES(6, 'PA'); +END; +BEGIN; + INSERT INTO pushdown_test VALUES(7, 'USA'); + INSERT INTO pushdown_test VALUES(8, 'ZW'); +END; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); + +SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW'); + +-- test for volatile functions with IN +CREATE FUNCTION volatileFunction() returns TEXT language plpgsql AS +$$ +BEGIN + return 'AL'; +END; +$$; +EXPLAIN (analyze on, costs off, timing off, summary off) +SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); + +SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction()); + +DROP TABLE pushdown_test; From d67cf907a2ed20c9f8a1d190d1eeec69d3d9a75d Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Wed, 27 Jul 2022 18:03:53 +0200 Subject: [PATCH 16/38] Detach relation access tracking from connection management --- .../connection/placement_connection.c | 3 -- src/backend/distributed/shared_library_init.c | 1 + .../transaction/relation_access_tracking.c | 34 ++++++++++++++++--- .../distributed/relation_access_tracking.h | 2 +- 4 files changed, 31 insertions(+), 9 deletions(-) diff --git a/src/backend/distributed/connection/placement_connection.c b/src/backend/distributed/connection/placement_connection.c index d3929f4b9..2aa3994a0 100644 --- a/src/backend/distributed/connection/placement_connection.c +++ 
b/src/backend/distributed/connection/placement_connection.c @@ -1089,9 +1089,6 @@ InitPlacementConnectionManagement(void) ConnectionShardHash = hash_create("citus connection cache (shardid)", 64, &info, hashFlags); - - /* (relationId) = [relationAccessMode] hash */ - AllocateRelationAccessHash(); } diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 8ea60fe74..600b62b69 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -390,6 +390,7 @@ _PG_init(void) InitializeBackendManagement(); InitializeConnectionManagement(); InitPlacementConnectionManagement(); + InitRelationAccessHash(); InitializeCitusQueryStats(); InitializeSharedConnectionStats(); InitializeLocallyReservedSharedConnections(); diff --git a/src/backend/distributed/transaction/relation_access_tracking.c b/src/backend/distributed/transaction/relation_access_tracking.c index 9fdb226e1..f69de6f8a 100644 --- a/src/backend/distributed/transaction/relation_access_tracking.c +++ b/src/backend/distributed/transaction/relation_access_tracking.c @@ -47,6 +47,8 @@ bool EnforceForeignKeyRestrictions = true; (1 << (PLACEMENT_ACCESS_DDL + \ PARALLEL_MODE_FLAG_OFFSET))) +MemoryContext RelationAcessContext = NULL; + /* * Hash table mapping relations to the @@ -84,8 +86,8 @@ typedef struct RelationAccessHashEntry static HTAB *RelationAccessHash; - /* functions related to access recording */ +static void AllocateRelationAccessHash(void); static void RecordRelationAccessBase(Oid relationId, ShardPlacementAccessType accessType); static void RecordPlacementAccessToCache(Oid relationId, ShardPlacementAccessType accessType); @@ -120,6 +122,18 @@ static bool HoldsConflictingLockWithReferencedRelations(Oid relationId, conflictingAccessMode); +/* + * InitRelationAccessHash performs initialization of the + * infrastructure in this file at backend start. + */ +void +InitRelationAccessHash(void) +{ + /* allocate (relationId) = [relationAccessMode] hash */ + AllocateRelationAccessHash(); +} + + /* * Empty RelationAccessHash, without destroying the hash table itself. */ @@ -133,19 +147,29 @@ ResetRelationAccessHash() /* * Allocate RelationAccessHash. */ -void +static void AllocateRelationAccessHash(void) { - HASHCTL info; + /* + * Create a single context for relation access related memory + * management. Doing so, instead of allocating in TopMemoryContext, makes + * it easier to associate used memory. 
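+ * The context is created once at backend start and lives until the
+ * backend exits; between transactions only the hash entries are
+ * cleared, via ResetRelationAccessHash().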
+ */ + RelationAcessContext = AllocSetContextCreateExtended(TopMemoryContext, + "Relation Access Context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + HASHCTL info; memset(&info, 0, sizeof(info)); info.keysize = sizeof(RelationAccessHashKey); info.entrysize = sizeof(RelationAccessHashEntry); info.hash = tag_hash; - info.hcxt = ConnectionContext; + info.hcxt = RelationAcessContext; uint32 hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - RelationAccessHash = hash_create("citus connection cache (relationid)", + RelationAccessHash = hash_create("citus relation access cache (relationid)", 8, &info, hashFlags); } diff --git a/src/include/distributed/relation_access_tracking.h b/src/include/distributed/relation_access_tracking.h index deacdec94..295d54351 100644 --- a/src/include/distributed/relation_access_tracking.h +++ b/src/include/distributed/relation_access_tracking.h @@ -34,7 +34,7 @@ typedef enum RelationAccessMode RELATION_PARALLEL_ACCESSED } RelationAccessMode; -extern void AllocateRelationAccessHash(void); +extern void InitRelationAccessHash(void); extern void ResetRelationAccessHash(void); extern void RecordRelationAccessIfNonDistTable(Oid relationId, ShardPlacementAccessType accessType); From 0a5112964de19ae4571a3a4f3f75800b661d369d Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Wed, 27 Jul 2022 18:06:53 +0200 Subject: [PATCH 17/38] Call relation access hash clean-up irrespective of remote transaction state Mainly because local-only transactions should be cleaned up --- src/backend/distributed/connection/placement_connection.c | 1 - src/backend/distributed/transaction/transaction_management.c | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/connection/placement_connection.c b/src/backend/distributed/connection/placement_connection.c index 2aa3994a0..225bf9708 100644 --- a/src/backend/distributed/connection/placement_connection.c +++ b/src/backend/distributed/connection/placement_connection.c @@ -969,7 +969,6 @@ ResetPlacementConnectionManagement(void) hash_delete_all(ConnectionPlacementHash); hash_delete_all(ConnectionShardHash); hash_delete_all(ColocatedPlacementsHash); - ResetRelationAccessHash(); /* * NB: memory for ConnectionReference structs and subordinate data is diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 66525e6ac..0778a34fa 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -36,6 +36,7 @@ #include "distributed/repartition_join_execution.h" #include "distributed/transaction_management.h" #include "distributed/placement_connection.h" +#include "distributed/relation_access_tracking.h" #include "distributed/shared_connection_stats.h" #include "distributed/subplan_execution.h" #include "distributed/version_compat.h" @@ -307,6 +308,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) } ResetGlobalVariables(); + ResetRelationAccessHash(); /* * Make sure that we give the shared connections back to the shared @@ -376,6 +378,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) AfterXactConnectionHandling(false); ResetGlobalVariables(); + ResetRelationAccessHash(); /* * Clear MetadataCache table if we're aborting from a CREATE EXTENSION Citus From b41c3fd30dbe06614b95df2ff0bfe82af90c4806 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Wed, 27 Jul 2022 18:16:21 +0200 Subject: 
[PATCH 18/38] Add tests --- src/test/regress/expected/single_node.out | 84 +++++++++++++++++++++-- src/test/regress/sql/single_node.sql | 38 ++++++++++ 2 files changed, 116 insertions(+), 6 deletions(-) diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out index 6177e215e..0490d1848 100644 --- a/src/test/regress/expected/single_node.out +++ b/src/test/regress/expected/single_node.out @@ -503,6 +503,82 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONST (0 rows) DROP TABLE upsert_test; +CREATE TABLE relation_tracking_table_1(id int, nonid int); +SELECT create_distributed_table('relation_tracking_table_1', 'id', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO relation_tracking_table_1 select generate_series(6, 10000, 1), 0; +CREATE or REPLACE function foo() +returns setof relation_tracking_table_1 +AS $$ +BEGIN +RETURN query select * from relation_tracking_table_1 order by 1 limit 10; +end; +$$ language plpgsql; +CREATE TABLE relation_tracking_table_2 (id int, nonid int); +-- use the relation-access in this session +select foo(); + foo +--------------------------------------------------------------------- + (6,0) + (7,0) + (8,0) + (9,0) + (10,0) + (11,0) + (12,0) + (13,0) + (14,0) + (15,0) +(10 rows) + +-- we should be able to use sequential mode, as the previous multi-shard +-- relation access has been cleaned-up +BEGIN; +SET LOCAL citus.multi_shard_modify_mode TO sequential; +INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0; +SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$single_node.relation_tracking_table_2$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) FROM relation_tracking_table_2; + count +--------------------------------------------------------------------- + 995 +(1 row) + +ROLLBACK; +BEGIN; +INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0; +SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. 
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$single_node.relation_tracking_table_2$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) FROM relation_tracking_table_2; + count +--------------------------------------------------------------------- + 995 +(1 row) + +COMMIT; +SET client_min_messages TO ERROR; +DROP TABLE relation_tracking_table_2, relation_tracking_table_1 CASCADE; +RESET client_min_messages; CREATE SCHEMA "Quoed.Schema"; SET search_path TO "Quoed.Schema"; CREATE TABLE "long_constraint_upsert\_test" @@ -541,13 +617,9 @@ NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quot ERROR: renaming constraints belonging to distributed tables is currently unsupported --INSERT INTO simple_table_name (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT simple_constraint_name DO NOTHING RETURNING *; SET search_path TO single_node; +SET client_min_messages TO ERROR; DROP SCHEMA "Quoed.Schema" CASCADE; -NOTICE: drop cascades to 5 other objects -DETAIL: drop cascades to table "Quoed.Schema".simple_table_name -drop cascades to table "Quoed.Schema".simple_table_name_90630528 -drop cascades to table "Quoed.Schema".simple_table_name_90630529 -drop cascades to table "Quoed.Schema".simple_table_name_90630530 -drop cascades to table "Quoed.Schema".simple_table_name_90630531 +RESET client_min_messages; -- test partitioned index creation with long name CREATE TABLE test_index_creation1 ( diff --git a/src/test/regress/sql/single_node.sql b/src/test/regress/sql/single_node.sql index 21ae9e3ac..09a8c9870 100644 --- a/src/test/regress/sql/single_node.sql +++ b/src/test/regress/sql/single_node.sql @@ -252,6 +252,42 @@ INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONST DROP TABLE upsert_test; +CREATE TABLE relation_tracking_table_1(id int, nonid int); +SELECT create_distributed_table('relation_tracking_table_1', 'id', colocate_with := 'none'); +INSERT INTO relation_tracking_table_1 select generate_series(6, 10000, 1), 0; + +CREATE or REPLACE function foo() +returns setof relation_tracking_table_1 +AS $$ +BEGIN +RETURN query select * from relation_tracking_table_1 order by 1 limit 10; +end; +$$ language plpgsql; + +CREATE TABLE relation_tracking_table_2 (id int, nonid int); + +-- use the relation-access in this session +select foo(); + +-- we should be able to use sequential mode, as the previous multi-shard +-- relation access has been cleaned-up +BEGIN; +SET LOCAL citus.multi_shard_modify_mode TO sequential; +INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0; +SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none'); +SELECT count(*) FROM relation_tracking_table_2; +ROLLBACK; + +BEGIN; +INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0; +SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none'); +SELECT count(*) FROM relation_tracking_table_2; +COMMIT; + +SET client_min_messages TO ERROR; +DROP TABLE relation_tracking_table_2, relation_tracking_table_1 CASCADE; +RESET client_min_messages; + CREATE SCHEMA "Quoed.Schema"; SET search_path TO "Quoed.Schema"; @@ -280,7 +316,9 @@ ALTER TABLE simple_table_name RENAME CONSTRAINT "looo oooo ooooo ooooooooooooooo --INSERT INTO simple_table_name (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT simple_constraint_name DO NOTHING RETURNING *; SET 
search_path TO single_node; +SET client_min_messages TO ERROR; DROP SCHEMA "Quoed.Schema" CASCADE; +RESET client_min_messages; -- test partitioned index creation with long name CREATE TABLE test_index_creation1 From 789d5b9ef9256ffd9914290163823ede4c45a223 Mon Sep 17 00:00:00 2001 From: aykut-bozkurt <51649454+aykut-bozkurt@users.noreply.github.com> Date: Thu, 28 Jul 2022 13:13:28 +0300 Subject: [PATCH 19/38] null check for server in GetObjectAddressByServerName (#6095) --- src/backend/distributed/commands/foreign_server.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/distributed/commands/foreign_server.c b/src/backend/distributed/commands/foreign_server.c index d98393e48..b8fcf0412 100644 --- a/src/backend/distributed/commands/foreign_server.c +++ b/src/backend/distributed/commands/foreign_server.c @@ -269,7 +269,7 @@ static List * GetObjectAddressByServerName(char *serverName, bool missing_ok) { ForeignServer *server = GetForeignServerByName(serverName, missing_ok); - Oid serverOid = server->serverid; + Oid serverOid = (server) ? server->serverid : InvalidOid; ObjectAddress *address = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*address, ForeignServerRelationId, serverOid); From cff013a057884c337aff4c5c331b65f61dc1f015 Mon Sep 17 00:00:00 2001 From: Marco Slot Date: Wed, 13 Jul 2022 16:13:33 +0200 Subject: [PATCH 20/38] Fix issues with insert..select casts and column ordering --- .../executor/insert_select_executor.c | 95 --- .../planner/insert_select_planner.c | 187 ++++- .../planner/multi_router_planner.c | 39 +- .../distributed/insert_select_executor.h | 1 - .../expected/coordinator_shouldhaveshards.out | 10 +- .../insert_select_into_local_table.out | 758 +++++++++++++++++- .../expected/insert_select_repartition.out | 2 +- src/test/regress/expected/multi_explain.out | 25 +- .../regress/expected/multi_insert_select.out | 45 +- .../mx_coordinator_shouldhaveshards.out | 16 +- .../sql/insert_select_into_local_table.sql | 447 +++++++++++ src/test/regress/sql/multi_insert_select.sql | 34 + 12 files changed, 1495 insertions(+), 164 deletions(-) diff --git a/src/backend/distributed/executor/insert_select_executor.c b/src/backend/distributed/executor/insert_select_executor.c index 338b03075..9549846d5 100644 --- a/src/backend/distributed/executor/insert_select_executor.c +++ b/src/backend/distributed/executor/insert_select_executor.c @@ -55,7 +55,6 @@ bool EnableRepartitionedInsertSelect = true; -static Query * WrapSubquery(Query *subquery); static List * TwoPhaseInsertSelectTaskList(Oid targetRelationId, Query *insertSelectQuery, char *resultIdPrefix); static void ExecutePlanIntoRelation(Oid targetRelationId, List *insertTargetList, @@ -299,100 +298,6 @@ NonPushableInsertSelectExecScan(CustomScanState *node) } -/* - * BuildSelectForInsertSelect extracts the SELECT part from an INSERT...SELECT query. - * If the INSERT...SELECT has CTEs then these are added to the resulting SELECT instead. - */ -Query * -BuildSelectForInsertSelect(Query *insertSelectQuery) -{ - RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery); - Query *selectQuery = selectRte->subquery; - - /* - * Wrap the SELECT as a subquery if the INSERT...SELECT has CTEs or the SELECT - * has top-level set operations. - * - * We could simply wrap all queries, but that might create a subquery that is - * not supported by the logical planner. 
Since the logical planner also does - * not support CTEs and top-level set operations, we can wrap queries containing - * those without breaking anything. - */ - if (list_length(insertSelectQuery->cteList) > 0) - { - selectQuery = WrapSubquery(selectRte->subquery); - - /* copy CTEs from the INSERT ... SELECT statement into outer SELECT */ - selectQuery->cteList = copyObject(insertSelectQuery->cteList); - selectQuery->hasModifyingCTE = insertSelectQuery->hasModifyingCTE; - } - else if (selectQuery->setOperations != NULL) - { - /* top-level set operations confuse the ReorderInsertSelectTargetLists logic */ - selectQuery = WrapSubquery(selectRte->subquery); - } - - return selectQuery; -} - - -/* - * WrapSubquery wraps the given query as a subquery in a newly constructed - * "SELECT * FROM (...subquery...) citus_insert_select_subquery" query. - */ -static Query * -WrapSubquery(Query *subquery) -{ - ParseState *pstate = make_parsestate(NULL); - List *newTargetList = NIL; - - Query *outerQuery = makeNode(Query); - outerQuery->commandType = CMD_SELECT; - - /* create range table entries */ - Alias *selectAlias = makeAlias("citus_insert_select_subquery", NIL); - RangeTblEntry *newRangeTableEntry = RangeTableEntryFromNSItem( - addRangeTableEntryForSubquery( - pstate, subquery, - selectAlias, false, true)); - outerQuery->rtable = list_make1(newRangeTableEntry); - - /* set the FROM expression to the subquery */ - RangeTblRef *newRangeTableRef = makeNode(RangeTblRef); - newRangeTableRef->rtindex = 1; - outerQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL); - - /* create a target list that matches the SELECT */ - TargetEntry *selectTargetEntry = NULL; - foreach_ptr(selectTargetEntry, subquery->targetList) - { - /* exactly 1 entry in FROM */ - int indexInRangeTable = 1; - - if (selectTargetEntry->resjunk) - { - continue; - } - - Var *newSelectVar = makeVar(indexInRangeTable, selectTargetEntry->resno, - exprType((Node *) selectTargetEntry->expr), - exprTypmod((Node *) selectTargetEntry->expr), - exprCollation((Node *) selectTargetEntry->expr), 0); - - TargetEntry *newSelectTargetEntry = makeTargetEntry((Expr *) newSelectVar, - selectTargetEntry->resno, - selectTargetEntry->resname, - selectTargetEntry->resjunk); - - newTargetList = lappend(newTargetList, newSelectTargetEntry); - } - - outerQuery->targetList = newTargetList; - - return outerQuery; -} - - /* * TwoPhaseInsertSelectTaskList generates a list of tasks for a query that * inserts into a target relation and selects from a set of co-located diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 746e2846c..e861a7bbb 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -48,8 +48,10 @@ #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/rel.h" +#include +static void PrepareInsertSelectForCitusPlanner(Query *insertSelectQuery); static DistributedPlan * CreateInsertSelectPlanInternal(uint64 planId, Query *originalQuery, PlannerRestrictionContext * @@ -83,6 +85,7 @@ static DeferredErrorMessage * InsertPartitionColumnMatchesSelect(Query *query, static DistributedPlan * CreateNonPushableInsertSelectPlan(uint64 planId, Query *parse, ParamListInfo boundParams); static DeferredErrorMessage * NonPushableInsertSelectSupported(Query *insertSelectQuery); +static Query * WrapSubquery(Query *subquery); static void RelabelTargetEntryList(List *selectTargetList, List 
*insertTargetList); static List * AddInsertSelectCasts(List *insertTargetList, List *selectTargetList, Oid targetRelationId); @@ -370,14 +373,17 @@ CreateDistributedInsertSelectPlan(Query *originalQuery, * combineQuery, this function also creates a dummy combineQuery for that. */ DistributedPlan * -CreateInsertSelectIntoLocalTablePlan(uint64 planId, Query *originalQuery, ParamListInfo - boundParams, bool hasUnresolvedParams, +CreateInsertSelectIntoLocalTablePlan(uint64 planId, Query *insertSelectQuery, + ParamListInfo boundParams, bool hasUnresolvedParams, PlannerRestrictionContext *plannerRestrictionContext) { - RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(originalQuery); + RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery); + + PrepareInsertSelectForCitusPlanner(insertSelectQuery); + + /* get the SELECT query (may have changed after PrepareInsertSelectForCitusPlanner) */ + Query *selectQuery = selectRte->subquery; - Query *selectQuery = BuildSelectForInsertSelect(originalQuery); - originalQuery->cteList = NIL; DistributedPlan *distPlan = CreateDistributedPlan(planId, selectQuery, copyObject(selectQuery), boundParams, hasUnresolvedParams, @@ -417,12 +423,84 @@ CreateInsertSelectIntoLocalTablePlan(uint64 planId, Query *originalQuery, ParamL * distributed select instead of returning it. */ selectRte->subquery = distPlan->combineQuery; - distPlan->combineQuery = originalQuery; + distPlan->combineQuery = insertSelectQuery; return distPlan; } +/* + * PrepareInsertSelectForCitusPlanner prepares an INSERT..SELECT query tree + * that was passed to the planner for use by Citus. + * + * First, it rebuilds the target lists of the INSERT and the SELECT + * to be in the same order, which is not guaranteed in the parse tree. + * + * Second, some of the constants in the target list will have type + * "unknown", which would confuse the Citus planner. To address that, + * we add casts to SELECT target list entries whose type does not correspond + * to the destination. This also helps us feed the output directly into + * a COPY stream for INSERT..SELECT via coordinator. + * + * In case of UNION or other set operations, the SELECT does not have a + * clearly defined target list, so we first wrap the UNION in a subquery. + * UNION queries do not have the "unknown" type problem. + * + * Finally, if the INSERT has CTEs, we move those CTEs into the SELECT, + * such that we can plan the SELECT as an independent query. To ensure + * the ctelevelsup for CTE RTE's remain the same, we wrap the SELECT into + * a subquery, unless we already did so in case of a UNION. + */ +static void +PrepareInsertSelectForCitusPlanner(Query *insertSelectQuery) +{ + RangeTblEntry *insertRte = ExtractResultRelationRTEOrError(insertSelectQuery); + RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery); + Oid targetRelationId = insertRte->relid; + + bool isWrapped = false; + + if (selectRte->subquery->setOperations != NULL) + { + /* + * Prepare UNION query for reordering and adding casts by + * wrapping it in a subquery to have a single target list. + */ + selectRte->subquery = WrapSubquery(selectRte->subquery); + isWrapped = true; + } + + /* this is required for correct deparsing of the query */ + ReorderInsertSelectTargetLists(insertSelectQuery, insertRte, selectRte); + + /* + * Cast types of insert target list and select projection list to + * match the column types of the target relation. 
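+ * This resolves any remaining "unknown" constants and makes the
+ * SELECT output types line up with the target columns, so the result
+ * can be fed straight into a COPY stream.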
+ */ + selectRte->subquery->targetList = + AddInsertSelectCasts(insertSelectQuery->targetList, + copyObject(selectRte->subquery->targetList), + targetRelationId); + + if (list_length(insertSelectQuery->cteList) > 0) + { + if (!isWrapped) + { + /* + * By wrapping the SELECT in a subquery, we can avoid adjusting + * ctelevelsup in RTE's that point to the CTEs. + */ + selectRte->subquery = WrapSubquery(selectRte->subquery); + } + + /* copy CTEs from the INSERT ... SELECT statement into outer SELECT */ + selectRte->subquery->cteList = copyObject(insertSelectQuery->cteList); + selectRte->subquery->hasModifyingCTE = insertSelectQuery->hasModifyingCTE; + insertSelectQuery->cteList = NIL; + } +} + + /* * CreateCombineQueryForRouterPlan is used for creating a dummy combineQuery * for a router plan, since router plans normally don't have one. @@ -881,12 +959,11 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, ListCell *insertTargetEntryCell; List *newSubqueryTargetlist = NIL; List *newInsertTargetlist = NIL; + List *columnNameList = NIL; int resno = 1; - Index insertTableId = 1; + Index selectTableId = 2; int targetEntryIndex = 0; - AssertArg(InsertSelectIntoCitusTable(originalQuery)); - Query *subquery = subqueryRte->subquery; Oid insertRelationId = insertRte->relid; @@ -954,6 +1031,9 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, newSubqueryTargetEntry); } + String *columnName = makeString(newSubqueryTargetEntry->resname); + columnNameList = lappend(columnNameList, columnName); + /* * The newly created select target entry cannot be a junk entry since junk * entries are not in the final target list and we're processing the @@ -961,7 +1041,7 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, */ Assert(!newSubqueryTargetEntry->resjunk); - Var *newInsertVar = makeVar(insertTableId, originalAttrNo, + Var *newInsertVar = makeVar(selectTableId, resno, exprType((Node *) newSubqueryTargetEntry->expr), exprTypmod((Node *) newSubqueryTargetEntry->expr), exprCollation((Node *) newSubqueryTargetEntry->expr), @@ -1005,6 +1085,7 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, originalQuery->targetList = newInsertTargetlist; subquery->targetList = newSubqueryTargetlist; + subqueryRte->eref->colnames = columnNameList; return NULL; } @@ -1412,19 +1493,10 @@ CreateNonPushableInsertSelectPlan(uint64 planId, Query *parse, ParamListInfo bou return distributedPlan; } - Query *selectQuery = BuildSelectForInsertSelect(insertSelectQuery); + PrepareInsertSelectForCitusPlanner(insertSelectQuery); - selectRte->subquery = selectQuery; - ReorderInsertSelectTargetLists(insertSelectQuery, insertRte, selectRte); - - /* - * Cast types of insert target list and select projection list to - * match the column types of the target relation. - */ - selectQuery->targetList = - AddInsertSelectCasts(insertSelectQuery->targetList, - selectQuery->targetList, - targetRelationId); + /* get the SELECT query (may have changed after PrepareInsertSelectForCitusPlanner) */ + Query *selectQuery = selectRte->subquery; /* * Later we might need to call WrapTaskListForProjection(), which requires @@ -1506,6 +1578,63 @@ InsertSelectResultIdPrefix(uint64 planId) } +/* + * WrapSubquery wraps the given query as a subquery in a newly constructed + * "SELECT * FROM (...subquery...) citus_insert_select_subquery" query. 
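+ *
+ * Junk entries in the subquery's target list are skipped, so the
+ * wrapper exposes only the real output columns of the wrapped query.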
+ */ +static Query * +WrapSubquery(Query *subquery) +{ + ParseState *pstate = make_parsestate(NULL); + List *newTargetList = NIL; + + Query *outerQuery = makeNode(Query); + outerQuery->commandType = CMD_SELECT; + + /* create range table entries */ + Alias *selectAlias = makeAlias("citus_insert_select_subquery", NIL); + RangeTblEntry *newRangeTableEntry = RangeTableEntryFromNSItem( + addRangeTableEntryForSubquery( + pstate, subquery, + selectAlias, false, true)); + outerQuery->rtable = list_make1(newRangeTableEntry); + + /* set the FROM expression to the subquery */ + RangeTblRef *newRangeTableRef = makeNode(RangeTblRef); + newRangeTableRef->rtindex = 1; + outerQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL); + + /* create a target list that matches the SELECT */ + TargetEntry *selectTargetEntry = NULL; + foreach_ptr(selectTargetEntry, subquery->targetList) + { + /* exactly 1 entry in FROM */ + int indexInRangeTable = 1; + + if (selectTargetEntry->resjunk) + { + continue; + } + + Var *newSelectVar = makeVar(indexInRangeTable, selectTargetEntry->resno, + exprType((Node *) selectTargetEntry->expr), + exprTypmod((Node *) selectTargetEntry->expr), + exprCollation((Node *) selectTargetEntry->expr), 0); + + TargetEntry *newSelectTargetEntry = makeTargetEntry((Expr *) newSelectVar, + selectTargetEntry->resno, + selectTargetEntry->resname, + selectTargetEntry->resjunk); + + newTargetList = lappend(newTargetList, newSelectTargetEntry); + } + + outerQuery->targetList = newTargetList; + + return outerQuery; +} + + /* * RelabelTargetEntryList relabels select target list to have matching names with * insert target list. @@ -1549,18 +1678,24 @@ AddInsertSelectCasts(List *insertTargetList, List *selectTargetList, int targetEntryIndex = 0; TargetEntry *insertEntry = NULL; TargetEntry *selectEntry = NULL; + forboth_ptr(insertEntry, insertTargetList, selectEntry, selectTargetList) { - Var *insertColumn = (Var *) insertEntry->expr; Form_pg_attribute attr = TupleDescAttr(destTupleDescriptor, insertEntry->resno - 1); - Oid sourceType = insertColumn->vartype; + Oid sourceType = exprType((Node *) selectEntry->expr); Oid targetType = attr->atttypid; if (sourceType != targetType) { - insertEntry->expr = CastExpr((Expr *) insertColumn, sourceType, targetType, - attr->attcollation, attr->atttypmod); + /* ReorderInsertSelectTargetLists ensures we only have Vars */ + Assert(IsA(insertEntry->expr, Var)); + + /* we will cast the SELECT expression, so the type changes */ + Var *insertVar = (Var *) insertEntry->expr; + insertVar->vartype = targetType; + insertVar->vartypmod = attr->atttypmod; + insertVar->varcollid = attr->attcollation; /* * We cannot modify the selectEntry in-place, because ORDER BY or diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index 7c57a77f2..3193305a2 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -3558,19 +3558,9 @@ DeferErrorIfUnsupportedRouterPlannableSelectQuery(Query *query) NULL, NULL); } - if (contain_nextval_expression_walker((Node *) query->targetList, NULL)) - { - /* - * We let queries with nextval in the target list fall through to - * the logical planner, which knows how to handle those queries. 
-	 */
-		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
-							 "Sequences cannot be used in router queries",
-							 NULL, NULL);
-	}
-
 	bool hasPostgresOrCitusLocalTable = false;
 	bool hasDistributedTable = false;
+	bool hasReferenceTable = false;
 
 	ExtractRangeTableRelationWalker((Node *) query, &rangeTableRelationList);
 	foreach(rangeTableRelationCell, rangeTableRelationList)
@@ -3586,6 +3576,11 @@ DeferErrorIfUnsupportedRouterPlannableSelectQuery(Query *query)
 			hasPostgresOrCitusLocalTable = true;
 			continue;
 		}
+		else if (IsCitusTableType(distributedTableId, REFERENCE_TABLE))
+		{
+			hasReferenceTable = true;
+			continue;
+		}
 		else if (IsCitusTableType(distributedTableId, CITUS_LOCAL_TABLE))
 		{
 			hasPostgresOrCitusLocalTable = true;
@@ -3628,6 +3623,28 @@ DeferErrorIfUnsupportedRouterPlannableSelectQuery(Query *query)
 		}
 	}
 
+	/*
+	 * We want to make sure nextval is evaluated on the coordinator (the
+	 * current node), since the user may have certain expectations around
+	 * the values produced by the sequence. We therefore cannot push down
+	 * the nextval call as part of a router query.
+	 *
+	 * We let queries with nextval in the target list fall through to
+	 * the logical planner, which will ensure that nextval is called
+	 * in the combine query on the coordinator.
+	 *
+	 * If the query references no distributed or reference tables, it will
+	 * run on the coordinator anyway, so we can allow nextval.
+	 */
+	if (contain_nextval_expression_walker((Node *) query->targetList, NULL) &&
+		(hasDistributedTable || hasReferenceTable))
+	{
+		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+							 "Sequences cannot be used in router queries",
+							 NULL, NULL);
+	}
+
 	/* local tables are not allowed if there are distributed tables */
 	if (hasPostgresOrCitusLocalTable && hasDistributedTable)
 	{
diff --git a/src/include/distributed/insert_select_executor.h b/src/include/distributed/insert_select_executor.h
index bcfe29bfb..6e84b80f2 100644
--- a/src/include/distributed/insert_select_executor.h
+++ b/src/include/distributed/insert_select_executor.h
@@ -19,7 +19,6 @@ extern bool EnableRepartitionedInsertSelect;
 
 extern TupleTableSlot * NonPushableInsertSelectExecScan(CustomScanState *node);
 
-extern Query * BuildSelectForInsertSelect(Query *insertSelectQuery);
 extern bool IsSupportedRedistributionTarget(Oid targetRelationId);
 extern bool IsRedistributablePlan(Plan *selectPlan);
 
diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out
index 485e7f11b..dd93dad39 100644
--- a/src/test/regress/expected/coordinator_shouldhaveshards.out
+++ b/src/test/regress/expected/coordinator_shouldhaveshards.out
@@ -896,8 +896,8 @@ HAVING (max(table_2.value) >= (SELECT value FROM a));
DEBUG: Group by list without distribution column is not allowed in distributed INSERT ...
SELECT queries DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM coordinator_shouldhaveshards.table_1 ORDER BY key, value DESC LIMIT 1 DEBUG: push down of limit count: 1 -DEBUG: generating subplan XXX_2 for subquery SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT int4(count) AS key, (key)::text AS value FROM (SELECT intermediate_result.count, intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint, key integer)) citus_insert_select_subquery +DEBUG: generating subplan XXX_2 for subquery SELECT int4(count(*)) AS auto_coerced_by_citus_0, (a.key)::text AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery DEBUG: Collecting INSERT ... 
SELECT results on coordinator DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx @@ -905,9 +905,9 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx NOTICE: executing the command locally: SELECT key, value FROM coordinator_shouldhaveshards.table_1_1503102 table_1 WHERE true ORDER BY key, value DESC LIMIT '1'::bigint NOTICE: executing the command locally: SELECT key, value FROM coordinator_shouldhaveshards.table_1_1503105 table_1 WHERE true ORDER BY key, value DESC LIMIT '1'::bigint DEBUG: Subplan XXX_2 will be written to local file -NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_1 AS key, max(worker_column_2) AS worker_column_3 FROM (SELECT a.key AS worker_column_1, table_2.value AS worker_column_2 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN coordinator_shouldhaveshards.table_2_1503106 table_2(key, value) USING (key))) worker_subquery GROUP BY worker_column_1 -NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_1 AS key, max(worker_column_2) AS worker_column_3 FROM (SELECT a.key AS worker_column_1, table_2.value AS worker_column_2 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN coordinator_shouldhaveshards.table_2_1503109 table_2(key, value) USING (key))) worker_subquery GROUP BY worker_column_1 -NOTICE: executing the command locally: SELECT int4(count) AS key, (key)::text AS value FROM (SELECT intermediate_result.count, intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint, key integer)) citus_insert_select_subquery +NOTICE: executing the command locally: SELECT count(*) AS auto_coerced_by_citus_0, (worker_column_1)::text AS auto_coerced_by_citus_1, worker_column_1 AS discarded_target_item_1, max(worker_column_2) AS worker_column_4 FROM (SELECT a.key AS worker_column_1, table_2.value AS worker_column_2 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN coordinator_shouldhaveshards.table_2_1503106 table_2(key, value) USING (key))) worker_subquery GROUP BY worker_column_1 +NOTICE: executing the command locally: SELECT count(*) AS auto_coerced_by_citus_0, (worker_column_1)::text AS auto_coerced_by_citus_1, worker_column_1 AS discarded_target_item_1, max(worker_column_2) AS worker_column_4 FROM (SELECT a.key AS worker_column_1, table_2.value AS worker_column_2 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN coordinator_shouldhaveshards.table_2_1503109 table_2(key, value) USING (key))) worker_subquery GROUP BY worker_column_1 +NOTICE: executing the command locally: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery NOTICE: executing 
the copy locally for shard xxxxx WITH stats AS ( SELECT count(key) m FROM table_1 diff --git a/src/test/regress/expected/insert_select_into_local_table.out b/src/test/regress/expected/insert_select_into_local_table.out index 79376f6a4..f53348272 100644 --- a/src/test/regress/expected/insert_select_into_local_table.out +++ b/src/test/regress/expected/insert_select_into_local_table.out @@ -149,6 +149,67 @@ SELECT * FROM non_dist_unique ORDER BY 1; 5 | 8 (5 rows) +INSERT INTO non_dist_unique +SELECT a+1, b FROM dist_table +UNION ALL +SELECT a+100, b FROM dist_table +ON CONFLICT (a) DO NOTHING; +SELECT * FROM non_dist_unique ORDER BY 1; + a | b +--------------------------------------------------------------------- + 1 | 6 + 2 | 7 + 3 | 14 + 4 | 15 + 5 | 8 + 101 | 6 + 102 | 7 + 103 | 8 +(8 rows) + +INSERT INTO non_dist_unique +SELECT a+1, b FROM dist_table +UNION ALL +SELECT a+100, b FROM dist_table +ON CONFLICT (a) DO UPDATE SET b = EXCLUDED.b + 1; +SELECT * FROM non_dist_unique ORDER BY 1; + a | b +--------------------------------------------------------------------- + 1 | 6 + 2 | 7 + 3 | 8 + 4 | 9 + 5 | 8 + 101 | 7 + 102 | 8 + 103 | 9 +(8 rows) + +WITH cte1 AS (SELECT s FROM generate_series(1,10) s) +INSERT INTO non_dist_unique +WITH cte2 AS (SELECT s FROM generate_series(1,10) s) +SELECT a+1, b FROM dist_table WHERE b IN (SELECT s FROM cte1) +UNION ALL +SELECT s, s FROM cte1 +ON CONFLICT (a) DO NOTHING; +SELECT * FROM non_dist_unique ORDER BY 1; + a | b +--------------------------------------------------------------------- + 1 | 6 + 2 | 7 + 3 | 8 + 4 | 9 + 5 | 8 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 + 101 | 7 + 102 | 8 + 103 | 9 +(13 rows) + DROP TABLE non_dist_unique; -- test INSERT INTO a table with DEFAULT CREATE TABLE non_dist_default (a INT, c TEXT DEFAULT 'def'); @@ -168,6 +229,16 @@ SELECT * FROM non_dist_default ORDER BY 1, 2; 3 | def (3 rows) +SELECT alter_table_set_access_method('non_dist_default', 'columnar'); +NOTICE: creating a new table for insert_select_into_local_table.non_dist_default +NOTICE: moving the data of insert_select_into_local_table.non_dist_default +NOTICE: dropping the old insert_select_into_local_table.non_dist_default +NOTICE: renaming the new table to insert_select_into_local_table.non_dist_default + alter_table_set_access_method +--------------------------------------------------------------------- + +(1 row) + INSERT INTO non_dist_default SELECT a, c FROM dist_table WHERE a = 1; SELECT * FROM non_dist_default ORDER BY 1, 2; a | c @@ -354,6 +425,691 @@ SELECT * FROM non_dist_2 ORDER BY 1, 2; (3 rows) TRUNCATE non_dist_2; +-- check issue https://github.com/citusdata/citus/issues/5858 +CREATE TABLE local_dest_table( + col_1 integer, + col_2 integer, + col_3 text, + col_4 text, + drop_col text, + col_5 bigint, + col_6 text, + col_7 text default 'col_7', + col_8 varchar +); +ALTER TABLE local_dest_table DROP COLUMN drop_col; +CREATE TABLE dist_source_table_1( + int_col integer, + drop_col text, + text_col_1 text, + dist_col integer, + text_col_2 text +); +SELECT create_distributed_table('dist_source_table_1', 'dist_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ALTER TABLE dist_source_table_1 DROP COLUMN drop_col; +INSERT INTO dist_source_table_1 VALUES (1, 'value', 1, 'value'); +INSERT INTO dist_source_table_1 VALUES (2, 'value2', 1, 'value'); +INSERT INTO dist_source_table_1 VALUES (3, 'value', 3, 'value3'); +CREATE TABLE dist_source_table_2( + dist_col integer, + int_col integer +); 
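+-- Note: local_dest_table and dist_source_table_1 above both dropped a
+-- column, so attribute numbers no longer match logical column positions.
+-- A minimal query of the kind this setup exercises (illustrative only):
+--   INSERT INTO local_dest_table (col_1, col_3)
+--   SELECT dist_col, text_col_1 FROM dist_source_table_1;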
+SELECT create_distributed_table('dist_source_table_2', 'dist_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_source_table_2 VALUES (1, 1); +INSERT INTO dist_source_table_2 VALUES (2, 2); +INSERT INTO dist_source_table_2 VALUES (4, 4); +CREATE TABLE local_source_table_1 AS SELECT * FROM dist_source_table_1; +CREATE TABLE local_source_table_2 AS SELECT * FROM dist_source_table_2; +/* + * query_results_equal compares the effect of two queries on local_dest_table. + * We use this to ensure that INSERT INTO local_dest_table SELECT behaves + * the same when selecting from a regular table (postgres handles it) and + * a distributed table (Citus handles it). + * + * The queries are generated by calling format() on query_table twice, + * once for each source_table argument. + */ +CREATE OR REPLACE FUNCTION query_results_equal(query_template text, source_table_1 text, source_table_2 text) +RETURNS bool +AS $$ +DECLARE + l1 local_dest_table[]; + l2 local_dest_table[]; +BEGIN + /* get the results using source_table_1 as source */ + TRUNCATE local_dest_table; + EXECUTE format(query_template, source_table_1); + SELECT array_agg(l) INTO l1 + FROM (SELECT * FROM local_dest_table ORDER BY 1, 2, 3, 4, 5, 6, 7, 8) l; + + /* get the results using source_table_2 as source */ + TRUNCATE local_dest_table; + EXECUTE format(query_template, source_table_2); + SELECT array_agg(l) INTO l2 + FROM (SELECT * FROM local_dest_table ORDER BY 1, 2, 3, 4, 5, 6, 7, 8) l; + + RAISE NOTICE 'l2=%', l1; + RAISE NOTICE 'l2=%', l2; + RETURN l1 = l2; +END; +$$ LANGUAGE plpgsql; +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table + SELECT + t1.dist_col, + 1, + 'string1', + 'string2', + 2, + 'string3', + t1.text_col_1, + t1.text_col_2 + FROM %1$s_1 t1 + WHERE t1.int_col IN (SELECT int_col FROM %1$s_2) +$$, 'local_source_table', 'dist_source_table'); +NOTICE: l2={"(1,1,string1,string2,2,string3,value,value)","(1,1,string1,string2,2,string3,value2,value)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,1,string1,string2,2,string3,value,value)","(1,1,string1,string2,2,string3,value2,value)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table + SELECT + t1.dist_col, + 1, + 'string1', + 'string2', + 2, + 'string3', + t1.text_col_1, + t1.text_col_2 + FROM %1$s t1 + returning * +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(1,1,string1,string2,2,string3,value,value)","(1,1,string1,string2,2,string3,value2,value)","(3,1,string1,string2,2,string3,value,value3)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,1,string1,string2,2,string3,value,value)","(1,1,string1,string2,2,string3,value2,value)","(3,1,string1,string2,2,string3,value,value3)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_3, col_4) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: 
l2={"(,,string1,string2,,,col_7,)","(,,string1,string2,,,col_7,)","(,,string1,string2,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,string1,string2,,,col_7,)","(,,string1,string2,,,col_7,)","(,,string1,string2,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_7, col_4) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,,string2,,,string1,)","(,,,string2,,,string1,)","(,,,string2,,,string1,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,,string2,,,string1,)","(,,,string2,,,string1,)","(,,,string2,,,string1,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_4, col_3) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + WHERE dist_col = 1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,string2,string1,,,col_7,)","(,,string2,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,string2,string1,,,col_7,)","(,,string2,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s + UNION ALL + SELECT + 'string', + int_col + FROM %1$s; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(1,,,string,,,col_7,)","(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(2,,,string,,,col_7,)","(3,,,string,,,col_7,)","(3,,,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,,,string,,,col_7,)","(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(2,,,string,,,col_7,)","(3,,,string,,,col_7,)","(3,,,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + WITH cte1 AS (SELECT s FROM generate_series(1,10) s) + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s WHERE int_col IN (SELECT s FROM cte1) + UNION ALL + SELECT + 'string', + int_col + FROM %1$s WHERE int_col IN (SELECT s + 1 FROM cte1) +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(2,,,string,,,col_7,)","(3,,,string,,,col_7,)","(3,,,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(2,,,string,,,col_7,)","(3,,,string,,,col_7,)","(3,,,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM 
query_results_equal($$ + WITH cte1 AS (SELECT 'stringcte', s FROM generate_series(1,10) s) + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s WHERE int_col IN (SELECT s FROM cte1) + UNION ALL + SELECT + * + FROM cte1 +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(1,,,stringcte,,,col_7,)","(2,,,stringcte,,,col_7,)","(3,,,string1,,,col_7,)","(3,,,stringcte,,,col_7,)","(4,,,stringcte,,,col_7,)","(5,,,stringcte,,,col_7,)","(6,,,stringcte,,,col_7,)","(7,,,stringcte,,,col_7,)","(8,,,stringcte,,,col_7,)","(9,,,stringcte,,,col_7,)","(10,,,stringcte,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(1,,,stringcte,,,col_7,)","(2,,,stringcte,,,col_7,)","(3,,,string1,,,col_7,)","(3,,,stringcte,,,col_7,)","(4,,,stringcte,,,col_7,)","(5,,,stringcte,,,col_7,)","(6,,,stringcte,,,col_7,)","(7,,,stringcte,,,col_7,)","(8,,,stringcte,,,col_7,)","(9,,,stringcte,,,col_7,)","(10,,,stringcte,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_3) + SELECT t1.text_col_1 + FROM %1$s t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,value,,,,col_7,)","(,,value2,,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,value,,,,col_7,)","(,,value2,,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_1, col_2, col_3, col_5, col_6, col_7, col_8) + SELECT + max(t1.dist_col), + 3, + 'string_3', + 4, + 44, + t1.text_col_1, + 'string_1000' + FROM %1$s t1 + GROUP BY t1.text_col_2, t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(1,3,string_3,,4,44,value,string_1000)","(1,3,string_3,,4,44,value2,string_1000)","(3,3,string_3,,4,44,value,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,3,string_3,,4,44,value,string_1000)","(1,3,string_3,,4,44,value2,string_1000)","(3,3,string_3,,4,44,value,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_7, col_8) + SELECT + t1.text_col_1, + 'string_1000' + FROM dist_source_table_1 t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,,,,,value,string_1000)","(,,,,,,value2,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,,,,,value,string_1000)","(,,,,,,value2,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_6, col_7, col_8) + SELECT + 'string_4', + t1.text_col_1, + 'string_1000' + FROM %1$s 
t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,,,,string_4,value,string_1000)","(,,,,,string_4,value2,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,,,,string_4,value,string_1000)","(,,,,,string_4,value2,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_5, col_3) + SELECT 12, 'string_11' FROM %1$s t1 + UNION + SELECT int_col, 'string' FROM %1$s; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,string,,1,,col_7,)","(,,string,,2,,col_7,)","(,,string,,3,,col_7,)","(,,string_11,,12,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,string,,1,,col_7,)","(,,string,,2,,col_7,)","(,,string,,3,,col_7,)","(,,string_11,,12,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table(col_3, col_2) + SELECT text_col_1, count(*) FROM %1$s GROUP BY 1 +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,1,value2,,,,col_7,)","(,2,value,,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,1,value2,,,,col_7,)","(,2,value,,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table(col_3, col_5) + SELECT text_col_1, count(*)::int FROM %1$s GROUP BY 1 +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,value,,2,,col_7,)","(,,value2,,1,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,value,,2,,col_7,)","(,,value2,,1,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +-- repeat above tests with Citus local table +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table + SELECT + t1.dist_col, + 1, + 'string1', + 'string2', + 2, + 'string3', + t1.text_col_1, + t1.text_col_2 + FROM %1$s_1 t1 + WHERE t1.int_col IN (SELECT int_col FROM %1$s_2) +$$, 'local_source_table', 'dist_source_table'); +NOTICE: l2={"(1,1,string1,string2,2,string3,value,value)","(1,1,string1,string2,2,string3,value2,value)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,1,string1,string2,2,string3,value,value)","(1,1,string1,string2,2,string3,value2,value)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table + SELECT + t1.dist_col, + 1, + 'string1', + 'string2', + 2, + 'string3', + t1.text_col_1, + t1.text_col_2 + FROM %1$s t1 + returning * +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: 
l2={"(1,1,string1,string2,2,string3,value,value)","(1,1,string1,string2,2,string3,value2,value)","(3,1,string1,string2,2,string3,value,value3)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,1,string1,string2,2,string3,value,value)","(1,1,string1,string2,2,string3,value2,value)","(3,1,string1,string2,2,string3,value,value3)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_3, col_4) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,string1,string2,,,col_7,)","(,,string1,string2,,,col_7,)","(,,string1,string2,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,string1,string2,,,col_7,)","(,,string1,string2,,,col_7,)","(,,string1,string2,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_7, col_4) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,,string2,,,string1,)","(,,,string2,,,string1,)","(,,,string2,,,string1,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,,string2,,,string1,)","(,,,string2,,,string1,)","(,,,string2,,,string1,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_4, col_3) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + WHERE dist_col = 1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,string2,string1,,,col_7,)","(,,string2,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,string2,string1,,,col_7,)","(,,string2,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s + UNION ALL + SELECT + 'string', + int_col + FROM %1$s; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(1,,,string,,,col_7,)","(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(2,,,string,,,col_7,)","(3,,,string,,,col_7,)","(3,,,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,,,string,,,col_7,)","(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(2,,,string,,,col_7,)","(3,,,string,,,col_7,)","(3,,,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + WITH cte1 AS (SELECT s FROM generate_series(1,10) s) + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 
'string1', + dist_col + FROM %1$s WHERE int_col IN (SELECT s FROM cte1) + UNION ALL + SELECT + 'string', + int_col + FROM %1$s WHERE int_col IN (SELECT s + 1 FROM cte1) +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(2,,,string,,,col_7,)","(3,,,string,,,col_7,)","(3,,,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(2,,,string,,,col_7,)","(3,,,string,,,col_7,)","(3,,,string1,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + WITH cte1 AS (SELECT 'stringcte', s FROM generate_series(1,10) s) + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s WHERE int_col IN (SELECT s FROM cte1) + UNION ALL + SELECT + * + FROM cte1 +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(1,,,stringcte,,,col_7,)","(2,,,stringcte,,,col_7,)","(3,,,string1,,,col_7,)","(3,,,stringcte,,,col_7,)","(4,,,stringcte,,,col_7,)","(5,,,stringcte,,,col_7,)","(6,,,stringcte,,,col_7,)","(7,,,stringcte,,,col_7,)","(8,,,stringcte,,,col_7,)","(9,,,stringcte,,,col_7,)","(10,,,stringcte,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,,,string1,,,col_7,)","(1,,,string1,,,col_7,)","(1,,,stringcte,,,col_7,)","(2,,,stringcte,,,col_7,)","(3,,,string1,,,col_7,)","(3,,,stringcte,,,col_7,)","(4,,,stringcte,,,col_7,)","(5,,,stringcte,,,col_7,)","(6,,,stringcte,,,col_7,)","(7,,,stringcte,,,col_7,)","(8,,,stringcte,,,col_7,)","(9,,,stringcte,,,col_7,)","(10,,,stringcte,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_3) + SELECT t1.text_col_1 + FROM %1$s t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,value,,,,col_7,)","(,,value2,,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,value,,,,col_7,)","(,,value2,,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_1, col_2, col_3, col_5, col_6, col_7, col_8) + SELECT + max(t1.dist_col), + 3, + 'string_3', + 4, + 44, + t1.text_col_1, + 'string_1000' + FROM %1$s t1 + GROUP BY t1.text_col_2, t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(1,3,string_3,,4,44,value,string_1000)","(1,3,string_3,,4,44,value2,string_1000)","(3,3,string_3,,4,44,value,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(1,3,string_3,,4,44,value,string_1000)","(1,3,string_3,,4,44,value2,string_1000)","(3,3,string_3,,4,44,value,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM 
query_results_equal($$ + INSERT INTO local_dest_table (col_7, col_8) + SELECT + t1.text_col_1, + 'string_1000' + FROM dist_source_table_1 t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,,,,,value,string_1000)","(,,,,,,value2,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,,,,,value,string_1000)","(,,,,,,value2,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_6, col_7, col_8) + SELECT + 'string_4', + t1.text_col_1, + 'string_1000' + FROM %1$s t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,,,,string_4,value,string_1000)","(,,,,,string_4,value2,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,,,,string_4,value,string_1000)","(,,,,,string_4,value2,string_1000)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_5, col_3) + SELECT 12, 'string_11' FROM %1$s t1 + UNION + SELECT int_col, 'string' FROM %1$s; +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,string,,1,,col_7,)","(,,string,,2,,col_7,)","(,,string,,3,,col_7,)","(,,string_11,,12,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,string,,1,,col_7,)","(,,string,,2,,col_7,)","(,,string,,3,,col_7,)","(,,string_11,,12,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table(col_3, col_2) + SELECT text_col_1, count(*) FROM %1$s GROUP BY 1 +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,1,value2,,,,col_7,)","(,2,value,,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,1,value2,,,,col_7,)","(,2,value,,,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table(col_3, col_5) + SELECT text_col_1, count(*)::int FROM %1$s GROUP BY 1 +$$, 'local_source_table_1', 'dist_source_table_1'); +NOTICE: l2={"(,,value,,2,,col_7,)","(,,value2,,1,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE +NOTICE: l2={"(,,value,,2,,col_7,)","(,,value2,,1,,col_7,)"} +CONTEXT: PL/pgSQL function query_results_equal(text,text,text) line XX at RAISE + query_results_equal +--------------------------------------------------------------------- + t +(1 row) + +-- go back to proper local table for remaining tests +TRUNCATE local_dest_table; +SELECT undistribute_table('local_source_table_1'); +ERROR: cannot undistribute table because the table is not distributed +-- use a sequence (cannot use query_results_equal, since sequence values would not match) +CREATE SEQUENCE seq; +BEGIN; +INSERT INTO local_dest_table 
(col_5, col_3) +SELECT 12, 'string_11' FROM dist_source_table_1 +UNION +SELECT nextval('seq'), 'string' FROM dist_source_table_1; +SELECT * FROM local_dest_table ORDER BY 1,2,3,4,5,6,7,8; + col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7 | col_8 +--------------------------------------------------------------------- + | | string | | 1 | | col_7 | + | | string | | 2 | | col_7 | + | | string | | 3 | | col_7 | + | | string_11 | | 12 | | col_7 | +(4 rows) + +ROLLBACK; +-- add a bigserial column +ALTER TABLE local_dest_table ADD COLUMN col_9 bigserial; +-- not supported due to limitations in nextval handling +INSERT INTO local_dest_table (col_5, col_3) +SELECT 12, 'string_11' FROM dist_source_table_1 +UNION +SELECT 11, 'string' FROM dist_source_table_1; +SELECT * FROM local_dest_table ORDER BY 1,2,3,4,5,6,7,8; + col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7 | col_8 | col_9 +--------------------------------------------------------------------- + | | string | | 11 | | col_7 | | 2 + | | string_11 | | 12 | | col_7 | | 1 +(2 rows) + +BEGIN; +INSERT INTO local_dest_table(col_3, col_2) +SELECT text_col_1, count(*) FROM dist_source_table_1 GROUP BY 1; +SELECT * FROM local_dest_table ORDER BY 1,2,3,4,5,6,7,8; + col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7 | col_8 | col_9 +--------------------------------------------------------------------- + | 1 | value2 | | | | col_7 | | 3 + | 2 | value | | | | col_7 | | 4 + | | string | | 11 | | col_7 | | 2 + | | string_11 | | 12 | | col_7 | | 1 +(4 rows) + +ROLLBACK; +BEGIN; +INSERT INTO local_dest_table (col_4, col_3) SELECT + 'string1', + 'string2'::text +FROM dist_source_table_1 t1 +WHERE dist_col = 1 +RETURNING *; + col_1 | col_2 | col_3 | col_4 | col_5 | col_6 | col_7 | col_8 | col_9 +--------------------------------------------------------------------- + | | string2 | string1 | | | col_7 | | 5 + | | string2 | string1 | | | col_7 | | 6 +(2 rows) + +ROLLBACK; \set VERBOSITY terse DROP SCHEMA insert_select_into_local_table CASCADE; -NOTICE: drop cascades to 5 other objects +NOTICE: drop cascades to 12 other objects diff --git a/src/test/regress/expected/insert_select_repartition.out b/src/test/regress/expected/insert_select_repartition.out index 856690159..f6e4f17a5 100644 --- a/src/test/regress/expected/insert_select_repartition.out +++ b/src/test/regress/expected/insert_select_repartition.out @@ -500,7 +500,7 @@ INSERT INTO target_table SELECT mapped_key, c FROM t NATURAL JOIN source_table; DEBUG: volatile functions are not allowed in distributed INSERT ... 
SELECT queries DEBUG: generating subplan XXX_1 for CTE t: SELECT mapped_key, a, c FROM insert_select_repartition.source_table WHERE ((a)::double precision OPERATOR(pg_catalog.>) floor(random())) -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT mapped_key AS a, (c)::integer[] AS b FROM (SELECT t.mapped_key, t.c FROM ((SELECT intermediate_result.mapped_key, intermediate_result.a, intermediate_result.c FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(mapped_key integer, a integer, c double precision[])) t JOIN insert_select_repartition.source_table USING (mapped_key, a, c))) citus_insert_select_subquery +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT mapped_key AS a, auto_coerced_by_citus_1 AS b FROM (SELECT t.mapped_key, (t.c)::integer[] AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.mapped_key, intermediate_result.a, intermediate_result.c FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(mapped_key integer, a integer, c double precision[])) t JOIN insert_select_repartition.source_table USING (mapped_key, a, c))) citus_insert_select_subquery DEBUG: performing repartitioned INSERT ... SELECT RESET client_min_messages; SELECT * FROM target_table ORDER BY a; diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index a7c53e293..8d4d00d36 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -1411,21 +1411,20 @@ WITH cte1 AS (SELECT * FROM cte1 WHERE EXISTS (SELECT * FROM cte1) LIMIT 5) SELECT s FROM cte1 WHERE EXISTS (SELECT * FROM cte1); Custom Scan (Citus INSERT ... SELECT) INSERT/SELECT method: pull to coordinator - -> Subquery Scan on citus_insert_select_subquery + -> Result + One-Time Filter: $3 CTE cte1 -> Function Scan on generate_series s - -> Result - One-Time Filter: $3 - CTE cte1 - -> Limit - InitPlan 2 (returns $1) - -> CTE Scan on cte1 cte1_1 - -> Result - One-Time Filter: $1 - -> CTE Scan on cte1 cte1_2 - InitPlan 4 (returns $3) - -> CTE Scan on cte1 cte1_3 - -> CTE Scan on cte1 + CTE cte1 + -> Limit + InitPlan 2 (returns $1) + -> CTE Scan on cte1 cte1_1 + -> Result + One-Time Filter: $1 + -> CTE Scan on cte1 cte1_2 + InitPlan 4 (returns $3) + -> CTE Scan on cte1 cte1_3 + -> CTE Scan on cte1 EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part ( SELECT s FROM generate_series(1,5) s) UNION diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index c7679d02e..f02c58fb1 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -669,7 +669,7 @@ DEBUG: distributed INSERT ... 
SELECT can only select from distributed tables DEBUG: Router planner cannot handle multi-shard select queries DEBUG: generating subplan XXX_1 for CTE fist_table_agg: SELECT (max(value_1) OPERATOR(pg_catalog.+) 1) AS v1_agg, user_id FROM public.raw_events_first GROUP BY user_id DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, v1_agg AS value_1_agg FROM (SELECT fist_table_agg.v1_agg, fist_table_agg.user_id FROM (SELECT intermediate_result.v1_agg, intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(v1_agg integer, user_id integer)) fist_table_agg) citus_insert_select_subquery +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, v1_agg AS value_1_agg FROM (SELECT fist_table_agg.user_id, fist_table_agg.v1_agg FROM (SELECT intermediate_result.v1_agg, intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(v1_agg integer, user_id integer)) fist_table_agg) citus_insert_select_subquery DEBUG: Creating router plan DEBUG: Collecting INSERT ... SELECT results on coordinator ROLLBACK; @@ -2714,7 +2714,6 @@ WITH top10 AS ( ) INSERT INTO dist_table_with_sequence (value_1) SELECT * FROM top10; -ERROR: cannot handle complex subqueries when the router executor is disabled SELECT * FROM dist_table_with_sequence ORDER BY user_id, value_1; user_id | value_1 --------------------------------------------------------------------- @@ -2799,7 +2798,6 @@ WITH top10 AS ( ) INSERT INTO dist_table_with_user_sequence (value_1) SELECT * FROM top10; -ERROR: cannot handle complex subqueries when the router executor is disabled SELECT * FROM dist_table_with_user_sequence ORDER BY user_id, value_1; user_id | value_1 --------------------------------------------------------------------- @@ -3236,6 +3234,47 @@ INSERT INTO raw_events_first SELECT * FROM raw_events_first OFFSET 0 ON CONFLICT DO NOTHING; ABORT; +-- test fix for issue https://github.com/citusdata/citus/issues/5891 +CREATE TABLE dist_table_1( +dist_col integer, +int_col integer, +text_col_1 text, +text_col_2 text +); +SELECT create_distributed_table('dist_table_1', 'dist_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_table_1 VALUES (1, 1, 'string', 'string'); +CREATE TABLE dist_table_2( +dist_col integer, +int_col integer +); +SELECT create_distributed_table('dist_table_2', 'dist_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_table_2 VALUES (1, 1); +with a as (select random()) INSERT INTO dist_table_1 +SELECT +t1.dist_col, +1, +'string', +'string' +FROM a, dist_table_1 t1 +join dist_table_2 t2 using (dist_col) +limit 1 +returning text_col_1; + text_col_1 +--------------------------------------------------------------------- + string +(1 row) + +DROP TABLE dist_table_1, dist_table_2; -- wrap in a transaction to improve performance BEGIN; DROP TABLE coerce_events; diff --git a/src/test/regress/expected/mx_coordinator_shouldhaveshards.out b/src/test/regress/expected/mx_coordinator_shouldhaveshards.out index ba6eb8dba..cbfe25281 100644 --- a/src/test/regress/expected/mx_coordinator_shouldhaveshards.out +++ b/src/test/regress/expected/mx_coordinator_shouldhaveshards.out @@ -78,8 +78,8 @@ HAVING (max(table_2.value) >= (SELECT value FROM a)); DEBUG: Group 
by list without distribution column is not allowed in distributed INSERT ... SELECT queries DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1 ORDER BY key, value DESC LIMIT 1 DEBUG: push down of limit count: 1 -DEBUG: generating subplan XXX_2 for subquery SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT int4(count) AS key, (key)::text AS value FROM (SELECT intermediate_result.count, intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint, key integer)) citus_insert_select_subquery +DEBUG: generating subplan XXX_2 for subquery SELECT int4(count(*)) AS auto_coerced_by_citus_0, (a.key)::text AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx @@ -139,8 +139,8 @@ HAVING (max(table_2_rep.value) >= (SELECT value FROM a)); DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... 
SELECT queries DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1_rep ORDER BY key, value DESC LIMIT 1 DEBUG: push down of limit count: 1 -DEBUG: generating subplan XXX_2 for subquery SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2_rep USING (key)) GROUP BY a.key HAVING (max(table_2_rep.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT int4(count) AS key, (key)::text AS value FROM (SELECT intermediate_result.count, intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint, key integer)) citus_insert_select_subquery +DEBUG: generating subplan XXX_2 for subquery SELECT int4(count(*)) AS auto_coerced_by_citus_0, (a.key)::text AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2_rep USING (key)) GROUP BY a.key HAVING (max(table_2_rep.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx @@ -204,8 +204,8 @@ HAVING (max(table_2.value) >= (SELECT value FROM a)); DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... 
SELECT queries DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1 ORDER BY key, value DESC LIMIT 1 DEBUG: push down of limit count: 1 -DEBUG: generating subplan XXX_2 for subquery SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT int4(count) AS key, (key)::text AS value FROM (SELECT intermediate_result.count, intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint, key integer)) citus_insert_select_subquery +DEBUG: generating subplan XXX_2 for subquery SELECT int4(count(*)) AS auto_coerced_by_citus_0, (a.key)::text AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2 USING (key)) GROUP BY a.key HAVING (max(table_2.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx @@ -265,8 +265,8 @@ HAVING (max(table_2_rep.value) >= (SELECT value FROM a)); DEBUG: Group by list without distribution column is not allowed in distributed INSERT ... 
SELECT queries DEBUG: generating subplan XXX_1 for CTE a: SELECT key, value FROM mx_coordinator_shouldhaveshards.table_1_rep ORDER BY key, value DESC LIMIT 1 DEBUG: push down of limit count: 1 -DEBUG: generating subplan XXX_2 for subquery SELECT count(*) AS count, a.key FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2_rep USING (key)) GROUP BY a.key HAVING (max(table_2_rep.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT int4(count) AS key, (key)::text AS value FROM (SELECT intermediate_result.count, intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint, key integer)) citus_insert_select_subquery +DEBUG: generating subplan XXX_2 for subquery SELECT int4(count(*)) AS auto_coerced_by_citus_0, (a.key)::text AS auto_coerced_by_citus_1 FROM ((SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a JOIN mx_coordinator_shouldhaveshards.table_2_rep USING (key)) GROUP BY a.key HAVING (max(table_2_rep.value) OPERATOR(pg_catalog.>=) (SELECT a_1.value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) a_1)) +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT auto_coerced_by_citus_0 AS key, auto_coerced_by_citus_1 AS value FROM (SELECT intermediate_result.auto_coerced_by_citus_0, intermediate_result.auto_coerced_by_citus_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(auto_coerced_by_citus_0 integer, auto_coerced_by_citus_1 text)) citus_insert_select_subquery DEBUG: Collecting INSERT ... 
SELECT results on coordinator DEBUG: Subplan XXX_1 will be written to local file DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx diff --git a/src/test/regress/sql/insert_select_into_local_table.sql b/src/test/regress/sql/insert_select_into_local_table.sql index 21564f1f6..1b2b49a5d 100644 --- a/src/test/regress/sql/insert_select_into_local_table.sql +++ b/src/test/regress/sql/insert_select_into_local_table.sql @@ -64,6 +64,30 @@ INSERT INTO non_dist_unique SELECT a+1, b FROM dist_table ON CONFLICT (a) DO NOT SELECT * FROM non_dist_unique ORDER BY 1; INSERT INTO non_dist_unique SELECT a+2, b FROM dist_table ON CONFLICT (a) DO UPDATE SET b = EXCLUDED.b + non_dist_unique.b; SELECT * FROM non_dist_unique ORDER BY 1; + +INSERT INTO non_dist_unique +SELECT a+1, b FROM dist_table +UNION ALL +SELECT a+100, b FROM dist_table +ON CONFLICT (a) DO NOTHING; +SELECT * FROM non_dist_unique ORDER BY 1; + +INSERT INTO non_dist_unique +SELECT a+1, b FROM dist_table +UNION ALL +SELECT a+100, b FROM dist_table +ON CONFLICT (a) DO UPDATE SET b = EXCLUDED.b + 1; +SELECT * FROM non_dist_unique ORDER BY 1; + +WITH cte1 AS (SELECT s FROM generate_series(1,10) s) +INSERT INTO non_dist_unique +WITH cte2 AS (SELECT s FROM generate_series(1,10) s) +SELECT a+1, b FROM dist_table WHERE b IN (SELECT s FROM cte1) +UNION ALL +SELECT s, s FROM cte1 +ON CONFLICT (a) DO NOTHING; +SELECT * FROM non_dist_unique ORDER BY 1; + DROP TABLE non_dist_unique; @@ -73,6 +97,7 @@ INSERT INTO non_dist_default SELECT a FROM dist_table WHERE a = 1; SELECT * FROM non_dist_default ORDER BY 1, 2; INSERT INTO non_dist_default SELECT a FROM dist_table WHERE a > 1; SELECT * FROM non_dist_default ORDER BY 1, 2; +SELECT alter_table_set_access_method('non_dist_default', 'columnar'); INSERT INTO non_dist_default SELECT a, c FROM dist_table WHERE a = 1; SELECT * FROM non_dist_default ORDER BY 1, 2; INSERT INTO non_dist_default SELECT a, c FROM dist_table WHERE a > 1; @@ -149,5 +174,427 @@ INSERT INTO non_dist_2 SELECT a, c FROM ref_table; SELECT * FROM non_dist_2 ORDER BY 1, 2; TRUNCATE non_dist_2; +-- check issue https://github.com/citusdata/citus/issues/5858 +CREATE TABLE local_dest_table( + col_1 integer, + col_2 integer, + col_3 text, + col_4 text, + drop_col text, + col_5 bigint, + col_6 text, + col_7 text default 'col_7', + col_8 varchar +); + +ALTER TABLE local_dest_table DROP COLUMN drop_col; + +CREATE TABLE dist_source_table_1( + int_col integer, + drop_col text, + text_col_1 text, + dist_col integer, + text_col_2 text +); +SELECT create_distributed_table('dist_source_table_1', 'dist_col'); + +ALTER TABLE dist_source_table_1 DROP COLUMN drop_col; + +INSERT INTO dist_source_table_1 VALUES (1, 'value', 1, 'value'); +INSERT INTO dist_source_table_1 VALUES (2, 'value2', 1, 'value'); +INSERT INTO dist_source_table_1 VALUES (3, 'value', 3, 'value3'); + +CREATE TABLE dist_source_table_2( + dist_col integer, + int_col integer +); +SELECT create_distributed_table('dist_source_table_2', 'dist_col'); + +INSERT INTO dist_source_table_2 VALUES (1, 1); +INSERT INTO dist_source_table_2 VALUES (2, 2); +INSERT INTO dist_source_table_2 VALUES (4, 4); + +CREATE TABLE local_source_table_1 AS SELECT * FROM dist_source_table_1; +CREATE TABLE local_source_table_2 AS SELECT * FROM dist_source_table_2; + +/* + * query_results_equal compares the effect of two queries on local_dest_table. 
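+ * For example, a call shaped like (illustrative)
+ *
+ *   SELECT query_results_equal($$
+ *     INSERT INTO local_dest_table (col_3) SELECT text_col_1 FROM %1$s
+ *   $$, 'local_source_table_1', 'dist_source_table_1');
+ *
+ * executes the %1$s-templated query once per source table and compares
+ * the resulting contents of local_dest_table.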
+ * We use this to ensure that INSERT INTO local_dest_table SELECT behaves + * the same when selecting from a regular table (postgres handles it) and + * a distributed table (Citus handles it). + * + * The queries are generated by calling format() on query_template twice, + * once for each source_table argument. + */ +CREATE OR REPLACE FUNCTION query_results_equal(query_template text, source_table_1 text, source_table_2 text) +RETURNS bool +AS $$ +DECLARE + l1 local_dest_table[]; + l2 local_dest_table[]; +BEGIN + /* get the results using source_table_1 as source */ + TRUNCATE local_dest_table; + EXECUTE format(query_template, source_table_1); + SELECT array_agg(l) INTO l1 + FROM (SELECT * FROM local_dest_table ORDER BY 1, 2, 3, 4, 5, 6, 7, 8) l; + + /* get the results using source_table_2 as source */ + TRUNCATE local_dest_table; + EXECUTE format(query_template, source_table_2); + SELECT array_agg(l) INTO l2 + FROM (SELECT * FROM local_dest_table ORDER BY 1, 2, 3, 4, 5, 6, 7, 8) l; + + RAISE NOTICE 'l1=%', l1; + RAISE NOTICE 'l2=%', l2; + RETURN l1 = l2; +END; +$$ LANGUAGE plpgsql; + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table + SELECT + t1.dist_col, + 1, + 'string1', + 'string2', + 2, + 'string3', + t1.text_col_1, + t1.text_col_2 + FROM %1$s_1 t1 + WHERE t1.int_col IN (SELECT int_col FROM %1$s_2) +$$, 'local_source_table', 'dist_source_table'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table + SELECT + t1.dist_col, + 1, + 'string1', + 'string2', + 2, + 'string3', + t1.text_col_1, + t1.text_col_2 + FROM %1$s t1 + returning * +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_3, col_4) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_7, col_4) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_4, col_3) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + WHERE dist_col = 1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s + UNION ALL + SELECT + 'string', + int_col + FROM %1$s; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + WITH cte1 AS (SELECT s FROM generate_series(1,10) s) + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s WHERE int_col IN (SELECT s FROM cte1) + UNION ALL + SELECT + 'string', + int_col + FROM %1$s WHERE int_col IN (SELECT s + 1 FROM cte1) +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + WITH cte1 AS (SELECT 'stringcte', s FROM generate_series(1,10) s) + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s WHERE int_col IN (SELECT s FROM cte1) + UNION ALL + SELECT + * + FROM cte1 +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_3) + SELECT t1.text_col_1 + FROM %1$s t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_1, col_2, col_3, col_5, col_6, col_7, col_8) +
SELECT + max(t1.dist_col), + 3, + 'string_3', + 4, + 44, + t1.text_col_1, + 'string_1000' + FROM %1$s t1 + GROUP BY t1.text_col_2, t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_7, col_8) + SELECT + t1.text_col_1, + 'string_1000' + FROM dist_source_table_1 t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_6, col_7, col_8) + SELECT + 'string_4', + t1.text_col_1, + 'string_1000' + FROM %1$s t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_5, col_3) + SELECT 12, 'string_11' FROM %1$s t1 + UNION + SELECT int_col, 'string' FROM %1$s; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table(col_3, col_2) + SELECT text_col_1, count(*) FROM %1$s GROUP BY 1 +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table(col_3, col_5) + SELECT text_col_1, count(*)::int FROM %1$s GROUP BY 1 +$$, 'local_source_table_1', 'dist_source_table_1'); + +-- repeat above tests with Citus local table +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table + SELECT + t1.dist_col, + 1, + 'string1', + 'string2', + 2, + 'string3', + t1.text_col_1, + t1.text_col_2 + FROM %1$s_1 t1 + WHERE t1.int_col IN (SELECT int_col FROM %1$s_2) +$$, 'local_source_table', 'dist_source_table'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table + SELECT + t1.dist_col, + 1, + 'string1', + 'string2', + 2, + 'string3', + t1.text_col_1, + t1.text_col_2 + FROM %1$s t1 + returning * +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_3, col_4) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_7, col_4) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_4, col_3) SELECT + 'string1', + 'string2'::text + FROM %1$s t1 + WHERE dist_col = 1 + returning *; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s + UNION ALL + SELECT + 'string', + int_col + FROM %1$s; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + WITH cte1 AS (SELECT s FROM generate_series(1,10) s) + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s WHERE int_col IN (SELECT s FROM cte1) + UNION ALL + SELECT + 'string', + int_col + FROM %1$s WHERE int_col IN (SELECT s + 1 FROM cte1) +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + WITH cte1 AS (SELECT 'stringcte', s FROM generate_series(1,10) s) + INSERT INTO local_dest_table (col_4, col_1) + SELECT + 'string1', + dist_col + FROM %1$s WHERE int_col IN (SELECT s FROM cte1) + UNION ALL + SELECT + * + FROM cte1 +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_3) + SELECT 
t1.text_col_1 + FROM %1$s t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_1, col_2, col_3, col_5, col_6, col_7, col_8) + SELECT + max(t1.dist_col), + 3, + 'string_3', + 4, + 44, + t1.text_col_1, + 'string_1000' + FROM %1$s t1 + GROUP BY t1.text_col_2, t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_7, col_8) + SELECT + t1.text_col_1, + 'string_1000' + FROM dist_source_table_1 t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_6, col_7, col_8) + SELECT + 'string_4', + t1.text_col_1, + 'string_1000' + FROM %1$s t1 + GROUP BY t1.text_col_1; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table (col_5, col_3) + SELECT 12, 'string_11' FROM %1$s t1 + UNION + SELECT int_col, 'string' FROM %1$s; +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table(col_3, col_2) + SELECT text_col_1, count(*) FROM %1$s GROUP BY 1 +$$, 'local_source_table_1', 'dist_source_table_1'); + +SELECT * FROM query_results_equal($$ + INSERT INTO local_dest_table(col_3, col_5) + SELECT text_col_1, count(*)::int FROM %1$s GROUP BY 1 +$$, 'local_source_table_1', 'dist_source_table_1'); + +-- go back to proper local table for remaining tests +TRUNCATE local_dest_table; +SELECT undistribute_table('local_source_table_1'); + +-- use a sequence (cannot use query_results_equal, since sequence values would not match) +CREATE SEQUENCE seq; + +BEGIN; +INSERT INTO local_dest_table (col_5, col_3) +SELECT 12, 'string_11' FROM dist_source_table_1 +UNION +SELECT nextval('seq'), 'string' FROM dist_source_table_1; +SELECT * FROM local_dest_table ORDER BY 1,2,3,4,5,6,7,8; +ROLLBACK; + +-- add a bigserial column +ALTER TABLE local_dest_table ADD COLUMN col_9 bigserial; + +-- not supported due to limitations in nextval handling +INSERT INTO local_dest_table (col_5, col_3) +SELECT 12, 'string_11' FROM dist_source_table_1 +UNION +SELECT 11, 'string' FROM dist_source_table_1; +SELECT * FROM local_dest_table ORDER BY 1,2,3,4,5,6,7,8; + +BEGIN; +INSERT INTO local_dest_table(col_3, col_2) +SELECT text_col_1, count(*) FROM dist_source_table_1 GROUP BY 1; +SELECT * FROM local_dest_table ORDER BY 1,2,3,4,5,6,7,8; +ROLLBACK; + +BEGIN; +INSERT INTO local_dest_table (col_4, col_3) SELECT + 'string1', + 'string2'::text +FROM dist_source_table_1 t1 +WHERE dist_col = 1 +RETURNING *; +ROLLBACK; + \set VERBOSITY terse DROP SCHEMA insert_select_into_local_table CASCADE; diff --git a/src/test/regress/sql/multi_insert_select.sql b/src/test/regress/sql/multi_insert_select.sql index 92c802fe1..baa176d9d 100644 --- a/src/test/regress/sql/multi_insert_select.sql +++ b/src/test/regress/sql/multi_insert_select.sql @@ -2337,6 +2337,40 @@ SELECT * FROM raw_events_first OFFSET 0 ON CONFLICT DO NOTHING; ABORT; +-- test fix for issue https://github.com/citusdata/citus/issues/5891 +CREATE TABLE dist_table_1( +dist_col integer, +int_col integer, +text_col_1 text, +text_col_2 text +); + +SELECT create_distributed_table('dist_table_1', 'dist_col'); + +INSERT INTO dist_table_1 VALUES (1, 1, 'string', 'string'); + +CREATE TABLE dist_table_2( +dist_col integer, +int_col integer +); + +SELECT create_distributed_table('dist_table_2', 
'dist_col'); + +INSERT INTO dist_table_2 VALUES (1, 1); + +with a as (select random()) INSERT INTO dist_table_1 +SELECT +t1.dist_col, +1, +'string', +'string' +FROM a, dist_table_1 t1 +join dist_table_2 t2 using (dist_col) +limit 1 +returning text_col_1; + +DROP TABLE dist_table_1, dist_table_2; + -- wrap in a transaction to improve performance BEGIN; DROP TABLE coerce_events; From a218198e8faa27333e7113653bb97f1271130a01 Mon Sep 17 00:00:00 2001 From: aykut-bozkurt <51649454+aykut-bozkurt@users.noreply.github.com> Date: Thu, 28 Jul 2022 15:31:49 +0300 Subject: [PATCH 21/38] reindex object address should return invalid addresses for unsupported object types in reindex stmt (#6096) --- src/backend/distributed/commands/index.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index d424d6a3c..8fef77dc0 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -654,14 +654,21 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand, /* * ReindexStmtObjectAddress returns list of object addresses in the reindex - * statement. + * statement. We add the address if the object is either an index or a table; + * otherwise, we add an invalid address. */ List * ReindexStmtObjectAddress(Node *stmt, bool missing_ok) { ReindexStmt *reindexStatement = castNode(ReindexStmt, stmt); - Oid relationId = ReindexStmtFindRelationOid(reindexStatement, missing_ok); + Oid relationId = InvalidOid; + if (reindexStatement->relation != NULL) + { + /* we currently only support reindex commands on tables */ + relationId = ReindexStmtFindRelationOid(reindexStatement, missing_ok); + } + ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*objectAddress, RelationRelationId, relationId); From 7c1a93b26b47285b02ab13ab877df6011bbdf2b0 Mon Sep 17 00:00:00 2001 From: Ying Xu <32597660+yxu2162@users.noreply.github.com> Date: Thu, 28 Jul 2022 14:15:45 -0700 Subject: [PATCH 22/38] Removed USE_PGXS snippet in Makefile that was blocking citus build when flag is set (#6101) A code snippet in the Makefile was blocking the Citus build when the USE_PGXS flag was set. It was included for the port to FSPG but is not needed for the Citus engine and can be safely removed. --- src/backend/columnar/Makefile | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/backend/columnar/Makefile b/src/backend/columnar/Makefile index bf17b98bf..f9fa09b7c 100644 --- a/src/backend/columnar/Makefile +++ b/src/backend/columnar/Makefile @@ -17,13 +17,7 @@ DATA = $(columnar_sql_files) \ PG_CPPFLAGS += -I$(libpq_srcdir) -I$(safestringlib_srcdir)/include -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else include $(citus_top_builddir)/Makefile.global .PHONY: install-all install-all: install -endif From 24a9735e1c4a51d325bfb38bd3c21c7f87f443f3 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Wed, 27 Jul 2022 16:21:06 +0200 Subject: [PATCH 23/38] Remove unused gitattributes --- .gitattributes | 3 --- 1 file changed, 3 deletions(-) diff --git a/.gitattributes b/.gitattributes index 454a83448..f8fea016a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -26,9 +26,6 @@ configure -whitespace # except these exceptions...
src/backend/distributed/utils/citus_outfuncs.c -citus-style -src/backend/distributed/utils/pg11_snprintf.c -citus-style -src/backend/distributed/deparser/ruleutils_11.c -citus-style -src/backend/distributed/deparser/ruleutils_12.c -citus-style src/backend/distributed/deparser/ruleutils_13.c -citus-style src/backend/distributed/deparser/ruleutils_14.c -citus-style src/backend/distributed/commands/index_pg_source.c -citus-style From 149771792b22ce56a4bc3e121b94178977d5ed4f Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Wed, 27 Jul 2022 16:41:22 +0200 Subject: [PATCH 24/38] Remove useless version compats most likely leftover from earlier versions --- src/backend/columnar/columnar_tableam.c | 6 ++-- src/backend/distributed/commands/call.c | 4 +-- .../commands/create_distributed_table.c | 2 +- src/backend/distributed/commands/multi_copy.c | 32 +++++++++---------- src/backend/distributed/commands/policy.c | 2 +- src/backend/distributed/commands/sequence.c | 2 +- .../distributed/commands/subscription.c | 5 ++- .../distributed/commands/utility_hook.c | 8 ++--- .../connection/connection_management.c | 2 +- .../distributed/executor/local_executor.c | 6 ++-- .../distributed/executor/multi_executor.c | 12 +++---- .../partitioned_intermediate_results.c | 4 +-- .../distributed/executor/query_stats.c | 2 +- .../distributed/metadata/metadata_cache.c | 10 +++--- .../distributed/operations/node_protocol.c | 2 +- .../planner/combine_query_planner.c | 2 +- .../distributed/planner/distributed_planner.c | 8 ++--- .../planner/insert_select_planner.c | 6 ++-- .../planner/intermediate_result_pruning.c | 2 +- .../distributed/planner/local_plan_cache.c | 2 +- .../distributed/planner/multi_explain.c | 16 +++++----- .../planner/multi_logical_optimizer.c | 8 ++--- .../planner/multi_logical_planner.c | 2 +- .../distributed/planner/recursive_planning.c | 2 +- .../distributed/progress/multi_progress.c | 2 +- .../replication/multi_logical_replication.c | 4 +-- .../test/distributed_intermediate_results.c | 16 +++++----- .../test/foreign_key_relationship_query.c | 4 +-- ...foreign_key_to_reference_table_rebalance.c | 2 +- .../transaction/relation_access_tracking.c | 2 +- .../transaction/transaction_management.c | 2 +- .../transaction/transaction_recovery.c | 2 +- .../utils/foreign_key_relationship.c | 2 +- src/include/distributed/commands/multi_copy.h | 2 +- .../distributed/commands/utility_hook.h | 4 +-- src/include/distributed/listutils.h | 18 +++++------ src/include/pg_version_compat.h | 25 --------------- 37 files changed, 103 insertions(+), 129 deletions(-) diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index cef532d2d..b8e94b774 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -122,7 +122,7 @@ static void ColumnarProcessUtility(PlannedStmt *pstmt, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, - QueryCompletionCompat *completionTag); + QueryCompletion *completionTag); static bool ConditionalLockRelationWithTimeout(Relation rel, LOCKMODE lockMode, int timeout, int retryInterval); static List * NeededColumnsList(TupleDesc tupdesc, Bitmapset *attr_needed); @@ -2239,7 +2239,7 @@ ColumnarProcessUtility(PlannedStmt *pstmt, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, - QueryCompletionCompat *completionTag) + QueryCompletion *completionTag) { #if PG_VERSION_NUM >= PG_VERSION_14 if (readOnlyTree) @@ -2956,7 +2956,7 @@ AvailableExtensionVersionColumnar(void) /* 
pg_available_extensions returns result set containing all available extensions */ (*pg_available_extensions)(fcinfo); - TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlot( extensionsResultSet->setDesc, &TTSOpsMinimalTuple); bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, diff --git a/src/backend/distributed/commands/call.c b/src/backend/distributed/commands/call.c index 1572ba8fe..b2f0bfca1 100644 --- a/src/backend/distributed/commands/call.c +++ b/src/backend/distributed/commands/call.c @@ -168,8 +168,8 @@ CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *dest) { Tuplestorestate *tupleStore = tuplestore_begin_heap(true, false, work_mem); TupleDesc tupleDesc = CallStmtResultDesc(callStmt); - TupleTableSlot *slot = MakeSingleTupleTableSlotCompat(tupleDesc, - &TTSOpsMinimalTuple); + TupleTableSlot *slot = MakeSingleTupleTableSlot(tupleDesc, + &TTSOpsMinimalTuple); bool expectResults = true; Task *task = CitusMakeNode(Task); diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 52043ac25..269a315ae 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -1654,7 +1654,7 @@ CopyLocalDataIntoShards(Oid distributedRelationId) /* get the table columns */ TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation); - TupleTableSlot *slot = CreateTableSlotForRel(distributedRelation); + TupleTableSlot *slot = table_slot_create(distributedRelation, NULL); List *columnNameList = TupleDescColumnNameList(tupleDescriptor); int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX; diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 89c30c6a7..1061feb28 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -245,7 +245,7 @@ typedef enum LocalCopyStatus /* Local functions forward declarations */ static void CopyToExistingShards(CopyStmt *copyStatement, - QueryCompletionCompat *completionTag); + QueryCompletion *completionTag); static bool IsCopyInBinaryFormat(CopyStmt *copyStatement); static List * FindJsonbInputColumns(TupleDesc tupleDescriptor, List *inputColumnNameList); @@ -274,7 +274,7 @@ static FmgrInfo * TypeOutputFunctions(uint32 columnCount, Oid *typeIdArray, static List * CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist); #endif static bool CopyStatementHasFormat(CopyStmt *copyStatement, char *formatName); -static void CitusCopyFrom(CopyStmt *copyStatement, QueryCompletionCompat *completionTag); +static void CitusCopyFrom(CopyStmt *copyStatement, QueryCompletion *completionTag); static void EnsureCopyCanRunOnRelation(Oid relationId); static HTAB * CreateConnectionStateHash(MemoryContext memoryContext); static HTAB * CreateShardStateHash(MemoryContext memoryContext); @@ -308,7 +308,7 @@ static void UnclaimCopyConnections(List *connectionStateList); static void ShutdownCopyConnectionState(CopyConnectionState *connectionState, CitusCopyDestReceiver *copyDest); static SelectStmt * CitusCopySelect(CopyStmt *copyStatement); -static void CitusCopyTo(CopyStmt *copyStatement, QueryCompletionCompat *completionTag); +static void CitusCopyTo(CopyStmt *copyStatement, QueryCompletion *completionTag); static int64 ForwardCopyDataFromConnection(CopyOutState copyOutState, 
MultiConnection *connection); @@ -345,7 +345,7 @@ static bool CitusCopyDestReceiverReceive(TupleTableSlot *slot, static void CitusCopyDestReceiverShutdown(DestReceiver *destReceiver); static void CitusCopyDestReceiverDestroy(DestReceiver *destReceiver); static bool ContainsLocalPlacement(int64 shardId); -static void CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 +static void CompleteCopyQueryTagCompat(QueryCompletion *completionTag, uint64 processedRowCount); static void FinishLocalCopy(CitusCopyDestReceiver *copyDest); static void CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest, @@ -368,7 +368,7 @@ PG_FUNCTION_INFO_V1(citus_text_send_as_jsonb); * and the partition method of the distributed table. */ static void -CitusCopyFrom(CopyStmt *copyStatement, QueryCompletionCompat *completionTag) +CitusCopyFrom(CopyStmt *copyStatement, QueryCompletion *completionTag) { UseCoordinatedTransaction(); @@ -450,7 +450,7 @@ EnsureCopyCanRunOnRelation(Oid relationId) * rows. */ static void -CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag) +CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag) { Oid tableId = RangeVarGetRelid(copyStatement->relation, NoLock, false); @@ -471,8 +471,8 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT bool *columnNulls = palloc0(columnCount * sizeof(bool)); /* set up a virtual tuple table slot */ - TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat(tupleDescriptor, - &TTSOpsVirtual); + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlot(tupleDescriptor, + &TTSOpsVirtual); tupleTableSlot->tts_nvalid = columnCount; tupleTableSlot->tts_values = columnValues; tupleTableSlot->tts_isnull = columnNulls; @@ -639,8 +639,8 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext); /* parse a row from the input */ - bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext, - columnValues, columnNulls); + bool nextRowFound = NextCopyFrom(copyState, executorExpressionContext, + columnValues, columnNulls); if (!nextRowFound) { @@ -760,7 +760,7 @@ FindJsonbInputColumns(TupleDesc tupleDescriptor, List *inputColumnNameList) static void -CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 processedRowCount) +CompleteCopyQueryTagCompat(QueryCompletion *completionTag, uint64 processedRowCount) { SetQueryCompletion(completionTag, CMDTAG_COPY, processedRowCount); } @@ -780,7 +780,7 @@ RemoveOptionFromList(List *optionList, char *optionName) if (strncmp(option->defname, optionName, NAMEDATALEN) == 0) { - return list_delete_cell_compat(optionList, optionCell, previousCell); + return list_delete_cell(optionList, optionCell); } } @@ -1387,7 +1387,7 @@ ColumnCoercionPaths(TupleDesc destTupleDescriptor, TupleDesc inputTupleDescripto ConversionPathForTypes(inputTupleType, destTupleType, &coercePaths[columnIndex]); - currentColumnName = lnext_compat(columnNameList, currentColumnName); + currentColumnName = lnext(columnNameList, currentColumnName); if (currentColumnName == NULL) { @@ -2864,7 +2864,7 @@ CopyStatementHasFormat(CopyStmt *copyStatement, char *formatName) * further processing is needed. 
*/ Node * -ProcessCopyStmt(CopyStmt *copyStatement, QueryCompletionCompat *completionTag, const +ProcessCopyStmt(CopyStmt *copyStatement, QueryCompletion *completionTag, const char *queryString) { /* @@ -3016,7 +3016,7 @@ CitusCopySelect(CopyStmt *copyStatement) * table dump. */ static void -CitusCopyTo(CopyStmt *copyStatement, QueryCompletionCompat *completionTag) +CitusCopyTo(CopyStmt *copyStatement, QueryCompletion *completionTag) { ListCell *shardIntervalCell = NULL; int64 tuplesSent = 0; @@ -3481,7 +3481,7 @@ InitializeCopyShardState(CopyShardState *shardState, bool hasRemoteCopy = false; MemoryContext localContext = - AllocSetContextCreateExtended(CurrentMemoryContext, + AllocSetContextCreateInternal(CurrentMemoryContext, "InitializeCopyShardState", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, diff --git a/src/backend/distributed/commands/policy.c b/src/backend/distributed/commands/policy.c index f3e7b7ec9..5250f9580 100644 --- a/src/backend/distributed/commands/policy.c +++ b/src/backend/distributed/commands/policy.c @@ -362,7 +362,7 @@ PreprocessAlterPolicyStmt(Node *node, const char *queryString, appendStringInfoString(&ddlString, RoleSpecString(roleSpec, true)); - if (lnext_compat(stmt->roles, roleCell) != NULL) + if (lnext(stmt->roles, roleCell) != NULL) { appendStringInfoString(&ddlString, ", "); } diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 0850a0222..a5311c714 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -191,7 +191,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList, *columnNameList = lappend(*columnNameList, columnName); List *columnOwnedSequences = - GetSequencesOwnedByColumn(relationId, attributeIndex + 1); + getOwnedSequences_internal(relationId, attributeIndex + 1, 0); Oid ownedSequenceId = InvalidOid; if (list_length(columnOwnedSequences) != 0) diff --git a/src/backend/distributed/commands/subscription.c b/src/backend/distributed/commands/subscription.c index 76d3bcb77..59603b559 100644 --- a/src/backend/distributed/commands/subscription.c +++ b/src/backend/distributed/commands/subscription.c @@ -45,9 +45,8 @@ ProcessCreateSubscriptionStmt(CreateSubscriptionStmt *createSubStmt) { useAuthinfo = defGetBoolean(defElem); - createSubStmt->options = list_delete_cell_compat(createSubStmt->options, - currCell, - prevCell); + createSubStmt->options = list_delete_cell(createSubStmt->options, + currCell); break; } diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index ab1792b31..dde27bc97 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -104,7 +104,7 @@ static void ProcessUtilityInternal(PlannedStmt *pstmt, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, - QueryCompletionCompat *completionTag); + QueryCompletion *completionTag); #if PG_VERSION_NUM >= 140000 static void set_indexsafe_procflags(void); #endif @@ -128,7 +128,7 @@ void ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, - QueryCompletionCompat *completionTag) + QueryCompletion *completionTag) { PlannedStmt *plannedStmt = makeNode(PlannedStmt); plannedStmt->commandType = CMD_UTILITY; @@ -158,7 +158,7 @@ multi_ProcessUtility(PlannedStmt *pstmt, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver 
*dest, - QueryCompletionCompat *completionTag) + QueryCompletion *completionTag) { Node *parsetree; @@ -372,7 +372,7 @@ ProcessUtilityInternal(PlannedStmt *pstmt, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, - QueryCompletionCompat *completionTag) + QueryCompletion *completionTag) { Node *parsetree = pstmt->utilityStmt; List *ddlJobs = NIL; diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index df6096321..bb42514ce 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -105,7 +105,7 @@ InitializeConnectionManagement(void) * management. Doing so, instead of allocating in TopMemoryContext, makes * it easier to associate used memory. */ - ConnectionContext = AllocSetContextCreateExtended(TopMemoryContext, + ConnectionContext = AllocSetContextCreateInternal(TopMemoryContext, "Connection Context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, diff --git a/src/backend/distributed/executor/local_executor.c b/src/backend/distributed/executor/local_executor.c index 5c1f0981e..48712edd6 100644 --- a/src/backend/distributed/executor/local_executor.c +++ b/src/backend/distributed/executor/local_executor.c @@ -339,7 +339,7 @@ ExecuteLocalTaskListExtended(List *taskList, * implemented. So, let planner to call distributed_planner() which * eventually calls standard_planner(). */ - localPlan = planner_compat(shardQuery, cursorOptions, paramListInfo); + localPlan = planner(shardQuery, NULL, cursorOptions, paramListInfo); } char *shardQueryString = NULL; @@ -382,8 +382,8 @@ LocallyPlanAndExecuteMultipleQueries(List *queryStrings, TupleDestination *tuple 0); int cursorOptions = 0; ParamListInfo paramListInfo = NULL; - PlannedStmt *localPlan = planner_compat(shardQuery, cursorOptions, - paramListInfo); + PlannedStmt *localPlan = planner(shardQuery, NULL, cursorOptions, + paramListInfo); totalProcessedRows += LocallyExecuteTaskPlan(localPlan, queryString, tupleDest, task, paramListInfo); diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index fb7f687a3..6acf6169b 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -459,8 +459,8 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript ResetPerTupleExprContext(executorState); MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext); - bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext, - columnValues, columnNulls); + bool nextRowFound = NextCopyFrom(copyState, executorExpressionContext, + columnValues, columnNulls); if (!nextRowFound) { MemoryContextSwitchTo(oldContext); @@ -554,8 +554,8 @@ SortTupleStore(CitusScanState *scanState) /* iterate over all the sorted tuples, add them to original tuplestore */ while (true) { - TupleTableSlot *newSlot = MakeSingleTupleTableSlotCompat(tupleDescriptor, - &TTSOpsMinimalTuple); + TupleTableSlot *newSlot = MakeSingleTupleTableSlot(tupleDescriptor, + &TTSOpsMinimalTuple); bool found = tuplesort_gettupleslot(tuplesortstate, true, false, newSlot, NULL); if (!found) @@ -660,7 +660,7 @@ ExecuteQueryIntoDestReceiver(Query *query, ParamListInfo params, DestReceiver *d } /* plan the subquery, this may be another distributed query */ - PlannedStmt *queryPlan = pg_plan_query_compat(query, NULL, cursorOptions, params); + PlannedStmt 
*queryPlan = pg_plan_query(query, NULL, cursorOptions, params); ExecutePlanIntoDestReceiver(queryPlan, params, dest); } @@ -686,7 +686,7 @@ ExecutePlanIntoDestReceiver(PlannedStmt *queryPlan, ParamListInfo params, PortalDefineQuery(portal, NULL, "", - CMDTAG_SELECT_COMPAT, + CMDTAG_SELECT, list_make1(queryPlan), NULL); diff --git a/src/backend/distributed/executor/partitioned_intermediate_results.c b/src/backend/distributed/executor/partitioned_intermediate_results.c index e19829ca2..752552343 100644 --- a/src/backend/distributed/executor/partitioned_intermediate_results.c +++ b/src/backend/distributed/executor/partitioned_intermediate_results.c @@ -286,14 +286,14 @@ StartPortalForQueryExecution(const char *queryString) Query *query = ParseQueryString(queryString, NULL, 0); int cursorOptions = CURSOR_OPT_PARALLEL_OK; - PlannedStmt *queryPlan = pg_plan_query_compat(query, NULL, cursorOptions, NULL); + PlannedStmt *queryPlan = pg_plan_query(query, NULL, cursorOptions, NULL); Portal portal = CreateNewPortal(); /* don't display the portal in pg_cursors, it is for internal use only */ portal->visible = false; - PortalDefineQuery(portal, NULL, queryString, CMDTAG_SELECT_COMPAT, + PortalDefineQuery(portal, NULL, queryString, CMDTAG_SELECT, list_make1(queryPlan), NULL); int eflags = 0; PortalStart(portal, NULL, eflags, GetActiveSnapshot()); diff --git a/src/backend/distributed/executor/query_stats.c b/src/backend/distributed/executor/query_stats.c index 26b3ff027..bbc104e43 100644 --- a/src/backend/distributed/executor/query_stats.c +++ b/src/backend/distributed/executor/query_stats.c @@ -868,7 +868,7 @@ BuildExistingQueryIdHash(void) fmgrPGStatStatements->fn_addr, pgStatStatementsOid, commandTypeDatum); - TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlot( statStatementsReturnSet->setDesc, &TTSOpsMinimalTuple); diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 7373162aa..1520384f3 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -2209,7 +2209,7 @@ AvailableExtensionVersion(void) /* pg_available_extensions returns result set containing all available extensions */ (*pg_available_extensions)(fcinfo); - TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlot( extensionsResultSet->setDesc, &TTSOpsMinimalTuple); bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, @@ -2700,10 +2700,10 @@ CitusCopyFormatTypeId(void) if (MetadataCache.copyFormatTypeId == InvalidOid) { char *typeName = "citus_copy_format"; - MetadataCache.copyFormatTypeId = GetSysCacheOid2Compat(TYPENAMENSP, - Anum_pg_enum_oid, - PointerGetDatum(typeName), - PG_CATALOG_NAMESPACE); + MetadataCache.copyFormatTypeId = GetSysCacheOid2(TYPENAMENSP, + Anum_pg_enum_oid, + PointerGetDatum(typeName), + PG_CATALOG_NAMESPACE); } return MetadataCache.copyFormatTypeId; diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index ca8ca8d9f..9d902c776 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -170,7 +170,7 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS) Assert(CitusIsA(ddlStatement, TableDDLCommand)); text *ddlStatementText = cstring_to_text(GetTableDDLCommand(ddlStatement)); - wrapper->listCell = 
lnext_compat(wrapper->list, wrapper->listCell); + wrapper->listCell = lnext(wrapper->list, wrapper->listCell); SRF_RETURN_NEXT(functionContext, PointerGetDatum(ddlStatementText)); } diff --git a/src/backend/distributed/planner/combine_query_planner.c b/src/backend/distributed/planner/combine_query_planner.c index 37bb443e3..f67f71b53 100644 --- a/src/backend/distributed/planner/combine_query_planner.c +++ b/src/backend/distributed/planner/combine_query_planner.c @@ -295,7 +295,7 @@ BuildSelectStatementViaStdPlanner(Query *combineQuery, List *remoteScanTargetLis ReplaceCitusExtraDataContainer = true; ReplaceCitusExtraDataContainerWithCustomScan = remoteScan; - standardStmt = standard_planner_compat(combineQuery, 0, NULL); + standardStmt = standard_planner(combineQuery, NULL, 0, NULL); ReplaceCitusExtraDataContainer = false; ReplaceCitusExtraDataContainerWithCustomScan = NULL; diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 5b677fb77..d16aa6785 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -239,9 +239,9 @@ distributed_planner(Query *parse, * restriction information per table and parse tree transformations made by * postgres' planner. */ - planContext.plan = standard_planner_compat(planContext.query, - planContext.cursorOptions, - planContext.boundParams); + planContext.plan = standard_planner(planContext.query, NULL, + planContext.cursorOptions, + planContext.boundParams); if (needsDistributedPlanning) { result = PlanDistributedStmt(&planContext, rteIdCounter); @@ -1024,7 +1024,7 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi * being contiguous. */ - standard_planner_compat(newQuery, 0, boundParams); + standard_planner(newQuery, NULL, 0, boundParams); /* overwrite the old transformed query with the new transformed query */ *query = *newQuery; diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index e861a7bbb..097b2bb81 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -987,7 +987,7 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, oldInsertTargetEntry->resname); /* see transformInsertRow() for the details */ - if (IsA(oldInsertTargetEntry->expr, ArrayRef) || + if (IsA(oldInsertTargetEntry->expr, SubscriptingRef) || IsA(oldInsertTargetEntry->expr, FieldStore)) { ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -1515,8 +1515,8 @@ CreateNonPushableInsertSelectPlan(uint64 planId, Query *parse, ParamListInfo bou /* plan the subquery, this may be another distributed query */ int cursorOptions = CURSOR_OPT_PARALLEL_OK; - PlannedStmt *selectPlan = pg_plan_query_compat(selectQueryCopy, NULL, cursorOptions, - boundParams); + PlannedStmt *selectPlan = pg_plan_query(selectQueryCopy, NULL, cursorOptions, + boundParams); bool repartitioned = IsRedistributablePlan(selectPlan->planTree) && IsSupportedRedistributionTarget(targetRelationId); diff --git a/src/backend/distributed/planner/intermediate_result_pruning.c b/src/backend/distributed/planner/intermediate_result_pruning.c index 94372f4e8..76aba8321 100644 --- a/src/backend/distributed/planner/intermediate_result_pruning.c +++ b/src/backend/distributed/planner/intermediate_result_pruning.c @@ -376,7 +376,7 @@ RemoveLocalNodeFromWorkerList(List *workerNodeList) 
WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); if (workerNode->groupId == localGroupId) { - return list_delete_cell_compat(workerNodeList, workerNodeCell, prev); + return list_delete_cell(workerNodeList, workerNodeCell); } } diff --git a/src/backend/distributed/planner/local_plan_cache.c b/src/backend/distributed/planner/local_plan_cache.c index 9bb9e51f1..946d9fc46 100644 --- a/src/backend/distributed/planner/local_plan_cache.c +++ b/src/backend/distributed/planner/local_plan_cache.c @@ -88,7 +88,7 @@ CacheLocalPlanForShardQuery(Task *task, DistributedPlan *originalDistributedPlan LockRelationOid(rangeTableEntry->relid, lockMode); LocalPlannedStatement *localPlannedStatement = CitusMakeNode(LocalPlannedStatement); - localPlan = planner_compat(localShardQuery, 0, NULL); + localPlan = planner(localShardQuery, NULL, 0, NULL); localPlannedStatement->localPlan = localPlan; localPlannedStatement->shardId = task->anchorShardId; localPlannedStatement->localGroupId = GetLocalGroupId(); diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index b9ee05aec..502c47b7a 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -358,8 +358,8 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es) ExplainOpenGroup("PlannedStmt", "PlannedStmt", false, es); - ExplainOnePlanCompat(plan, into, es, queryString, params, NULL, &planduration, - (es->buffers ? &bufusage : NULL)); + ExplainOnePlan(plan, into, es, queryString, params, NULL, &planduration, + (es->buffers ? &bufusage : NULL)); ExplainCloseGroup("PlannedStmt", "PlannedStmt", false, es); ExplainCloseGroup("Subplan", NULL, true, es); @@ -1079,7 +1079,7 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS) INSTR_TIME_SET_CURRENT(planStart); - PlannedStmt *plan = pg_plan_query_compat(query, NULL, CURSOR_OPT_PARALLEL_OK, NULL); + PlannedStmt *plan = pg_plan_query(query, NULL, CURSOR_OPT_PARALLEL_OK, NULL); INSTR_TIME_SET_CURRENT(planDuration); INSTR_TIME_SUBTRACT(planDuration, planStart); @@ -1201,7 +1201,7 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, SetLocalHideCitusDependentObjectsDisabledWhenAlreadyEnabled(); /* plan the query */ - PlannedStmt *plan = pg_plan_query_compat(query, NULL, cursorOptions, params); + PlannedStmt *plan = pg_plan_query(query, NULL, cursorOptions, params); INSTR_TIME_SET_CURRENT(planduration); INSTR_TIME_SUBTRACT(planduration, planstart); @@ -1213,8 +1213,8 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, } /* run it (if needed) and produce output */ - ExplainOnePlanCompat(plan, into, es, queryString, params, queryEnv, - &planduration, (es->buffers ? &bufusage : NULL)); + ExplainOnePlan(plan, into, es, queryString, params, queryEnv, + &planduration, (es->buffers ? &bufusage : NULL)); } @@ -1632,7 +1632,7 @@ ExplainOneQuery(Query *query, int cursorOptions, INSTR_TIME_SET_CURRENT(planstart); /* plan the query */ - PlannedStmt *plan = pg_plan_query_compat(query, NULL, cursorOptions, params); + PlannedStmt *plan = pg_plan_query(query, NULL, cursorOptions, params); INSTR_TIME_SET_CURRENT(planduration); INSTR_TIME_SUBTRACT(planduration, planstart); @@ -1645,7 +1645,7 @@ ExplainOneQuery(Query *query, int cursorOptions, } /* run it (if needed) and produce output */ - ExplainOnePlanCompat(plan, into, es, queryString, params, queryEnv, + ExplainOnePlan(plan, into, es, queryString, params, queryEnv, &planduration, (es->buffers ? 
&bufusage : NULL)); } } diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index dec3fee72..19b4aea4d 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -543,7 +543,7 @@ OrSelectClauseList(List *selectClauseList) Node *selectClause = NULL; foreach_ptr(selectClause, selectClauseList) { - bool orClause = or_clause(selectClause); + bool orClause = is_orclause(selectClause); if (orClause) { orSelectClauseList = lappend(orSelectClauseList, selectClause); @@ -3674,9 +3674,9 @@ CoordCombineAggOid() static Oid TypeOid(Oid schemaId, const char *typeName) { - Oid typeOid = GetSysCacheOid2Compat(TYPENAMENSP, Anum_pg_type_oid, - PointerGetDatum(typeName), - ObjectIdGetDatum(schemaId)); + Oid typeOid = GetSysCacheOid2(TYPENAMENSP, Anum_pg_type_oid, + PointerGetDatum(typeName), + ObjectIdGetDatum(schemaId)); return typeOid; } diff --git a/src/backend/distributed/planner/multi_logical_planner.c b/src/backend/distributed/planner/multi_logical_planner.c index 857327742..7e665b567 100644 --- a/src/backend/distributed/planner/multi_logical_planner.c +++ b/src/backend/distributed/planner/multi_logical_planner.c @@ -1236,7 +1236,7 @@ DeferErrorIfUnsupportedClause(List *clauseList) { Node *clause = (Node *) lfirst(clauseCell); - if (!(IsSelectClause(clause) || IsJoinClause(clause) || or_clause(clause))) + if (!(IsSelectClause(clause) || IsJoinClause(clause) || is_orclause(clause))) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "unsupported clause type", NULL, NULL); diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index e84c821fa..17983c73c 100644 --- a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -1214,7 +1214,7 @@ CreateDistributedSubPlan(uint32 subPlanId, Query *subPlanQuery) } DistributedSubPlan *subPlan = CitusMakeNode(DistributedSubPlan); - subPlan->plan = planner_compat(subPlanQuery, cursorOptions, NULL); + subPlan->plan = planner(subPlanQuery, NULL, cursorOptions, NULL); subPlan->subPlanId = subPlanId; return subPlan; diff --git a/src/backend/distributed/progress/multi_progress.c b/src/backend/distributed/progress/multi_progress.c index abe388eb2..9b9c2faa6 100644 --- a/src/backend/distributed/progress/multi_progress.c +++ b/src/backend/distributed/progress/multi_progress.c @@ -165,7 +165,7 @@ ProgressMonitorList(uint64 commandTypeMagicNumber, List **attachedDSMSegments) getProgressInfoFunctionOid, commandTypeDatum); - TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlot( progressResultSet->setDesc, &TTSOpsMinimalTuple); diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index d24a1c30c..2919abd97 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -1646,7 +1646,7 @@ WaitForRelationSubscriptionsBecomeReady(MultiConnection *targetConnection, * and reset it on every iteration to make sure we don't slowly build up * a lot of memory. 
*/ - MemoryContext loopContext = AllocSetContextCreateExtended(CurrentMemoryContext, + MemoryContext loopContext = AllocSetContextCreateInternal(CurrentMemoryContext, "WaitForRelationSubscriptionsBecomeReady", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, @@ -1928,7 +1928,7 @@ WaitForShardMoveSubscription(MultiConnection *targetConnection, XLogRecPtr sourc * and reset it on every iteration to make sure we don't slowly build up * a lot of memory. */ - MemoryContext loopContext = AllocSetContextCreateExtended(CurrentMemoryContext, + MemoryContext loopContext = AllocSetContextCreateInternal(CurrentMemoryContext, "WaitForShardMoveSubscription", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, diff --git a/src/backend/distributed/test/distributed_intermediate_results.c b/src/backend/distributed/test/distributed_intermediate_results.c index 660c35709..c3b286f52 100644 --- a/src/backend/distributed/test/distributed_intermediate_results.c +++ b/src/backend/distributed/test/distributed_intermediate_results.c @@ -51,10 +51,10 @@ partition_task_list_results(PG_FUNCTION_ARGS) bool binaryFormat = PG_GETARG_BOOL(3); Query *parsedQuery = ParseQueryString(queryString, NULL, 0); - PlannedStmt *queryPlan = pg_plan_query_compat(parsedQuery, - queryString, - CURSOR_OPT_PARALLEL_OK, - NULL); + PlannedStmt *queryPlan = pg_plan_query(parsedQuery, + queryString, + CURSOR_OPT_PARALLEL_OK, + NULL); if (!IsCitusCustomScan(queryPlan->planTree)) { ereport(ERROR, (errmsg("query must be distributed and shouldn't require " @@ -122,10 +122,10 @@ redistribute_task_list_results(PG_FUNCTION_ARGS) bool binaryFormat = PG_GETARG_BOOL(3); Query *parsedQuery = ParseQueryString(queryString, NULL, 0); - PlannedStmt *queryPlan = pg_plan_query_compat(parsedQuery, - queryString, - CURSOR_OPT_PARALLEL_OK, - NULL); + PlannedStmt *queryPlan = pg_plan_query(parsedQuery, + queryString, + CURSOR_OPT_PARALLEL_OK, + NULL); if (!IsCitusCustomScan(queryPlan->planTree)) { ereport(ERROR, (errmsg("query must be distributed and shouldn't require " diff --git a/src/backend/distributed/test/foreign_key_relationship_query.c b/src/backend/distributed/test/foreign_key_relationship_query.c index ab23e4faa..e83e95bac 100644 --- a/src/backend/distributed/test/foreign_key_relationship_query.c +++ b/src/backend/distributed/test/foreign_key_relationship_query.c @@ -117,7 +117,7 @@ get_referencing_relation_id_list(PG_FUNCTION_ARGS) { Oid refId = lfirst_oid(wrapper->listCell); - wrapper->listCell = lnext_compat(wrapper->list, wrapper->listCell); + wrapper->listCell = lnext(wrapper->list, wrapper->listCell); SRF_RETURN_NEXT(functionContext, PointerGetDatum(refId)); } @@ -176,7 +176,7 @@ get_referenced_relation_id_list(PG_FUNCTION_ARGS) { Oid refId = lfirst_oid(wrapper->listCell); - wrapper->listCell = lnext_compat(wrapper->list, wrapper->listCell); + wrapper->listCell = lnext(wrapper->list, wrapper->listCell); SRF_RETURN_NEXT(functionContext, PointerGetDatum(refId)); } diff --git a/src/backend/distributed/test/foreign_key_to_reference_table_rebalance.c b/src/backend/distributed/test/foreign_key_to_reference_table_rebalance.c index 941c3ad6a..80c6fe338 100644 --- a/src/backend/distributed/test/foreign_key_to_reference_table_rebalance.c +++ b/src/backend/distributed/test/foreign_key_to_reference_table_rebalance.c @@ -76,7 +76,7 @@ get_foreign_key_to_reference_table_commands(PG_FUNCTION_ARGS) char *command = (char *) lfirst(wrapper->listCell); text *commandText = cstring_to_text(command); - wrapper->listCell = lnext_compat(wrapper->list, 
wrapper->listCell); + wrapper->listCell = lnext(wrapper->list, wrapper->listCell); SRF_RETURN_NEXT(functionContext, PointerGetDatum(commandText)); } diff --git a/src/backend/distributed/transaction/relation_access_tracking.c b/src/backend/distributed/transaction/relation_access_tracking.c index f69de6f8a..a6a8ba5f6 100644 --- a/src/backend/distributed/transaction/relation_access_tracking.c +++ b/src/backend/distributed/transaction/relation_access_tracking.c @@ -155,7 +155,7 @@ AllocateRelationAccessHash(void) * management. Doing so, instead of allocating in TopMemoryContext, makes * it easier to associate used memory. */ - RelationAcessContext = AllocSetContextCreateExtended(TopMemoryContext, + RelationAcessContext = AllocSetContextCreateInternal(TopMemoryContext, "Relation Access Context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 0778a34fa..d3f41273d 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -244,7 +244,7 @@ InitializeTransactionManagement(void) AdjustMaxPreparedTransactions(); /* set aside 8kb of memory for use in CoordinatedTransactionCallback */ - CommitContext = AllocSetContextCreateExtended(TopMemoryContext, + CommitContext = AllocSetContextCreateInternal(TopMemoryContext, "CommitContext", 8 * 1024, 8 * 1024, diff --git a/src/backend/distributed/transaction/transaction_recovery.c b/src/backend/distributed/transaction/transaction_recovery.c index 87809c7b5..c2ccd2478 100644 --- a/src/backend/distributed/transaction/transaction_recovery.c +++ b/src/backend/distributed/transaction/transaction_recovery.c @@ -165,7 +165,7 @@ RecoverWorkerTransactions(WorkerNode *workerNode) return 0; } - MemoryContext localContext = AllocSetContextCreateExtended(CurrentMemoryContext, + MemoryContext localContext = AllocSetContextCreateInternal(CurrentMemoryContext, "RecoverWorkerTransactions", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, diff --git a/src/backend/distributed/utils/foreign_key_relationship.c b/src/backend/distributed/utils/foreign_key_relationship.c index f10a0dc7a..bfffe9e9e 100644 --- a/src/backend/distributed/utils/foreign_key_relationship.c +++ b/src/backend/distributed/utils/foreign_key_relationship.c @@ -324,7 +324,7 @@ CreateForeignConstraintRelationshipGraph() ClearForeignConstraintRelationshipGraphContext(); - MemoryContext fConstraintRelationshipMemoryContext = AllocSetContextCreateExtended( + MemoryContext fConstraintRelationshipMemoryContext = AllocSetContextCreateInternal( CacheMemoryContext, "Forign Constraint Relationship Graph Context", ALLOCSET_DEFAULT_MINSIZE, diff --git a/src/include/distributed/commands/multi_copy.h b/src/include/distributed/commands/multi_copy.h index 8e055a6b7..13d589a3a 100644 --- a/src/include/distributed/commands/multi_copy.h +++ b/src/include/distributed/commands/multi_copy.h @@ -176,7 +176,7 @@ extern void AppendCopyBinaryFooters(CopyOutState footerOutputState); extern void EndRemoteCopy(int64 shardId, List *connectionList); extern List * CreateRangeTable(Relation rel, AclMode requiredAccess); extern Node * ProcessCopyStmt(CopyStmt *copyStatement, - QueryCompletionCompat *completionTag, + QueryCompletion *completionTag, const char *queryString); extern void CheckCopyPermissions(CopyStmt *copyStatement); extern bool IsCopyResultStmt(CopyStmt *copyStatement); diff --git 
a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 1dd37ce1e..b0b55a2cd 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -84,13 +84,13 @@ extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString, #endif ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, - QueryCompletionCompat *completionTag + QueryCompletion *completionTag ); extern void ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, - QueryCompletionCompat *completionTag + QueryCompletion *completionTag ); extern void MarkInvalidateForeignKeyGraph(void); extern void InvalidateForeignKeyGraphForDDL(void); diff --git a/src/include/distributed/listutils.h b/src/include/distributed/listutils.h index aa6a0e96b..833c77d22 100644 --- a/src/include/distributed/listutils.h +++ b/src/include/distributed/listutils.h @@ -52,7 +52,7 @@ typedef struct ListCellAndListWrapper for (ListCell *(var ## CellDoNotUse) = list_head(l); \ (var ## CellDoNotUse) != NULL && \ (((var) = lfirst(var ## CellDoNotUse)) || true); \ - var ## CellDoNotUse = lnext_compat(l, var ## CellDoNotUse)) + var ## CellDoNotUse = lnext(l, var ## CellDoNotUse)) /* @@ -65,7 +65,7 @@ typedef struct ListCellAndListWrapper for (ListCell *(var ## CellDoNotUse) = list_head(l); \ (var ## CellDoNotUse) != NULL && \ (((var) = lfirst_int(var ## CellDoNotUse)) || true); \ - var ## CellDoNotUse = lnext_compat(l, var ## CellDoNotUse)) + var ## CellDoNotUse = lnext(l, var ## CellDoNotUse)) /* @@ -78,7 +78,7 @@ typedef struct ListCellAndListWrapper for (ListCell *(var ## CellDoNotUse) = list_head(l); \ (var ## CellDoNotUse) != NULL && \ (((var) = lfirst_oid(var ## CellDoNotUse)) || true); \ - var ## CellDoNotUse = lnext_compat(l, var ## CellDoNotUse)) + var ## CellDoNotUse = lnext(l, var ## CellDoNotUse)) /* * forboth_ptr - @@ -93,8 +93,8 @@ typedef struct ListCellAndListWrapper (var2 ## CellDoNotUse) != NULL && \ (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \ (((var2) = lfirst(var2 ## CellDoNotUse)) || true); \ - var1 ## CellDoNotUse = lnext_compat(l1, var1 ## CellDoNotUse), \ - var2 ## CellDoNotUse = lnext_compat(l2, var2 ## CellDoNotUse) \ + var1 ## CellDoNotUse = lnext(l1, var1 ## CellDoNotUse), \ + var2 ## CellDoNotUse = lnext(l2, var2 ## CellDoNotUse) \ ) /* @@ -111,8 +111,8 @@ typedef struct ListCellAndListWrapper (var2 ## CellDoNotUse) != NULL && \ (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \ (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \ - var1 ## CellDoNotUse = lnext_compat(l1, var1 ## CellDoNotUse), \ - var2 ## CellDoNotUse = lnext_compat(l2, var2 ## CellDoNotUse) \ + var1 ## CellDoNotUse = lnext(l1, var1 ## CellDoNotUse), \ + var2 ## CellDoNotUse = lnext(l2, var2 ## CellDoNotUse) \ ) /* @@ -129,8 +129,8 @@ typedef struct ListCellAndListWrapper (var2 ## CellDoNotUse) != NULL && \ (((var1) = lfirst_int(var1 ## CellDoNotUse)) || true) && \ (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \ - var1 ## CellDoNotUse = lnext_compat(l1, var1 ## CellDoNotUse), \ - var2 ## CellDoNotUse = lnext_compat(l2, var2 ## CellDoNotUse) \ + var1 ## CellDoNotUse = lnext(l1, var1 ## CellDoNotUse), \ + var2 ## CellDoNotUse = lnext(l2, var2 ## CellDoNotUse) \ ) /* diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index 8fa1f1814..2e242cfe1 100644 --- 
a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -123,37 +123,12 @@ RelationGetSmgr(Relation rel) #define ROLE_PG_READ_ALL_STATS DEFAULT_ROLE_READ_ALL_STATS #endif -#define lnext_compat(l, r) lnext(l, r) -#define list_delete_cell_compat(l, c, p) list_delete_cell(l, c) -#define pg_plan_query_compat(p, q, c, b) pg_plan_query(p, q, c, b) -#define planner_compat(p, c, b) planner(p, NULL, c, b) -#define standard_planner_compat(a, c, d) standard_planner(a, NULL, c, d) -#define GetSequencesOwnedByRelation(a) getOwnedSequences(a) -#define GetSequencesOwnedByColumn(a, b) getOwnedSequences_internal(a, b, 0) -#define CMDTAG_SELECT_COMPAT CMDTAG_SELECT -#define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) \ - ExplainOnePlan(a, b, c, d, e, f, g, h) #define SetListCellPtr(a, b) ((a)->ptr_value = (b)) #define RangeTableEntryFromNSItem(a) ((a)->p_rte) -#define QueryCompletionCompat QueryCompletion - -#define CreateTableSlotForRel(rel) table_slot_create(rel, NULL) -#define MakeSingleTupleTableSlotCompat MakeSingleTupleTableSlot -#define AllocSetContextCreateExtended AllocSetContextCreateInternal -#define NextCopyFromCompat NextCopyFrom -#define ArrayRef SubscriptingRef -#define T_ArrayRef T_SubscriptingRef -#define or_clause is_orclause -#define GetSysCacheOid1Compat GetSysCacheOid1 -#define GetSysCacheOid2Compat GetSysCacheOid2 -#define GetSysCacheOid3Compat GetSysCacheOid3 -#define GetSysCacheOid4Compat GetSysCacheOid4 - #define fcGetArgValue(fc, n) ((fc)->args[n].value) #define fcGetArgNull(fc, n) ((fc)->args[n].isnull) #define fcSetArgExt(fc, n, val, is_null) \ (((fc)->args[n].isnull = (is_null)), ((fc)->args[n].value = (val))) - #define fcSetArg(fc, n, value) fcSetArgExt(fc, n, value, false) #define fcSetArgNull(fc, n) fcSetArgExt(fc, n, (Datum) 0, true) From bdaeb40b51b150b0144e7e8681bc02ce20d05eb3 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Thu, 28 Jul 2022 11:35:11 +0200 Subject: [PATCH 25/38] Add missing relation access record for local utility command While testing 5670dffd338bec21372a75da40938b9494f5edc1, I realized that we have a missing RecordNonDistTableAccessesForTask() for local utility commands. Although we don't have to record the relation access for local-only cases, we really want to keep the scale-out behaviour the same as single node in all aspects. We wouldn't want a complex transaction to work on a single machine but fail on a multi-node cluster. Hence, we apply the same restrictions. For example, on a distributed cluster the following errors, and after this commit it errors locally as well: ```SQL CREATE TABLE ref(a int primary key); INSERT INTO ref VALUES (1); CREATE TABLE dist(a int REFERENCES ref(a)); SELECT create_reference_table('ref'); SELECT create_distributed_table('dist', 'a'); BEGIN; SELECT * FROM dist; TRUNCATE ref CASCADE; ERROR: cannot execute DDL on table "ref" because there was a parallel SELECT access to distributed table "dist" in the same transaction HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" COMMIT; ``` We also add a comprehensive test suite and run the same tests locally.
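For reference, the workaround suggested in the HINT makes the same transaction succeed on both single-node and multi-node clusters (a sketch reusing the ref/dist tables from the example above; it is not part of this patch): ```SQL BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SELECT * FROM dist; TRUNCATE ref CASCADE; COMMIT; ``` With sequential mode set before the SELECT, the shard accesses are recorded as sequential, so the later DDL on "ref" is allowed.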
--- .../distributed/executor/local_executor.c | 3 + .../expected/citus_local_tables_queries.out | 3 +- .../create_citus_local_table_cascade.out | 12 +- .../relation_access_tracking_single_node.out | 1090 +++++++++++++++++ src/test/regress/multi_1_schedule | 1 + .../sql/create_citus_local_table_cascade.sql | 4 +- .../relation_access_tracking_single_node.sql | 596 +++++++++ 7 files changed, 1697 insertions(+), 12 deletions(-) create mode 100644 src/test/regress/expected/relation_access_tracking_single_node.out create mode 100644 src/test/regress/sql/relation_access_tracking_single_node.sql diff --git a/src/backend/distributed/executor/local_executor.c b/src/backend/distributed/executor/local_executor.c index 48712edd6..d0f5b1fa9 100644 --- a/src/backend/distributed/executor/local_executor.c +++ b/src/backend/distributed/executor/local_executor.c @@ -413,6 +413,9 @@ ExtractParametersForLocalExecution(ParamListInfo paramListInfo, Oid **parameterT static void LocallyExecuteUtilityTask(Task *task) { + /* keep the parity with multi-node clusters */ + RecordNonDistTableAccessesForTask(task); + /* * If we roll back to a savepoint, we may no longer be in a query on * a shard. Reset the value as we go back up the stack. diff --git a/src/test/regress/expected/citus_local_tables_queries.out b/src/test/regress/expected/citus_local_tables_queries.out index a6d48a7b2..4399062aa 100644 --- a/src/test/regress/expected/citus_local_tables_queries.out +++ b/src/test/regress/expected/citus_local_tables_queries.out @@ -1024,8 +1024,7 @@ BEGIN; NOTICE: truncate cascades to table "reference_table" NOTICE: truncate cascades to table "distributed_table" NOTICE: executing the command locally: TRUNCATE TABLE citus_local_table_queries.citus_local_table_xxxxx CASCADE -NOTICE: truncate cascades to table "reference_table_xxxxx" -ERROR: cannot execute DDL on table "reference_table" because there was a parallel DDL access to distributed table "distributed_table" in the same transaction +ERROR: cannot execute DDL on table "citus_local_table" because there was a parallel DDL access to distributed table "distributed_table" in the same transaction ROLLBACK; BEGIN; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; diff --git a/src/test/regress/expected/create_citus_local_table_cascade.out b/src/test/regress/expected/create_citus_local_table_cascade.out index ab84cc251..0c9fcac89 100644 --- a/src/test/regress/expected/create_citus_local_table_cascade.out +++ b/src/test/regress/expected/create_citus_local_table_cascade.out @@ -60,11 +60,11 @@ BEGIN; (1 row) - -- show that we do parallel execution + -- show that we do sequential execution show citus.multi_shard_modify_mode; citus.multi_shard_modify_mode --------------------------------------------------------------------- - parallel + sequential (1 row) SELECT conname, conrelid::regclass::text, confrelid::regclass::text @@ -196,14 +196,10 @@ BEGIN; --------------------------------------------------------------------- (0 rows) - -- succeeds as citus_add_local_table_to_metadata would also prefer parallel + -- fails as citus_add_local_table_to_metadata would require sequential execution -- execution like above select SELECT citus_add_local_table_to_metadata('local_table_4', cascade_via_foreign_keys=>true); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - +ERROR: cannot modify table "local_table_3" because there was a parallel operation on a distributed table ROLLBACK; BEGIN; set citus.multi_shard_modify_mode 
to 'sequential'; diff --git a/src/test/regress/expected/relation_access_tracking_single_node.out b/src/test/regress/expected/relation_access_tracking_single_node.out new file mode 100644 index 000000000..4505c8da9 --- /dev/null +++ b/src/test/regress/expected/relation_access_tracking_single_node.out @@ -0,0 +1,1090 @@ +--------------------------------------------------------------------- +--- tests around access tracking within transaction blocks +--------------------------------------------------------------------- +CREATE SCHEMA access_tracking; +SET search_path TO 'access_tracking'; +-- idempotently add node to allow this test to run without add_coordinator +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 90930500; +CREATE OR REPLACE FUNCTION relation_select_access_mode(relationId Oid) + RETURNS int + LANGUAGE C STABLE STRICT + AS 'citus', $$relation_select_access_mode$$; +CREATE OR REPLACE FUNCTION relation_dml_access_mode(relationId Oid) + RETURNS int + LANGUAGE C STABLE STRICT + AS 'citus', $$relation_dml_access_mode$$; +CREATE OR REPLACE FUNCTION relation_ddl_access_mode(relationId Oid) + RETURNS int + LANGUAGE C STABLE STRICT + AS 'citus', $$relation_ddl_access_mode$$; +CREATE OR REPLACE FUNCTION distributed_relation(relation_name text) +RETURNS bool AS +$$ +DECLARE + part_method char; +BEGIN + select partmethod INTO part_method from pg_dist_partition WHERE logicalrelid = relation_name::regclass; + IF part_method = 'h' THEN + RETURN true; + ELSE + RETURN false; + END IF; +END; +$$ LANGUAGE 'plpgsql' IMMUTABLE; +CREATE OR REPLACE FUNCTION relation_access_mode_to_text(relation_name text, relationShardAccess int) +RETURNS text AS +$$ +BEGIN + IF relationShardAccess = 0 and distributed_relation(relation_name) THEN + RETURN 'not_parallel_accessed'; + ELSIF relationShardAccess = 0 and NOT distributed_relation(relation_name) THEN + RETURN 'not_accessed'; + ELSIF relationShardAccess = 1 THEN + RETURN 'reference_table_access'; + ELSE + RETURN 'parallel_access'; + END IF; +END; +$$ LANGUAGE 'plpgsql' IMMUTABLE; +CREATE VIEW relation_accesses AS + SELECT table_name, + relation_access_mode_to_text(table_name, relation_select_access_mode(table_name::regclass)) as select_access, + relation_access_mode_to_text(table_name, relation_dml_access_mode(table_name::regclass)) as dml_access, + relation_access_mode_to_text(table_name, relation_ddl_access_mode(table_name::regclass)) as ddl_access + FROM + ((SELECT 'table_' || i as table_name FROM generate_series(1, 7) i) UNION (SELECT 'partitioning_test') UNION (SELECT 'partitioning_test_2009') UNION (SELECT 'partitioning_test_2010')) tables; +SET citus.shard_replication_factor TO 1; +CREATE TABLE table_1 (key int, value int); +SELECT create_distributed_table('table_1', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE table_2 (key int, value int); +SELECT create_distributed_table('table_2', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + 
+CREATE TABLE table_3 (key int, value int); +SELECT create_distributed_table('table_3', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE table_4 (key int, value int); +SELECT create_distributed_table('table_4', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE table_5 (key int, value int); +SELECT create_distributed_table('table_5', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE table_6 (key int, value int); +SELECT create_reference_Table('table_6'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO table_1 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_2 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_4 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_5 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_6 SELECT i, i FROM generate_series(0,100) i; +-- create_distributed_table works fine +BEGIN; + CREATE TABLE table_7 (key int, value int); + SELECT create_distributed_table('table_7', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_7') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_7 | not_parallel_accessed | not_parallel_accessed | parallel_access +(1 row) + +COMMIT; +-- outside the transaction blocks, the function always returns zero +SELECT count(*) FROM table_1; + count +--------------------------------------------------------------------- + 101 +(1 row) + +SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + +-- a very simple test that first checks sequential +-- and parallel SELECTs,DMLs, and DDLs +BEGIN; + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + + SELECT count(*) FROM table_1 WHERE key = 1; + count +--------------------------------------------------------------------- + 1 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + + SELECT count(*) FROM table_1 WHERE key = 1 OR key = 2; + count +--------------------------------------------------------------------- + 2 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed +(1 row) + + INSERT INTO table_1 VALUES (1,1); + SELECT * FROM relation_accesses WHERE 
table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed +(1 row) + + INSERT INTO table_1 VALUES (1,1), (2,2); + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed +(1 row) + + ALTER TABLE table_1 ADD COLUMN test_col INT; + -- now see that the other tables are not accessed at all + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | parallel_access +(1 row) + +ROLLBACK; +-- this test shows that even if multiple single shard +-- commands are executed, we can treat the transaction as sequential +BEGIN; + SELECT count(*) FROM table_1 WHERE key = 1; + count +--------------------------------------------------------------------- + 1 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + + SELECT count(*) FROM table_1 WHERE key = 2; + count +--------------------------------------------------------------------- + 1 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + + INSERT INTO table_1 VALUES (1,1); + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + + INSERT INTO table_1 VALUES (2,2); + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + +ROLLBACK; +-- a sample DDL example +BEGIN; + ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access +(1 row) + +ROLLBACK; +-- a simple join touches single shard per table +BEGIN; + SELECT + count(*) + FROM + table_1, table_2, table_3, table_4, table_5 + WHERE + table_1.key = table_2.key AND table_2.key = table_3.key AND + table_3.key = table_4.key AND table_4.key = table_5.key AND + table_1.key = 1; + count +--------------------------------------------------------------------- + 1 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name LIKE 'table_%' ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | 
not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_3 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_4 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_5 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_6 | not_accessed | not_accessed | not_accessed + table_7 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(7 rows) + +ROLLBACK; +-- a simple real-time join touches all shards per table +BEGIN; + SELECT + count(*) + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key; + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed +(2 rows) + +ROLLBACK; +-- a simple real-time join touches all shards per table +-- in sequential mode +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + SELECT + count(*) + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key; + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(2 rows) + +ROLLBACK; +-- a simple subquery pushdown that touches all shards +BEGIN; + SELECT + count(*) + FROM + ( + SELECT + random() + FROM + table_1, table_2, table_3, table_4, table_5 + WHERE + table_1.key = table_2.key AND table_2.key = table_3.key AND + table_3.key = table_4.key AND table_4.key = table_5.key + ) as foo; + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name LIKE 'table_%' ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_3 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_4 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_5 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_6 | not_accessed | not_accessed | not_accessed + table_7 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(7 rows) + +ROLLBACK; +-- simple multi shard update both sequential and parallel modes +-- note that in multi shard modify mode we always add select +-- access for all the shards accessed. 
But, sequential mode is OK +BEGIN; + UPDATE table_1 SET value = 15; + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | parallel_access | not_parallel_accessed +(1 row) + + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + UPDATE table_2 SET value = 15; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | parallel_access | not_parallel_accessed + table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(2 rows) + +ROLLBACK; +-- now UPDATE/DELETE with subselect pushdown +BEGIN; + UPDATE + table_1 SET value = 15 + WHERE key IN (SELECT key FROM table_2 JOIN table_3 USING (key) WHERE table_2.value = 15); + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | parallel_access | not_parallel_accessed + table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_3 | parallel_access | not_parallel_accessed | not_parallel_accessed +(3 rows) + +ROLLBACK; +-- INSERT .. SELECT pushdown +BEGIN; + INSERT INTO table_2 SELECT * FROM table_1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_2 | not_parallel_accessed | parallel_access | not_parallel_accessed +(2 rows) + +ROLLBACK; +-- INSERT .. SELECT pushdown in sequential mode should be OK +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + INSERT INTO table_2 SELECT * FROM table_1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(2 rows) + +ROLLBACK; +-- coordinator INSERT .. 
SELECT +BEGIN; + -- We use offset 1 to make sure the result needs to be pulled to the coordinator, offset 0 would be optimized away + INSERT INTO table_2 SELECT * FROM table_1 OFFSET 1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_2 | not_parallel_accessed | parallel_access | not_parallel_accessed +(2 rows) + +ROLLBACK; +-- recursively planned SELECT +BEGIN; + SELECT + count(*) + FROM + ( + SELECT + random() + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key + OFFSET 0 + ) as foo; + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed +(2 rows) + +ROLLBACK; +-- recursively planned SELECT and coordinator INSERT .. SELECT +BEGIN; + INSERT INTO table_3 (key) + SELECT + * + FROM + ( + SELECT + random() * 1000 + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key + OFFSET 0 + ) as foo; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_3 | not_parallel_accessed | parallel_access | not_parallel_accessed +(3 rows) + +ROLLBACK; +-- recursively planned SELECT and coordinator INSERT .. 
SELECT +-- but modifies single shard, marked as sequential operation +BEGIN; + INSERT INTO table_3 (key) + SELECT + * + FROM + ( + SELECT + random() * 1000 + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key + AND table_1.key = 1 + OFFSET 0 + ) as foo; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_3 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +ROLLBACK; +-- recursively planned SELECT and recursively planned multi-shard DELETE +BEGIN; + DELETE FROM table_3 where key IN + ( + SELECT + * + FROM + ( + SELECT + table_1.key + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key + OFFSET 0 + ) as foo + ) AND value IN (SELECT key FROM table_4); + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2', 'table_3', 'table_4') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_2 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_3 | parallel_access | parallel_access | not_parallel_accessed + table_4 | parallel_access | not_parallel_accessed | not_parallel_accessed +(4 rows) + +ROLLBACK; +-- copy out +BEGIN; + COPY (SELECT * FROM table_1 WHERE key IN (1,2,3) ORDER BY 1) TO stdout; +1 1 +2 2 +3 3 + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed +(1 row) + +ROLLBACK; +-- copy in +BEGIN; + COPY table_1 FROM STDIN WITH CSV; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | parallel_access | not_parallel_accessed +(1 row) + +ROLLBACK; +-- copy in single shard +BEGIN; + COPY table_1 FROM STDIN WITH CSV; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + +ROLLBACK; +-- reference table accesses should always be sequential +BEGIN; + SELECT count(*) FROM table_6; + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_6'); + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_6 | reference_table_access | not_accessed | not_accessed +(1 row) + + UPDATE table_6 SET value = 15; + SELECT * FROM relation_accesses WHERE table_name IN ('table_6'); + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_6 | reference_table_access | reference_table_access | not_accessed +(1 row) + + ALTER TABLE 
table_6 ADD COLUMN x INT; + SELECT * FROM relation_accesses WHERE table_name IN ('table_6'); + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_6 | reference_table_access | reference_table_access | reference_table_access +(1 row) + +ROLLBACK; +-- reference table join with a distributed table +BEGIN; + SELECT count(*) FROM table_1 JOIN table_6 USING(key); + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_6', 'table_1') ORDER BY 1,2; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed + table_6 | parallel_access | not_accessed | not_accessed +(2 rows) + +ROLLBACK; +-- TRUNCATE should be DDL +BEGIN; + TRUNCATE table_1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access +(1 row) + +ROLLBACK; +-- TRUNCATE can be a sequential DDL +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + TRUNCATE table_1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + +ROLLBACK; +-- TRUNCATE on a reference table should be sequential +BEGIN; + TRUNCATE table_6; + SELECT * FROM relation_accesses WHERE table_name IN ('table_6') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_6 | not_accessed | not_accessed | reference_table_access +(1 row) + +ROLLBACK; +-- creating foreign keys should consider adding the placement accesses for the referenced table +ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); +BEGIN; + ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access + table_2 | not_parallel_accessed | not_parallel_accessed | parallel_access +(2 rows) + +ROLLBACK; +-- creating foreign keys should consider adding the placement accesses for the referenced table +-- in sequential mode as well +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + table_2 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(2 rows) + +ROLLBACK; +CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); +SELECT create_distributed_table('partitioning_test', 'id'); + 
create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Adding partition tables via CREATE TABLE should record DDL access on the partitioned table as well +BEGIN; + CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access +(2 rows) + +ROLLBACK; +-- Adding partition tables via ATTACH PARTITION on local tables should record DDL access on the partitioned table as well +CREATE TABLE partitioning_test_2009 AS SELECT * FROM partitioning_test; +BEGIN; + ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access +(2 rows) + +COMMIT; +-- Adding partition tables via ATTACH PARTITION on distributed tables should record DDL access on the partitioned table as well +CREATE TABLE partitioning_test_2010 AS SELECT * FROM partitioning_test; +SELECT create_distributed_table('partitioning_test_2010', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +BEGIN; + ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | parallel_access +(2 rows) + +COMMIT; +-- reading from partitioned table marks all of its partitions +BEGIN; + SELECT count(*) FROM partitioning_test; + count +--------------------------------------------------------------------- + 0 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | parallel_access | not_parallel_accessed | not_parallel_accessed + partitioning_test_2009 | parallel_access | not_parallel_accessed | not_parallel_accessed + partitioning_test_2010 | parallel_access | not_parallel_accessed | not_parallel_accessed +(3 rows) + +COMMIT; +-- reading from partitioned table sequentially marks all of its partitions with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + SELECT count(*) FROM partitioning_test; + count +--------------------------------------------------------------------- + 0 +(1 row) + + SELECT * 
FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +COMMIT; +-- updating partitioned table marks all of its partitions +BEGIN; + UPDATE partitioning_test SET time = now(); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | parallel_access | parallel_access | not_parallel_accessed + partitioning_test_2009 | parallel_access | parallel_access | not_parallel_accessed + partitioning_test_2010 | parallel_access | parallel_access | not_parallel_accessed +(3 rows) + +COMMIT; +-- updating partitioned table sequentially marks all of its partitions with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + UPDATE partitioning_test SET time = now(); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +COMMIT; +-- DDLs on partitioned table mark all of its partitions +BEGIN; + ALTER TABLE partitioning_test ADD COLUMN X INT; + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | parallel_access +(3 rows) + +ROLLBACK; +-- DDLs on partitioned table sequentially mark all of its partitions with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + ALTER TABLE partitioning_test ADD COLUMN X INT; + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +ROLLBACK; +-- reading from partition table marks its parent +BEGIN; + SELECT count(*) FROM 
partitioning_test_2009; + count +--------------------------------------------------------------------- + 0 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | parallel_access | not_parallel_accessed | not_parallel_accessed + partitioning_test_2009 | parallel_access | not_parallel_accessed | not_parallel_accessed + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +COMMIT; +-- reading from partition table marks its parent with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + SELECT count(*) FROM partitioning_test_2009; + count +--------------------------------------------------------------------- + 0 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +COMMIT; +-- updating from partition table marks its parent +BEGIN; + UPDATE partitioning_test_2009 SET time = now(); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | parallel_access | parallel_access | not_parallel_accessed + partitioning_test_2009 | parallel_access | parallel_access | not_parallel_accessed + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +COMMIT; +-- updating from partition table marks its parent with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + UPDATE partitioning_test_2009 SET time = now(); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +COMMIT; +-- DDLs on partition table mark its parent +BEGIN; + CREATE INDEX i1000000 ON partitioning_test_2009 (id); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | parallel_access + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | parallel_access + partitioning_test_2010 | 
not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +ROLLBACK; +-- DDLs on partition table mark its parent in sequential mode +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + CREATE INDEX i1000000 ON partitioning_test_2009 (id); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + partitioning_test | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2009 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed + partitioning_test_2010 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(3 rows) + +ROLLBACK; +-- TRUNCATE CASCADE works fine +ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); +BEGIN; + TRUNCATE table_1 CASCADE; +NOTICE: truncate cascades to table "table_2" +NOTICE: truncate cascades to table "table_2_xxxxx" +NOTICE: truncate cascades to table "table_2_xxxxx" +NOTICE: truncate cascades to table "table_2_xxxxx" +NOTICE: truncate cascades to table "table_2_xxxxx" + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | parallel_access + table_2 | not_parallel_accessed | not_parallel_accessed | parallel_access +(2 rows) + +ROLLBACK; +-- CTEs with SELECT only should work fine +BEGIN; + WITH cte AS (SELECT count(*) FROM table_1) + SELECT * FROM cte; + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | not_parallel_accessed | not_parallel_accessed +(1 row) + +COMMIT; +-- CTEs with SELECT only in sequential mode should work fine +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + WITH cte AS (SELECT count(*) FROM table_1) + SELECT * FROM cte; + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + +COMMIT; +-- modifying CTEs should work fine with multi-row inserts, which are by default in sequential mode +BEGIN; + WITH cte_1 AS (INSERT INTO table_1 VALUES (1000,1000), (1001, 1001), (1002, 1002) RETURNING *) + SELECT * FROM cte_1 ORDER BY 1; + key | value +--------------------------------------------------------------------- + 1000 | 1000 + 1001 | 1001 + 1002 | 1002 +(3 rows) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + +ROLLBACK; +-- modifying CTEs should work fine with parallel mode +BEGIN; + WITH cte_1 AS (UPDATE table_1 SET value = 
15 RETURNING *) + SELECT count(*) FROM cte_1 ORDER BY 1; + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | parallel_access | parallel_access | not_parallel_accessed +(1 row) + +ROLLBACK; +-- modifying CTEs should work fine with sequential mode +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) + SELECT count(*) FROM cte_1 ORDER BY 1; + count +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + +ROLLBACK; +-- router planned modifying CTEs should work fine with parallel mode +BEGIN; + WITH cte_1 AS (UPDATE table_1 SET value = 15 WHERE key = 6 RETURNING *) + SELECT count(*) FROM cte_1 ORDER BY 1; + count +--------------------------------------------------------------------- + 1 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + +ROLLBACK; +-- router planned modifying CTEs should work fine with sequential mode +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + WITH cte_1 AS (UPDATE table_1 SET value = 15 WHERE key = 6 RETURNING *) + SELECT count(*) FROM cte_1 ORDER BY 1; + count +--------------------------------------------------------------------- + 1 +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed +(1 row) + +ROLLBACK; +-- create distributed table with data loading +-- should mark both parallel dml and parallel ddl +DROP TABLE table_3; +CREATE TABLE table_3 (key int, value int); +INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i; +BEGIN; + SELECT create_distributed_table('table_3', 'key'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. 
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$access_tracking.table_3$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + + SELECT * FROM relation_accesses WHERE table_name IN ('table_3') ORDER BY 1; + table_name | select_access | dml_access | ddl_access +--------------------------------------------------------------------- + table_3 | not_parallel_accessed | parallel_access | parallel_access +(1 row) + +COMMIT; +SET search_path TO 'public'; +DROP SCHEMA access_tracking CASCADE; +NOTICE: drop cascades to 43 other objects +DETAIL: drop cascades to function access_tracking.relation_select_access_mode(oid) +drop cascades to function access_tracking.relation_dml_access_mode(oid) +drop cascades to function access_tracking.relation_ddl_access_mode(oid) +drop cascades to function access_tracking.distributed_relation(text) +drop cascades to function access_tracking.relation_access_mode_to_text(text,integer) +drop cascades to view access_tracking.relation_accesses +drop cascades to table access_tracking.table_1 +drop cascades to table access_tracking.table_1_90930500 +drop cascades to table access_tracking.table_1_90930501 +drop cascades to table access_tracking.table_1_90930502 +drop cascades to table access_tracking.table_1_90930503 +drop cascades to table access_tracking.table_2 +drop cascades to table access_tracking.table_2_90930504 +drop cascades to table access_tracking.table_2_90930505 +drop cascades to table access_tracking.table_2_90930506 +drop cascades to table access_tracking.table_2_90930507 +drop cascades to table access_tracking.table_4 +drop cascades to table access_tracking.table_4_90930512 +drop cascades to table access_tracking.table_4_90930513 +drop cascades to table access_tracking.table_4_90930514 +drop cascades to table access_tracking.table_4_90930515 +drop cascades to table access_tracking.table_5 +drop cascades to table access_tracking.table_5_90930516 +drop cascades to table access_tracking.table_5_90930517 +drop cascades to table access_tracking.table_5_90930518 +drop cascades to table access_tracking.table_5_90930519 +drop cascades to table access_tracking.table_6 +drop cascades to table access_tracking.table_6_90930520 +drop cascades to table access_tracking.table_7 +drop cascades to table access_tracking.table_7_90930521 +drop cascades to table access_tracking.table_7_90930522 +drop cascades to table access_tracking.table_7_90930523 +drop cascades to table access_tracking.table_7_90930524 +drop cascades to table access_tracking.partitioning_test +drop cascades to table access_tracking.partitioning_test_90930525 +drop cascades to table access_tracking.partitioning_test_90930526 +drop cascades to table access_tracking.partitioning_test_90930527 +drop cascades to table access_tracking.partitioning_test_90930528 +drop cascades to table access_tracking.table_3 +drop cascades to table access_tracking.table_3_90930541 +drop cascades to table access_tracking.table_3_90930542 +drop cascades to table access_tracking.table_3_90930543 +drop cascades to table access_tracking.table_3_90930544 diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index a2bd068ba..287205d17 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -17,6 +17,7 @@ # --- test: multi_extension test: single_node +test: relation_access_tracking_single_node test: single_node_truncate test: multi_test_helpers multi_test_helpers_superuser 
test: multi_cluster_management diff --git a/src/test/regress/sql/create_citus_local_table_cascade.sql b/src/test/regress/sql/create_citus_local_table_cascade.sql index 677f8ac0d..3b9cb85ae 100644 --- a/src/test/regress/sql/create_citus_local_table_cascade.sql +++ b/src/test/regress/sql/create_citus_local_table_cascade.sql @@ -49,7 +49,7 @@ BEGIN; SELECT citus_add_local_table_to_metadata('local_table_1', cascade_via_foreign_keys=>true); - -- show that we do parallel execution + -- show that we do sequential execution show citus.multi_shard_modify_mode; SELECT conname, conrelid::regclass::text, confrelid::regclass::text @@ -127,7 +127,7 @@ SELECT create_distributed_Table('distributed_table', 'col'); BEGIN; SELECT * FROM distributed_table; - -- succeeds as citus_add_local_table_to_metadata would also prefer parallel + -- fails as citus_add_local_table_to_metadata would require sequential execution -- execution like above select SELECT citus_add_local_table_to_metadata('local_table_4', cascade_via_foreign_keys=>true); ROLLBACK; diff --git a/src/test/regress/sql/relation_access_tracking_single_node.sql b/src/test/regress/sql/relation_access_tracking_single_node.sql new file mode 100644 index 000000000..9d1f7be3b --- /dev/null +++ b/src/test/regress/sql/relation_access_tracking_single_node.sql @@ -0,0 +1,596 @@ + +--- +--- tests around access tracking within transaction blocks +--- +CREATE SCHEMA access_tracking; +SET search_path TO 'access_tracking'; + +-- idempotently add node to allow this test to run without add_coordinator +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); +SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); +RESET client_min_messages; + +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 90930500; + +CREATE OR REPLACE FUNCTION relation_select_access_mode(relationId Oid) + RETURNS int + LANGUAGE C STABLE STRICT + AS 'citus', $$relation_select_access_mode$$; + +CREATE OR REPLACE FUNCTION relation_dml_access_mode(relationId Oid) + RETURNS int + LANGUAGE C STABLE STRICT + AS 'citus', $$relation_dml_access_mode$$; + +CREATE OR REPLACE FUNCTION relation_ddl_access_mode(relationId Oid) + RETURNS int + LANGUAGE C STABLE STRICT + AS 'citus', $$relation_ddl_access_mode$$; + +CREATE OR REPLACE FUNCTION distributed_relation(relation_name text) +RETURNS bool AS +$$ +DECLARE + part_method char; +BEGIN + select partmethod INTO part_method from pg_dist_partition WHERE logicalrelid = relation_name::regclass; + IF part_method = 'h' THEN + RETURN true; + ELSE + RETURN false; + END IF; +END; +$$ LANGUAGE 'plpgsql' IMMUTABLE; + + +CREATE OR REPLACE FUNCTION relation_access_mode_to_text(relation_name text, relationShardAccess int) +RETURNS text AS +$$ +BEGIN + IF relationShardAccess = 0 and distributed_relation(relation_name) THEN + RETURN 'not_parallel_accessed'; + ELSIF relationShardAccess = 0 and NOT distributed_relation(relation_name) THEN + RETURN 'not_accessed'; + ELSIF relationShardAccess = 1 THEN + RETURN 'reference_table_access'; + ELSE + RETURN 'parallel_access'; + END IF; +END; +$$ LANGUAGE 'plpgsql' IMMUTABLE; + + + +CREATE VIEW relation_accesses AS + SELECT table_name, + relation_access_mode_to_text(table_name, relation_select_access_mode(table_name::regclass)) as select_access, + relation_access_mode_to_text(table_name, relation_dml_access_mode(table_name::regclass)) as dml_access, + relation_access_mode_to_text(table_name, 
relation_ddl_access_mode(table_name::regclass)) as ddl_access + FROM + ((SELECT 'table_' || i as table_name FROM generate_series(1, 7) i) UNION (SELECT 'partitioning_test') UNION (SELECT 'partitioning_test_2009') UNION (SELECT 'partitioning_test_2010')) tables; + +SET citus.shard_replication_factor TO 1; +CREATE TABLE table_1 (key int, value int); +SELECT create_distributed_table('table_1', 'key'); + +CREATE TABLE table_2 (key int, value int); +SELECT create_distributed_table('table_2', 'key'); + +CREATE TABLE table_3 (key int, value int); +SELECT create_distributed_table('table_3', 'key'); + +CREATE TABLE table_4 (key int, value int); +SELECT create_distributed_table('table_4', 'key'); + +CREATE TABLE table_5 (key int, value int); +SELECT create_distributed_table('table_5', 'key'); + +CREATE TABLE table_6 (key int, value int); +SELECT create_reference_Table('table_6'); + +INSERT INTO table_1 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_2 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_4 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_5 SELECT i, i FROM generate_series(0,100) i; +INSERT INTO table_6 SELECT i, i FROM generate_series(0,100) i; + +-- create_distributed_table works fine +BEGIN; + CREATE TABLE table_7 (key int, value int); + SELECT create_distributed_table('table_7', 'key'); + SELECT * FROM relation_accesses WHERE table_name IN ('table_7') ORDER BY 1; +COMMIT; + +-- outside the transaction blocks, the function always returns zero +SELECT count(*) FROM table_1; +SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + +-- a very simple test that first checks sequential +-- and parallel SELECTs,DMLs, and DDLs +BEGIN; + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + SELECT count(*) FROM table_1 WHERE key = 1; + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + SELECT count(*) FROM table_1 WHERE key = 1 OR key = 2; + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + INSERT INTO table_1 VALUES (1,1); + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + INSERT INTO table_1 VALUES (1,1), (2,2); + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + ALTER TABLE table_1 ADD COLUMN test_col INT; + + -- now see that the other tables are not accessed at all + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + +ROLLBACK; + + +-- this test shows that even if multiple single shard +-- commands are executed, we can treat the transaction as sequential +BEGIN; + SELECT count(*) FROM table_1 WHERE key = 1; + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + SELECT count(*) FROM table_1 WHERE key = 2; + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + INSERT INTO table_1 VALUES (1,1); + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + INSERT INTO table_1 VALUES (2,2); + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; +ROLLBACK; + +-- a sample DDL example +BEGIN; + ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; +ROLLBACK; + +-- a simple join touches single shard per table +BEGIN; + SELECT + count(*) + FROM + table_1, table_2, table_3, table_4, table_5 + WHERE + table_1.key = table_2.key AND table_2.key = table_3.key AND + table_3.key = table_4.key AND table_4.key = table_5.key AND + table_1.key = 1; + + SELECT * FROM relation_accesses WHERE table_name LIKE 'table_%' ORDER 
BY 1; +ROLLBACK; + +-- a simple real-time join touches all shards per table +BEGIN; + SELECT + count(*) + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key; + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + +-- a simple real-time join touches all shards per table +-- in sequential mode +BEGIN; + + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + SELECT + count(*) + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key; + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + +-- a simple subquery pushdown that touches all shards +BEGIN; + SELECT + count(*) + FROM + ( + + SELECT + random() + FROM + table_1, table_2, table_3, table_4, table_5 + WHERE + table_1.key = table_2.key AND table_2.key = table_3.key AND + table_3.key = table_4.key AND table_4.key = table_5.key + ) as foo; + + SELECT * FROM relation_accesses WHERE table_name LIKE 'table_%' ORDER BY 1; +ROLLBACK; + +-- simple multi shard update both sequential and parallel modes +-- note that in multi shard modify mode we always add select +-- access for all the shards accessed. But, sequential mode is OK +BEGIN; + UPDATE table_1 SET value = 15; + SELECT * FROM relation_accesses WHERE table_name = 'table_1'; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + UPDATE table_2 SET value = 15; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + +-- now UPDATE/DELETE with subselect pushdown +BEGIN; + UPDATE + table_1 SET value = 15 + WHERE key IN (SELECT key FROM table_2 JOIN table_3 USING (key) WHERE table_2.value = 15); + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; +ROLLBACK; + +-- INSERT .. SELECT pushdown +BEGIN; + INSERT INTO table_2 SELECT * FROM table_1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + +-- INSERT .. SELECT pushdown in sequential mode should be OK +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + + INSERT INTO table_2 SELECT * FROM table_1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + +-- coordinator INSERT .. SELECT +BEGIN; + -- We use offset 1 to make sure the result needs to be pulled to the coordinator, offset 0 would be optimized away + INSERT INTO table_2 SELECT * FROM table_1 OFFSET 1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + + + +-- recursively planned SELECT +BEGIN; + SELECT + count(*) + FROM + ( + + SELECT + random() + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key + OFFSET 0 + ) as foo; + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + +-- recursively planned SELECT and coordinator INSERT .. SELECT +BEGIN; + INSERT INTO table_3 (key) + SELECT + * + FROM + ( + + SELECT + random() * 1000 + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key + OFFSET 0 + ) as foo; + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; +ROLLBACK; + +-- recursively planned SELECT and coordinator INSERT .. 
SELECT +-- but modifies a single shard, so it is marked as a sequential operation +BEGIN; + INSERT INTO table_3 (key) + SELECT + * + FROM + ( + + SELECT + random() * 1000 + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key + AND table_1.key = 1 + OFFSET 0 + ) as foo; + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; +ROLLBACK; + +-- recursively planned SELECT and recursively planned multi-shard DELETE +BEGIN; + DELETE FROM table_3 where key IN + ( + SELECT + * + FROM + ( + SELECT + table_1.key + FROM + table_1, table_2 + WHERE + table_1.key = table_2.key + OFFSET 0 + ) as foo + ) AND value IN (SELECT key FROM table_4); + + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2', 'table_3', 'table_4') ORDER BY 1; +ROLLBACK; + +-- copy out +BEGIN; + COPY (SELECT * FROM table_1 WHERE key IN (1,2,3) ORDER BY 1) TO stdout; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +ROLLBACK; + +-- copy in +BEGIN; + COPY table_1 FROM STDIN WITH CSV; +1,1 +2,2 +3,3 +\. + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +ROLLBACK; + +-- copy in single shard +BEGIN; + COPY table_1 FROM STDIN WITH CSV; +1,1 +1,2 +1,3 +\. + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +ROLLBACK; + +-- reference table accesses should always be sequential +BEGIN; + SELECT count(*) FROM table_6; + SELECT * FROM relation_accesses WHERE table_name IN ('table_6'); + + UPDATE table_6 SET value = 15; + SELECT * FROM relation_accesses WHERE table_name IN ('table_6'); + + ALTER TABLE table_6 ADD COLUMN x INT; + SELECT * FROM relation_accesses WHERE table_name IN ('table_6'); +ROLLBACK; + +-- reference table join with a distributed table +BEGIN; + SELECT count(*) FROM table_1 JOIN table_6 USING(key); + SELECT * FROM relation_accesses WHERE table_name IN ('table_6', 'table_1') ORDER BY 1,2; +ROLLBACK; + +-- TRUNCATE should be DDL +BEGIN; + TRUNCATE table_1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +ROLLBACK; + +-- TRUNCATE can be a sequential DDL +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + TRUNCATE table_1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +ROLLBACK; + +-- TRUNCATE on a reference table should be sequential +BEGIN; + TRUNCATE table_6; + SELECT * FROM relation_accesses WHERE table_name IN ('table_6') ORDER BY 1; +ROLLBACK; + +-- creating foreign keys should consider adding the placement accesses for the referenced table +ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); +BEGIN; + ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + +-- creating foreign keys should consider adding the placement accesses for the referenced table +-- in sequential mode as well +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + + ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + +CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); +SELECT create_distributed_table('partitioning_test', 'id'); + +-- Adding partition tables via CREATE TABLE should mark DDL access on the partitioned table as well +BEGIN; + CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM
('2009-01-01') TO ('2010-01-01'); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; +ROLLBACK; + +-- Adding partition tables via ATTACH PARTITION on local tables should mark DDL access on the partitioned table as well +CREATE TABLE partitioning_test_2009 AS SELECT * FROM partitioning_test; +BEGIN; + ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; +COMMIT; + +-- Adding partition tables via ATTACH PARTITION on distributed tables should mark DDL access on the partitioned table as well +CREATE TABLE partitioning_test_2010 AS SELECT * FROM partitioning_test; +SELECT create_distributed_table('partitioning_test_2010', 'id'); +BEGIN; + ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2010') ORDER BY 1; +COMMIT; + +-- reading from partitioned table marks all of its partitions +BEGIN; + SELECT count(*) FROM partitioning_test; + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +COMMIT; + +-- reading from partitioned table sequentially marks all of its partitions with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + SELECT count(*) FROM partitioning_test; + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +COMMIT; + +-- updating partitioned table marks all of its partitions +BEGIN; + UPDATE partitioning_test SET time = now(); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +COMMIT; + +-- updating partitioned table sequentially marks all of its partitions with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + UPDATE partitioning_test SET time = now(); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +COMMIT; + + +-- DDLs on partitioned table mark all of its partitions +BEGIN; + ALTER TABLE partitioning_test ADD COLUMN X INT; + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +ROLLBACK; + +-- DDLs on partitioned table sequentially mark all of its partitions with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + ALTER TABLE partitioning_test ADD COLUMN X INT; + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +ROLLBACK; + + +-- reading from partition table marks its parent +BEGIN; + SELECT count(*) FROM partitioning_test_2009; + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +COMMIT; + +-- reading from partition table marks its parent with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + SELECT count(*) FROM partitioning_test_2009; + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test',
'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +COMMIT; + +-- updating a partition table marks its parent +BEGIN; + UPDATE partitioning_test_2009 SET time = now(); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +COMMIT; + +-- updating a partition table marks its parent with sequential accesses +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + UPDATE partitioning_test_2009 SET time = now(); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +COMMIT; + + +-- DDLs on a partition table mark its parent +BEGIN; + CREATE INDEX i1000000 ON partitioning_test_2009 (id); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +ROLLBACK; + +-- DDLs on a partition table mark its parent in sequential mode +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + CREATE INDEX i1000000 ON partitioning_test_2009 (id); + SELECT * FROM relation_accesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; +ROLLBACK; + +-- TRUNCATE CASCADE works fine +ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); +BEGIN; + TRUNCATE table_1 CASCADE; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; +ROLLBACK; + +-- CTEs with SELECT only should work fine +BEGIN; + + WITH cte AS (SELECT count(*) FROM table_1) + SELECT * FROM cte; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +COMMIT; + +-- CTEs with SELECT only in sequential mode should work fine +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + + WITH cte AS (SELECT count(*) FROM table_1) + SELECT * FROM cte; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +COMMIT; + +-- modifying CTEs should work fine with multi-row inserts, which run in sequential mode by default +BEGIN; + + WITH cte_1 AS (INSERT INTO table_1 VALUES (1000,1000), (1001, 1001), (1002, 1002) RETURNING *) + SELECT * FROM cte_1 ORDER BY 1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +ROLLBACK; + +-- modifying CTEs should work fine with parallel mode +BEGIN; + + WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) + SELECT count(*) FROM cte_1 ORDER BY 1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +ROLLBACK; + +-- modifying CTEs should work fine with sequential mode +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + + WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) + SELECT count(*) FROM cte_1 ORDER BY 1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +ROLLBACK; + +-- router planned modifying CTEs should work fine with parallel mode +BEGIN; + + WITH cte_1 AS (UPDATE table_1 SET value = 15 WHERE key = 6 RETURNING *) + SELECT count(*) FROM cte_1 ORDER BY 1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1; +ROLLBACK; + +-- router planned modifying CTEs should work fine with sequential mode +BEGIN; + SET LOCAL citus.multi_shard_modify_mode = 'sequential'; + + WITH cte_1 AS (UPDATE table_1 SET value = 15 WHERE key = 6 RETURNING *) + SELECT count(*) FROM cte_1 ORDER BY 1; + SELECT * FROM relation_accesses WHERE table_name IN ('table_1') ORDER BY 1;
+ROLLBACK; + +-- create distributed table with data loading +-- should mark both parallel DML and parallel DDL +DROP TABLE table_3; +CREATE TABLE table_3 (key int, value int); +INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i; +BEGIN; + SELECT create_distributed_table('table_3', 'key'); + SELECT * FROM relation_accesses WHERE table_name IN ('table_3') ORDER BY 1; +COMMIT; + +SET search_path TO 'public'; +DROP SCHEMA access_tracking CASCADE; From 6d6e44166f58e4b6a5e8c7474ea3cc7d65449ef7 Mon Sep 17 00:00:00 2001 From: Marco Slot Date: Fri, 29 Jul 2022 14:05:41 +0200 Subject: [PATCH 26/38] Avoid catalog read via superuser() call in DecrementSharedConnectionCounter --- .../distributed/connection/shared_connection_stats.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/backend/distributed/connection/shared_connection_stats.c b/src/backend/distributed/connection/shared_connection_stats.c index 82a354733..82ad26756 100644 --- a/src/backend/distributed/connection/shared_connection_stats.c +++ b/src/backend/distributed/connection/shared_connection_stats.c @@ -420,7 +420,7 @@ IncrementSharedConnectionCounter(const char *hostname, int port) { SharedConnStatsHashKey connKey; - if (GetMaxSharedPoolSize() == DISABLE_CONNECTION_THROTTLING) + if (MaxSharedPoolSize == DISABLE_CONNECTION_THROTTLING) { /* connection throttling disabled */ return; @@ -484,7 +484,11 @@ DecrementSharedConnectionCounter(const char *hostname, int port) { SharedConnStatsHashKey connKey; - if (GetMaxSharedPoolSize() == DISABLE_CONNECTION_THROTTLING) + /* + * Do not call GetMaxSharedPoolSize() here, since it may read from + * the catalog and we may be in the process exit handler. + */ + if (MaxSharedPoolSize == DISABLE_CONNECTION_THROTTLING) { /* connection throttling disabled */ return; From f372e93d225e6690f3b04a2c13e36bb0b5575dda Mon Sep 17 00:00:00 2001 From: aykut-bozkurt <51649454+aykut-bozkurt@users.noreply.github.com> Date: Mon, 1 Aug 2022 10:14:35 +0300 Subject: [PATCH 27/38] we suppress notice log while looking up function oid to not break pg vanilla tests.
(#6082) --- src/backend/distributed/commands/function.c | 13 ++++++++++- src/backend/distributed/shared_library_init.c | 15 ++++++++++++ .../distributed/utils/citus_depended_object.c | 23 +++++++++++++++++++ .../distributed/citus_depended_object.h | 2 ++ src/include/distributed/shared_library_init.h | 1 + 5 files changed, 53 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index e02f68aa4..318f6242f 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -32,6 +32,7 @@ #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/extension.h" +#include "distributed/citus_depended_object.h" #include "distributed/citus_ruleutils.h" #include "distributed/citus_safe_lib.h" #include "distributed/colocation_utils.h" @@ -1438,7 +1439,17 @@ CreateFunctionStmtObjectAddress(Node *node, bool missing_ok) } } - return FunctionToObjectAddress(objectType, objectWithArgs, missing_ok); + int oldClientMinMessages = client_min_messages; + + /* suppress NOTICE if running under pg vanilla tests */ + SetLocalClientMinMessagesIfRunningPGTests(WARNING); + + List *funcAddresses = FunctionToObjectAddress(objectType, objectWithArgs, missing_ok); + + /* restore the previous client_min_messages level */ + SetLocalClientMinMessagesIfRunningPGTests(oldClientMinMessages); + + return funcAddresses; } diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 600b62b69..6a9d1fa51 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -595,6 +595,21 @@ StartupCitusBackend(void) } +/* + * GetClientMinMessageLevelNameForValue returns the name of the + * client_min_messages GUC level for the specified value. + */ +const char * +GetClientMinMessageLevelNameForValue(int minMessageLevel) +{ + struct config_enum record = { 0 }; + record.options = log_level_options; + const char *clientMinMessageLevelName = config_enum_lookup_by_value(&record, + minMessageLevel); + return clientMinMessageLevelName; +} + + /* * RegisterConnectionCleanup cleans up any resources left at the end of the * session. We prefer to cleanup before shared memory exit to make sure that diff --git a/src/backend/distributed/utils/citus_depended_object.c b/src/backend/distributed/utils/citus_depended_object.c index b844c3515..6424595bf 100644 --- a/src/backend/distributed/utils/citus_depended_object.c +++ b/src/backend/distributed/utils/citus_depended_object.c @@ -33,6 +33,8 @@ #include "distributed/citus_depended_object.h" #include "distributed/metadata_cache.h" #include "distributed/listutils.h" +#include "distributed/log_utils.h" +#include "distributed/shared_library_init.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/parsenodes.h" @@ -76,6 +78,27 @@ SetLocalHideCitusDependentObjectsDisabledWhenAlreadyEnabled(void) } +/* + * SetLocalClientMinMessagesIfRunningPGTests sets client_min_messages locally to the given value + * if EnableUnsupportedFeatureMessages is set to false. + */ +void +SetLocalClientMinMessagesIfRunningPGTests(int clientMinMessageLevel) +{ + if (EnableUnsupportedFeatureMessages) + { + return; + } + + const char *clientMinMessageLevelName = GetClientMinMessageLevelNameForValue( + clientMinMessageLevel); + + set_config_option("client_min_messages", clientMinMessageLevelName, + (superuser() ?
PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); +} + + /* * HideCitusDependentObjectsOnQueriesOfPgMetaTables adds a NOT is_citus_depended_object(oid, oid) expr * to the quals of meta class RTEs that we are interested in. diff --git a/src/include/distributed/citus_depended_object.h b/src/include/distributed/citus_depended_object.h index 61abfa68a..027186f4e 100644 --- a/src/include/distributed/citus_depended_object.h +++ b/src/include/distributed/citus_depended_object.h @@ -17,6 +17,8 @@ extern bool HideCitusDependentObjects; +extern void SetLocalClientMinMessagesIfRunningPGTests(int + clientMinMessageLevel); extern void SetLocalHideCitusDependentObjectsDisabledWhenAlreadyEnabled(void); extern bool HideCitusDependentObjectsOnQueriesOfPgMetaTables(Node *node, void *context); extern bool IsPgLocksTable(RangeTblEntry *rte); diff --git a/src/include/distributed/shared_library_init.h b/src/include/distributed/shared_library_init.h index 485ab553f..63a7147af 100644 --- a/src/include/distributed/shared_library_init.h +++ b/src/include/distributed/shared_library_init.h @@ -23,5 +23,6 @@ extern IsColumnarTableAmTable_type extern_IsColumnarTableAmTable; extern ReadColumnarOptions_type extern_ReadColumnarOptions; extern void StartupCitusBackend(void); +extern const char * GetClientMinMessageLevelNameForValue(int minMessageLevel); #endif /* SHARED_LIBRARY_INIT_H */ From 0a04b115aa549e0b250b272b2bc6630d052930c5 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Mon, 1 Aug 2022 13:39:56 +0300 Subject: [PATCH 28/38] Add CHANGELOG entries for 11.0.5 (#6108) --- CHANGELOG.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a628cefd9..26f752cf2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +### citus v11.0.5 (August 1, 2022) ### + +* Avoids possible information leakage about existing users + +* Allows using `WITH HOLD` cursors with parameters + +* Fixes a bug that could cause failures in `INSERT INTO .. 
SELECT` + +* Fixes a bug that prevents pushing down `IN` expressions when using columnar + custom scan + +* Fixes a concurrency bug between creating a co-located distributed table and + shard moves + +* Fixes a crash that can happen due to catalog read in `shmem_exit` + +* Fixes an unexpected error caused by constraints when moving shards + +* Fixes an unexpected error for foreign tables when upgrading Postgres + +* Prevents adding local table into metadata if there is a view with circular + dependencies on it + +* Reduces memory consumption of index name fix for partitioned tables + ### citus v11.0.4 (July 13, 2022) ### * Fixes a bug that prevents promoting read-replicas as primaries From f9b02946b125483e1439d374129541c893592db4 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Mon, 1 Aug 2022 14:32:36 +0300 Subject: [PATCH 29/38] Delete PG version_above_ten alternative test outputs (#6111) --- src/test/regress/expected/.gitignore | 2 - .../input/multi_alter_table_statements.source | 4 - .../input/multi_multiuser_load_data.source | 3 - .../multi_alter_table_statements.source | 8 - .../multi_alter_table_statements_0.source | 1237 ----------------- .../output/multi_multiuser_load_data.source | 7 - .../output/multi_multiuser_load_data_0.source | 39 - 7 files changed, 1300 deletions(-) delete mode 100644 src/test/regress/output/multi_alter_table_statements_0.source delete mode 100644 src/test/regress/output/multi_multiuser_load_data_0.source diff --git a/src/test/regress/expected/.gitignore b/src/test/regress/expected/.gitignore index 209f06da8..acb41a987 100644 --- a/src/test/regress/expected/.gitignore +++ b/src/test/regress/expected/.gitignore @@ -5,7 +5,6 @@ /multi_agg_distinct.out /multi_agg_type_conversion.out /multi_alter_table_statements.out -/multi_alter_table_statements_0.out /multi_behavioral_analytics_create_table.out /multi_behavioral_analytics_create_table_superuser.out /multi_complex_count_distinct.out @@ -14,7 +13,6 @@ /multi_load_data_superuser.out /multi_load_more_data.out /multi_multiuser_load_data.out -/multi_multiuser_load_data_0.out /multi_mx_copy_data.out /multi_outer_join.out /multi_outer_join_reference.out diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index 4b9f26cec..2e9883175 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -2,10 +2,6 @@ -- MULTI_ALTER_TABLE_STATEMENTS -- --- this test has different output per version -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; - ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; diff --git a/src/test/regress/input/multi_multiuser_load_data.source b/src/test/regress/input/multi_multiuser_load_data.source index 5681347c6..3f332eddc 100644 --- a/src/test/regress/input/multi_multiuser_load_data.source +++ b/src/test/regress/input/multi_multiuser_load_data.source @@ -8,9 +8,6 @@ -- the shard placement policy to the local-node-first policy as other regression -- tests expect the placements to be in that order. 
-SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; - SET citusdb.shard_placement_policy TO 'local-node-first'; -- load as superuser diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index 7a93bcb7b..a5bdee20e 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -1,14 +1,6 @@ -- -- MULTI_ALTER_TABLE_STATEMENTS -- --- this test has different output per version -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; - version_above_ten -------------------- - t -(1 row) - ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; -- Check that we can run ALTER TABLE statements on distributed tables. -- We set the shardid sequence here so that the shardids in this test diff --git a/src/test/regress/output/multi_alter_table_statements_0.source b/src/test/regress/output/multi_alter_table_statements_0.source deleted file mode 100644 index c6a549052..000000000 --- a/src/test/regress/output/multi_alter_table_statements_0.source +++ /dev/null @@ -1,1237 +0,0 @@ --- --- MULTI_ALTER_TABLE_STATEMENTS --- --- this test has different output per version -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; - version_above_ten -------------------- - f -(1 row) - -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; --- Check that we can run ALTER TABLE statements on distributed tables. --- We set the shardid sequence here so that the shardids in this test --- aren't affected by changes to the previous tests. -CREATE TABLE lineitem_alter ( - l_orderkey bigint not null, - l_partkey integer not null, - l_suppkey integer not null, - l_linenumber integer not null, - l_quantity decimal(15, 2) not null, - l_extendedprice decimal(15, 2) not null, - l_discount decimal(15, 2) not null, - l_tax decimal(15, 2) not null, - l_returnflag char(1) not null, - l_linestatus char(1) not null, - l_shipdate date not null, - l_commitdate date not null, - l_receiptdate date not null, - l_shipinstruct char(25) not null, - l_shipmode char(10) not null, - l_comment varchar(44) not null - ) - WITH ( fillfactor = 80 ); -SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); - create_distributed_table --------------------------- - -(1 row) - -\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' --- verify that the storage options made it to the table definitions -SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; - relname | reloptions -----------------+----------------- - lineitem_alter | {fillfactor=80} -(1 row) - -\c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' ORDER BY relname; - relname | reloptions ------------------------+----------------- - lineitem_alter_220000 | {fillfactor=80} -(1 row) - -\c - - - :master_port --- Verify that we can add columns -ALTER TABLE lineitem_alter ADD COLUMN float_column FLOAT; -ALTER TABLE lineitem_alter ADD COLUMN date_column DATE; -ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER DEFAULT 1; -ALTER TABLE lineitem_alter ADD COLUMN int_column2 INTEGER DEFAULT 2; -ALTER TABLE lineitem_alter ADD COLUMN null_column INTEGER; --- show changed schema on one worker -\c - - - :worker_1_port -SELECT attname, atttypid::regtype -FROM - (SELECT oid 
FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc - JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) -ORDER BY attnum; - attname | atttypid ------------------+------------------- - tableoid | oid - cmax | cid - xmax | xid - cmin | cid - xmin | xid - ctid | tid - l_orderkey | bigint - l_partkey | integer - l_suppkey | integer - l_linenumber | integer - l_quantity | numeric - l_extendedprice | numeric - l_discount | numeric - l_tax | numeric - l_returnflag | character - l_linestatus | character - l_shipdate | date - l_commitdate | date - l_receiptdate | date - l_shipinstruct | character - l_shipmode | character - l_comment | character varying - float_column | double precision - date_column | date - int_column1 | integer - int_column2 | integer - null_column | integer -(27 rows) - -\c - - - :master_port -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ------------------+-----------------------+----------- - l_orderkey | bigint | not null - l_partkey | integer | not null - l_suppkey | integer | not null - l_linenumber | integer | not null - l_quantity | numeric(15,2) | not null - l_extendedprice | numeric(15,2) | not null - l_discount | numeric(15,2) | not null - l_tax | numeric(15,2) | not null - l_returnflag | character(1) | not null - l_linestatus | character(1) | not null - l_shipdate | date | not null - l_commitdate | date | not null - l_receiptdate | date | not null - l_shipinstruct | character(25) | not null - l_shipmode | character(10) | not null - l_comment | character varying(44) | not null - float_column | double precision | - date_column | date | - int_column1 | integer | default 1 - int_column2 | integer | default 2 - null_column | integer | -(21 rows) - -SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; - float_column | count ---------------+------- - | 6000 -(1 row) - -SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; - int_column1 | count --------------+------- - 1 | 6000 -(1 row) - --- Verify that SET|DROP DEFAULT works -ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; -ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; --- \copy to verify that default values take effect -\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; - float_column | count ---------------+------- - | 6000 - 1 | 6000 -(2 rows) - -SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; - int_column1 | count --------------+------- - | 6000 - 1 | 6000 -(2 rows) - --- Verify that SET NOT NULL works -ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL; -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ------------------+-----------------------+-------------------- - l_orderkey | bigint | not null - l_partkey | integer | not null - l_suppkey | integer | not null - l_linenumber | integer | not null - l_quantity | numeric(15,2) | not null - l_extendedprice | numeric(15,2) | not null - l_discount | numeric(15,2) | not null - l_tax | numeric(15,2) | not null - l_returnflag | character(1) | not null - l_linestatus | 
character(1) | not null - l_shipdate | date | not null - l_commitdate | date | not null - l_receiptdate | date | not null - l_shipinstruct | character(25) | not null - l_shipmode | character(10) | not null - l_comment | character varying(44) | not null - float_column | double precision | default 1 - date_column | date | - int_column1 | integer | - int_column2 | integer | not null default 2 - null_column | integer | -(21 rows) - --- Drop default so that NULLs will be inserted for this column -ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; --- \copy should fail because it will try to insert NULLs for a NOT NULL column --- Note, this operation will create a table on the workers but it won't be in the metadata -\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -ERROR: null value in column "int_column2" violates not-null constraint -DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). --- Verify that DROP NOT NULL works -ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ------------------+-----------------------+----------- - l_orderkey | bigint | not null - l_partkey | integer | not null - l_suppkey | integer | not null - l_linenumber | integer | not null - l_quantity | numeric(15,2) | not null - l_extendedprice | numeric(15,2) | not null - l_discount | numeric(15,2) | not null - l_tax | numeric(15,2) | not null - l_returnflag | character(1) | not null - l_linestatus | character(1) | not null - l_shipdate | date | not null - l_commitdate | date | not null - l_receiptdate | date | not null - l_shipinstruct | character(25) | not null - l_shipmode | character(10) | not null - l_comment | character varying(44) | not null - float_column | double precision | default 1 - date_column | date | - int_column1 | integer | - int_column2 | integer | - null_column | integer | -(21 rows) - --- \copy should succeed now -\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -SELECT count(*) from lineitem_alter; - count -------- - 18000 -(1 row) - --- Verify that SET DATA TYPE works -SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; - int_column2 | pg_typeof | count --------------+-----------+------- - | integer | 6000 - 2 | integer | 12000 -(2 rows) - -ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT; -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ------------------+-----------------------+----------- - l_orderkey | bigint | not null - l_partkey | integer | not null - l_suppkey | integer | not null - l_linenumber | integer | not null - l_quantity | numeric(15,2) | not null - l_extendedprice | numeric(15,2) | not null - l_discount | numeric(15,2) | not null - l_tax | numeric(15,2) | not null - 
l_returnflag | character(1) | not null - l_linestatus | character(1) | not null - l_shipdate | date | not null - l_commitdate | date | not null - l_receiptdate | date | not null - l_shipinstruct | character(25) | not null - l_shipmode | character(10) | not null - l_comment | character varying(44) | not null - float_column | double precision | default 1 - date_column | date | - int_column1 | integer | - int_column2 | double precision | - null_column | integer | -(21 rows) - -SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; - int_column2 | pg_typeof | count --------------+------------------+------- - | double precision | 6000 - 2 | double precision | 12000 -(2 rows) - --- Verify that DROP COLUMN works -ALTER TABLE lineitem_alter DROP COLUMN int_column1; -ALTER TABLE lineitem_alter DROP COLUMN float_column; -ALTER TABLE lineitem_alter DROP COLUMN date_column; --- Verify that RENAME COLUMN works -ALTER TABLE lineitem_alter RENAME COLUMN l_orderkey TO l_orderkey_renamed; -SELECT SUM(l_orderkey_renamed) FROM lineitem_alter; - sum ----------- - 53620791 -(1 row) - --- Verify that IF EXISTS works as expected -ALTER TABLE non_existent_table ADD COLUMN new_column INTEGER; -ERROR: relation "non_existent_table" does not exist -ALTER TABLE IF EXISTS non_existent_table ADD COLUMN new_column INTEGER; -NOTICE: relation "non_existent_table" does not exist, skipping -ALTER TABLE IF EXISTS lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE INTEGER; -ALTER TABLE lineitem_alter DROP COLUMN non_existent_column; -ERROR: column "non_existent_column" of relation "lineitem_alter" does not exist -ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS non_existent_column; -NOTICE: column "non_existent_column" of relation "lineitem_alter" does not exist, skipping -ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS int_column2; --- Verify with IF EXISTS for extant table -ALTER TABLE IF EXISTS lineitem_alter RENAME COLUMN l_orderkey_renamed TO l_orderkey; -SELECT SUM(l_orderkey) FROM lineitem_alter; - sum ----------- - 53620791 -(1 row) - -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ------------------+-----------------------+----------- - l_orderkey | bigint | not null - l_partkey | integer | not null - l_suppkey | integer | not null - l_linenumber | integer | not null - l_quantity | numeric(15,2) | not null - l_extendedprice | numeric(15,2) | not null - l_discount | numeric(15,2) | not null - l_tax | numeric(15,2) | not null - l_returnflag | character(1) | not null - l_linestatus | character(1) | not null - l_shipdate | date | not null - l_commitdate | date | not null - l_receiptdate | date | not null - l_shipinstruct | character(25) | not null - l_shipmode | character(10) | not null - l_comment | character varying(44) | not null - null_column | integer | -(17 rows) - --- Verify that we can execute commands with multiple subcommands -ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER, - ADD COLUMN int_column2 INTEGER; -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ------------------+-----------------------+----------- - l_orderkey | bigint | not null - l_partkey | integer | not null - l_suppkey | integer | not null - l_linenumber | integer | not null - l_quantity | numeric(15,2) | not null - l_extendedprice | numeric(15,2) | not null - l_discount | numeric(15,2) | not null - l_tax | numeric(15,2) | not null - 
l_returnflag | character(1) | not null - l_linestatus | character(1) | not null - l_shipdate | date | not null - l_commitdate | date | not null - l_receiptdate | date | not null - l_shipinstruct | character(25) | not null - l_shipmode | character(10) | not null - l_comment | character varying(44) | not null - null_column | integer | - int_column1 | integer | - int_column2 | integer | -(19 rows) - -ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER, - ALTER COLUMN int_column1 SET STATISTICS 10; -ERROR: alter table command is currently unsupported -DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. -ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2; -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ------------------+-----------------------+----------- - l_orderkey | bigint | not null - l_partkey | integer | not null - l_suppkey | integer | not null - l_linenumber | integer | not null - l_quantity | numeric(15,2) | not null - l_extendedprice | numeric(15,2) | not null - l_discount | numeric(15,2) | not null - l_tax | numeric(15,2) | not null - l_returnflag | character(1) | not null - l_linestatus | character(1) | not null - l_shipdate | date | not null - l_commitdate | date | not null - l_receiptdate | date | not null - l_shipinstruct | character(25) | not null - l_shipmode | character(10) | not null - l_comment | character varying(44) | not null - null_column | integer | -(17 rows) - --- Verify that we cannot execute alter commands on the distribution column -ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey DROP NOT NULL; -ERROR: cannot execute ALTER TABLE command involving partition column -ALTER TABLE lineitem_alter DROP COLUMN l_orderkey; -ERROR: cannot execute ALTER TABLE command involving partition column --- Verify that we error out on unsupported statement types -ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey SET STATISTICS 100; -ERROR: alter table command is currently unsupported -DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. -ALTER TABLE lineitem_alter DROP CONSTRAINT IF EXISTS non_existent_contraint; -NOTICE: constraint "non_existent_contraint" of relation "lineitem_alter" does not exist, skipping -ALTER TABLE lineitem_alter SET WITHOUT OIDS; -ERROR: alter table command is currently unsupported -DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. --- Verify that we error out in case of postgres errors on supported statement --- types -ALTER TABLE lineitem_alter ADD COLUMN new_column non_existent_type; -ERROR: type "non_existent_type" does not exist -LINE 1: ALTER TABLE lineitem_alter ADD COLUMN new_column non_existen... 
- ^ -ALTER TABLE lineitem_alter ALTER COLUMN null_column SET NOT NULL; -ERROR: column "null_column" contains null values -CONTEXT: while executing command on localhost:57638 -ALTER TABLE lineitem_alter ALTER COLUMN l_partkey SET DEFAULT 'a'; -ERROR: invalid input syntax for integer: "a" --- Verify that we error out on RENAME CONSTRAINT statement -ALTER TABLE lineitem_alter RENAME CONSTRAINT constraint_a TO constraint_b; -ERROR: renaming constraints belonging to distributed tables is currently unsupported --- Verify that IF EXISTS works as expected with RENAME statements -ALTER TABLE non_existent_table RENAME TO non_existent_table_renamed; -ERROR: relation "non_existent_table" does not exist -ALTER TABLE IF EXISTS non_existent_table RENAME TO non_existent_table_renamed; -NOTICE: relation "non_existent_table" does not exist, skipping -ALTER TABLE IF EXISTS non_existent_table RENAME COLUMN column1 TO column2; -NOTICE: relation "non_existent_table" does not exist, skipping --- Verify that none of the failed alter table commands took effect on the master --- node -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ------------------+-----------------------+----------- - l_orderkey | bigint | not null - l_partkey | integer | not null - l_suppkey | integer | not null - l_linenumber | integer | not null - l_quantity | numeric(15,2) | not null - l_extendedprice | numeric(15,2) | not null - l_discount | numeric(15,2) | not null - l_tax | numeric(15,2) | not null - l_returnflag | character(1) | not null - l_linestatus | character(1) | not null - l_shipdate | date | not null - l_commitdate | date | not null - l_receiptdate | date | not null - l_shipinstruct | character(25) | not null - l_shipmode | character(10) | not null - l_comment | character varying(44) | not null - null_column | integer | -(17 rows) - --- verify that non-propagated ddl commands are allowed inside a transaction block -SET citus.enable_ddl_propagation to false; -BEGIN; -CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber); -COMMIT; -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------+---------------- - temp_index_1 | lineitem_alter -(1 row) - -DROP INDEX temp_index_1; --- verify that single distributed ddl commands are allowed inside a transaction block -SET citus.enable_ddl_propagation to true; -BEGIN; -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -COMMIT; -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------+---------------- - temp_index_2 | lineitem_alter -(1 row) - -DROP INDEX temp_index_2; --- and so are multiple ddl statements -BEGIN; -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -ALTER TABLE lineitem_alter ADD COLUMN first integer; -COMMIT; -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ------------------+-----------------------+----------- - l_orderkey | bigint | not null - l_partkey | integer | not null - l_suppkey | integer | not null - l_linenumber | integer | not null - l_quantity | numeric(15,2) | not null - l_extendedprice | numeric(15,2) | not null - l_discount | numeric(15,2) | not null - l_tax | numeric(15,2) | not null - l_returnflag | character(1) | not null - l_linestatus | character(1) | not null - l_shipdate | date | not null - l_commitdate | date | not null - l_receiptdate | date | not null 
- l_shipinstruct | character(25) | not null - l_shipmode | character(10) | not null - l_comment | character varying(44) | not null - null_column | integer | - first | integer | -(18 rows) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'temp_index_2'::regclass; - Column | Type | Definition -------------+--------+------------ - l_orderkey | bigint | l_orderkey -(1 row) - -ALTER TABLE lineitem_alter DROP COLUMN first; -DROP INDEX temp_index_2; --- ensure that user-specified rollback causes full rollback -BEGIN; -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); -ROLLBACK; -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ------------+----------- -(0 rows) - --- ensure that errors cause full rollback -BEGIN; -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -ERROR: relation "temp_index_2" already exists -ROLLBACK; -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ------------+----------- -(0 rows) - --- verify that SAVEPOINT is allowed... -BEGIN; -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -SAVEPOINT my_savepoint; -CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); -ROLLBACK; --- and also rolling back to it is also allowed -BEGIN; -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -SAVEPOINT my_savepoint; -CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); -ROLLBACK TO my_savepoint; -COMMIT; -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------+---------------- - temp_index_2 | lineitem_alter -(1 row) - -DROP INDEX temp_index_2; --- Add column on only one worker... -\c - - - :worker_2_port -ALTER TABLE lineitem_alter_220000 ADD COLUMN first integer; -\c - - - :master_port --- and try to add it in a multi-statement block, which fails -BEGIN; -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -ALTER TABLE lineitem_alter ADD COLUMN first integer; -ERROR: column "first" of relation "lineitem_alter_220000" already exists -CONTEXT: while executing command on localhost:57638 -COMMIT; --- Nothing from the block should have committed -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ------------+----------- -(0 rows) - --- Create single-shard table (to avoid deadlocks in the upcoming test hackery) -CREATE TABLE single_shard_items (id integer NOT NULL, name text); -SET citus.shard_count TO 1; -SET citus.shard_replication_factor TO 2; -SELECT create_distributed_table('single_shard_items', 'id', 'hash'); - create_distributed_table --------------------------- - -(1 row) - --- Verify that ALTER TABLE .. REPLICATION IDENTITY [USING INDEX]* .. 
works -CREATE UNIQUE INDEX replica_idx on single_shard_items(id); -SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident --------------- - d -(1 row) - -SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ------------------------- - (localhost,57637,t,d) - (localhost,57638,t,d) -(2 rows) - -ALTER TABLE single_shard_items REPLICA IDENTITY nothing; -SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident --------------- - n -(1 row) - -SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ------------------------- - (localhost,57637,t,n) - (localhost,57638,t,n) -(2 rows) - -ALTER TABLE single_shard_items REPLICA IDENTITY full; -SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident --------------- - f -(1 row) - -SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ------------------------- - (localhost,57637,t,f) - (localhost,57638,t,f) -(2 rows) - -ALTER TABLE single_shard_items REPLICA IDENTITY USING INDEX replica_idx; -SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident --------------- - i -(1 row) - -SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ------------------------- - (localhost,57637,t,i) - (localhost,57638,t,i) -(2 rows) - -ALTER TABLE single_shard_items REPLICA IDENTITY default, REPLICA IDENTITY USING INDEX replica_idx, REPLICA IDENTITY nothing; -SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident --------------- - n -(1 row) - -SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ------------------------- - (localhost,57637,t,n) - (localhost,57638,t,n) -(2 rows) - -ALTER TABLE single_shard_items ADD COLUMN test_col int, REPLICA IDENTITY full; -DROP INDEX replica_idx; -ALTER TABLE single_shard_items REPLICA IDENTITY default; --- Drop the column from the worker... -\c - - - :worker_2_port -ALTER TABLE lineitem_alter_220000 DROP COLUMN first; --- Create table to trigger at-xact-end (deferred) failure -CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED); --- Use an event trigger to log all DDL event tags in it -CREATE FUNCTION log_ddl_tag() RETURNS event_trigger AS $ldt$ - BEGIN - INSERT INTO ddl_commands VALUES (tg_tag); - END; -$ldt$ LANGUAGE plpgsql; -CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag(); -\c - - - :master_port --- The above trigger will cause failure at transaction end on one placement. --- We'll test 2PC first, as it should handle this "best" (no divergence) -SET citus.multi_shard_commit_protocol TO '2pc'; -BEGIN; -CREATE INDEX single_index_2 ON single_shard_items(id); -CREATE INDEX single_index_3 ON single_shard_items(name); -COMMIT; -ERROR: duplicate key value violates unique constraint "ddl_commands_command_key" -DETAIL: Key (command)=(CREATE INDEX) already exists. 
-CONTEXT: while executing command on localhost:57638 --- Nothing from the block should have committed -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; - indexname | tablename ------------+----------- -(0 rows) - --- Now try with 2pc off -SET citus.multi_shard_commit_protocol TO '1pc'; -BEGIN; -CREATE INDEX single_index_2 ON single_shard_items(id); -CREATE INDEX single_index_3 ON single_shard_items(name); -COMMIT; -WARNING: duplicate key value violates unique constraint "ddl_commands_command_key" -DETAIL: Key (command)=(CREATE INDEX) already exists. -CONTEXT: while executing command on localhost:57638 -WARNING: failed to commit transaction on localhost:57638 --- The block should have committed with a warning -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; - indexname | tablename -----------------+-------------------- - single_index_2 | single_shard_items - single_index_3 | single_shard_items -(2 rows) - -\c - - - :worker_2_port -DROP EVENT TRIGGER log_ddl_tag; -DROP FUNCTION log_ddl_tag(); -DROP TABLE ddl_commands; -\c - - - :master_port --- Distributed SELECTs may appear after ALTER -BEGIN; -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -SELECT count(*) FROM lineitem_alter; - count -------- - 18000 -(1 row) - -ROLLBACK; --- and before -BEGIN; -SELECT count(*) FROM lineitem_alter; - count -------- - 18000 -(1 row) - -CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); -COMMIT; -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------+---------------- - temp_index_2 | lineitem_alter -(1 row) - -DROP INDEX temp_index_2; ---- verify that distributed ddl commands can be used with 2pc -SET citus.multi_shard_commit_protocol TO '2pc'; -CREATE INDEX temp_index_3 ON lineitem_alter(l_orderkey); -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------+---------------- - temp_index_3 | lineitem_alter -(1 row) - -DROP INDEX temp_index_3; -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ------------+----------- -(0 rows) - -RESET citus.multi_shard_commit_protocol; --- verify that not any of shard placements are marked as failed when a query failure occurs -CREATE TABLE test_ab (a int, b int); -SET citus.shard_count TO 8; -SELECT create_distributed_table('test_ab', 'a', 'hash'); - create_distributed_table --------------------------- - -(1 row) - -INSERT INTO test_ab VALUES (2, 10); -INSERT INTO test_ab VALUES (2, 11); -CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a); -ERROR: could not create unique index "temp_unique_index_1_220011" -DETAIL: Key (a)=(2) is duplicated. 
-CONTEXT: while executing command on localhost:57638 -SELECT shardid FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard -WHERE logicalrelid='test_ab'::regclass AND shardstate=3; - shardid ---------- -(0 rows) - --- Check that the schema on the worker still looks reasonable -\c - - - :worker_1_port -SELECT attname, atttypid::regtype -FROM - (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc - JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) -ORDER BY attnum; - attname | atttypid --------------------------------+------------------- - tableoid | oid - cmax | cid - xmax | xid - cmin | cid - xmin | xid - ctid | tid - l_orderkey | bigint - l_partkey | integer - l_suppkey | integer - l_linenumber | integer - l_quantity | numeric - l_extendedprice | numeric - l_discount | numeric - l_tax | numeric - l_returnflag | character - l_linestatus | character - l_shipdate | date - l_commitdate | date - l_receiptdate | date - l_shipinstruct | character - l_shipmode | character - l_comment | character varying - ........pg.dropped.17........ | - - ........pg.dropped.18........ | - - ........pg.dropped.19........ | - - ........pg.dropped.20........ | - - null_column | integer - ........pg.dropped.22........ | - - ........pg.dropped.23........ | - - ........pg.dropped.24........ | - -(30 rows) - -\c - - - :master_port --- verify that we can rename distributed tables -SHOW citus.enable_ddl_propagation; - citus.enable_ddl_propagation ------------------------------- - on -(1 row) - -ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; --- verify rename is performed -SELECT relname FROM pg_class WHERE relname = 'lineitem_renamed'; - relname ------------------- - lineitem_renamed -(1 row) - --- show rename worked on one worker, too -\c - - - :worker_1_port -SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_renamed%' ORDER BY relname; - relname -------------------------- - lineitem_renamed_220000 - lineitem_renamed_220001 - lineitem_renamed_220003 -(3 rows) - -\c - - - :master_port --- revert it to original name -ALTER TABLE lineitem_renamed RENAME TO lineitem_alter; --- show rename worked on one worker, too -\c - - - :worker_1_port -SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname; - relname ------------------------ - lineitem_alter_220000 - lineitem_alter_220001 - lineitem_alter_220003 -(3 rows) - -\c - - - :master_port --- verify that we can set and reset storage parameters -ALTER TABLE lineitem_alter SET(fillfactor=40); -SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; - relname | reloptions -----------------+----------------- - lineitem_alter | {fillfactor=40} -(1 row) - -\c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname; - relname | reloptions ------------------------+----------------- - lineitem_alter_220000 | {fillfactor=40} - lineitem_alter_220001 | {fillfactor=40} - lineitem_alter_220003 | {fillfactor=40} -(3 rows) - -\c - - - :master_port -ALTER TABLE lineitem_alter RESET(fillfactor); -SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; - relname | reloptions -----------------+------------ - lineitem_alter | -(1 row) - -\c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed 
copy trails */ ORDER BY relname; - relname | reloptions ------------------------+------------ - lineitem_alter_220000 | - lineitem_alter_220001 | - lineitem_alter_220003 | -(3 rows) - -\c - - - :master_port --- verify that we can rename indexes on distributed tables -CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber); -ALTER INDEX temp_index_1 RENAME TO idx_lineitem_linenumber; --- verify rename is performed -SELECT relname FROM pg_class WHERE relname = 'idx_lineitem_linenumber'; - relname -------------------------- - idx_lineitem_linenumber -(1 row) - --- show rename worked on one worker, too -\c - - - :worker_1_port -SELECT relname FROM pg_class WHERE relname LIKE 'idx_lineitem_linenumber%' ORDER BY relname; - relname --------------------------------- - idx_lineitem_linenumber_220000 - idx_lineitem_linenumber_220001 - idx_lineitem_linenumber_220003 -(3 rows) - -\c - - - :master_port --- now get rid of the index -DROP INDEX idx_lineitem_linenumber; --- verify that we don't intercept DDL commands if propagation is turned off -SET citus.enable_ddl_propagation to false; --- table rename statement can be performed on the coordinator only now -ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; --- verify rename is performed -SELECT relname FROM pg_class WHERE relname = 'lineitem_alter' or relname = 'lineitem_renamed'; - relname ------------------- - lineitem_renamed -(1 row) - --- revert it to original name -ALTER TABLE lineitem_renamed RENAME TO lineitem_alter; --- this column is added to master table and not workers -ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int; --- verify newly added column is not present in a worker shard -\c - - - :worker_1_port -SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0; -ERROR: column "column_only_added_to_master" does not exist -LINE 1: SELECT column_only_added_to_master FROM lineitem_alter_22000... - ^ -\c - - - :master_port --- ddl propagation flag is reset to default, disable it again -SET citus.enable_ddl_propagation to false; --- following query succeeds since it accesses an previously existing column -SELECT l_orderkey FROM lineitem_alter LIMIT 0; - l_orderkey ------------- -(0 rows) - --- make master and workers have the same schema again -ALTER TABLE lineitem_alter DROP COLUMN column_only_added_to_master; --- now this should succeed -SELECT * FROM lineitem_alter LIMIT 0; - l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment | null_column -------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+----------------+------------+-----------+------------- -(0 rows) - --- previously unsupported statements are accepted by postgresql now -ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey SET STATISTICS 100; -ALTER TABLE lineitem_alter DROP CONSTRAINT IF EXISTS non_existent_contraint; -NOTICE: constraint "non_existent_contraint" of relation "lineitem_alter" does not exist, skipping -ALTER TABLE lineitem_alter SET WITHOUT OIDS; --- distribution column still cannot be dropped. -ALTER TABLE lineitem_alter DROP COLUMN l_orderkey; -ERROR: cannot execute ALTER TABLE command dropping partition column --- Even unique indexes on l_partkey (non-partition column) are allowed. --- Citus would have prevented that. 
-CREATE UNIQUE INDEX unique_lineitem_partkey on lineitem_alter(l_partkey); -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename --------------------------+---------------- - unique_lineitem_partkey | lineitem_alter -(1 row) - --- verify index is not created on worker -\c - - - :worker_1_port -SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alter_%'; - indexname | tablename ------------+----------- -(0 rows) - -\c - - - :master_port --- verify alter table and drop sequence in the same transaction does not cause deadlock -SET citus.shard_count TO 4; -SET citus.shard_replication_factor TO 2; -CREATE TABLE sequence_deadlock_test (a serial, b serial); -SELECT create_distributed_table('sequence_deadlock_test', 'a'); - create_distributed_table --------------------------- - -(1 row) - -BEGIN; -ALTER TABLE sequence_deadlock_test ADD COLUMN c int; --- suppress notice message caused by DROP ... CASCADE to prevent pg version difference -SET client_min_messages TO 'WARNING'; -DROP SEQUENCE sequence_deadlock_test_b_seq CASCADE; -RESET client_min_messages; -END; -DROP TABLE sequence_deadlock_test; --- verify enable/disable trigger all works -SET citus.shard_replication_factor TO 1; -SET citus.shard_count TO 1; -CREATE TABLE trigger_table ( - id int, - value text -); -SELECT create_distributed_table('trigger_table', 'id'); - create_distributed_table --------------------------- - -(1 row) - --- first set a trigger on a shard -\c - - - :worker_1_port -CREATE FUNCTION update_value() RETURNS trigger AS $up$ - BEGIN - NEW.value := 'trigger enabled'; - RETURN NEW; - END; -$up$ LANGUAGE plpgsql; -CREATE TRIGGER update_value -BEFORE INSERT ON trigger_table_220017 -FOR EACH ROW EXECUTE PROCEDURE update_value(); -\c - - - :master_port -INSERT INTO trigger_table VALUES (1, 'trigger disabled'); -SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; - value | count ------------------+------- - trigger enabled | 1 -(1 row) - -ALTER TABLE trigger_table DISABLE TRIGGER ALL; -INSERT INTO trigger_table VALUES (1, 'trigger disabled'); -SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; - value | count -------------------+------- - trigger disabled | 1 - trigger enabled | 1 -(2 rows) - -ALTER TABLE trigger_table ENABLE TRIGGER ALL; -INSERT INTO trigger_table VALUES (1, 'trigger disabled'); -SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; - value | count -------------------+------- - trigger disabled | 1 - trigger enabled | 2 -(2 rows) - -DROP TABLE trigger_table; --- test ALTER TABLE ALL IN TABLESPACE --- we expect that it will warn out -CREATE TABLESPACE super_fast_ssd LOCATION '@abs_srcdir@/data'; -ALTER TABLE ALL IN TABLESPACE pg_default SET TABLESPACE super_fast_ssd; -WARNING: not propagating ALTER TABLE ALL IN TABLESPACE commands to worker nodes -HINT: Connect to worker nodes directly to manually move all tables. -ALTER TABLE ALL IN TABLESPACE super_fast_ssd SET TABLESPACE pg_default; -WARNING: not propagating ALTER TABLE ALL IN TABLESPACE commands to worker nodes -HINT: Connect to worker nodes directly to manually move all tables. 
-DROP TABLESPACE super_fast_ssd; --- Cleanup the table and its shards -SET citus.enable_ddl_propagation to true; -CREATE USER alter_table_owner WITH LOGIN; -GRANT USAGE ON SCHEMA public TO alter_table_owner; -\c - alter_table_owner - :master_port --- should not be able to access table without permission -SELECT count(*) FROM lineitem_alter; -ERROR: permission denied for relation lineitem_alter --- should not be able to drop the table as non table owner -DROP TABLE lineitem_alter; -ERROR: must be owner of relation lineitem_alter -\c - postgres - :master_port -ALTER TABLE lineitem_alter OWNER TO alter_table_owner; -\c - alter_table_owner - :master_port --- should be able to query the table as table owner -SELECT count(*) FROM lineitem_alter; - count -------- - 18000 -(1 row) - --- should be able to drop the table as table owner -DROP TABLE lineitem_alter; --- check that nothing's left over on workers, other than the leftover shard created --- during the unsuccessful COPY -\c - postgres - :worker_1_port -SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; - relname ------------------------ - lineitem_alter_220002 -(1 row) - -\c - - - :master_port --- drop the roles created -REVOKE ALL ON SCHEMA PUBLIC FROM alter_table_owner; -DROP ROLE alter_table_owner; -SELECT run_command_on_workers('DROP ROLE alter_table_owner'); - run_command_on_workers ---------------------------------- - (localhost,57637,t,"DROP ROLE") - (localhost,57638,t,"DROP ROLE") -(2 rows) - --- Test alter table with drop table in the same transaction -BEGIN; -CREATE TABLE test_table_1(id int); -SELECT create_distributed_table('test_table_1','id'); - create_distributed_table --------------------------- - -(1 row) - -ALTER TABLE test_table_1 ADD CONSTRAINT u_key UNIQUE(id); -DROP TABLE test_table_1; -END; --- There should be no test_table_1 shard on workers -\c - - - :worker_1_port -SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%'; - relname ---------- -(0 rows) - -\c - - - :master_port --- Test WITH options on a normal simple hash-distributed table -CREATE TABLE hash_dist(id bigint primary key, f1 text) WITH (fillfactor=40); -SELECT create_distributed_table('hash_dist','id'); - create_distributed_table --------------------------- - -(1 row) - --- verify that the storage options made it to the table definitions -SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist'; - relname | reloptions ------------+----------------- - hash_dist | {fillfactor=40} -(1 row) - -\c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist%' ORDER BY relname; - relname | reloptions -------------------+----------------- - hash_dist_220022 | {fillfactor=40} - hash_dist_220023 | {fillfactor=40} - hash_dist_220024 | {fillfactor=40} - hash_dist_220025 | {fillfactor=40} -(4 rows) - -\c - - - :master_port --- verify that we can set and reset index storage parameters -ALTER INDEX hash_dist_pkey SET(fillfactor=40); -SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; - relname | reloptions -----------------+----------------- - hash_dist_pkey | {fillfactor=40} -(1 row) - -\c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; - relname | reloptions ------------------------+----------------- - hash_dist_pkey_220022 | {fillfactor=40} - hash_dist_pkey_220023 | {fillfactor=40} - hash_dist_pkey_220024 | {fillfactor=40} - hash_dist_pkey_220025 | {fillfactor=40} -(4 rows) - -\c - - - 
:master_port -ALTER INDEX hash_dist_pkey RESET(fillfactor); -SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; - relname | reloptions -----------------+------------ - hash_dist_pkey | -(1 row) - -\c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; - relname | reloptions ------------------------+------------ - hash_dist_pkey_220022 | - hash_dist_pkey_220023 | - hash_dist_pkey_220024 | - hash_dist_pkey_220025 | -(4 rows) - -\c - - - :master_port --- verify error message on ALTER INDEX, SET TABLESPACE is unsupported -ALTER INDEX hash_dist_pkey SET TABLESPACE foo; -ERROR: alter index ... set tablespace ... is currently unsupported -DETAIL: Only RENAME TO, SET (), and RESET () are supported. --- verify that we can add indexes with new storage options -CREATE UNIQUE INDEX another_index ON hash_dist(id) WITH (fillfactor=50); --- show the index and its storage options on coordinator, then workers -SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index'; - relname | reloptions ----------------+----------------- - another_index | {fillfactor=50} -(1 row) - -\c - - - :worker_1_port -SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index%' ORDER BY relname; - relname | reloptions -----------------------+----------------- - another_index_220022 | {fillfactor=50} - another_index_220023 | {fillfactor=50} - another_index_220024 | {fillfactor=50} - another_index_220025 | {fillfactor=50} -(4 rows) - -\c - - - :master_port --- get rid of the index -DROP INDEX another_index; --- check if we fail properly when a column with un-supported constraint is added --- UNIQUE, PRIMARY KEY on non-distribution column is not supported --- CHECK, FOREIGN KEY, UNIQE, PRIMARY KEY cannot be added together with ADD COLUMN -SET citus.shard_replication_factor TO 1; -CREATE TABLE test_table_1(id int); -SELECT create_distributed_table('test_table_1', 'id'); - create_distributed_table --------------------------- - -(1 row) - -ALTER TABLE test_table_1 ADD COLUMN test_col int UNIQUE; -ERROR: cannot create constraint on "test_table_1" -DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). -ALTER TABLE test_table_1 ADD COLUMN test_col int PRIMARY KEY; -ERROR: cannot create constraint on "test_table_1" -DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). 
-ALTER TABLE test_table_1 ADD COLUMN test_col int CHECK (test_col > 3); -ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints -DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names -HINT: You can issue each command separately such as ALTER TABLE test_table_1 ADD COLUMN test_col data_type; ALTER TABLE test_table_1 ADD CONSTRAINT constraint_name CHECK (check_expression); -CREATE TABLE reference_table(i int UNIQUE); -SELECT create_reference_table('reference_table'); - create_reference_table ------------------------- - -(1 row) - -ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES reference_table(i) ON DELETE CASCADE; -ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints -DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names -HINT: You can issue each command separately such as ALTER TABLE test_table_1 ADD COLUMN test_col data_type; ALTER TABLE test_table_1 ADD CONSTRAINT constraint_name FOREIGN KEY (test_col) REFERENCES reference_table(i) ON DELETE CASCADE; -ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES reference_table(i) ON DELETE CASCADE ON UPDATE SET NULL; -ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints -DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names -HINT: You can issue each command separately such as ALTER TABLE test_table_1 ADD COLUMN test_col data_type; ALTER TABLE test_table_1 ADD CONSTRAINT constraint_name FOREIGN KEY (test_col) REFERENCES reference_table(i) ON DELETE CASCADE ON UPDATE SET NULL; -DROP TABLE reference_table; -CREATE TABLE referenced_table(i int UNIQUE); -SELECT create_distributed_table('referenced_table', 'i'); - create_distributed_table --------------------------- - -(1 row) - -ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES referenced_table(i); -ERROR: cannot create foreign key constraint -DETAIL: Foreign keys are supported in two cases, either in between two colocated tables including partition column in the same ordinal in the both tables or from distributed to reference tables -DROP TABLE referenced_table, test_table_1; diff --git a/src/test/regress/output/multi_multiuser_load_data.source b/src/test/regress/output/multi_multiuser_load_data.source index 90a77fb2b..f320d34a8 100644 --- a/src/test/regress/output/multi_multiuser_load_data.source +++ b/src/test/regress/output/multi_multiuser_load_data.source @@ -6,13 +6,6 @@ -- citusdb.shard_max_size. These values are manually set in pg_regress.c. We also set -- the shard placement policy to the local-node-first policy as other regression -- tests expect the placements to be in that order. 
-SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; - version_above_ten -------------------- - t -(1 row) - SET citusdb.shard_placement_policy TO 'local-node-first'; -- load as superuser \copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' diff --git a/src/test/regress/output/multi_multiuser_load_data_0.source b/src/test/regress/output/multi_multiuser_load_data_0.source deleted file mode 100644 index 7a4429d32..000000000 --- a/src/test/regress/output/multi_multiuser_load_data_0.source +++ /dev/null @@ -1,39 +0,0 @@ --- --- MULTI_MULTIUSER_LOAD_DATA --- --- Tests for loading data in a distributed cluster. Please note that the number --- of shards uploaded depends on two config values: citusdb.shard_replication_factor and --- citusdb.shard_max_size. These values are manually set in pg_regress.c. We also set --- the shard placement policy to the local-node-first policy as other regression --- tests expect the placements to be in that order. -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; - version_above_ten -------------------- - f -(1 row) - -SET citusdb.shard_placement_policy TO 'local-node-first'; --- load as superuser -\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' --- as user with ALL access -SET ROLE full_access; -\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -RESET ROLE; --- as user with SELECT access, should fail -SET ROLE read_access; -\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -ERROR: permission denied for relation lineitem -RESET ROLE; --- as user with no access, should fail -SET ROLE no_access; -\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -ERROR: permission denied for relation lineitem -RESET ROLE; -SET ROLE full_access; -\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' -\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' -\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|' -\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' From 85324f3accbfeee3375293c6bbf6c332778622c4 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Mon, 1 Aug 2022 15:22:02 +0300 Subject: [PATCH 30/38] Clean up multi_shard_commit_protocol guc leftovers (#6110) --- src/backend/distributed/commands/multi_copy.c | 6 +-- .../operations/modify_multiple_shards.c | 4 +- .../isolation_create_restore_point.out | 18 ------- ...olation_distributed_deadlock_detection.out | 51 ------------------- .../input/multi_alter_table_statements.source | 19 ++----- .../multi_alter_table_statements.source | 24 ++------- .../spec/isolation_create_restore_point.spec | 3 -- ...lation_distributed_deadlock_detection.spec | 15 +----- 8 files changed, 9 insertions(+), 131 deletions(-) diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 1061feb28..d795fddde 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -30,11 +30,7 @@ * By default, COPY uses normal transactions on the workers. 
In the case of * hash or range-partitioned tables, this can cause a problem when some of the * transactions fail to commit while others have succeeded. To ensure no data - * is lost, COPY can use two-phase commit, by increasing max_prepared_transactions - * on the worker and setting citus.multi_shard_commit_protocol to '2pc'. The default - * is '1pc'. This is not a problem for append-partitioned tables because new - * shards are created and in the case of failure, metadata changes are rolled - * back on the master node. + * is lost, COPY uses two-phase commit. * * Parsing options are processed and enforced on the node where copy command * is run, while constraints are enforced on the worker. In either case, diff --git a/src/backend/distributed/operations/modify_multiple_shards.c b/src/backend/distributed/operations/modify_multiple_shards.c index cb740dabb..ba87108aa 100644 --- a/src/backend/distributed/operations/modify_multiple_shards.c +++ b/src/backend/distributed/operations/modify_multiple_shards.c @@ -5,9 +5,7 @@ * * This file contains master_modify_multiple_shards function, which takes a update * or delete query and runs it worker shards of the distributed table. The distributed - * modify operation can be done within a distributed transaction and committed in - * one-phase or two-phase fashion, depending on the citus.multi_shard_commit_protocol - * setting. + * modify operation can be done within a distributed transaction. * * Copyright (c) Citus Data, Inc. * diff --git a/src/test/regress/expected/isolation_create_restore_point.out b/src/test/regress/expected/isolation_create_restore_point.out index bf71fa5ed..3b1bdf9eb 100644 --- a/src/test/regress/expected/isolation_create_restore_point.out +++ b/src/test/regress/expected/isolation_create_restore_point.out @@ -8,7 +8,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-create-distributed: CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text); @@ -40,7 +39,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-insert: INSERT INTO restore_table VALUES (1,'hello'); @@ -65,7 +63,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-modify-multiple: UPDATE restore_table SET data = 'world'; @@ -90,7 +87,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-ddl: ALTER TABLE restore_table ADD COLUMN x int; @@ -116,7 +112,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-copy: COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV; @@ -141,7 +136,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-recover: SELECT recover_prepared_transactions(); @@ -172,7 +166,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-drop: DROP TABLE restore_table; @@ -198,7 +191,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-add-node: SELECT 1 FROM master_add_inactive_node('localhost', 9999); @@ -229,7 +221,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-remove-node: SELECT master_remove_node('localhost', 9999); @@ -260,7 +251,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step 
s1-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test-2'); @@ -351,7 +341,6 @@ step s2-create-restore: (1 row) step s1-multi-statement: - SET citus.multi_shard_commit_protocol TO '2pc'; BEGIN; INSERT INTO restore_table VALUES (1,'hello'); INSERT INTO restore_table VALUES (2,'hello'); @@ -370,7 +359,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-create-reference: CREATE TABLE test_create_reference_table (test_id integer NOT NULL, data text); @@ -402,7 +390,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-insert-ref: INSERT INTO restore_ref_table VALUES (1,'hello'); @@ -428,7 +415,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-modify-multiple-ref: UPDATE restore_ref_table SET data = 'world'; @@ -454,7 +440,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-ddl-ref: ALTER TABLE restore_ref_table ADD COLUMN x int; @@ -480,7 +465,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-copy-ref: COPY restore_ref_table FROM PROGRAM 'echo 1,hello' WITH CSV; @@ -506,7 +490,6 @@ create_reference_table step s1-begin: BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; step s1-drop-ref: DROP TABLE restore_ref_table; @@ -592,7 +575,6 @@ step s2-create-restore: (1 row) step s1-multi-statement-ref: - SET citus.multi_shard_commit_protocol TO '2pc'; BEGIN; INSERT INTO restore_ref_table VALUES (1,'hello'); INSERT INTO restore_ref_table VALUES (2,'hello'); diff --git a/src/test/regress/expected/isolation_distributed_deadlock_detection.out b/src/test/regress/expected/isolation_distributed_deadlock_detection.out index 5e60e40be..087207bf4 100644 --- a/src/test/regress/expected/isolation_distributed_deadlock_detection.out +++ b/src/test/regress/expected/isolation_distributed_deadlock_detection.out @@ -90,57 +90,6 @@ step s2-commit: COMMIT; -starting permutation: s1-begin s2-begin s1-set-2pc s2-set-2pc s1-update-1 s2-update-2 s2-update-1 deadlock-checker-call s1-update-2 deadlock-checker-call s1-commit s2-commit -step s1-begin: - BEGIN; - -step s2-begin: - BEGIN; - -step s1-set-2pc: - set citus.multi_shard_commit_protocol TO '2pc'; - -step s2-set-2pc: - set citus.multi_shard_commit_protocol TO '2pc'; - -step s1-update-1: - UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; - -step s2-update-2: - UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; - -step s2-update-1: - UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; - -step deadlock-checker-call: - SELECT check_distributed_deadlocks(); - -check_distributed_deadlocks ---------------------------------------------------------------------- -f -(1 row) - -step s1-update-2: - UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; - -step deadlock-checker-call: - SELECT check_distributed_deadlocks(); - -check_distributed_deadlocks ---------------------------------------------------------------------- -t -(1 row) - -step s2-update-1: <... completed> -ERROR: canceling the transaction since it was involved in a distributed deadlock -step s1-update-2: <... 
completed> -step s1-commit: - COMMIT; - -step s2-commit: - COMMIT; - - starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s1-update-2 deadlock-checker-call s2-upsert-select-all deadlock-checker-call s1-commit s2-commit step s1-begin: BEGIN; diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index 2e9883175..eb19c6692 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -305,19 +305,7 @@ CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_ta \c - - - :master_port -- The above trigger will cause failure at transaction end on one placement. --- We'll test 2PC first, as it should handle this "best" (no divergence) -SET citus.multi_shard_commit_protocol TO '2pc'; -BEGIN; -CREATE INDEX single_index_2 ON single_shard_items(id); -CREATE INDEX single_index_3 ON single_shard_items(name); -COMMIT; - --- Nothing from the block should have committed -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; - --- Even if 1PC is picked for multi-shard commands --- Citus always uses 2PC for replication > 1 -SET citus.multi_shard_commit_protocol TO '1pc'; +-- Citus always uses 2PC. 2PC should handle this "best" (no divergence) BEGIN; CREATE INDEX single_index_2 ON single_shard_items(id); CREATE INDEX single_index_3 ON single_shard_items(name); @@ -346,13 +334,12 @@ COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; DROP INDEX temp_index_2; ---- verify that distributed ddl commands can be used with 2pc -SET citus.multi_shard_commit_protocol TO '2pc'; +-- verify that distributed ddl commands are allowed without transaction block as well +-- Reminder: Now Citus always uses 2PC CREATE INDEX temp_index_3 ON lineitem_alter(l_orderkey); SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; DROP INDEX temp_index_3; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; -RESET citus.multi_shard_commit_protocol; -- verify that not any of shard placements are marked as failed when a query failure occurs CREATE TABLE test_ab (a int, b int); diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index a5bdee20e..99d4a28cd 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -652,24 +652,7 @@ RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag(); \c - - - :master_port -- The above trigger will cause failure at transaction end on one placement. --- We'll test 2PC first, as it should handle this "best" (no divergence) -SET citus.multi_shard_commit_protocol TO '2pc'; -BEGIN; -CREATE INDEX single_index_2 ON single_shard_items(id); -CREATE INDEX single_index_3 ON single_shard_items(name); -COMMIT; -ERROR: duplicate key value violates unique constraint "ddl_commands_command_key" -DETAIL: Key (command)=(CREATE INDEX) already exists. 
-CONTEXT: while executing command on localhost:57638 --- Nothing from the block should have committed -SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; - indexname | tablename ------------+----------- -(0 rows) - --- Even if 1PC is picked for multi-shard commands --- Citus always uses 2PC for replication > 1 -SET citus.multi_shard_commit_protocol TO '1pc'; +-- Citus always uses 2PC. 2PC should handle this "best" (no divergence) BEGIN; CREATE INDEX single_index_2 ON single_shard_items(id); CREATE INDEX single_index_3 ON single_shard_items(name); @@ -715,8 +698,8 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; (1 row) DROP INDEX temp_index_2; ---- verify that distributed ddl commands can be used with 2pc -SET citus.multi_shard_commit_protocol TO '2pc'; +-- verify that distributed ddl commands are allowed without transaction block as well +-- Reminder: Now Citus always uses 2PC CREATE INDEX temp_index_3 ON lineitem_alter(l_orderkey); SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename @@ -730,7 +713,6 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; -----------+----------- (0 rows) -RESET citus.multi_shard_commit_protocol; -- verify that not any of shard placements are marked as failed when a query failure occurs CREATE TABLE test_ab (a int, b int); SET citus.shard_count TO 8; diff --git a/src/test/regress/spec/isolation_create_restore_point.spec b/src/test/regress/spec/isolation_create_restore_point.spec index fbf18879f..2cdc66f85 100644 --- a/src/test/regress/spec/isolation_create_restore_point.spec +++ b/src/test/regress/spec/isolation_create_restore_point.spec @@ -17,7 +17,6 @@ session "s1" step "s1-begin" { BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; } step "s1-create-reference" @@ -54,7 +53,6 @@ step "s1-modify-multiple-ref" step "s1-multi-statement-ref" { - SET citus.multi_shard_commit_protocol TO '2pc'; BEGIN; INSERT INTO restore_ref_table VALUES (1,'hello'); INSERT INTO restore_ref_table VALUES (2,'hello'); @@ -63,7 +61,6 @@ step "s1-multi-statement-ref" step "s1-multi-statement" { - SET citus.multi_shard_commit_protocol TO '2pc'; BEGIN; INSERT INTO restore_table VALUES (1,'hello'); INSERT INTO restore_table VALUES (2,'hello'); diff --git a/src/test/regress/spec/isolation_distributed_deadlock_detection.spec b/src/test/regress/spec/isolation_distributed_deadlock_detection.spec index 27e18ea1c..19526453c 100644 --- a/src/test/regress/spec/isolation_distributed_deadlock_detection.spec +++ b/src/test/regress/spec/isolation_distributed_deadlock_detection.spec @@ -63,11 +63,6 @@ step "s1-insert-local-10" INSERT INTO local_deadlock_table VALUES (10, 10); } -step "s1-set-2pc" -{ - set citus.multi_shard_commit_protocol TO '2pc'; -} - step "s1-update-1-rep-2" { UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; @@ -145,11 +140,6 @@ step "s2-insert-local-10" INSERT INTO local_deadlock_table VALUES (10, 10); } -step "s2-set-2pc" -{ - set citus.multi_shard_commit_protocol TO '2pc'; -} - step "s2-update-1-rep-2" { UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; @@ -315,15 +305,12 @@ step "deadlock-checker-call" SELECT check_distributed_deadlocks(); } -// simplest case, loop with two nodes +// simplest case, loop with two nodes (Reminder: Citus uses 2PC) permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" 
"deadlock-checker-call" "s1-commit" "s2-commit" // simplest case with replication factor 2 permutation "s1-begin" "s2-begin" "s1-update-1-rep-2" "s2-update-2-rep-2" "s2-update-1-rep-2" "deadlock-checker-call" "s1-update-2-rep-2" "deadlock-checker-call" "s1-commit" "s2-commit" -// simplest case with 2pc enabled -permutation "s1-begin" "s2-begin" "s1-set-2pc" "s2-set-2pc" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" "deadlock-checker-call" "s1-commit" "s2-commit" - // simplest case with multi-shard query is cancelled permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s1-update-2" "deadlock-checker-call" "s2-upsert-select-all" "deadlock-checker-call" "s1-commit" "s2-commit" From 5060d0ab17d17ec6236f2e5cb04322a923290b29 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Mon, 1 Aug 2022 15:38:19 +0300 Subject: [PATCH 31/38] Remove leftover PG version_above_11 checks from tests (#6112) --- .../regress/expected/multi_drop_extension.out | 4 --- ...licate_reference_tables_to_coordinator.out | 4 --- .../expected/undistribute_table_cascade.out | 26 ++----------------- .../undistribute_table_cascade_mx.out | 16 +++++------- src/test/regress/sql/multi_drop_extension.sql | 4 --- ...licate_reference_tables_to_coordinator.sql | 4 --- .../sql/undistribute_table_cascade.sql | 26 ++----------------- .../sql/undistribute_table_cascade_mx.sql | 11 +------- 8 files changed, 11 insertions(+), 84 deletions(-) diff --git a/src/test/regress/expected/multi_drop_extension.out b/src/test/regress/expected/multi_drop_extension.out index 2b2175367..426c011f7 100644 --- a/src/test/regress/expected/multi_drop_extension.out +++ b/src/test/regress/expected/multi_drop_extension.out @@ -60,14 +60,10 @@ SELECT create_reference_table('ref'); CREATE INDEX CONCURRENTLY ref_concurrent_idx_x ON ref(x); CREATE INDEX CONCURRENTLY ref_concurrent_idx_y ON ref(x); -SELECT substring(current_Setting('server_version'), '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven REINDEX INDEX CONCURRENTLY ref_concurrent_idx_x; REINDEX INDEX CONCURRENTLY ref_concurrent_idx_y; REINDEX TABLE CONCURRENTLY ref; REINDEX SCHEMA CONCURRENTLY test_schema; -\endif SET search_path TO public; \set VERBOSITY TERSE DROP SCHEMA test_schema CASCADE; diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out index a23b44ffa..5debf1507 100644 --- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out @@ -19,11 +19,7 @@ SELECT create_reference_table('squares'); INSERT INTO squares SELECT i, i * i FROM generate_series(1, 10) i; NOTICE: executing the copy locally for shard xxxxx CREATE INDEX CONCURRENTLY squares_a_idx ON squares (a); -SELECT substring(current_Setting('server_version'), '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven REINDEX INDEX CONCURRENTLY squares_a_idx; -\endif DROP INDEX CONCURRENTLY squares_a_idx; -- should be executed locally SELECT count(*) FROM squares; diff --git a/src/test/regress/expected/undistribute_table_cascade.out b/src/test/regress/expected/undistribute_table_cascade.out index d40f41fe4..8adee0c3e 100644 --- a/src/test/regress/expected/undistribute_table_cascade.out +++ b/src/test/regress/expected/undistribute_table_cascade.out @@ -347,18 +347,7 @@ ERROR: relation 
"non_existent_table" does not exist at character 27 CREATE TABLE local_table(a int); SELECT undistribute_table('local_table', cascade_via_foreign_keys=>true); ERROR: cannot undistribute table because the table is not distributed --- as pg < 12 doesn't support foreign keys between partitioned tables, --- define below foreign key conditionally instead of adding another --- test output -DO $proc$ -BEGIN -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE - $$ - ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_15 FOREIGN KEY (col_1) REFERENCES partitioned_table_1(col_1); - $$; -END IF; -END$proc$; +ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_15 FOREIGN KEY (col_1) REFERENCES partitioned_table_1(col_1); BEGIN; SELECT undistribute_table('partitioned_table_1', cascade_via_foreign_keys=>true); undistribute_table @@ -481,18 +470,7 @@ BEGIN; SELECT undistribute_table('partitioned_table_1', cascade_via_foreign_keys=>true); ERROR: cannot cascade operation via foreign keys as partition table undistribute_table_cascade.partitioned_table_2_100_200 involved in a foreign key relationship that is not inherited from it's parent table ROLLBACK; --- as pg < 12 doesn't support foreign keys between partitioned tables, --- define below foreign key conditionally instead of adding another --- test output -DO $proc$ -BEGIN -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE - $$ - ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_13 FOREIGN KEY (col_1) REFERENCES partitioned_table_2(col_1); - $$; -END IF; -END$proc$; +ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_13 FOREIGN KEY (col_1) REFERENCES partitioned_table_2(col_1); BEGIN; -- For pg versions 11, 12 & 13, partitioned_table_1 references to reference_table_3 -- and partitioned_table_2 references to reference_table_3. 
diff --git a/src/test/regress/expected/undistribute_table_cascade_mx.out b/src/test/regress/expected/undistribute_table_cascade_mx.out index 90318700f..89ab08882 100644 --- a/src/test/regress/expected/undistribute_table_cascade_mx.out +++ b/src/test/regress/expected/undistribute_table_cascade_mx.out @@ -185,16 +185,12 @@ SELECT alter_distributed_table ('users', shard_count=>10); -- first drop the column that has a foreign key since -- alter_table_set_access_method doesn't support foreign keys ALTER TABLE users DROP country_id; --- set access method to columnar if pg version > 11 -DO $proc$ -BEGIN -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE - $$ - SELECT alter_table_set_access_method('users', 'columnar'); - $$; -END IF; -END$proc$; +SELECT alter_table_set_access_method('users', 'columnar'); + alter_table_set_access_method +--------------------------------------------------------------------- + +(1 row) + SELECT COUNT(*) FROM pg_class s JOIN pg_depend d ON d.objid=s.oid AND d.classid='pg_class'::regclass AND d.refclassid='pg_class'::regclass diff --git a/src/test/regress/sql/multi_drop_extension.sql b/src/test/regress/sql/multi_drop_extension.sql index 8fd1daf27..224aa630e 100644 --- a/src/test/regress/sql/multi_drop_extension.sql +++ b/src/test/regress/sql/multi_drop_extension.sql @@ -52,14 +52,10 @@ SELECT create_reference_table('ref'); CREATE INDEX CONCURRENTLY ref_concurrent_idx_x ON ref(x); CREATE INDEX CONCURRENTLY ref_concurrent_idx_y ON ref(x); -SELECT substring(current_Setting('server_version'), '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven REINDEX INDEX CONCURRENTLY ref_concurrent_idx_x; REINDEX INDEX CONCURRENTLY ref_concurrent_idx_y; REINDEX TABLE CONCURRENTLY ref; REINDEX SCHEMA CONCURRENTLY test_schema; -\endif SET search_path TO public; \set VERBOSITY TERSE diff --git a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql index a3a5c94c8..78eb8c9c7 100644 --- a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql +++ b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql @@ -17,11 +17,7 @@ SELECT create_reference_table('squares'); INSERT INTO squares SELECT i, i * i FROM generate_series(1, 10) i; CREATE INDEX CONCURRENTLY squares_a_idx ON squares (a); -SELECT substring(current_Setting('server_version'), '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven REINDEX INDEX CONCURRENTLY squares_a_idx; -\endif DROP INDEX CONCURRENTLY squares_a_idx; -- should be executed locally diff --git a/src/test/regress/sql/undistribute_table_cascade.sql b/src/test/regress/sql/undistribute_table_cascade.sql index 478df6188..65437c5ac 100644 --- a/src/test/regress/sql/undistribute_table_cascade.sql +++ b/src/test/regress/sql/undistribute_table_cascade.sql @@ -200,18 +200,7 @@ SELECT undistribute_table('non_existent_table', cascade_via_foreign_keys=>true); CREATE TABLE local_table(a int); SELECT undistribute_table('local_table', cascade_via_foreign_keys=>true); --- as pg < 12 doesn't support foreign keys between partitioned tables, --- define below foreign key conditionally instead of adding another --- test output -DO $proc$ -BEGIN -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE - $$ - ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_15 FOREIGN KEY (col_1) REFERENCES partitioned_table_1(col_1); - $$; -END IF; -END$proc$; +ALTER TABLE 
partitioned_table_1 ADD CONSTRAINT fkey_15 FOREIGN KEY (col_1) REFERENCES partitioned_table_1(col_1); BEGIN; SELECT undistribute_table('partitioned_table_1', cascade_via_foreign_keys=>true); @@ -318,18 +307,7 @@ BEGIN; SELECT undistribute_table('partitioned_table_1', cascade_via_foreign_keys=>true); ROLLBACK; --- as pg < 12 doesn't support foreign keys between partitioned tables, --- define below foreign key conditionally instead of adding another --- test output -DO $proc$ -BEGIN -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE - $$ - ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_13 FOREIGN KEY (col_1) REFERENCES partitioned_table_2(col_1); - $$; -END IF; -END$proc$; +ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_13 FOREIGN KEY (col_1) REFERENCES partitioned_table_2(col_1); BEGIN; -- For pg versions 11, 12 & 13, partitioned_table_1 references to reference_table_3 diff --git a/src/test/regress/sql/undistribute_table_cascade_mx.sql b/src/test/regress/sql/undistribute_table_cascade_mx.sql index 1141fe139..cf4be2fd6 100644 --- a/src/test/regress/sql/undistribute_table_cascade_mx.sql +++ b/src/test/regress/sql/undistribute_table_cascade_mx.sql @@ -112,16 +112,7 @@ SELECT alter_distributed_table ('users', shard_count=>10); -- alter_table_set_access_method doesn't support foreign keys ALTER TABLE users DROP country_id; --- set access method to columnar if pg version > 11 -DO $proc$ -BEGIN -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE - $$ - SELECT alter_table_set_access_method('users', 'columnar'); - $$; -END IF; -END$proc$; +SELECT alter_table_set_access_method('users', 'columnar'); SELECT COUNT(*) FROM pg_class s From abffa6c3b930a03214d59f705fa581b4734c81b6 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Mon, 1 Aug 2022 19:10:36 +0200 Subject: [PATCH 32/38] Use shard split copy code for blocking shard moves (#6098) The new shard copy code that was created for shard splits has some advantages over the old shard copy code. The old code was using worker_append_table_to_shard, which wrote to disk twice. And it also didn't use binary copy when that was possible. Both of these issues were fixed in the new copy code. This PR starts using this new copy logic also for shard moves, not just for shard splits. On my local machine I created a single shard table like this. ```sql set citus.shard_count = 1; create table t(id bigint, a bigint); select create_distributed_table('t', 'id'); INSERT into t(id, a) SELECT i, i from generate_series(1, 100000000) i; ``` I then turned `fsync` off to make sure I wasn't bottlenecked by disk. Finally I moved this shard between nodes with `citus_move_shard_placement` with `block_writes`. Before this PR a move took ~127s, after this PR it took only ~38s. So for this small test this resulted in spending ~70% less time. 
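For reference, the move itself can be triggered with a command like the sketch below (the shard id and node ports are hypothetical placeholders, not the exact benchmark script; real values come from pg_dist_shard and pg_dist_node on your cluster):

```sql
-- Find the shard backing the single-shard table t (the id differs per cluster).
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 't'::regclass;

-- Move that shard between two workers using the blocking transfer mode,
-- which now goes through the new shard copy logic.
SELECT citus_move_shard_placement(
    102008,             -- hypothetical shard id from the query above
    'localhost', 9701,  -- source node (placeholder port)
    'localhost', 9702,  -- target node (placeholder port)
    shard_transfer_mode => 'block_writes');
```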
And I also tried the same test for a table that contained large strings: ```sql set citus.shard_count = 1; create table t(id bigint, a bigint, content text); select create_distributed_table('t', 'id'); INSERT into t(id, a, content) SELECT i, i, 'aunethautnehoautnheaotnuhetnohueoutnehotnuhetncouhaeohuaeochgrhgd.athbetndairgexdbuhaobulrhdbaetoausnetohuracehousncaoehuesousnaceohuenacouhancoexdaseohusnaetobuetnoduhasneouhaceohusnaoetcuhmsnaetohuacoeuhebtokteaoshetouhsanetouhaoug.lcuahesonuthaseauhcoerhuaoecuh.lg;rcydabsnetabuesabhenth' from generate_series(1, 20000000) i; ``` --- .../distributed/operations/repair_shards.c | 64 +++++++- .../worker_copy_table_to_node_udf.c | 65 ++++++++ .../distributed/sql/citus--11.0-3--11.1-1.sql | 1 + .../sql/downgrades/citus--11.1-1--11.0-3.sql | 4 + .../udfs/worker_copy_table_to_node/11.1-1.sql | 8 + .../udfs/worker_copy_table_to_node/latest.sql | 8 + .../distributed/utils/reference_table_utils.c | 9 ++ .../failure_offline_move_shard_placement.out | 28 +--- src/test/regress/expected/multi_extension.out | 3 +- .../regress/expected/shard_rebalancer.out | 142 +++++++++--------- src/test/regress/expected/tableam.out | 65 ++++---- .../expected/upgrade_list_citus_objects.out | 3 +- .../expected/worker_copy_table_to_node.out | 81 ++++++++++ src/test/regress/multi_schedule | 3 +- src/test/regress/operations_schedule | 1 + .../failure_offline_move_shard_placement.sql | 16 +- src/test/regress/sql/shard_rebalancer.sql | 107 ++++--------- src/test/regress/sql/tableam.sql | 17 ++- .../regress/sql/worker_copy_table_to_node.sql | 49 ++++++ 19 files changed, 442 insertions(+), 232 deletions(-) create mode 100644 src/backend/distributed/operations/worker_copy_table_to_node_udf.c create mode 100644 src/backend/distributed/sql/udfs/worker_copy_table_to_node/11.1-1.sql create mode 100644 src/backend/distributed/sql/udfs/worker_copy_table_to_node/latest.sql create mode 100644 src/test/regress/expected/worker_copy_table_to_node.out create mode 100644 src/test/regress/sql/worker_copy_table_to_node.sql diff --git a/src/backend/distributed/operations/repair_shards.c b/src/backend/distributed/operations/repair_shards.c index 26928fd3a..6f5443ba3 100644 --- a/src/backend/distributed/operations/repair_shards.c +++ b/src/backend/distributed/operations/repair_shards.c @@ -20,6 +20,7 @@ #include "access/htup_details.h" #include "catalog/pg_class.h" #include "catalog/pg_enum.h" +#include "distributed/adaptive_executor.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" @@ -38,6 +39,7 @@ #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/shard_rebalancer.h" +#include "distributed/shard_split.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" @@ -129,6 +131,7 @@ static List * PostLoadShardCreationCommandList(ShardInterval *shardInterval, int32 sourceNodePort); static ShardCommandList * CreateShardCommandList(ShardInterval *shardInterval, List *ddlCommandList); +static char * CreateShardCopyCommand(ShardInterval *shard, WorkerNode *targetNode); /* declarations for dynamic loading */ @@ -1180,6 +1183,9 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, ALLOCSET_DEFAULT_SIZES); MemoryContext oldContext = MemoryContextSwitchTo(localContext); + WorkerNode *sourceNode = FindWorkerNode(sourceNodeName, sourceNodePort); + WorkerNode *targetNode = FindWorkerNode(targetNodeName, 
targetNodePort); + /* iterate through the colocated shards and copy each */ ShardInterval *shardInterval = NULL; foreach_ptr(shardInterval, shardIntervalList) @@ -1199,9 +1205,12 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, char *tableOwner = TableOwner(shardInterval->relationId); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, ddlCommandList); + } - ddlCommandList = NIL; - + int taskId = 0; + List *copyTaskList = NIL; + foreach_ptr(shardInterval, shardIntervalList) + { /* * Skip copying data for partitioned tables, because they contain no * data themselves. Their partitions do contain data, but those are @@ -1209,13 +1218,35 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, */ if (!PartitionedTable(shardInterval->relationId)) { - ddlCommandList = CopyShardContentsCommandList(shardInterval, sourceNodeName, - sourceNodePort); + char *copyCommand = CreateShardCopyCommand( + shardInterval, targetNode); + + Task *copyTask = CreateBasicTask( + INVALID_JOB_ID, + taskId, + READ_TASK, + copyCommand); + + ShardPlacement *taskPlacement = CitusMakeNode(ShardPlacement); + SetPlacementNodeMetadata(taskPlacement, sourceNode); + + copyTask->taskPlacementList = list_make1(taskPlacement); + + copyTaskList = lappend(copyTaskList, copyTask); + taskId++; } - ddlCommandList = list_concat( - ddlCommandList, + } + + ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, copyTaskList, + MaxAdaptiveExecutorPoolSize, + NULL /* jobIdList (ignored by API implementation) */); + + foreach_ptr(shardInterval, shardIntervalList) + { + List *ddlCommandList = PostLoadShardCreationCommandList(shardInterval, sourceNodeName, - sourceNodePort)); + sourceNodePort); + char *tableOwner = TableOwner(shardInterval->relationId); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, ddlCommandList); @@ -1278,6 +1309,25 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, } +/* + * CreateShardCopyCommand constructs the command to copy a shard to another + * worker node. This command needs to be run on the node where you want to copy + * the shard from. + */ +static char * +CreateShardCopyCommand(ShardInterval *shard, + WorkerNode *targetNode) +{ + char *shardName = ConstructQualifiedShardName(shard); + StringInfo query = makeStringInfo(); + appendStringInfo(query, + "SELECT pg_catalog.worker_copy_table_to_node(%s::regclass, %u);", + quote_literal_cstr(shardName), + targetNode->nodeId); + return query->data; +} + + /* + * CopyPartitionShardsCommandList gets a shardInterval which is a shard that + * belongs to partitioned table (this is asserted). diff --git a/src/backend/distributed/operations/worker_copy_table_to_node_udf.c b/src/backend/distributed/operations/worker_copy_table_to_node_udf.c new file mode 100644 index 000000000..46391160c --- /dev/null +++ b/src/backend/distributed/operations/worker_copy_table_to_node_udf.c @@ -0,0 +1,65 @@ +/*------------------------------------------------------------------------- + * + * worker_copy_table_to_node_udf.c + * + * This file implements the worker_copy_table_to_node UDF. This UDF can be + * used to copy the data in a shard (or other table) from one worker node to + * another. + * + * Copyright (c) Citus Data, Inc.
+ * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_executor.h" +#include "distributed/worker_shard_copy.h" + +PG_FUNCTION_INFO_V1(worker_copy_table_to_node); + +/* + * worker_copy_table_to_node copies a shard from this worker to another worker + * + * SQL signature: + * + * worker_copy_table_to_node( + * source_table regclass, + * target_node_id integer + * ) RETURNS VOID + */ +Datum +worker_copy_table_to_node(PG_FUNCTION_ARGS) +{ + Oid relationId = PG_GETARG_OID(0); + uint32_t targetNodeId = PG_GETARG_INT32(1); + + Oid schemaOid = get_rel_namespace(relationId); + char *relationSchemaName = get_namespace_name(schemaOid); + char *relationName = get_rel_name(relationId); + char *relationQualifiedName = quote_qualified_identifier( + relationSchemaName, + relationName); + + EState *executor = CreateExecutorState(); + DestReceiver *destReceiver = CreateShardCopyDestReceiver( + executor, + list_make2(relationSchemaName, relationName), + targetNodeId); + + StringInfo selectShardQueryForCopy = makeStringInfo(); + appendStringInfo(selectShardQueryForCopy, + "SELECT * FROM %s;", relationQualifiedName); + + ParamListInfo params = NULL; + ExecuteQueryStringIntoDestReceiver(selectShardQueryForCopy->data, params, + destReceiver); + + FreeExecutorState(executor); + + PG_RETURN_VOID(); +} diff --git a/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql b/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql index f8b956378..e71f9362b 100644 --- a/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql +++ b/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql @@ -69,3 +69,4 @@ DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_ #include "udfs/get_all_active_transactions/11.1-1.sql" #include "udfs/citus_split_shard_by_split_points/11.1-1.sql" #include "udfs/worker_split_copy/11.1-1.sql" +#include "udfs/worker_copy_table_to_node/11.1-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql index 26430a9f6..7261a31db 100644 --- a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql @@ -73,6 +73,10 @@ DROP FUNCTION pg_catalog.worker_split_copy( splitCopyInfos pg_catalog.split_copy_info[]); DROP TYPE pg_catalog.split_copy_info; +DROP FUNCTION pg_catalog.worker_copy_table_to_node( + source_table regclass, + target_node_id integer); + DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8); diff --git a/src/backend/distributed/sql/udfs/worker_copy_table_to_node/11.1-1.sql b/src/backend/distributed/sql/udfs/worker_copy_table_to_node/11.1-1.sql new file mode 100644 index 000000000..ebe093dee --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_copy_table_to_node/11.1-1.sql @@ -0,0 +1,8 @@ +CREATE OR REPLACE FUNCTION pg_catalog.worker_copy_table_to_node( + source_table regclass, + target_node_id integer) +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$worker_copy_table_to_node$$; +COMMENT ON FUNCTION pg_catalog.worker_copy_table_to_node(regclass, integer) + IS 'Perform copy of a shard'; diff 
--git a/src/backend/distributed/sql/udfs/worker_copy_table_to_node/latest.sql b/src/backend/distributed/sql/udfs/worker_copy_table_to_node/latest.sql new file mode 100644 index 000000000..ebe093dee --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_copy_table_to_node/latest.sql @@ -0,0 +1,8 @@ +CREATE OR REPLACE FUNCTION pg_catalog.worker_copy_table_to_node( + source_table regclass, + target_node_id integer) +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$worker_copy_table_to_node$$; +COMMENT ON FUNCTION pg_catalog.worker_copy_table_to_node(regclass, integer) + IS 'Perform copy of a shard'; diff --git a/src/backend/distributed/utils/reference_table_utils.c b/src/backend/distributed/utils/reference_table_utils.c index e271d0ceb..e0cab96d6 100644 --- a/src/backend/distributed/utils/reference_table_utils.c +++ b/src/backend/distributed/utils/reference_table_utils.c @@ -207,6 +207,15 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) CopyShardPlacementToWorkerNodeQuery(sourceShardPlacement, newWorkerNode, transferMode); + + /* + * The placement copy command uses distributed execution to copy + * the shard, which is only allowed for backends that identify + * themselves as rebalancer backends via application_name. + */ + ExecuteCriticalRemoteCommand(connection, + "SET LOCAL application_name TO " + CITUS_REBALANCER_NAME); ExecuteCriticalRemoteCommand(connection, placementCopyCommand->data); RemoteTransactionCommit(connection); } diff --git a/src/test/regress/expected/failure_offline_move_shard_placement.out b/src/test/regress/expected/failure_offline_move_shard_placement.out index a6ecee18e..bdd45449b 100644 --- a/src/test/regress/expected/failure_offline_move_shard_placement.out +++ b/src/test/regress/expected/failure_offline_move_shard_placement.out @@ -91,8 +91,8 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard_offline.t").
SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); ERROR: canceling statement due to user request --- failure on blocking append_table_to_shard operation on target node -SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").kill()'); +-- failure on blocking COPY operation on target node +SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); mitmproxy --------------------------------------------------------------------- @@ -101,8 +101,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").kill( SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx --- cancellation on blocking append_table_to_shard operation on target node -SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").cancel(' || :pid || ')'); +while executing command on localhost:xxxxx +-- cancellation on blocking COPY operation on target node +SELECT citus.mitmproxy('conn.onQuery(query="COPY").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -129,25 +130,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT").cancel(' || :pid || SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); ERROR: canceling statement due to user request --- failure on CopyData operation on source node -SELECT citus.mitmproxy('conn.onCopyData().kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT master_move_shard_placement(200, 'localhost', :worker_2_proxy_port, 'localhost', :worker_1_port, 'block_writes'); -ERROR: could not copy table "t_200" from "localhost:xxxxx" -CONTEXT: while executing command on localhost:xxxxx --- cancellation on CopyData operation on source node -SELECT citus.mitmproxy('conn.onCopyData().cancel(' || :pid || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT master_move_shard_placement(200, 'localhost', :worker_2_proxy_port, 'localhost', :worker_1_port, 'block_writes'); -ERROR: canceling statement due to user request CALL citus_cleanup_orphaned_shards(); -- Verify that the shard is not moved and the number of rows are still 100k SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index c2624894c..0067bdbad 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1097,10 +1097,11 @@ SELECT * FROM multi_extension.print_extension_changes(); table columnar.stripe | | function citus_locks() SETOF record | function citus_split_shard_by_split_points(bigint,text[],integer[],citus.shard_transfer_mode) void + | function worker_copy_table_to_node(regclass,integer) void | function worker_split_copy(bigint,split_copy_info[]) void | type split_copy_info | view citus_locks -(26 rows) +(27 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index b9d928acb..aab94dad1 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -416,18 +416,31 @@ 
SELECT unnest(shard_placement_replication_array( 2 )); ERROR: could not find a target for shard xxxxx +SET client_min_messages TO WARNING; +set citus.shard_count = 4; +-- Create a distributed table with all shards on a single node, so that we can +-- use this as an under-replicated +SET citus.shard_replication_factor TO 1; +SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', false); + master_set_node_property +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE replication_test_table(int_column int); +SELECT create_distributed_table('replication_test_table', 'int_column'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +UPDATE pg_dist_partition SET repmodel = 'c' WHERE logicalrelid = 'replication_test_table'::regclass; +INSERT INTO replication_test_table SELECT * FROM generate_series(1, 100); -- Ensure that shard_replication_factor is 2 during replicate_table_shards -- and rebalance_table_shards tests SET citus.shard_replication_factor TO 2; --- Turn off NOTICE messages -SET client_min_messages TO WARNING; --- Create a single-row test data for shard rebalancer test shards -CREATE TABLE shard_rebalancer_test_data AS SELECT 1::int as int_column; --- Test replicate_table_shards, which will in turn test update_shard_placement --- in copy mode. -CREATE TABLE replication_test_table(int_column int); -SELECT master_create_distributed_table('replication_test_table', 'int_column', 'append'); - master_create_distributed_table +SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', true); + master_set_node_property --------------------------------------------------------------------- (1 row) @@ -438,37 +451,14 @@ CREATE VIEW replication_test_table_placements_per_node AS AND shardstate != 4 GROUP BY nodename, nodeport ORDER BY nodename, nodeport; -WARNING: "view replication_test_table_placements_per_node" has dependency to "table replication_test_table" that is not in Citus' metadata -DETAIL: "view replication_test_table_placements_per_node" will be created only locally -HINT: Distribute "table replication_test_table" first to distribute "view replication_test_table_placements_per_node" --- Create four shards with replication factor 2, and delete the placements --- with smaller port number to simulate under-replicated shards. 
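The replacement setup above is compact enough to be worth restating as a recipe for manufacturing an under-replicated table; a sketch under the same assumptions as the test, with a hypothetical table t:

    SET citus.shard_replication_factor TO 1;
    SELECT master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', false);
    CREATE TABLE t (a int);
    SELECT create_distributed_table('t', 'a');   -- every placement lands on worker 2
    -- mark the table coordinator-replicated so replicate_table_shards accepts it
    UPDATE pg_dist_partition SET repmodel = 'c' WHERE logicalrelid = 't'::regclass;
    SELECT master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', true);
    SET citus.shard_replication_factor TO 2;     -- t now has fewer placements than desired

Raising shard_replication_factor after creation is what leaves the table under-replicated, without deleting rows from pg_dist_shard_placement the way the old append-based test did.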
-SELECT count(master_create_empty_shard('replication_test_table')) - FROM generate_series(1, 4); - count ---------------------------------------------------------------------- - 4 -(1 row) - -DELETE FROM pg_dist_shard_placement WHERE placementid in ( - SELECT pg_dist_shard_placement.placementid - FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard - WHERE logicalrelid = 'replication_test_table'::regclass - AND (nodename, nodeport) = (SELECT nodename, nodeport FROM pg_dist_shard_placement - ORDER BY nodename, nodeport limit 1) -); --- Upload the test data to the shards -\COPY replication_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) -\COPY replication_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) -\COPY replication_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) -\COPY replication_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) --- Verify that there is one node with all placements SELECT * FROM replication_test_table_placements_per_node; count --------------------------------------------------------------------- 4 (1 row) +-- Test replicate_table_shards, which will in turn test update_shard_placement +-- in copy mode. -- Check excluded_shard_list by excluding three shards with smaller ids SELECT replicate_table_shards('replication_test_table', excluded_shard_list := excluded_shard_list, @@ -540,19 +530,22 @@ SELECT * FROM replication_test_table_placements_per_node; SELECT count(*) FROM replication_test_table; count --------------------------------------------------------------------- - 4 + 100 (1 row) DROP TABLE public.replication_test_table CASCADE; -- Test rebalance_table_shards, which will in turn test update_shard_placement -- in move mode. 
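replicate_table_shards exposes a few knobs that the surrounding cases exercise one at a time; a consolidated sketch, with placeholder shard ids:

    -- skip specific shards entirely
    SELECT replicate_table_shards('replication_test_table',
                                  excluded_shard_list := '{123000,123001,123002}',
                                  shard_transfer_mode := 'block_writes');
    -- or cap how many new placements a single call may create
    SELECT replicate_table_shards('replication_test_table',
                                  max_shard_copies := 4,
                                  shard_transfer_mode := 'block_writes');

Both variants appear in this file and again in shard_rebalancer.sql further down; the shard ids above are placeholders, not values taken from the test.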
+SET citus.shard_replication_factor TO 1; +SET citus.shard_count TO 6; CREATE TABLE rebalance_test_table(int_column int); -SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append'); - master_create_distributed_table +SELECT create_distributed_table('rebalance_test_table', 'int_column'); + create_distributed_table --------------------------------------------------------------------- (1 row) +UPDATE pg_dist_partition SET repmodel = 'c' WHERE logicalrelid = 'rebalance_test_table'::regclass; CREATE VIEW table_placements_per_node AS SELECT nodeport, logicalrelid::regclass, count(*) FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard @@ -566,9 +559,6 @@ LANGUAGE SQL AS $$ SET citus.shard_replication_factor TO 1; - SELECT count(master_create_empty_shard(rel)) - FROM generate_series(1, 6); - SELECT count(master_move_shard_placement(shardid, src.nodename, src.nodeport::int, dst.nodename, dst.nodeport::int, @@ -582,12 +572,7 @@ $$; CALL create_unbalanced_shards('rebalance_test_table'); SET citus.shard_replication_factor TO 2; -- Upload the test data to the shards -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard xxxxx) +INSERT INTO rebalance_test_table SELECT * FROM generate_series(1, 100); -- Verify that there is one node with all placements SELECT * FROM table_placements_per_node; nodeport | logicalrelid | count @@ -772,7 +757,7 @@ SELECT * FROM table_placements_per_node; SELECT count(*) FROM rebalance_test_table; count --------------------------------------------------------------------- - 6 + 100 (1 row) DROP TABLE rebalance_test_table; @@ -863,21 +848,39 @@ INSERT INTO test_schema_support.imbalanced_table_local VALUES(4); CREATE TABLE test_schema_support.imbalanced_table ( id integer not null ); -SELECT master_create_distributed_table('test_schema_support.imbalanced_table', 'id', 'append'); - master_create_distributed_table +SET citus.shard_count = 3; +SET citus.shard_replication_factor TO 1; +SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', false); + master_set_node_property +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('test_schema_support.imbalanced_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO test_schema_support.imbalanced_table SELECT * FROM generate_series(1, 100); +UPDATE pg_dist_partition SET repmodel = 'c' WHERE logicalrelid = 'test_schema_support.imbalanced_table'::regclass; +SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', true); + master_set_node_property +--------------------------------------------------------------------- + +(1 row) + +SET citus.shard_count = 4; +-- copy one of the shards to the other node, this is to test that the +-- rebalancer takes into account all copies of a placement +SET citus.shard_replication_factor TO 2; +SELECT replicate_table_shards('test_schema_support.imbalanced_table', max_shard_copies 
:= 1, shard_transfer_mode := 'block_writes'); + replicate_table_shards --------------------------------------------------------------------- (1 row) SET citus.shard_replication_factor TO 1; -SELECT master_create_empty_shard('test_schema_support.imbalanced_table') AS shardid \gset -COPY test_schema_support.imbalanced_table FROM STDIN WITH (format 'csv', append_to_shard :shardid); -SET citus.shard_replication_factor TO 2; -SELECT master_create_empty_shard('test_schema_support.imbalanced_table') AS shardid \gset -COPY test_schema_support.imbalanced_table FROM STDIN WITH (format 'csv', append_to_shard :shardid); -SET citus.shard_replication_factor TO 1; -SELECT master_create_empty_shard('test_schema_support.imbalanced_table') AS shardid \gset -COPY test_schema_support.imbalanced_table FROM STDIN WITH (format 'csv', append_to_shard :shardid); -- imbalanced_table is now imbalanced -- Shard counts in each node before rebalance SELECT * FROM public.table_placements_per_node; @@ -891,7 +894,7 @@ SELECT * FROM public.table_placements_per_node; SELECT COUNT(*) FROM imbalanced_table; count --------------------------------------------------------------------- - 12 + 100 (1 row) -- Test rebalance operation @@ -915,13 +918,13 @@ SELECT * FROM public.table_placements_per_node; SELECT COUNT(*) FROM imbalanced_table; count --------------------------------------------------------------------- - 12 + 100 (1 row) -DROP TABLE public.shard_rebalancer_test_data; DROP TABLE test_schema_support.imbalanced_table; DROP TABLE test_schema_support.imbalanced_table_local; SET citus.shard_replication_factor TO 1; +SET citus.shard_count = 4; CREATE TABLE colocated_rebalance_test(id integer); CREATE TABLE colocated_rebalance_test2(id integer); SELECT create_distributed_table('colocated_rebalance_test', 'id'); @@ -1073,14 +1076,14 @@ CALL citus_cleanup_orphaned_shards(); select * from pg_dist_placement ORDER BY placementid; placementid | shardid | shardstate | shardlength | groupid --------------------------------------------------------------------- - 150 | 123023 | 1 | 0 | 14 - 153 | 123024 | 1 | 0 | 14 - 156 | 123027 | 1 | 0 | 14 - 157 | 123028 | 1 | 0 | 14 - 158 | 123021 | 1 | 0 | 16 - 159 | 123025 | 1 | 0 | 16 - 160 | 123022 | 1 | 0 | 16 - 161 | 123026 | 1 | 0 | 16 + 146 | 123023 | 1 | 0 | 14 + 149 | 123024 | 1 | 0 | 14 + 152 | 123027 | 1 | 0 | 14 + 153 | 123028 | 1 | 0 | 14 + 154 | 123021 | 1 | 0 | 16 + 155 | 123025 | 1 | 0 | 16 + 156 | 123022 | 1 | 0 | 16 + 157 | 123026 | 1 | 0 | 16 (8 rows) -- Move all shards to worker1 again @@ -2123,8 +2126,7 @@ SET citus.shard_replication_factor TO 2; SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shard_transfer_mode:='block_writes'); ERROR: Table 'dist_table_test_3' is streaming replicated. 
Shards of streaming replicated tables cannot be copied -- Mark table as coordinator replicated in order to be able to test replicate_table_shards -UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN - ('dist_table_test_3'::regclass); +UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid = 'dist_table_test_3'::regclass; SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shard_transfer_mode:='block_writes'); replicate_table_shards --------------------------------------------------------------------- diff --git a/src/test/regress/expected/tableam.out b/src/test/regress/expected/tableam.out index e211e2bf1..242cb5310 100644 --- a/src/test/regress/expected/tableam.out +++ b/src/test/regress/expected/tableam.out @@ -5,7 +5,7 @@ SET citus.shard_count TO 4; create schema test_tableam; set search_path to test_tableam; SELECT public.run_command_on_coordinator_and_workers($Q$ - SET citus.enable_ddl_propagation TO off; + SET citus.enable_ddl_propagation TO off; CREATE FUNCTION fake_am_handler(internal) RETURNS table_am_handler AS 'citus' @@ -26,7 +26,7 @@ ALTER EXTENSION citus ADD ACCESS METHOD fake_am; create table test_hash_dist(id int, val int) using fake_am; insert into test_hash_dist values (1, 1); WARNING: fake_tuple_insert -select create_distributed_table('test_hash_dist','id'); +select create_distributed_table('test_hash_dist','id', colocate_with := 'none'); WARNING: fake_scan_getnextslot CONTEXT: SQL statement "SELECT TRUE FROM test_tableam.test_hash_dist LIMIT 1" WARNING: fake_scan_getnextslot @@ -168,16 +168,20 @@ SELECT * FROM master_get_table_ddl_events('test_range_dist'); -- select a.shardid, a.nodeport FROM pg_dist_shard b, pg_dist_shard_placement a -WHERE a.shardid=b.shardid AND logicalrelid = 'test_range_dist'::regclass::oid +WHERE a.shardid=b.shardid AND logicalrelid = 'test_hash_dist'::regclass::oid ORDER BY a.shardid, nodeport; shardid | nodeport --------------------------------------------------------------------- - 60005 | 57637 - 60006 | 57638 -(2 rows) + 60000 | 57637 + 60001 | 57638 + 60002 | 57637 + 60003 | 57638 +(4 rows) +-- Change repmodel to allow master_copy_shard_placement +UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid = 'test_hash_dist'::regclass; SELECT master_copy_shard_placement( - get_shard_id_for_distribution_column('test_range_dist', '1'), + get_shard_id_for_distribution_column('test_hash_dist', '1'), 'localhost', :worker_1_port, 'localhost', :worker_2_port, do_repair := false, @@ -189,55 +193,42 @@ SELECT master_copy_shard_placement( select a.shardid, a.nodeport FROM pg_dist_shard b, pg_dist_shard_placement a -WHERE a.shardid=b.shardid AND logicalrelid = 'test_range_dist'::regclass::oid +WHERE a.shardid=b.shardid AND logicalrelid = 'test_hash_dist'::regclass::oid ORDER BY a.shardid, nodeport; shardid | nodeport --------------------------------------------------------------------- - 60005 | 57637 - 60005 | 57638 - 60006 | 57638 -(3 rows) + 60000 | 57637 + 60000 | 57638 + 60001 | 57638 + 60002 | 57637 + 60003 | 57638 +(5 rows) -- verify that data was copied correctly \c - - - :worker_1_port -select * from test_tableam.test_range_dist_60005 ORDER BY id; -WARNING: fake_scan_getnextslot -WARNING: fake_scan_getnextslot -WARNING: fake_scan_getnextslot -WARNING: fake_scan_getnextslot +select * from test_tableam.test_hash_dist_60000 ORDER BY id; WARNING: fake_scan_getnextslot WARNING: fake_scan_getnextslot WARNING: fake_scan_getnextslot id | val --------------------------------------------------------------------- 
- 0 | 0 1 | 1 - 1 | -1 - 2 | 4 - 3 | 9 - 7 | 9 -(6 rows) + 1 | 1 +(2 rows) \c - - - :worker_2_port -select * from test_tableam.test_range_dist_60005 ORDER BY id; -WARNING: fake_scan_getnextslot -WARNING: fake_scan_getnextslot -WARNING: fake_scan_getnextslot -WARNING: fake_scan_getnextslot +select * from test_tableam.test_hash_dist_60000 ORDER BY id; WARNING: fake_scan_getnextslot WARNING: fake_scan_getnextslot WARNING: fake_scan_getnextslot id | val --------------------------------------------------------------------- - 0 | 0 1 | 1 - 1 | -1 - 2 | 4 - 3 | 9 - 7 | 9 -(6 rows) + 1 | 1 +(2 rows) \c - - - :master_port +set search_path to test_tableam; -- -- Test that partitioned tables work correctly with a fake_am table -- @@ -254,15 +245,15 @@ SELECT create_distributed_table('test_partitioned', 'id'); NOTICE: Copying data from local table... NOTICE: copying the data has completed DETAIL: The local data in the table is no longer visible, but is still on disk. -HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test_partitioned_p1$$) +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_tableam.test_partitioned_p1$$) WARNING: fake_scan_getnextslot -CONTEXT: SQL statement "SELECT TRUE FROM public.test_partitioned_p2 LIMIT 1" +CONTEXT: SQL statement "SELECT TRUE FROM test_tableam.test_partitioned_p2 LIMIT 1" WARNING: fake_scan_getnextslot NOTICE: Copying data from local table... WARNING: fake_scan_getnextslot NOTICE: copying the data has completed DETAIL: The local data in the table is no longer visible, but is still on disk. -HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test_partitioned_p2$$) +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_tableam.test_partitioned_p2$$) create_distributed_table --------------------------------------------------------------------- diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 0271d4a77..ecf0e0e4d 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -206,6 +206,7 @@ ORDER BY 1; function worker_apply_shard_ddl_command(bigint,text) function worker_apply_shard_ddl_command(bigint,text,text) function worker_change_sequence_dependency(regclass,regclass,regclass) + function worker_copy_table_to_node(regclass,integer) function worker_create_or_alter_role(text,text,text) function worker_create_or_replace_object(text) function worker_create_or_replace_object(text[]) @@ -263,5 +264,5 @@ ORDER BY 1; view citus_stat_statements view pg_dist_shard_placement view time_partitions -(255 rows) +(256 rows) diff --git a/src/test/regress/expected/worker_copy_table_to_node.out b/src/test/regress/expected/worker_copy_table_to_node.out new file mode 100644 index 000000000..76f440189 --- /dev/null +++ b/src/test/regress/expected/worker_copy_table_to_node.out @@ -0,0 +1,81 @@ +CREATE SCHEMA worker_copy_table_to_node; +SET search_path TO worker_copy_table_to_node; +SET citus.shard_count TO 1; -- single shard table for ease of testing +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 62629600; +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +CREATE TABLE t(a int); +INSERT INTO t SELECT 
generate_series(1, 100); +CREATE TABLE ref(a int); +INSERT INTO ref SELECT generate_series(1, 100); +select create_distributed_table('t', 'a'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$worker_copy_table_to_node.t$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +select create_reference_table('ref'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$worker_copy_table_to_node.ref$$) + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +\c - - - :worker_2_port +SET search_path TO worker_copy_table_to_node; +-- Create empty shard on worker 2 too +CREATE TABLE t_62629600(a int); +\c - - - :worker_1_port +SET search_path TO worker_copy_table_to_node; +-- Make sure that the UDF doesn't work on Citus tables +SELECT worker_copy_table_to_node('t', :worker_1_node); +ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +SELECT worker_copy_table_to_node('ref', :worker_1_node); +ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +-- It should work on shards +SELECT worker_copy_table_to_node('t_62629600', :worker_1_node); + worker_copy_table_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) FROM t; + count +--------------------------------------------------------------------- + 200 +(1 row) + +SELECT count(*) FROM t_62629600; + count +--------------------------------------------------------------------- + 200 +(1 row) + +SELECT worker_copy_table_to_node('t_62629600', :worker_2_node); + worker_copy_table_to_node +--------------------------------------------------------------------- + +(1 row) + +\c - - - :worker_2_port +SET search_path TO worker_copy_table_to_node; +SELECT count(*) FROM t_62629600; + count +--------------------------------------------------------------------- + 200 +(1 row) + +\c - - - :master_port +SET search_path TO worker_copy_table_to_node; +SET client_min_messages TO WARNING; +DROP SCHEMA worker_copy_table_to_node CASCADE; diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index f9136008f..58cfc87c8 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -58,7 +58,8 @@ test: cte_inline recursive_view_local_table values sequences_with_different_type test: pg13 pg12 # run pg14 sequentially as it syncs metadata test: pg14 -test: tableam drop_column_partitioned_table +test: drop_column_partitioned_table +test: tableam # ---------- # Miscellaneous tests to check our query planning behavior diff --git a/src/test/regress/operations_schedule b/src/test/regress/operations_schedule index 2692f212f..353eabfcd 100644 --- a/src/test/regress/operations_schedule +++ b/src/test/regress/operations_schedule @@ -3,6 +3,7 @@ test: multi_cluster_management test: multi_test_catalog_views 
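A note on the expected counts in worker_copy_table_to_node.out above: the shard starts with 100 rows, and the UDF copies the relation's contents to the target node without truncating first, so pushing a shard to the node that already holds it appends a second copy, which is the 200 the test asserts. In sketch form, reusing the test's shard name and node variables:

    SELECT count(*) FROM t_62629600;                                 -- 100
    SELECT worker_copy_table_to_node('t_62629600', :worker_1_node);
    SELECT count(*) FROM t_62629600;                                 -- 200: the copy appended

That the copy appends rather than replaces is an inference from the expected output, not a stated guarantee of the UDF.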
test: shard_rebalancer_unit test: shard_rebalancer +test: worker_copy_table_to_node test: foreign_key_to_reference_shard_rebalance test: multi_move_mx test: shard_move_deferred_delete diff --git a/src/test/regress/sql/failure_offline_move_shard_placement.sql b/src/test/regress/sql/failure_offline_move_shard_placement.sql index 81683398b..1b02da1e9 100644 --- a/src/test/regress/sql/failure_offline_move_shard_placement.sql +++ b/src/test/regress/sql/failure_offline_move_shard_placement.sql @@ -57,12 +57,12 @@ SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost' SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard_offline.t").cancel(' || :pid || ')'); SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); --- failure on blocking append_table_to_shard operation on target node -SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").kill()'); +-- failure on blocking COPY operation on target node +SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()'); SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); --- cancellation on blocking append_table_to_shard operation on target node -SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").cancel(' || :pid || ')'); +-- cancellation on blocking COPY operation on target node +SELECT citus.mitmproxy('conn.onQuery(query="COPY").cancel(' || :pid || ')'); SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); -- failure on adding constraints on target node @@ -73,14 +73,6 @@ SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost' SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT").cancel(' || :pid || ')'); SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); --- failure on CopyData operation on source node -SELECT citus.mitmproxy('conn.onCopyData().kill()'); -SELECT master_move_shard_placement(200, 'localhost', :worker_2_proxy_port, 'localhost', :worker_1_port, 'block_writes'); - --- cancellation on CopyData operation on source node -SELECT citus.mitmproxy('conn.onCopyData().cancel(' || :pid || ')'); -SELECT master_move_shard_placement(200, 'localhost', :worker_2_proxy_port, 'localhost', :worker_1_port, 'block_writes'); - CALL citus_cleanup_orphaned_shards(); -- Verify that the shard is not moved and the number of rows are still 100k diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql index b16356a4a..0d482998b 100644 --- a/src/test/regress/sql/shard_rebalancer.sql +++ b/src/test/regress/sql/shard_rebalancer.sql @@ -291,24 +291,22 @@ SELECT unnest(shard_placement_replication_array( 2 )); --- Ensure that shard_replication_factor is 2 during replicate_table_shards --- and rebalance_table_shards tests - -SET citus.shard_replication_factor TO 2; - --- Turn off NOTICE messages - SET client_min_messages TO WARNING; --- Create a single-row test data for shard rebalancer test shards - -CREATE TABLE shard_rebalancer_test_data AS SELECT 1::int as int_column; - --- Test replicate_table_shards, which will in turn test update_shard_placement --- in copy mode. 
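The retargeting in failure_offline_move_shard_placement.sql below is the substance of that change: the offline move now streams data with COPY instead of calling worker_append_table_to_shard, so the injected failures must match the new wire traffic. The mitmproxy helpers follow one pattern throughout these failure tests:

    -- break the connection on the next matching query
    SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()');
    -- or deliver a cancel to the backend instead
    SELECT citus.mitmproxy('conn.onQuery(query="COPY").cancel(' || :pid || ')');
    -- and let traffic through again between cases
    SELECT citus.mitmproxy('conn.allow()');

All three calls are taken from the surrounding files.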
- +set citus.shard_count = 4; +-- Create a distributed table with all shards on a single node, so that we can +-- use this as an under-replicated +SET citus.shard_replication_factor TO 1; +SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', false); CREATE TABLE replication_test_table(int_column int); -SELECT master_create_distributed_table('replication_test_table', 'int_column', 'append'); +SELECT create_distributed_table('replication_test_table', 'int_column'); +UPDATE pg_dist_partition SET repmodel = 'c' WHERE logicalrelid = 'replication_test_table'::regclass; +INSERT INTO replication_test_table SELECT * FROM generate_series(1, 100); + +-- Ensure that shard_replication_factor is 2 during replicate_table_shards +-- and rebalance_table_shards tests +SET citus.shard_replication_factor TO 2; +SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', true); CREATE VIEW replication_test_table_placements_per_node AS SELECT count(*) FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard @@ -317,31 +315,12 @@ CREATE VIEW replication_test_table_placements_per_node AS GROUP BY nodename, nodeport ORDER BY nodename, nodeport; --- Create four shards with replication factor 2, and delete the placements --- with smaller port number to simulate under-replicated shards. - -SELECT count(master_create_empty_shard('replication_test_table')) - FROM generate_series(1, 4); - -DELETE FROM pg_dist_shard_placement WHERE placementid in ( - SELECT pg_dist_shard_placement.placementid - FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard - WHERE logicalrelid = 'replication_test_table'::regclass - AND (nodename, nodeport) = (SELECT nodename, nodeport FROM pg_dist_shard_placement - ORDER BY nodename, nodeport limit 1) -); - --- Upload the test data to the shards - -\COPY replication_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123000) -\COPY replication_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123001) -\COPY replication_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123002) -\COPY replication_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123003) - --- Verify that there is one node with all placements SELECT * FROM replication_test_table_placements_per_node; +-- Test replicate_table_shards, which will in turn test update_shard_placement +-- in copy mode. + -- Check excluded_shard_list by excluding three shards with smaller ids SELECT replicate_table_shards('replication_test_table', @@ -386,8 +365,11 @@ DROP TABLE public.replication_test_table CASCADE; -- Test rebalance_table_shards, which will in turn test update_shard_placement -- in move mode. 
+SET citus.shard_replication_factor TO 1; +SET citus.shard_count TO 6; CREATE TABLE rebalance_test_table(int_column int); -SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append'); +SELECT create_distributed_table('rebalance_test_table', 'int_column'); +UPDATE pg_dist_partition SET repmodel = 'c' WHERE logicalrelid = 'rebalance_test_table'::regclass; CREATE VIEW table_placements_per_node AS SELECT nodeport, logicalrelid::regclass, count(*) @@ -404,9 +386,6 @@ LANGUAGE SQL AS $$ SET citus.shard_replication_factor TO 1; - SELECT count(master_create_empty_shard(rel)) - FROM generate_series(1, 6); - SELECT count(master_move_shard_placement(shardid, src.nodename, src.nodeport::int, dst.nodename, dst.nodeport::int, @@ -424,12 +403,7 @@ SET citus.shard_replication_factor TO 2; -- Upload the test data to the shards -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123004) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123005) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123006) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123007) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123008) -\COPY rebalance_test_table FROM PROGRAM 'echo 1' WITH (format 'csv', append_to_shard 123009) +INSERT INTO rebalance_test_table SELECT * FROM generate_series(1, 100); -- Verify that there is one node with all placements @@ -604,34 +578,20 @@ CREATE TABLE test_schema_support.imbalanced_table ( id integer not null ); -SELECT master_create_distributed_table('test_schema_support.imbalanced_table', 'id', 'append'); - +SET citus.shard_count = 3; SET citus.shard_replication_factor TO 1; -SELECT master_create_empty_shard('test_schema_support.imbalanced_table') AS shardid \gset -COPY test_schema_support.imbalanced_table FROM STDIN WITH (format 'csv', append_to_shard :shardid); -1 -2 -3 -4 -\. +SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', false); +SELECT create_distributed_table('test_schema_support.imbalanced_table', 'id'); +INSERT INTO test_schema_support.imbalanced_table SELECT * FROM generate_series(1, 100); +UPDATE pg_dist_partition SET repmodel = 'c' WHERE logicalrelid = 'test_schema_support.imbalanced_table'::regclass; +SELECT * from master_set_node_property('localhost', :worker_1_port, 'shouldhaveshards', true); +SET citus.shard_count = 4; +-- copy one of the shards to the other node, this is to test that the +-- rebalancer takes into account all copies of a placement SET citus.shard_replication_factor TO 2; -SELECT master_create_empty_shard('test_schema_support.imbalanced_table') AS shardid \gset -COPY test_schema_support.imbalanced_table FROM STDIN WITH (format 'csv', append_to_shard :shardid); -1 -2 -3 -4 -\. - +SELECT replicate_table_shards('test_schema_support.imbalanced_table', max_shard_copies := 1, shard_transfer_mode := 'block_writes'); SET citus.shard_replication_factor TO 1; -SELECT master_create_empty_shard('test_schema_support.imbalanced_table') AS shardid \gset -COPY test_schema_support.imbalanced_table FROM STDIN WITH (format 'csv', append_to_shard :shardid); -1 -2 -3 -4 -\. 
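After the replicate_table_shards call above, exactly one of imbalanced_table's three shards should have gained a second placement, since max_shard_copies := 1 bounds the call even though shard_replication_factor was raised to 2. A hypothetical verification query, not part of the test, built on the same catalog join the file already uses:

    SELECT shardid, count(*) AS placements
    FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
    WHERE logicalrelid = 'test_schema_support.imbalanced_table'::regclass
    GROUP BY shardid
    ORDER BY shardid;
    -- expected shape: two shards with one placement, one shard with two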
-- imbalanced_table is now imbalanced @@ -652,11 +612,11 @@ SELECT * FROM public.table_placements_per_node; -- Row count in imbalanced table after rebalance SELECT COUNT(*) FROM imbalanced_table; -DROP TABLE public.shard_rebalancer_test_data; DROP TABLE test_schema_support.imbalanced_table; DROP TABLE test_schema_support.imbalanced_table_local; SET citus.shard_replication_factor TO 1; +SET citus.shard_count = 4; CREATE TABLE colocated_rebalance_test(id integer); CREATE TABLE colocated_rebalance_test2(id integer); @@ -1276,8 +1236,7 @@ SET citus.shard_replication_factor TO 2; SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shard_transfer_mode:='block_writes'); -- Mark table as coordinator replicated in order to be able to test replicate_table_shards -UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN - ('dist_table_test_3'::regclass); +UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid = 'dist_table_test_3'::regclass; SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shard_transfer_mode:='block_writes'); diff --git a/src/test/regress/sql/tableam.sql b/src/test/regress/sql/tableam.sql index 47845492a..f0ed5cfca 100644 --- a/src/test/regress/sql/tableam.sql +++ b/src/test/regress/sql/tableam.sql @@ -26,7 +26,7 @@ ALTER EXTENSION citus ADD ACCESS METHOD fake_am; create table test_hash_dist(id int, val int) using fake_am; insert into test_hash_dist values (1, 1); -select create_distributed_table('test_hash_dist','id'); +select create_distributed_table('test_hash_dist','id', colocate_with := 'none'); select * from test_hash_dist; insert into test_hash_dist values (1, 1); @@ -86,11 +86,14 @@ SELECT * FROM master_get_table_ddl_events('test_range_dist'); select a.shardid, a.nodeport FROM pg_dist_shard b, pg_dist_shard_placement a -WHERE a.shardid=b.shardid AND logicalrelid = 'test_range_dist'::regclass::oid +WHERE a.shardid=b.shardid AND logicalrelid = 'test_hash_dist'::regclass::oid ORDER BY a.shardid, nodeport; +-- Change repmodel to allow master_copy_shard_placement +UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid = 'test_hash_dist'::regclass; + SELECT master_copy_shard_placement( - get_shard_id_for_distribution_column('test_range_dist', '1'), + get_shard_id_for_distribution_column('test_hash_dist', '1'), 'localhost', :worker_1_port, 'localhost', :worker_2_port, do_repair := false, @@ -98,19 +101,21 @@ SELECT master_copy_shard_placement( select a.shardid, a.nodeport FROM pg_dist_shard b, pg_dist_shard_placement a -WHERE a.shardid=b.shardid AND logicalrelid = 'test_range_dist'::regclass::oid +WHERE a.shardid=b.shardid AND logicalrelid = 'test_hash_dist'::regclass::oid ORDER BY a.shardid, nodeport; -- verify that data was copied correctly \c - - - :worker_1_port -select * from test_tableam.test_range_dist_60005 ORDER BY id; +select * from test_tableam.test_hash_dist_60000 ORDER BY id; \c - - - :worker_2_port -select * from test_tableam.test_range_dist_60005 ORDER BY id; +select * from test_tableam.test_hash_dist_60000 ORDER BY id; \c - - - :master_port +set search_path to test_tableam; + -- -- Test that partitioned tables work correctly with a fake_am table -- diff --git a/src/test/regress/sql/worker_copy_table_to_node.sql b/src/test/regress/sql/worker_copy_table_to_node.sql new file mode 100644 index 000000000..fa0703a25 --- /dev/null +++ b/src/test/regress/sql/worker_copy_table_to_node.sql @@ -0,0 +1,49 @@ +CREATE SCHEMA worker_copy_table_to_node; +SET search_path TO worker_copy_table_to_node; +SET 
citus.shard_count TO 1; -- single shard table for ease of testing +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 62629600; + +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset + +CREATE TABLE t(a int); +INSERT INTO t SELECT generate_series(1, 100); + +CREATE TABLE ref(a int); +INSERT INTO ref SELECT generate_series(1, 100); + +select create_distributed_table('t', 'a'); +select create_reference_table('ref'); + +\c - - - :worker_2_port +SET search_path TO worker_copy_table_to_node; + +-- Create empty shard on worker 2 too +CREATE TABLE t_62629600(a int); + +\c - - - :worker_1_port +SET search_path TO worker_copy_table_to_node; + +-- Make sure that the UDF doesn't work on Citus tables +SELECT worker_copy_table_to_node('t', :worker_1_node); +SELECT worker_copy_table_to_node('ref', :worker_1_node); + +-- It should work on shards +SELECT worker_copy_table_to_node('t_62629600', :worker_1_node); + +SELECT count(*) FROM t; +SELECT count(*) FROM t_62629600; + +SELECT worker_copy_table_to_node('t_62629600', :worker_2_node); + +\c - - - :worker_2_port +SET search_path TO worker_copy_table_to_node; + +SELECT count(*) FROM t_62629600; + +\c - - - :master_port +SET search_path TO worker_copy_table_to_node; + +SET client_min_messages TO WARNING; +DROP SCHEMA worker_copy_table_to_node CASCADE; From c7b51025ab3d64f26f23314bf18ab32b76f892b9 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Tue, 2 Aug 2022 09:39:00 +0200 Subject: [PATCH 33/38] Add missing ifdef for PG 15 --- src/backend/distributed/deparser/ruleutils_14.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/deparser/ruleutils_14.c b/src/backend/distributed/deparser/ruleutils_14.c index bde8e1b23..3fa6b1986 100644 --- a/src/backend/distributed/deparser/ruleutils_14.c +++ b/src/backend/distributed/deparser/ruleutils_14.c @@ -18,7 +18,8 @@ #include "pg_config.h" -#if (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM < PG_VERSION_15) +/* We should drop PG 15 support from this file, this is only for testing purposes until #6085 is merged. */ +#if (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM <= PG_VERSION_15) #include "postgres.h" From 28e22c4abf0d6274bfbe5aec5f07494a09c946c1 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Tue, 2 Aug 2022 11:56:28 +0300 Subject: [PATCH 34/38] Reduce log level to avoid alternative output for PG15 (#6118) We are reducing the log level here to avoid alternative test output in PG15 because of the change in the display of SQL-standard function's arguments in INSERT/SELECT in PG15. 
The log level changes can be reverted when we drop support for PG14 Relevant PG commit: a8d8445a7b2f80f6d0bfe97b19f90bd2cbef8759 --- .../replicate_reference_tables_to_coordinator.out | 7 ++++++- src/test/regress/expected/with_dml.out | 12 ++++++------ .../replicate_reference_tables_to_coordinator.sql | 6 ++++++ src/test/regress/sql/with_dml.sql | 6 ++++++ 4 files changed, 24 insertions(+), 7 deletions(-) diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out index 5debf1507..2ee4a6a7e 100644 --- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out @@ -472,8 +472,13 @@ BEGIN; INSERT INTO local_table VALUES (1), (2), (3); INSERT INTO numbers SELECT * FROM generate_series(1, 100); NOTICE: executing the copy locally for shard xxxxx +-- We are reducing the log level here to avoid alternative test output +-- in PG15 because of the change in the display of SQL-standard +-- function's arguments in INSERT/SELECT in PG15. +-- The log level changes can be reverted when we drop support for PG14 +SET client_min_messages TO WARNING; INSERT INTO numbers SELECT * FROM numbers; -NOTICE: executing the command locally: INSERT INTO replicate_ref_to_coordinator.numbers_8000001 AS citus_table_alias (a) SELECT a FROM replicate_ref_to_coordinator.numbers_8000001 numbers +RESET client_min_messages; SELECT COUNT(*) FROM local_table JOIN numbers using (a); NOTICE: executing the command locally: SELECT count(*) AS count FROM (replicate_ref_to_coordinator.local_table JOIN replicate_ref_to_coordinator.numbers_8000001 numbers(a) USING (a)) count diff --git a/src/test/regress/expected/with_dml.out b/src/test/regress/expected/with_dml.out index 07a25f686..b5141db33 100644 --- a/src/test/regress/expected/with_dml.out +++ b/src/test/regress/expected/with_dml.out @@ -118,6 +118,11 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator -- otherwise the coordinator insert select fails -- since COPY cannot be executed SET citus.force_max_query_parallelization TO on; +-- We are reducing the log level here to avoid alternative test output +-- in PG15 because of the change in the display of SQL-standard +-- function's arguments in INSERT/SELECT in PG15. +-- The log level changes can be reverted when we drop support for PG14 +SET client_min_messages TO LOG; WITH copy_to_other_table AS ( INSERT INTO distributed_table SELECT * @@ -141,13 +146,8 @@ INSERT INTO second_distributed_table EXCEPT SELECT * FROM copy_to_other_table; -DEBUG: distributed INSERT ... 
SELECT can only select from distributed tables -DEBUG: generating subplan XXX_1 for CTE copy_to_other_table: INSERT INTO with_dml.distributed_table (tenant_id, dept) SELECT tenant_id, dept FROM with_dml.second_distributed_table WHERE (dept OPERATOR(pg_catalog.=) 3) ON CONFLICT(tenant_id) DO UPDATE SET dept = 4 RETURNING distributed_table.tenant_id, distributed_table.dept -DEBUG: generating subplan XXX_2 for CTE main_table_deleted: DELETE FROM with_dml.distributed_table WHERE ((dept OPERATOR(pg_catalog.<) 10) AND (NOT (EXISTS (SELECT 1 FROM with_dml.second_distributed_table WHERE ((second_distributed_table.dept OPERATOR(pg_catalog.=) 1) AND (second_distributed_table.tenant_id OPERATOR(pg_catalog.=) distributed_table.tenant_id)))))) RETURNING tenant_id, dept -DEBUG: generating subplan XXX_3 for subquery SELECT main_table_deleted.tenant_id, main_table_deleted.dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) main_table_deleted EXCEPT SELECT copy_to_other_table.tenant_id, copy_to_other_table.dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) copy_to_other_table -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT tenant_id, dept FROM (SELECT intermediate_result.tenant_id, intermediate_result.dept FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, dept integer)) citus_insert_select_subquery -DEBUG: Collecting INSERT ... SELECT results on coordinator SET citus.force_max_query_parallelization TO off; +SET client_min_messages TO DEBUG1; -- CTE inside the UPDATE statement UPDATE second_distributed_table diff --git a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql index 78eb8c9c7..fe29340ba 100644 --- a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql +++ b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql @@ -229,7 +229,13 @@ ROLLBACK; BEGIN; INSERT INTO local_table VALUES (1), (2), (3); INSERT INTO numbers SELECT * FROM generate_series(1, 100); +-- We are reducing the log level here to avoid alternative test output +-- in PG15 because of the change in the display of SQL-standard +-- function's arguments in INSERT/SELECT in PG15. +-- The log level changes can be reverted when we drop support for PG14 +SET client_min_messages TO WARNING; INSERT INTO numbers SELECT * FROM numbers; +RESET client_min_messages; SELECT COUNT(*) FROM local_table JOIN numbers using (a); UPDATE numbers SET a = a + 1; SELECT COUNT(*) FROM local_table JOIN numbers using (a); diff --git a/src/test/regress/sql/with_dml.sql b/src/test/regress/sql/with_dml.sql index 0a3052c2f..8602a961b 100644 --- a/src/test/regress/sql/with_dml.sql +++ b/src/test/regress/sql/with_dml.sql @@ -97,6 +97,11 @@ INSERT INTO distributed_table -- otherwise the coordinator insert select fails -- since COPY cannot be executed SET citus.force_max_query_parallelization TO on; +-- We are reducing the log level here to avoid alternative test output +-- in PG15 because of the change in the display of SQL-standard +-- function's arguments in INSERT/SELECT in PG15. 
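-- In sketch form the wrap pattern used in this commit is:
--     SET client_min_messages TO WARNING;   -- or LOG, one level above the noisy messages
--     <statement whose NOTICE/DEBUG text differs between PG14 and PG15>
--     RESET client_min_messages;            -- or restore the level the schedule expects
-- which keeps a single expected file valid for both server versions.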
+-- The log level changes can be reverted when we drop support for PG14 +SET client_min_messages TO LOG; WITH copy_to_other_table AS ( INSERT INTO distributed_table SELECT * @@ -122,6 +127,7 @@ INSERT INTO second_distributed_table FROM copy_to_other_table; SET citus.force_max_query_parallelization TO off; +SET client_min_messages TO DEBUG1; -- CTE inside the UPDATE statement UPDATE From 8866d9ac32ace5ad7a6debe7105588fd42da3279 Mon Sep 17 00:00:00 2001 From: Jelte Fennema Date: Tue, 2 Aug 2022 16:58:59 +0200 Subject: [PATCH 35/38] Reduce setup time of check-minimal and check-minimal-mx (#6117) This change reduces the setup time of our minimal schedules in two ways: 1. Don't run `multi_cluster_managament`, but instead run a much smaller sql file with almost the same results. `multi_cluster_management` adds and removes lots of nodes and tests all kinds of failure scenarios. This is not needed for the minimal schedules. The only reason we were using it there was to get a working cluster of the layout that the tests expected. The new `minimal_cluster_management` test achieves this with much less work, going from ~2s to ~0.5s. 2. Parallelize a bit more of the helper tests. --- .../expected/minimal_cluster_management.out | 64 +++++++++++++++++++ .../expected/multi_test_catalog_views.out | 14 +++- src/test/regress/minimal_schedule | 6 +- src/test/regress/mx_minimal_schedule | 5 +- .../sql/minimal_cluster_management.sql | 40 ++++++++++++ .../regress/sql/multi_test_catalog_views.sql | 13 +++- 6 files changed, 132 insertions(+), 10 deletions(-) create mode 100644 src/test/regress/expected/minimal_cluster_management.out create mode 100644 src/test/regress/sql/minimal_cluster_management.sql diff --git a/src/test/regress/expected/minimal_cluster_management.out b/src/test/regress/expected/minimal_cluster_management.out new file mode 100644 index 000000000..af3ac84f3 --- /dev/null +++ b/src/test/regress/expected/minimal_cluster_management.out @@ -0,0 +1,64 @@ +SET citus.next_shard_id TO 1220000; +ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000; +ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1; +-- Tests functions related to cluster membership +-- add the nodes to the cluster with the same nodeids and groupids that +-- multi_cluster_management.sql creates +ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 18; +ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 16; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 16; +ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 14; +SELECT 1 FROM master_add_node('localhost', :worker_1_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- Create the same colocation groups as multi_cluster_management.sql +SET citus.shard_count TO 16; +SET citus.shard_replication_factor TO 1; +CREATE TABLE cluster_management_test (col_1 text, col_2 int); +SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE cluster_management_test; +CREATE TABLE test_reference_table (y int primary key, name text); +SELECT create_reference_table('test_reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE test_reference_table; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 2; +CREATE TABLE cluster_management_test (col_1 text, col_2 int); +SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE cluster_management_test; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +CREATE TABLE test_dist (x int, y int); +SELECT create_distributed_table('test_dist', 'x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE test_dist; +ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 30; +ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 18; +ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 83; diff --git a/src/test/regress/expected/multi_test_catalog_views.out b/src/test/regress/expected/multi_test_catalog_views.out index f99b3d7d7..8c255f94e 100644 --- a/src/test/regress/expected/multi_test_catalog_views.out +++ b/src/test/regress/expected/multi_test_catalog_views.out @@ -1,8 +1,17 @@ +-- create a temporary custom version of this function that's normally defined +-- in multi_test_helpers, so that this file can be run parallel with +-- multi_test_helpers during the minimal schedules +CREATE OR REPLACE FUNCTION run_command_on_master_and_workers_temp(p_sql text) +RETURNS void LANGUAGE plpgsql AS $$ +BEGIN + EXECUTE p_sql; + PERFORM run_command_on_workers(p_sql); +END;$$; -- The following views are intended as alternatives to \d commands, whose -- output changed in PostgreSQL 10. In particular, they must be used any time -- a test wishes to print out the structure of a relation, which previously -- was safely accomplished by a \d invocation. 
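The self-contained helper is the trick that makes multi_test_catalog_views safe to schedule in parallel with multi_test_helpers: instead of calling the shared run_command_on_master_and_workers (which may not exist yet when this file runs), the file carries a disposable clone and drops it at the end. Condensed from the hunks in this commit:

    CREATE OR REPLACE FUNCTION run_command_on_master_and_workers_temp(p_sql text)
    RETURNS void LANGUAGE plpgsql AS $$
    BEGIN
      EXECUTE p_sql;                          -- run on the coordinator itself
      PERFORM run_command_on_workers(p_sql);  -- then on every worker
    END;$$;
    -- ... create the catalog views through it ...
    DROP FUNCTION run_command_on_master_and_workers_temp(p_sql text);

run_command_on_workers ships with the Citus extension itself, so it is assumed to be available before any helper file runs.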
-SELECT run_command_on_master_and_workers( +SELECT run_command_on_master_and_workers_temp( $desc_views$ CREATE VIEW table_fkey_cols AS SELECT rc.constraint_name AS "name", @@ -97,8 +106,9 @@ ORDER BY a.attrelid, a.attnum; $desc_views$ ); - run_command_on_master_and_workers + run_command_on_master_and_workers_temp --------------------------------------------------------------------- (1 row) +DROP FUNCTION run_command_on_master_and_workers_temp(p_sql text); diff --git a/src/test/regress/minimal_schedule b/src/test/regress/minimal_schedule index 6ea784918..ef2d3dc65 100644 --- a/src/test/regress/minimal_schedule +++ b/src/test/regress/minimal_schedule @@ -1,4 +1,2 @@ -test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers -test: multi_cluster_management -test: multi_test_catalog_views -test: tablespace +test: minimal_cluster_management +test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers multi_test_catalog_views tablespace diff --git a/src/test/regress/mx_minimal_schedule b/src/test/regress/mx_minimal_schedule index c697f79dc..5b0f943c7 100644 --- a/src/test/regress/mx_minimal_schedule +++ b/src/test/regress/mx_minimal_schedule @@ -1,9 +1,8 @@ # ---------- # Only run few basic tests to set up a testing environment # ---------- -test: multi_cluster_management -test: multi_test_helpers multi_test_helpers_superuser -test: multi_test_catalog_views +test: minimal_cluster_management +test: multi_test_helpers multi_test_helpers_superuser multi_test_catalog_views # the following test has to be run sequentially test: base_enable_mx diff --git a/src/test/regress/sql/minimal_cluster_management.sql b/src/test/regress/sql/minimal_cluster_management.sql new file mode 100644 index 000000000..424daccac --- /dev/null +++ b/src/test/regress/sql/minimal_cluster_management.sql @@ -0,0 +1,40 @@ +SET citus.next_shard_id TO 1220000; +ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000; +ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1; + +-- Tests functions related to cluster membership + +-- add the nodes to the cluster with the same nodeids and groupids that +-- multi_cluster_management.sql creates +ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 18; +ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 16; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); +ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 16; +ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 14; +SELECT 1 FROM master_add_node('localhost', :worker_1_port); + +-- Create the same colocation groups as multi_cluster_management.sql +SET citus.shard_count TO 16; +SET citus.shard_replication_factor TO 1; +CREATE TABLE cluster_management_test (col_1 text, col_2 int); +SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); +DROP TABLE cluster_management_test; + +CREATE TABLE test_reference_table (y int primary key, name text); +SELECT create_reference_table('test_reference_table'); +DROP TABLE test_reference_table; + +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 2; +CREATE TABLE cluster_management_test (col_1 text, col_2 int); +SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); +DROP TABLE cluster_management_test; + +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +CREATE TABLE test_dist (x int, y int); +SELECT create_distributed_table('test_dist', 'x'); +DROP TABLE test_dist; +ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 30; +ALTER SEQUENCE 
pg_catalog.pg_dist_groupid_seq RESTART 18; +ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 83; diff --git a/src/test/regress/sql/multi_test_catalog_views.sql b/src/test/regress/sql/multi_test_catalog_views.sql index f2c5a50cf..bb1442edf 100644 --- a/src/test/regress/sql/multi_test_catalog_views.sql +++ b/src/test/regress/sql/multi_test_catalog_views.sql @@ -1,8 +1,18 @@ +-- create a temporary custom version of this function that's normally defined +-- in multi_test_helpers, so that this file can be run parallel with +-- multi_test_helpers during the minimal schedules +CREATE OR REPLACE FUNCTION run_command_on_master_and_workers_temp(p_sql text) +RETURNS void LANGUAGE plpgsql AS $$ +BEGIN + EXECUTE p_sql; + PERFORM run_command_on_workers(p_sql); +END;$$; + -- The following views are intended as alternatives to \d commands, whose -- output changed in PostgreSQL 10. In particular, they must be used any time -- a test wishes to print out the structure of a relation, which previously -- was safely accomplished by a \d invocation. -SELECT run_command_on_master_and_workers( +SELECT run_command_on_master_and_workers_temp( $desc_views$ CREATE VIEW table_fkey_cols AS SELECT rc.constraint_name AS "name", @@ -98,3 +108,4 @@ ORDER BY a.attrelid, a.attnum; $desc_views$ ); +DROP FUNCTION run_command_on_master_and_workers_temp(p_sql text); From 57ce4cf8c4a62b791922aea9de30507f131f9df3 Mon Sep 17 00:00:00 2001 From: aykutbozkurt Date: Thu, 21 Jul 2022 10:34:32 +0300 Subject: [PATCH 36/38] use address method to decide if we should run preprocess and postprocess steps for a distributed object --- .../distributed/commands/utility_hook.c | 16 ++++++++-- .../distributed/utils/citus_depended_object.c | 32 +++++++++++++++++++ .../distributed/citus_depended_object.h | 2 ++ 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index dde27bc97..40b3e1c62 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -45,6 +45,7 @@ #include "commands/tablecmds.h" #include "distributed/adaptive_executor.h" #include "distributed/backend_data.h" +#include "distributed/citus_depended_object.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/multi_copy.h" @@ -376,6 +377,7 @@ ProcessUtilityInternal(PlannedStmt *pstmt, { Node *parsetree = pstmt->utilityStmt; List *ddlJobs = NIL; + bool distOpsHasInvalidObject = false; if (IsA(parsetree, ExplainStmt) && IsA(((ExplainStmt *) parsetree)->query, Query)) @@ -542,6 +544,16 @@ ProcessUtilityInternal(PlannedStmt *pstmt, parsetree = pstmt->utilityStmt; ops = GetDistributeObjectOps(parsetree); + /* + * Preprocess and qualify steps can cause pg tests to fail because of the + * unwanted citus related warnings or early error logs related to invalid address. + * Therefore, we first check if all addresses in the given statement are valid. + * Then, we do not execute qualify and preprocess if any address is invalid to + * prevent before-mentioned citus related messages. PG will complain about the + * invalid address, so we are safe to not execute qualify and preprocess. + */ + distOpsHasInvalidObject = DistOpsHasInvalidObject(parsetree, ops); + /* * For some statements Citus defines a Qualify function. 
The goal of this function * is to take any ambiguity from the statement that is contextual on either the @@ -551,12 +563,12 @@ ProcessUtilityInternal(PlannedStmt *pstmt, * deserialize calls for the statement portable to other postgres servers, the * workers in our case. */ - if (ops && ops->qualify) + if (ops && ops->qualify && !distOpsHasInvalidObject) { ops->qualify(parsetree); } - if (ops && ops->preprocess) + if (ops && ops->preprocess && !distOpsHasInvalidObject) { ddlJobs = ops->preprocess(parsetree, queryString, context); } diff --git a/src/backend/distributed/utils/citus_depended_object.c b/src/backend/distributed/utils/citus_depended_object.c index 6424595bf..9c0055b1f 100644 --- a/src/backend/distributed/utils/citus_depended_object.c +++ b/src/backend/distributed/utils/citus_depended_object.c @@ -308,3 +308,35 @@ GetCitusDependedObjectArgs(int pgMetaTableVarno, int pgMetaTableOid) return list_make2((Node *) metaTableOidConst, (Node *) oidVar); } + + +/* + * DistOpsHasInvalidObject returns true if any address in the given node + * is invalid; otherwise, returns false. If ops is null or it has no + * implemented address method, we return false. + * + * If EnableUnsupportedFeatureMessages is active, then we return false. + */ +bool +DistOpsHasInvalidObject(Node *node, const DistributeObjectOps *ops) +{ + if (EnableUnsupportedFeatureMessages) + { + return false; + } + + if (ops && ops->address) + { + List *objectAddresses = ops->address(node, true); + ObjectAddress *objectAddress = NULL; + foreach_ptr(objectAddress, objectAddresses) + { + if (!OidIsValid(objectAddress->objectId)) + { + return true; + } + } + } + + return false; +} diff --git a/src/include/distributed/citus_depended_object.h b/src/include/distributed/citus_depended_object.h index 027186f4e..55b1369fb 100644 --- a/src/include/distributed/citus_depended_object.h +++ b/src/include/distributed/citus_depended_object.h @@ -12,6 +12,7 @@ #ifndef CITUS_DEPENDED_OBJECT_H #define CITUS_DEPENDED_OBJECT_H +#include "distributed/commands.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" @@ -22,5 +23,6 @@ extern void SetLocalClientMinMessagesIfRunningPGTests(int extern void SetLocalHideCitusDependentObjectsDisabledWhenAlreadyEnabled(void); extern bool HideCitusDependentObjectsOnQueriesOfPgMetaTables(Node *node, void *context); extern bool IsPgLocksTable(RangeTblEntry *rte); +extern bool DistOpsHasInvalidObject(Node *node, const DistributeObjectOps *ops); #endif /* CITUS_DEPENDED_OBJECT_H */ From c98a68662a27808afb8bf9679dfc9181483e7198 Mon Sep 17 00:00:00 2001 From: aykutbozkurt Date: Mon, 1 Aug 2022 13:39:14 +0300 Subject: [PATCH 37/38] introduces operation type for dist ops --- .../commands/distribute_object_ops.c | 120 ++++++++++++++ src/backend/distributed/commands/role.c | 7 + src/backend/distributed/commands/statistics.c | 16 +- .../distributed/commands/utility_hook.c | 5 +- .../distributed/utils/citus_depended_object.c | 149 +++++++++++++++++- src/include/distributed/commands.h | 11 ++ 6 files changed, 302 insertions(+), 6 deletions(-) diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 78f72d828..b64531381 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -23,6 +23,7 @@ static DistributeObjectOps NoDistributeOps = { .qualify = NULL, .preprocess = NULL, .postprocess = NULL, + .operationType = DIST_OPS_NONE, .address = NULL, .markDistributed = false, 
}; @@ -32,6 +33,7 @@ static DistributeObjectOps Aggregate_AlterObjectSchema = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionSchemaStmtObjectAddress, .markDistributed = false, }; @@ -41,6 +43,7 @@ static DistributeObjectOps Aggregate_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; @@ -50,6 +53,7 @@ static DistributeObjectOps Aggregate_Define = { .preprocess = NULL, .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, .objectType = OBJECT_AGGREGATE, + .operationType = DIST_OPS_CREATE, .address = DefineAggregateStmtObjectAddress, .markDistributed = true, }; @@ -58,6 +62,7 @@ static DistributeObjectOps Aggregate_Drop = { .qualify = NULL, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -67,6 +72,7 @@ static DistributeObjectOps Aggregate_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = RenameFunctionStmtObjectAddress, .markDistributed = false, }; @@ -76,6 +82,7 @@ static DistributeObjectOps Any_AlterEnum = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_TYPE, + .operationType = DIST_OPS_ALTER, .address = AlterEnumStmtObjectAddress, .markDistributed = false, }; @@ -84,6 +91,7 @@ static DistributeObjectOps Any_AlterExtension = { .qualify = NULL, .preprocess = PreprocessAlterExtensionUpdateStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = AlterExtensionUpdateStmtObjectAddress, .markDistributed = false, }; @@ -92,6 +100,7 @@ static DistributeObjectOps Any_AlterExtensionContents = { .qualify = NULL, .preprocess = PreprocessAlterExtensionContentsStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -101,6 +110,7 @@ static DistributeObjectOps Any_AlterForeignServer = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_FOREIGN_SERVER, + .operationType = DIST_OPS_ALTER, .address = AlterForeignServerStmtObjectAddress, .markDistributed = false, }; @@ -109,6 +119,7 @@ static DistributeObjectOps Any_AlterFunction = { .qualify = QualifyAlterFunctionStmt, .preprocess = PreprocessAlterFunctionStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionStmtObjectAddress, .markDistributed = false, }; @@ -117,6 +128,7 @@ static DistributeObjectOps Any_AlterPolicy = { .qualify = NULL, .preprocess = PreprocessAlterPolicyStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -125,6 +137,7 @@ static DistributeObjectOps Any_AlterRole = { .qualify = NULL, .preprocess = NULL, .postprocess = PostprocessAlterRoleStmt, + .operationType = DIST_OPS_ALTER, .address = AlterRoleStmtObjectAddress, .markDistributed = false, }; @@ -133,6 +146,7 @@ static DistributeObjectOps Any_AlterRoleSet = { .qualify = QualifyAlterRoleSetStmt, .preprocess = PreprocessAlterRoleSetStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = AlterRoleSetStmtObjectAddress, .markDistributed = false, }; @@ 
-141,6 +155,7 @@ static DistributeObjectOps Any_AlterTableMoveAll = { .qualify = NULL, .preprocess = PreprocessAlterTableMoveAllStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -149,6 +164,7 @@ static DistributeObjectOps Any_Cluster = { .qualify = NULL, .preprocess = PreprocessClusterStmt, .postprocess = NULL, + .operationType = DIST_OPS_NONE, .address = NULL, .markDistributed = false, }; @@ -158,6 +174,7 @@ static DistributeObjectOps Any_CompositeType = { .preprocess = NULL, .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, .objectType = OBJECT_TYPE, + .operationType = DIST_OPS_CREATE, .featureFlag = &EnableCreateTypePropagation, .address = CompositeTypeStmtObjectAddress, .markDistributed = true, @@ -168,6 +185,7 @@ static DistributeObjectOps Any_CreateDomain = { .preprocess = NULL, .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, .objectType = OBJECT_DOMAIN, + .operationType = DIST_OPS_CREATE, .address = CreateDomainStmtObjectAddress, .markDistributed = true, }; @@ -177,6 +195,7 @@ static DistributeObjectOps Any_CreateEnum = { .preprocess = NULL, .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, .objectType = OBJECT_TYPE, + .operationType = DIST_OPS_CREATE, .featureFlag = &EnableCreateTypePropagation, .address = CreateEnumStmtObjectAddress, .markDistributed = true, @@ -186,6 +205,7 @@ static DistributeObjectOps Any_CreateExtension = { .qualify = NULL, .preprocess = NULL, .postprocess = PostprocessCreateExtensionStmt, + .operationType = DIST_OPS_CREATE, .address = CreateExtensionStmtObjectAddress, .markDistributed = true, }; @@ -194,6 +214,7 @@ static DistributeObjectOps Any_CreateFunction = { .qualify = NULL, .preprocess = PreprocessCreateFunctionStmt, .postprocess = PostprocessCreateFunctionStmt, + .operationType = DIST_OPS_CREATE, .address = CreateFunctionStmtObjectAddress, .markDistributed = true, }; @@ -202,6 +223,7 @@ static DistributeObjectOps Any_View = { .qualify = NULL, .preprocess = PreprocessViewStmt, .postprocess = PostprocessViewStmt, + .operationType = DIST_OPS_CREATE, .address = ViewStmtObjectAddress, .markDistributed = true, }; @@ -210,6 +232,7 @@ static DistributeObjectOps Any_CreatePolicy = { .qualify = NULL, .preprocess = NULL, .postprocess = PostprocessCreatePolicyStmt, + .operationType = DIST_OPS_CREATE, .address = NULL, .markDistributed = false, }; @@ -218,6 +241,7 @@ static DistributeObjectOps Any_CreateRole = { .qualify = NULL, .preprocess = PreprocessCreateRoleStmt, .postprocess = NULL, + .operationType = DIST_OPS_CREATE, .address = CreateRoleStmtObjectAddress, .markDistributed = true, }; @@ -226,6 +250,7 @@ static DistributeObjectOps Any_DropRole = { .qualify = NULL, .preprocess = PreprocessDropRoleStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -235,6 +260,7 @@ static DistributeObjectOps Any_CreateForeignServer = { .preprocess = NULL, .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, .objectType = OBJECT_FOREIGN_SERVER, + .operationType = DIST_OPS_CREATE, .address = CreateForeignServerStmtObjectAddress, .markDistributed = true, }; @@ -243,6 +269,7 @@ static DistributeObjectOps Any_CreateSchema = { .qualify = NULL, .preprocess = PreprocessCreateSchemaStmt, .postprocess = NULL, + .operationType = DIST_OPS_CREATE, .address = CreateSchemaStmtObjectAddress, .markDistributed = true, }; @@ -251,6 +278,7 @@ static DistributeObjectOps Any_CreateStatistics = { .qualify = 
QualifyCreateStatisticsStmt, .preprocess = PreprocessCreateStatisticsStmt, .postprocess = PostprocessCreateStatisticsStmt, + .operationType = DIST_OPS_CREATE, .address = CreateStatisticsStmtObjectAddress, .markDistributed = false, }; @@ -259,6 +287,7 @@ static DistributeObjectOps Any_CreateTrigger = { .qualify = NULL, .preprocess = NULL, .postprocess = PostprocessCreateTriggerStmt, + .operationType = DIST_OPS_CREATE, .address = CreateTriggerStmtObjectAddress, .markDistributed = false, }; @@ -267,6 +296,7 @@ static DistributeObjectOps Any_Grant = { .qualify = NULL, .preprocess = PreprocessGrantStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -275,6 +305,7 @@ static DistributeObjectOps Any_GrantRole = { .qualify = NULL, .preprocess = PreprocessGrantRoleStmt, .postprocess = PostprocessGrantRoleStmt, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -283,6 +314,7 @@ static DistributeObjectOps Any_Index = { .qualify = NULL, .preprocess = PreprocessIndexStmt, .postprocess = PostprocessIndexStmt, + .operationType = DIST_OPS_CREATE, .address = NULL, .markDistributed = false, }; @@ -291,6 +323,7 @@ static DistributeObjectOps Any_Reindex = { .qualify = NULL, .preprocess = PreprocessReindexStmt, .postprocess = NULL, + .operationType = DIST_OPS_NONE, .address = ReindexStmtObjectAddress, .markDistributed = false, }; @@ -299,6 +332,7 @@ static DistributeObjectOps Any_Rename = { .qualify = NULL, .preprocess = PreprocessRenameStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -307,6 +341,7 @@ static DistributeObjectOps Attribute_Rename = { .qualify = QualifyRenameAttributeStmt, .preprocess = PreprocessRenameAttributeStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = RenameAttributeStmtObjectAddress, .markDistributed = false, }; @@ -316,6 +351,7 @@ static DistributeObjectOps Collation_AlterObjectSchema = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_COLLATION, + .operationType = DIST_OPS_ALTER, .address = AlterCollationSchemaStmtObjectAddress, .markDistributed = false, }; @@ -325,6 +361,7 @@ static DistributeObjectOps Collation_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_COLLATION, + .operationType = DIST_OPS_ALTER, .address = AlterCollationOwnerObjectAddress, .markDistributed = false, }; @@ -334,6 +371,7 @@ static DistributeObjectOps Collation_Define = { .preprocess = NULL, .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, .objectType = OBJECT_COLLATION, + .operationType = DIST_OPS_CREATE, .address = DefineCollationStmtObjectAddress, .markDistributed = true, }; @@ -342,6 +380,7 @@ static DistributeObjectOps Collation_Drop = { .qualify = QualifyDropCollationStmt, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -351,6 +390,7 @@ static DistributeObjectOps Collation_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_COLLATION, + .operationType = DIST_OPS_ALTER, .address = RenameCollationStmtObjectAddress, .markDistributed = false, }; @@ -360,6 +400,7 @@ static DistributeObjectOps Database_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = 
PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_DATABASE, + .operationType = DIST_OPS_ALTER, .featureFlag = &EnableAlterDatabaseOwner, .address = AlterDatabaseOwnerObjectAddress, .markDistributed = false, @@ -370,6 +411,7 @@ static DistributeObjectOps Domain_Alter = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_DOMAIN, + .operationType = DIST_OPS_ALTER, .address = AlterDomainStmtObjectAddress, .markDistributed = false, }; @@ -379,6 +421,7 @@ static DistributeObjectOps Domain_AlterObjectSchema = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_DOMAIN, + .operationType = DIST_OPS_ALTER, .address = AlterTypeSchemaStmtObjectAddress, .markDistributed = false, }; @@ -388,6 +431,7 @@ static DistributeObjectOps Domain_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_DOMAIN, + .operationType = DIST_OPS_ALTER, .address = AlterDomainOwnerStmtObjectAddress, .markDistributed = false, }; @@ -396,6 +440,7 @@ static DistributeObjectOps Domain_Drop = { .qualify = QualifyDropDomainStmt, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -405,6 +450,7 @@ static DistributeObjectOps Domain_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_DOMAIN, + .operationType = DIST_OPS_ALTER, .address = RenameDomainStmtObjectAddress, .markDistributed = false, }; @@ -415,6 +461,7 @@ static DistributeObjectOps Domain_RenameConstraint = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_DOMAIN, + .operationType = DIST_OPS_ALTER, .address = DomainRenameConstraintStmtObjectAddress, .markDistributed = false, }; @@ -423,6 +470,7 @@ static DistributeObjectOps Extension_AlterObjectSchema = { .qualify = NULL, .preprocess = PreprocessAlterExtensionSchemaStmt, .postprocess = PostprocessAlterExtensionSchemaStmt, + .operationType = DIST_OPS_ALTER, .address = AlterExtensionSchemaStmtObjectAddress, .markDistributed = false, }; @@ -431,6 +479,7 @@ static DistributeObjectOps Extension_Drop = { .qualify = NULL, .preprocess = PreprocessDropExtensionStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -439,6 +488,7 @@ static DistributeObjectOps FDW_Grant = { .qualify = NULL, .preprocess = PreprocessGrantOnFDWStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -447,6 +497,7 @@ static DistributeObjectOps ForeignServer_Drop = { .qualify = NULL, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -455,6 +506,7 @@ static DistributeObjectOps ForeignServer_Grant = { .qualify = NULL, .preprocess = PreprocessGrantOnForeignServerStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -464,6 +516,7 @@ static DistributeObjectOps ForeignServer_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_FOREIGN_SERVER, + .operationType = DIST_OPS_ALTER, .address = RenameForeignServerStmtObjectAddress, .markDistributed = false, }; @@ -473,6 +526,7 @@ static DistributeObjectOps 
ForeignServer_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_FOREIGN_SERVER, + .operationType = DIST_OPS_ALTER, .address = AlterForeignServerOwnerStmtObjectAddress, .markDistributed = false, }; @@ -481,6 +535,7 @@ static DistributeObjectOps ForeignTable_AlterTable = { .qualify = NULL, .preprocess = PreprocessAlterTableStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -489,6 +544,7 @@ static DistributeObjectOps Function_AlterObjectDepends = { .qualify = QualifyAlterFunctionDependsStmt, .preprocess = PreprocessAlterFunctionDependsStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionDependsStmtObjectAddress, .markDistributed = false, }; @@ -498,6 +554,7 @@ static DistributeObjectOps Function_AlterObjectSchema = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionSchemaStmtObjectAddress, .markDistributed = false, }; @@ -507,6 +564,7 @@ static DistributeObjectOps Function_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; @@ -515,6 +573,7 @@ static DistributeObjectOps Function_Drop = { .qualify = NULL, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -523,6 +582,7 @@ static DistributeObjectOps Function_Grant = { .qualify = NULL, .preprocess = PreprocessGrantOnFunctionStmt, .postprocess = PostprocessGrantOnFunctionStmt, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -531,6 +591,7 @@ static DistributeObjectOps View_Drop = { .qualify = QualifyDropViewStmt, .preprocess = PreprocessDropViewStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = DropViewStmtObjectAddress, .markDistributed = false, }; @@ -540,6 +601,7 @@ static DistributeObjectOps Function_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = RenameFunctionStmtObjectAddress, .markDistributed = false, }; @@ -548,6 +610,7 @@ static DistributeObjectOps Index_AlterTable = { .qualify = NULL, .preprocess = PreprocessAlterTableStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -556,6 +619,7 @@ static DistributeObjectOps Index_Drop = { .qualify = NULL, .preprocess = PreprocessDropIndexStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -564,6 +628,7 @@ static DistributeObjectOps Policy_Drop = { .qualify = NULL, .preprocess = PreprocessDropPolicyStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -572,6 +637,7 @@ static DistributeObjectOps Procedure_AlterObjectDepends = { .qualify = QualifyAlterFunctionDependsStmt, .preprocess = PreprocessAlterFunctionDependsStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionDependsStmtObjectAddress, .markDistributed = false, }; @@ -581,6 +647,7 @@ static DistributeObjectOps Procedure_AlterObjectSchema = { 
.preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionSchemaStmtObjectAddress, .markDistributed = false, }; @@ -590,6 +657,7 @@ static DistributeObjectOps Procedure_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; @@ -598,6 +666,7 @@ static DistributeObjectOps Procedure_Drop = { .qualify = NULL, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -606,6 +675,7 @@ static DistributeObjectOps Procedure_Grant = { .qualify = NULL, .preprocess = PreprocessGrantOnFunctionStmt, .postprocess = PostprocessGrantOnFunctionStmt, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -615,6 +685,7 @@ static DistributeObjectOps Procedure_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = RenameFunctionStmtObjectAddress, .markDistributed = false, }; @@ -623,6 +694,7 @@ static DistributeObjectOps Routine_AlterObjectDepends = { .qualify = QualifyAlterFunctionDependsStmt, .preprocess = PreprocessAlterFunctionDependsStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionDependsStmtObjectAddress, .markDistributed = false, }; @@ -631,6 +703,7 @@ static DistributeObjectOps Sequence_Alter = { .qualify = NULL, .preprocess = PreprocessAlterSequenceStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = AlterSequenceStmtObjectAddress, .markDistributed = false, }; @@ -639,6 +712,7 @@ static DistributeObjectOps Sequence_AlterObjectSchema = { .qualify = QualifyAlterSequenceSchemaStmt, .preprocess = PreprocessAlterSequenceSchemaStmt, .postprocess = PostprocessAlterSequenceSchemaStmt, + .operationType = DIST_OPS_ALTER, .address = AlterSequenceSchemaStmtObjectAddress, .markDistributed = false, }; @@ -647,6 +721,7 @@ static DistributeObjectOps Sequence_AlterOwner = { .qualify = QualifyAlterSequenceOwnerStmt, .preprocess = PreprocessAlterSequenceOwnerStmt, .postprocess = PostprocessAlterSequenceOwnerStmt, + .operationType = DIST_OPS_ALTER, .address = AlterSequenceOwnerStmtObjectAddress, .markDistributed = false, }; @@ -655,6 +730,7 @@ static DistributeObjectOps Sequence_Drop = { .qualify = QualifyDropSequenceStmt, .preprocess = PreprocessDropSequenceStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = SequenceDropStmtObjectAddress, .markDistributed = false, }; @@ -663,6 +739,7 @@ static DistributeObjectOps Sequence_Grant = { .qualify = QualifyGrantOnSequenceStmt, .preprocess = PreprocessGrantOnSequenceStmt, .postprocess = PostprocessGrantOnSequenceStmt, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -671,6 +748,7 @@ static DistributeObjectOps Sequence_Rename = { .qualify = QualifyRenameSequenceStmt, .preprocess = PreprocessRenameSequenceStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = RenameSequenceStmtObjectAddress, .markDistributed = false, }; @@ -680,6 +758,7 @@ static DistributeObjectOps TextSearchConfig_Alter = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = 
OBJECT_TSCONFIGURATION, + .operationType = DIST_OPS_ALTER, .address = AlterTextSearchConfigurationStmtObjectAddress, .markDistributed = false, }; @@ -689,6 +768,7 @@ static DistributeObjectOps TextSearchConfig_AlterObjectSchema = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_TSCONFIGURATION, + .operationType = DIST_OPS_ALTER, .address = AlterTextSearchConfigurationSchemaStmtObjectAddress, .markDistributed = false, }; @@ -698,6 +778,7 @@ static DistributeObjectOps TextSearchConfig_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_TSCONFIGURATION, + .operationType = DIST_OPS_ALTER, .address = AlterTextSearchConfigurationOwnerObjectAddress, .markDistributed = false, }; @@ -707,6 +788,7 @@ static DistributeObjectOps TextSearchConfig_Comment = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_TSCONFIGURATION, + .operationType = DIST_OPS_ALTER, .address = TextSearchConfigurationCommentObjectAddress, .markDistributed = false, }; @@ -716,6 +798,7 @@ static DistributeObjectOps TextSearchConfig_Define = { .preprocess = NULL, .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, .objectType = OBJECT_TSCONFIGURATION, + .operationType = DIST_OPS_CREATE, .address = CreateTextSearchConfigurationObjectAddress, .markDistributed = true, }; @@ -724,6 +807,7 @@ static DistributeObjectOps TextSearchConfig_Drop = { .qualify = QualifyDropTextSearchConfigurationStmt, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = DropTextSearchConfigObjectAddress, .markDistributed = false, }; @@ -733,6 +817,7 @@ static DistributeObjectOps TextSearchConfig_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_TSCONFIGURATION, + .operationType = DIST_OPS_ALTER, .address = RenameTextSearchConfigurationStmtObjectAddress, .markDistributed = false, }; @@ -742,6 +827,7 @@ static DistributeObjectOps TextSearchDict_Alter = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_TSDICTIONARY, + .operationType = DIST_OPS_ALTER, .address = AlterTextSearchDictionaryStmtObjectAddress, .markDistributed = false, }; @@ -751,6 +837,7 @@ static DistributeObjectOps TextSearchDict_AlterObjectSchema = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_TSDICTIONARY, + .operationType = DIST_OPS_ALTER, .address = AlterTextSearchDictionarySchemaStmtObjectAddress, .markDistributed = false, }; @@ -760,6 +847,7 @@ static DistributeObjectOps TextSearchDict_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_TSDICTIONARY, + .operationType = DIST_OPS_ALTER, .address = AlterTextSearchDictOwnerObjectAddress, .markDistributed = false, }; @@ -769,6 +857,7 @@ static DistributeObjectOps TextSearchDict_Comment = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_TSDICTIONARY, + .operationType = DIST_OPS_ALTER, .address = TextSearchDictCommentObjectAddress, .markDistributed = false, }; @@ -778,6 +867,7 @@ static DistributeObjectOps TextSearchDict_Define = { .preprocess = NULL, .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, .objectType = 
OBJECT_TSDICTIONARY, + .operationType = DIST_OPS_CREATE, .address = CreateTextSearchDictObjectAddress, .markDistributed = true, }; @@ -786,6 +876,7 @@ static DistributeObjectOps TextSearchDict_Drop = { .qualify = QualifyDropTextSearchDictionaryStmt, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = DropTextSearchDictObjectAddress, .markDistributed = false, }; @@ -795,6 +886,7 @@ static DistributeObjectOps TextSearchDict_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_TSDICTIONARY, + .operationType = DIST_OPS_ALTER, .address = RenameTextSearchDictionaryStmtObjectAddress, .markDistributed = false, }; @@ -803,6 +895,7 @@ static DistributeObjectOps Trigger_AlterObjectDepends = { .qualify = NULL, .preprocess = PreprocessAlterTriggerDependsStmt, .postprocess = PostprocessAlterTriggerDependsStmt, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -812,6 +905,7 @@ static DistributeObjectOps Routine_AlterObjectSchema = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionSchemaStmtObjectAddress, .markDistributed = false, }; @@ -821,6 +915,7 @@ static DistributeObjectOps Routine_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; @@ -829,6 +924,7 @@ static DistributeObjectOps Routine_Drop = { .qualify = NULL, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -837,6 +933,7 @@ static DistributeObjectOps Routine_Grant = { .qualify = NULL, .preprocess = PreprocessGrantOnFunctionStmt, .postprocess = PostprocessGrantOnFunctionStmt, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -846,6 +943,7 @@ static DistributeObjectOps Routine_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_FUNCTION, + .operationType = DIST_OPS_ALTER, .address = RenameFunctionStmtObjectAddress, .markDistributed = false, }; @@ -854,6 +952,7 @@ static DistributeObjectOps Schema_Drop = { .qualify = NULL, .preprocess = PreprocessDropSchemaStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -862,6 +961,7 @@ static DistributeObjectOps Schema_Grant = { .qualify = NULL, .preprocess = PreprocessGrantOnSchemaStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -871,6 +971,7 @@ static DistributeObjectOps Schema_Rename = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_SCHEMA, + .operationType = DIST_OPS_ALTER, .address = AlterSchemaRenameStmtObjectAddress, .markDistributed = false, }; @@ -879,6 +980,7 @@ static DistributeObjectOps Statistics_Alter = { .qualify = QualifyAlterStatisticsStmt, .preprocess = PreprocessAlterStatisticsStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -887,6 +989,7 @@ static DistributeObjectOps Statistics_AlterObjectSchema = { .qualify = QualifyAlterStatisticsSchemaStmt, .preprocess = 
PreprocessAlterStatisticsSchemaStmt, .postprocess = PostprocessAlterStatisticsSchemaStmt, + .operationType = DIST_OPS_ALTER, .address = AlterStatisticsSchemaStmtObjectAddress, .markDistributed = false, }; @@ -894,6 +997,7 @@ static DistributeObjectOps Statistics_AlterOwner = { .deparse = DeparseAlterStatisticsOwnerStmt, .qualify = QualifyAlterStatisticsOwnerStmt, .preprocess = PreprocessAlterStatisticsOwnerStmt, + .operationType = DIST_OPS_ALTER, .postprocess = PostprocessAlterStatisticsOwnerStmt, .address = NULL, .markDistributed = false, @@ -903,6 +1007,7 @@ static DistributeObjectOps Statistics_Drop = { .qualify = QualifyDropStatisticsStmt, .preprocess = PreprocessDropStatisticsStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = DropStatisticsObjectAddress, .markDistributed = false, }; @@ -911,6 +1016,7 @@ static DistributeObjectOps Statistics_Rename = { .qualify = QualifyAlterStatisticsRenameStmt, .preprocess = PreprocessAlterStatisticsRenameStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -919,6 +1025,7 @@ static DistributeObjectOps Table_AlterTable = { .qualify = NULL, .preprocess = PreprocessAlterTableStmt, .postprocess = NULL, + .operationType = DIST_OPS_ALTER, .address = NULL, .markDistributed = false, }; @@ -927,6 +1034,7 @@ static DistributeObjectOps Table_AlterObjectSchema = { .qualify = QualifyAlterTableSchemaStmt, .preprocess = PreprocessAlterTableSchemaStmt, .postprocess = PostprocessAlterTableSchemaStmt, + .operationType = DIST_OPS_ALTER, .address = AlterTableSchemaStmtObjectAddress, .markDistributed = false, }; @@ -935,6 +1043,7 @@ static DistributeObjectOps Table_Drop = { .qualify = NULL, .preprocess = PreprocessDropTableStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, .markDistributed = false, }; @@ -944,6 +1053,7 @@ static DistributeObjectOps Type_AlterObjectSchema = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_TYPE, + .operationType = DIST_OPS_ALTER, .address = AlterTypeSchemaStmtObjectAddress, .markDistributed = false, }; @@ -960,6 +1070,7 @@ static DistributeObjectOps View_AlterObjectSchema = { .qualify = QualifyAlterViewSchemaStmt, .preprocess = PreprocessAlterViewSchemaStmt, .postprocess = PostprocessAlterViewSchemaStmt, + .operationType = DIST_OPS_ALTER, .address = AlterViewSchemaStmtObjectAddress, .markDistributed = false, }; @@ -969,6 +1080,7 @@ static DistributeObjectOps Type_AlterOwner = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = PostprocessAlterDistributedObjectStmt, .objectType = OBJECT_TYPE, + .operationType = DIST_OPS_ALTER, .address = AlterTypeOwnerObjectAddress, .markDistributed = false, }; @@ -978,6 +1090,7 @@ static DistributeObjectOps Type_AlterTable = { .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_TYPE, + .operationType = DIST_OPS_ALTER, .address = AlterTypeStmtObjectAddress, .markDistributed = false, }; @@ -994,6 +1107,7 @@ static DistributeObjectOps View_AlterView = { .qualify = QualifyAlterViewStmt, .preprocess = PreprocessAlterViewStmt, .postprocess = PostprocessAlterViewStmt, + .operationType = DIST_OPS_ALTER, .address = AlterViewStmtObjectAddress, .markDistributed = false, }; @@ -1002,6 +1116,7 @@ static DistributeObjectOps Type_Drop = { .qualify = NULL, .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, + .operationType = DIST_OPS_DROP, .address = NULL, 
.markDistributed = false,
 };
@@ -1010,6 +1125,7 @@ static DistributeObjectOps Trigger_Drop = {
 	.qualify = NULL,
 	.preprocess = PreprocessDropTriggerStmt,
 	.postprocess = NULL,
+	.operationType = DIST_OPS_DROP,
 	.address = NULL,
 	.markDistributed = false,
 };
@@ -1019,6 +1135,7 @@ static DistributeObjectOps Type_Rename = {
 	.preprocess = PreprocessAlterDistributedObjectStmt,
 	.postprocess = NULL,
 	.objectType = OBJECT_TYPE,
+	.operationType = DIST_OPS_ALTER,
 	.address = RenameTypeStmtObjectAddress,
 	.markDistributed = false,
 };
@@ -1027,6 +1144,7 @@ static DistributeObjectOps Vacuum_Analyze = {
 	.qualify = NULL,
 	.preprocess = NULL,
 	.postprocess = PostprocessVacuumStmt,
+	.operationType = DIST_OPS_NONE,
 	.address = NULL,
 	.markDistributed = false,
 };
@@ -1042,6 +1160,7 @@ static DistributeObjectOps View_Rename = {
 	.qualify = QualifyRenameViewStmt,
 	.preprocess = PreprocessRenameViewStmt,
 	.postprocess = NULL,
+	.operationType = DIST_OPS_ALTER,
 	.address = RenameViewStmtObjectAddress,
 	.markDistributed = false,
 };
@@ -1049,6 +1168,7 @@ static DistributeObjectOps Trigger_Rename = {
 	.deparse = NULL,
 	.qualify = NULL,
 	.preprocess = NULL,
+	.operationType = DIST_OPS_ALTER,
 	.postprocess = PostprocessAlterTriggerRenameStmt,
 	.address = NULL,
 	.markDistributed = false,
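To make the shape of these registrations concrete, the following is a hypothetical entry (an editorial sketch; Example_Drop does not exist in the Citus source) showing where the new operationType member sits among the existing handler fields:

	static DistributeObjectOps Example_Drop = {
		.deparse = NULL,
		.qualify = NULL,
		.preprocess = PreprocessDropDistributedObjectStmt,
		.postprocess = NULL,
		.operationType = DIST_OPS_DROP,
		.address = NULL,
		.markDistributed = false,
	};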
diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c
index dcec5c2cc..765fd50df 100644
--- a/src/backend/distributed/commands/role.c
+++ b/src/backend/distributed/commands/role.c
@@ -124,6 +124,13 @@ RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok)
 		Oid roleOid = get_rolespec_oid(role, missing_ok);
 		ObjectAddressSet(*address, AuthIdRelationId, roleOid);
 	}
+	else
+	{
+		/*
+		 * If rolespec is null, role can be 'ALL'. We should be returning a pseudo-valid oid.
+		 */
+		ObjectAddressSet(*address, AuthIdRelationId, OID_MAX);
+	}
 
 	return list_make1(address);
 }
diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c
index a85d2db48..f6a9dc610 100644
--- a/src/backend/distributed/commands/statistics.c
+++ b/src/backend/distributed/commands/statistics.c
@@ -359,9 +359,19 @@ AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk)
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
 
 	ObjectAddress *address = palloc0(sizeof(ObjectAddress));
-	String *statName = llast((List *) stmt->object);
-	Oid statsOid = get_statistics_object_oid(list_make2(makeString(stmt->newschema),
-														statName), missingOk);
+	List *statName = (List *) stmt->object;
+	Oid statsOid = get_statistics_object_oid(statName, true);
+
+	if (statsOid == InvalidOid)
+	{
+		/*
+		 * We could not find the stat; it might already have been moved to the new
+		 * schema, so we construct a new stat name that uses the new schema and search again.
+		 */
+		List *newStatName = list_make2(makeString(stmt->newschema), llast(statName));
+		statsOid = get_statistics_object_oid(newStatName, missingOk);
+	}
+
 	ObjectAddressSet(*address, StatisticExtRelationId, statsOid);
 
 	return list_make1(address);
diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c
index 40b3e1c62..a6ae56afa 100644
--- a/src/backend/distributed/commands/utility_hook.c
+++ b/src/backend/distributed/commands/utility_hook.c
@@ -550,7 +550,10 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 		 * Therefore, we first check if all addresses in the given statement are valid.
 		 * Then, we do not execute qualify and preprocess if any address is invalid, to
 		 * prevent the aforementioned Citus-related messages. PG will complain about the
-		 * invalid address, so we are safe to not execute qualify and preprocess.
+		 * invalid address, so we are safe to not execute qualify and preprocess. Also
+		 * note that we should not guard any step after standard_ProcessUtility with
+		 * the flag distOpsHasInvalidObject because PG would have already failed the
+		 * transaction.
 		 */
 		distOpsHasInvalidObject = DistOpsHasInvalidObject(parsetree, ops);
 
diff --git a/src/backend/distributed/utils/citus_depended_object.c b/src/backend/distributed/utils/citus_depended_object.c
index 9c0055b1f..07e004f1c 100644
--- a/src/backend/distributed/utils/citus_depended_object.c
+++ b/src/backend/distributed/utils/citus_depended_object.c
@@ -32,6 +32,7 @@
 #include "catalog/pg_type.h"
 #include "distributed/citus_depended_object.h"
 #include "distributed/metadata_cache.h"
+#include "distributed/commands.h"
 #include "distributed/listutils.h"
 #include "distributed/log_utils.h"
 #include "distributed/shared_library_init.h"
@@ -48,6 +49,8 @@ bool HideCitusDependentObjects = false;
 
 static Node * CreateCitusDependentObjectExpr(int pgMetaTableVarno, int pgMetaTableOid);
 static List * GetCitusDependedObjectArgs(int pgMetaTableVarno, int pgMetaTableOid);
+static bool StatementContainsIfExist(Node *node);
+static bool AlterRoleSetStatementContainsAll(Node *node);
 
 /*
  * IsPgLocksTable returns true if RTE is pg_locks table.
@@ -315,12 +318,26 @@ GetCitusDependedObjectArgs(int pgMetaTableVarno, int pgMetaTableOid)
  * is invalid; otherwise, returns false. If ops is NULL or has no address
  * method implemented, we return false.
  *
- * If EnableUnsupportedFeatureMessages is active, then we return false.
+ * We have some dist ops for which we should not validate:
+ * 1) We should not validate CREATE statements because no address exists
+ * here yet.
+ * 2) We should not validate '[DROP|ALTER] ... IF EXISTS' statements because
+ * their semantics permit the object to be missing.
+ * 3) We should not validate 'ALTER ROLE ALL [SET|RESET]' because, for the
+ * role ALL, AlterRoleSetStmtObjectAddress returns an invalid address even
+ * though it should not.
 */
 bool
 DistOpsHasInvalidObject(Node *node, const DistributeObjectOps *ops)
 {
-	if (EnableUnsupportedFeatureMessages)
+	if (ops && ops->operationType == DIST_OPS_CREATE)
+	{
+		return false;
+	}
+	else if (StatementContainsIfExist(node))
+	{
+		return false;
+	}
+	else if (AlterRoleSetStatementContainsAll(node))
 	{
 		return false;
 	}
@@ -340,3 +357,131 @@ DistOpsHasInvalidObject(Node *node, const DistributeObjectOps *ops)
 
 	return false;
 }
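/*
 * Editorial illustration, not part of the patch: hypothetical statements
 * showing how the exemptions above play out in practice.
 *
 *   CREATE TYPE new_type AS (a int);        -- DIST_OPS_CREATE: never flagged
 *   DROP TABLE IF EXISTS no_such_table;     -- IF EXISTS: never flagged
 *   ALTER ROLE ALL SET search_path TO '';   -- NULL rolespec ('ALL'): never flagged
 *   ALTER VIEW no_such_view RENAME TO v2;   -- validated: flagged as invalid
 */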
+
+
+/*
+ * StatementContainsIfExist returns true if the statement contains
+ * IF EXISTS syntax.
+ */
+static bool
+StatementContainsIfExist(Node *node)
+{
+	if (node == NULL)
+	{
+		return false;
+	}
+
+	switch (nodeTag(node))
+	{
+		case T_DropStmt:
+		{
+			DropStmt *dropStmt = castNode(DropStmt, node);
+			return dropStmt->missing_ok;
+		}
+
+		case T_DropRoleStmt:
+		{
+			DropRoleStmt *dropRoleStmt = castNode(DropRoleStmt, node);
+			return dropRoleStmt->missing_ok;
+		}
+
+		case T_DropdbStmt:
+		{
+			DropdbStmt *dropdbStmt = castNode(DropdbStmt, node);
+			return dropdbStmt->missing_ok;
+		}
+
+		case T_DropTableSpaceStmt:
+		{
+			DropTableSpaceStmt *dropTableSpaceStmt = castNode(DropTableSpaceStmt, node);
+			return dropTableSpaceStmt->missing_ok;
+		}
+
+		case T_DropUserMappingStmt:
+		{
+			DropUserMappingStmt *dropUserMappingStmt = castNode(DropUserMappingStmt,
+																node);
+			return dropUserMappingStmt->missing_ok;
+		}
+
+		case T_DropSubscriptionStmt:
+		{
+			DropSubscriptionStmt *dropSubscriptionStmt = castNode(DropSubscriptionStmt,
+																  node);
+			return dropSubscriptionStmt->missing_ok;
+		}
+
+		case T_AlterTableStmt:
+		{
+			AlterTableStmt *alterTableStmt = castNode(AlterTableStmt, node);
+			return alterTableStmt->missing_ok;
+		}
+
+		case T_AlterDomainStmt:
+		{
+			AlterDomainStmt *alterDomainStmt = castNode(AlterDomainStmt, node);
+			return alterDomainStmt->missing_ok;
+		}
+
+		case T_AlterSeqStmt:
+		{
+			AlterSeqStmt *alterSeqStmt = castNode(AlterSeqStmt, node);
+			return alterSeqStmt->missing_ok;
+		}
+
+		case T_AlterStatsStmt:
+		{
+			AlterStatsStmt *alterStatsStmt = castNode(AlterStatsStmt, node);
+			return alterStatsStmt->missing_ok;
+		}
+
+		case T_RenameStmt:
+		{
+			RenameStmt *renameStmt = castNode(RenameStmt, node);
+			return renameStmt->missing_ok;
+		}
+
+		case T_AlterObjectSchemaStmt:
+		{
+			AlterObjectSchemaStmt *alterObjectSchemaStmt = castNode(AlterObjectSchemaStmt,
+																	node);
+			return alterObjectSchemaStmt->missing_ok;
+		}
+
+		case T_AlterTSConfigurationStmt:
+		{
+			AlterTSConfigurationStmt *alterTSConfigurationStmt = castNode(
+				AlterTSConfigurationStmt, node);
+			return alterTSConfigurationStmt->missing_ok;
+		}
+
+		default:
+		{
+			return false;
+		}
+	}
+}
+
+
+/*
+ * AlterRoleSetStatementContainsAll returns true if the statement is an
+ * ALTER ROLE ALL (SET / RESET) statement.
+ */
+static bool
+AlterRoleSetStatementContainsAll(Node *node)
+{
+	if (node == NULL)
+	{
+		return false;
+	}
+
+	if (nodeTag(node) == T_AlterRoleSetStmt)
+	{
+		/* rolespec is null for the role 'ALL' */
+		AlterRoleSetStmt *alterRoleSetStmt = castNode(AlterRoleSetStmt, node);
+
+		return alterRoleSetStmt->role == NULL;
+	}
+
+	return false;
+}
diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h
index f421a1255..ffd2782bb 100644
--- a/src/include/distributed/commands.h
+++ b/src/include/distributed/commands.h
@@ -39,6 +39,14 @@ extern void SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(Oid
 													  Oid partitionRelationId);
 
+typedef enum DistOpsOperationType
+{
+	DIST_OPS_NONE,
+	DIST_OPS_CREATE,
+	DIST_OPS_ALTER,
+	DIST_OPS_DROP,
+} DistOpsOperationType;
+
 
 /*
  * DistributeObjectOps specifies handlers for node/object type pairs.
@@ -74,6 +82,9 @@ typedef struct DistributeObjectOps
 	 * common propagation functions will not propagate the creation of the object.
 	 */
 	bool *featureFlag;
+
+	/* specifies the type of the operation */
+	DistOpsOperationType operationType;
 } DistributeObjectOps;
 
 #define CITUS_TRUNCATE_TRIGGER_NAME "citus_truncate_trigger"
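One property of the enum worth noting (an editorial aside; Example_Minimal below is hypothetical): because DIST_OPS_NONE is the first enumerator, a DistributeObjectOps registration that omits .operationType from its designated initializer is still well defined, since C zero-initializes the remaining members and DIST_OPS_NONE is 0:

	/* .operationType is implicitly DIST_OPS_NONE (0) when omitted */
	static DistributeObjectOps Example_Minimal = {
		.preprocess = PreprocessClusterStmt,
		.markDistributed = false,
	};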
From 7387c7ed3d198a2ea836b7b2547d6cff2f6834f0 Mon Sep 17 00:00:00 2001
From: aykutbozkurt
Date: Tue, 2 Aug 2022 13:06:54 +0300
Subject: [PATCH 38/38] address method should take parameter isPostprocess

---
 src/backend/distributed/commands/collation.c  |   8 +-
 src/backend/distributed/commands/common.c     |  10 +-
 src/backend/distributed/commands/database.c   |   2 +-
 src/backend/distributed/commands/domain.c     |  10 +-
 src/backend/distributed/commands/extension.c  |   8 +-
 .../distributed/commands/foreign_server.c     |   8 +-
 src/backend/distributed/commands/function.c   |  21 +-
 src/backend/distributed/commands/index.c      |   2 +-
 src/backend/distributed/commands/role.c       |  17 +-
 src/backend/distributed/commands/schema.c     |   4 +-
 src/backend/distributed/commands/sequence.c   |  24 +--
 src/backend/distributed/commands/statistics.c |  28 ++-
 src/backend/distributed/commands/table.c      |   6 +-
 .../distributed/commands/text_search.c        |  33 ++--
 src/backend/distributed/commands/trigger.c    |   4 +-
 src/backend/distributed/commands/type.c       |  16 +-
 .../distributed/commands/utility_hook.c       |   6 +-
 src/backend/distributed/commands/view.c       |  22 +--
 .../distributed/deparser/objectaddress.c      |   8 +-
 .../distributed/utils/citus_depended_object.c |  28 ++-
 .../worker/worker_create_or_replace.c         |   2 +-
 src/include/distributed/commands.h            | 180 +++++++++++-------
 src/include/distributed/deparser.h            |   6 +-
 23 files changed, 260 insertions(+), 193 deletions(-)

diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c
index 834e847a1..8904ab674 100644
--- a/src/backend/distributed/commands/collation.c
+++ b/src/backend/distributed/commands/collation.c
@@ -170,7 +170,7 @@ CreateCollationDDLsIdempotent(Oid collationId)
 
 List *
-AlterCollationOwnerObjectAddress(Node *node, bool missing_ok)
+AlterCollationOwnerObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
 	Relation relation;
@@ -192,7 +192,7 @@ AlterCollationOwnerObjectAddress(Node *node, bool missing_ok)
  * of the RenameStmt. Errors if missing_ok is false.
 */
 List *
-RenameCollationStmtObjectAddress(Node *node, bool missing_ok)
+RenameCollationStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	RenameStmt *stmt = castNode(RenameStmt, node);
 	Assert(stmt->renameType == OBJECT_COLLATION);
@@ -215,7 +215,7 @@ RenameCollationStmtObjectAddress(Node *node, bool missing_ok)
  * schemas.
*/ List * -AlterCollationSchemaStmtObjectAddress(Node *node, bool missing_ok) +AlterCollationSchemaStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); Assert(stmt->objectType == OBJECT_COLLATION); @@ -297,7 +297,7 @@ GenerateBackupNameForCollationCollision(const ObjectAddress *address) List * -DefineCollationStmtObjectAddress(Node *node, bool missing_ok) +DefineCollationStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { DefineStmt *stmt = castNode(DefineStmt, node); Assert(stmt->kind == OBJECT_COLLATION); diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c index 0441abe05..797981d47 100644 --- a/src/backend/distributed/commands/common.c +++ b/src/backend/distributed/commands/common.c @@ -63,7 +63,7 @@ PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryS return NIL; } - List *addresses = GetObjectAddressListFromParseTree(stmt, false); + List *addresses = GetObjectAddressListFromParseTree(stmt, false, true); /* the code-path only supports a single object */ Assert(list_length(addresses) == 1); @@ -121,7 +121,7 @@ PreprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString, const DistributeObjectOps *ops = GetDistributeObjectOps(stmt); Assert(ops != NULL); - List *addresses = GetObjectAddressListFromParseTree(stmt, false); + List *addresses = GetObjectAddressListFromParseTree(stmt, false, false); /* the code-path only supports a single object */ Assert(list_length(addresses) == 1); @@ -170,7 +170,7 @@ PostprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString) const DistributeObjectOps *ops = GetDistributeObjectOps(stmt); Assert(ops != NULL); - List *addresses = GetObjectAddressListFromParseTree(stmt, false); + List *addresses = GetObjectAddressListFromParseTree(stmt, false, true); /* the code-path only supports a single object */ Assert(list_length(addresses) == 1); @@ -296,7 +296,7 @@ PreprocessDropDistributedObjectStmt(Node *node, const char *queryString, * the drop tsdict statement. */ List * -DropTextSearchDictObjectAddress(Node *node, bool missing_ok) +DropTextSearchDictObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { DropStmt *stmt = castNode(DropStmt, node); @@ -321,7 +321,7 @@ DropTextSearchDictObjectAddress(Node *node, bool missing_ok) * the drop tsconfig statement. */ List * -DropTextSearchConfigObjectAddress(Node *node, bool missing_ok) +DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { DropStmt *stmt = castNode(DropStmt, node); diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 2bd03d5d8..208d570eb 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -41,7 +41,7 @@ bool EnableAlterDatabaseOwner = true; * object of the AlterOwnerStmt. Errors if missing_ok is false. 
*/ List * -AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok) +AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); Assert(stmt->objectType == OBJECT_DATABASE); diff --git a/src/backend/distributed/commands/domain.c b/src/backend/distributed/commands/domain.c index 50d195d58..6c1bea4fd 100644 --- a/src/backend/distributed/commands/domain.c +++ b/src/backend/distributed/commands/domain.c @@ -230,7 +230,7 @@ MakeCollateClauseFromOid(Oid collationOid) * the domain cannot be found in the local catalog. */ List * -CreateDomainStmtObjectAddress(Node *node, bool missing_ok) +CreateDomainStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { CreateDomainStmt *stmt = castNode(CreateDomainStmt, node); @@ -249,7 +249,7 @@ CreateDomainStmtObjectAddress(Node *node, bool missing_ok) * found. */ List * -AlterDomainStmtObjectAddress(Node *node, bool missing_ok) +AlterDomainStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterDomainStmt *stmt = castNode(AlterDomainStmt, node); @@ -264,7 +264,7 @@ AlterDomainStmtObjectAddress(Node *node, bool missing_ok) * error if the domain cannot be found. */ List * -DomainRenameConstraintStmtObjectAddress(Node *node, bool missing_ok) +DomainRenameConstraintStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -279,7 +279,7 @@ DomainRenameConstraintStmtObjectAddress(Node *node, bool missing_ok) * cannot be found. */ List * -AlterDomainOwnerStmtObjectAddress(Node *node, bool missing_ok) +AlterDomainOwnerStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); Assert(stmt->objectType == OBJECT_DOMAIN); @@ -295,7 +295,7 @@ AlterDomainOwnerStmtObjectAddress(Node *node, bool missing_ok) * found. */ List * -RenameDomainStmtObjectAddress(Node *node, bool missing_ok) +RenameDomainStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { RenameStmt *stmt = castNode(RenameStmt, node); Assert(stmt->renameType == OBJECT_DOMAIN); diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 122c68dfa..542a41c10 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -181,7 +181,7 @@ PostprocessCreateExtensionStmt(Node *node, const char *queryString) (void *) createExtensionStmtSql, ENABLE_DDL_PROPAGATION); - List *extensionAddresses = GetObjectAddressListFromParseTree(node, false); + List *extensionAddresses = GetObjectAddressListFromParseTree(node, false, true); /* the code-path only supports a single object */ Assert(list_length(extensionAddresses) == 1); @@ -413,7 +413,7 @@ PreprocessAlterExtensionSchemaStmt(Node *node, const char *queryString, List * PostprocessAlterExtensionSchemaStmt(Node *node, const char *queryString) { - List *extensionAddresses = GetObjectAddressListFromParseTree(node, false); + List *extensionAddresses = GetObjectAddressListFromParseTree(node, false, true); /* the code-path only supports a single object */ Assert(list_length(extensionAddresses) == 1); @@ -1134,7 +1134,7 @@ GetDependentFDWsToExtension(Oid extensionId) * the subject of the AlterObjectSchemaStmt. Errors if missing_ok is false. 
*/ List * -AlterExtensionSchemaStmtObjectAddress(Node *node, bool missing_ok) +AlterExtensionSchemaStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); Assert(stmt->objectType == OBJECT_EXTENSION); @@ -1162,7 +1162,7 @@ AlterExtensionSchemaStmtObjectAddress(Node *node, bool missing_ok) * the subject of the AlterExtensionStmt. Errors if missing_ok is false. */ List * -AlterExtensionUpdateStmtObjectAddress(Node *node, bool missing_ok) +AlterExtensionUpdateStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterExtensionStmt *stmt = castNode(AlterExtensionStmt, node); const char *extensionName = stmt->extname; diff --git a/src/backend/distributed/commands/foreign_server.c b/src/backend/distributed/commands/foreign_server.c index b8fcf0412..7d19f9336 100644 --- a/src/backend/distributed/commands/foreign_server.c +++ b/src/backend/distributed/commands/foreign_server.c @@ -42,7 +42,7 @@ static List * GetObjectAddressByServerName(char *serverName, bool missing_ok); * was set to true. */ List * -CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok) +CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { CreateForeignServerStmt *stmt = castNode(CreateForeignServerStmt, node); @@ -59,7 +59,7 @@ CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok) * was set to true. */ List * -AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok) +AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterForeignServerStmt *stmt = castNode(AlterForeignServerStmt, node); @@ -124,7 +124,7 @@ PreprocessGrantOnForeignServerStmt(Node *node, const char *queryString, * was set to true. */ List * -RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok) +RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { RenameStmt *stmt = castNode(RenameStmt, node); Assert(stmt->renameType == OBJECT_FOREIGN_SERVER); @@ -142,7 +142,7 @@ RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok) * was set to true. */ List * -AlterForeignServerOwnerStmtObjectAddress(Node *node, bool missing_ok) +AlterForeignServerOwnerStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); char *serverName = strVal(stmt->object); diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 318f6242f..d04728252 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -1372,7 +1372,8 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString) return NIL; } - List *functionAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false); + List *functionAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false, + true); /* the code-path only supports a single object */ Assert(list_length(functionAddresses) == 1); @@ -1416,7 +1417,7 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString) * normal postgres error for unfound functions. 
*/ List * -CreateFunctionStmtObjectAddress(Node *node, bool missing_ok) +CreateFunctionStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { CreateFunctionStmt *stmt = castNode(CreateFunctionStmt, node); ObjectType objectType = OBJECT_FUNCTION; @@ -1461,7 +1462,7 @@ CreateFunctionStmtObjectAddress(Node *node, bool missing_ok) * objectId in the address can be invalid if missing_ok was set to true. */ List * -DefineAggregateStmtObjectAddress(Node *node, bool missing_ok) +DefineAggregateStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { DefineStmt *stmt = castNode(DefineStmt, node); @@ -1514,7 +1515,7 @@ PreprocessAlterFunctionStmt(Node *node, const char *queryString, AlterFunctionStmt *stmt = castNode(AlterFunctionStmt, node); AssertObjectTypeIsFunctional(stmt->objtype); - List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, false); + List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, false, false); /* the code-path only supports a single object */ Assert(list_length(addresses) == 1); @@ -1576,7 +1577,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString, return NIL; } - List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, true); + List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, true, false); /* the code-path only supports a single object */ Assert(list_length(addresses) == 1); @@ -1610,7 +1611,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString, * missing_ok is set to false the lookup will raise an error. */ List * -AlterFunctionDependsStmtObjectAddress(Node *node, bool missing_ok) +AlterFunctionDependsStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterObjectDependsStmt *stmt = castNode(AlterObjectDependsStmt, node); AssertObjectTypeIsFunctional(stmt->objectType); @@ -1626,7 +1627,7 @@ AlterFunctionDependsStmtObjectAddress(Node *node, bool missing_ok) * was unable to find the function/procedure that was the target of the statement. */ List * -AlterFunctionStmtObjectAddress(Node *node, bool missing_ok) +AlterFunctionStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterFunctionStmt *stmt = castNode(AlterFunctionStmt, node); return FunctionToObjectAddress(stmt->objtype, stmt->func, missing_ok); @@ -1638,7 +1639,7 @@ AlterFunctionStmtObjectAddress(Node *node, bool missing_ok) * subject of the RenameStmt. Errors if missing_ok is false. */ List * -RenameFunctionStmtObjectAddress(Node *node, bool missing_ok) +RenameFunctionStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { RenameStmt *stmt = castNode(RenameStmt, node); return FunctionToObjectAddress(stmt->renameType, @@ -1651,7 +1652,7 @@ RenameFunctionStmtObjectAddress(Node *node, bool missing_ok) * subject of the AlterOwnerStmt. Errors if missing_ok is false. */ List * -AlterFunctionOwnerObjectAddress(Node *node, bool missing_ok) +AlterFunctionOwnerObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); return FunctionToObjectAddress(stmt->objectType, @@ -1669,7 +1670,7 @@ AlterFunctionOwnerObjectAddress(Node *node, bool missing_ok) * the schemas. 
*/ List * -AlterFunctionSchemaStmtObjectAddress(Node *node, bool missing_ok) +AlterFunctionSchemaStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); AssertObjectTypeIsFunctional(stmt->objectType); diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 8fef77dc0..008f4fa90 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -658,7 +658,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand, * else, we add invalid address. */ List * -ReindexStmtObjectAddress(Node *stmt, bool missing_ok) +ReindexStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess) { ReindexStmt *reindexStatement = castNode(ReindexStmt, stmt); diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 765fd50df..74d14751e 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -88,7 +88,7 @@ bool EnableAlterRoleSetPropagation = true; * was unable to find the role that was the target of the statement. */ List * -AlterRoleStmtObjectAddress(Node *node, bool missing_ok) +AlterRoleStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterRoleStmt *stmt = castNode(AlterRoleStmt, node); return RoleSpecToObjectAddress(stmt->role, missing_ok); @@ -101,7 +101,7 @@ AlterRoleStmtObjectAddress(Node *node, bool missing_ok) * was unable to find the role that was the target of the statement. */ List * -AlterRoleSetStmtObjectAddress(Node *node, bool missing_ok) +AlterRoleSetStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterRoleSetStmt *stmt = castNode(AlterRoleSetStmt, node); return RoleSpecToObjectAddress(stmt->role, missing_ok); @@ -124,13 +124,6 @@ RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok) Oid roleOid = get_rolespec_oid(role, missing_ok); ObjectAddressSet(*address, AuthIdRelationId, roleOid); } - else - { - /* - * If rolespec is null, role can be 'ALL'. We should be returning a pseudo-valid oid. - */ - ObjectAddressSet(*address, AuthIdRelationId, OID_MAX); - } return list_make1(address); } @@ -144,7 +137,7 @@ RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok) List * PostprocessAlterRoleStmt(Node *node, const char *queryString) { - List *addresses = GetObjectAddressListFromParseTree(node, false); + List *addresses = GetObjectAddressListFromParseTree(node, false, true); /* the code-path only supports a single object */ Assert(list_length(addresses) == 1); @@ -219,7 +212,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString, return NIL; } - List *addresses = GetObjectAddressListFromParseTree(node, false); + List *addresses = GetObjectAddressListFromParseTree(node, false, false); /* the code-path only supports a single object */ Assert(list_length(addresses) == 1); @@ -1195,7 +1188,7 @@ ConfigGenericNameCompare(const void *a, const void *b) * to true. 
@@ -144,7 +137,7 @@ RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok)
 List *
 PostprocessAlterRoleStmt(Node *node, const char *queryString)
 {
-	List *addresses = GetObjectAddressListFromParseTree(node, false);
+	List *addresses = GetObjectAddressListFromParseTree(node, false, true);

 	/* the code-path only supports a single object */
 	Assert(list_length(addresses) == 1);
@@ -219,7 +212,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString,
 		return NIL;
 	}

-	List *addresses = GetObjectAddressListFromParseTree(node, false);
+	List *addresses = GetObjectAddressListFromParseTree(node, false, false);

 	/* the code-path only supports a single object */
 	Assert(list_length(addresses) == 1);
@@ -1195,7 +1188,7 @@ ConfigGenericNameCompare(const void *a, const void *b)
 * to true.
 */
 List *
-CreateRoleStmtObjectAddress(Node *node, bool missing_ok)
+CreateRoleStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	CreateRoleStmt *stmt = castNode(CreateRoleStmt, node);
 	Oid roleOid = get_role_oid(stmt->role, missing_ok);
diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c
index 571064130..3a5ee5bde 100644
--- a/src/backend/distributed/commands/schema.c
+++ b/src/backend/distributed/commands/schema.c
@@ -184,7 +184,7 @@ PreprocessGrantOnSchemaStmt(Node *node, const char *queryString,
 * the object of the CreateSchemaStmt. Errors if missing_ok is false.
 */
 List *
-CreateSchemaStmtObjectAddress(Node *node, bool missing_ok)
+CreateSchemaStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node);

@@ -214,7 +214,7 @@ CreateSchemaStmtObjectAddress(Node *node, bool missing_ok)
 * the object of the RenameStmt. Errors if missing_ok is false.
 */
 List *
-AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok)
+AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	RenameStmt *stmt = castNode(RenameStmt, node);
 	Assert(stmt->renameType == OBJECT_SCHEMA);
diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c
index a5311c714..99faad4a8 100644
--- a/src/backend/distributed/commands/sequence.c
+++ b/src/backend/distributed/commands/sequence.c
@@ -318,7 +318,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString,
 * statement.
 */
 List *
-SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok)
+SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess)
 {
 	DropStmt *dropSeqStmt = castNode(DropStmt, stmt);

@@ -357,7 +357,7 @@ PreprocessRenameSequenceStmt(Node *node, const char *queryString, ProcessUtility
 	Assert(stmt->renameType == OBJECT_SEQUENCE);

 	List *addresses = GetObjectAddressListFromParseTree((Node *) stmt,
-														stmt->missing_ok);
+														stmt->missing_ok, false);

 	/* the code-path only supports a single object */
 	Assert(list_length(addresses) == 1);
@@ -384,7 +384,7 @@ PreprocessRenameSequenceStmt(Node *node, const char *queryString, ProcessUtility
 * subject of the RenameStmt.
 */
 List *
-RenameSequenceStmtObjectAddress(Node *node, bool missing_ok)
+RenameSequenceStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	RenameStmt *stmt = castNode(RenameStmt, node);
 	Assert(stmt->renameType == OBJECT_SEQUENCE);
@@ -421,7 +421,7 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString,
 	AlterSeqStmt *stmt = castNode(AlterSeqStmt, node);

 	List *addresses = GetObjectAddressListFromParseTree((Node *) stmt,
-														stmt->missing_ok);
+														stmt->missing_ok, false);

 	/* the code-path only supports a single object */
 	Assert(list_length(addresses) == 1);
@@ -504,7 +504,7 @@ SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress)
 * subject of the AlterSeqStmt.
 */
 List *
-AlterSequenceStmtObjectAddress(Node *node, bool missing_ok)
+AlterSequenceStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterSeqStmt *stmt = castNode(AlterSeqStmt, node);

@@ -531,7 +531,7 @@ PreprocessAlterSequenceSchemaStmt(Node *node, const char *queryString,
 	Assert(stmt->objectType == OBJECT_SEQUENCE);

 	List *addresses = GetObjectAddressListFromParseTree((Node *) stmt,
-														stmt->missing_ok);
+														stmt->missing_ok, false);

 	/* the code-path only supports a single object */
 	Assert(list_length(addresses) == 1);
@@ -558,7 +558,7 @@ PreprocessAlterSequenceSchemaStmt(Node *node, const char *queryString,
 * the subject of the AlterObjectSchemaStmt.
 */
 List *
-AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok)
+AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
 	Assert(stmt->objectType == OBJECT_SEQUENCE);
@@ -609,7 +609,7 @@ PostprocessAlterSequenceSchemaStmt(Node *node, const char *queryString)
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
 	Assert(stmt->objectType == OBJECT_SEQUENCE);
 	List *addresses = GetObjectAddressListFromParseTree((Node *) stmt,
-														stmt->missing_ok);
+														stmt->missing_ok, true);

 	/* the code-path only supports a single object */
 	Assert(list_length(addresses) == 1);
@@ -640,7 +640,8 @@ PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString,
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
 	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);

-	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false);
+	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false,
+																false);

 	/* the code-path only supports a single object */
 	Assert(list_length(sequenceAddresses) == 1);
@@ -667,7 +668,7 @@ PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString,
 * subject of the AlterOwnerStmt.
 */
 List *
-AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok)
+AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
 	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
@@ -692,7 +693,8 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString)
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
 	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);

-	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false);
+	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false,
+																true);

 	/* the code-path only supports a single object */
 	Assert(list_length(sequenceAddresses) == 1);
diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c
index f6a9dc610..a65d6c0fe 100644
--- a/src/backend/distributed/commands/statistics.c
+++ b/src/backend/distributed/commands/statistics.c
@@ -120,7 +120,8 @@ PostprocessCreateStatisticsStmt(Node *node, const char *queryString)
 	}

 	bool missingOk = false;
-	List *objectAddresses = GetObjectAddressListFromParseTree((Node *) stmt, missingOk);
+	List *objectAddresses = GetObjectAddressListFromParseTree((Node *) stmt, missingOk,
+															  true);

 	/* the code-path only supports a single object */
 	Assert(list_length(objectAddresses) == 1);
@@ -140,7 +141,7 @@ PostprocessCreateStatisticsStmt(Node *node, const char *queryString)
 * was set to true.
 */
 List *
-CreateStatisticsStmtObjectAddress(Node *node, bool missingOk)
+CreateStatisticsStmtObjectAddress(Node *node, bool missingOk, bool isPostprocess)
 {
 	CreateStatsStmt *stmt = castNode(CreateStatsStmt, node);

@@ -215,7 +216,7 @@ PreprocessDropStatisticsStmt(Node *node, const char *queryString,
 * statement.
 */
 List *
-DropStatisticsObjectAddress(Node *node, bool missing_ok)
+DropStatisticsObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	DropStmt *dropStatisticsStmt = castNode(DropStmt, node);
 	Assert(dropStatisticsStmt->removeType == OBJECT_STATISTIC_EXT);
@@ -334,7 +335,8 @@ PostprocessAlterStatisticsSchemaStmt(Node *node, const char *queryString)
 	}

 	bool missingOk = false;
-	List *objectAddresses = GetObjectAddressListFromParseTree((Node *) stmt, missingOk);
+	List *objectAddresses = GetObjectAddressListFromParseTree((Node *) stmt, missingOk,
+															  true);

 	/* the code-path only supports a single object */
 	Assert(list_length(objectAddresses) == 1);
@@ -354,23 +356,29 @@ PostprocessAlterStatisticsSchemaStmt(Node *node, const char *queryString)
 * was set to true.
 */
 List *
-AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk)
+AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk, bool isPostprocess)
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);

 	ObjectAddress *address = palloc0(sizeof(ObjectAddress));
-	List *statName = (List *) stmt->object;
-	Oid statsOid = get_statistics_object_oid(statName, true);
+	Oid statsOid = InvalidOid;

-	if (statsOid == InvalidOid)
+	List *statName = (List *) stmt->object;
+
+	if (isPostprocess)
 	{
 		/*
-		 * couldn't find the stat, might have already been moved to the new schema, we
-		 * construct a new stat name that uses the new schema to search in.
+		 * We should search for the object in the new schema because this
+		 * function runs during postprocess, i.e. after standard_ProcessUtility
+		 * has already moved the statistics object into the new schema.
 		 */
 		List *newStatName = list_make2(makeString(stmt->newschema), llast(statName));
 		statsOid = get_statistics_object_oid(newStatName, missingOk);
 	}
+	else
+	{
+		statsOid = get_statistics_object_oid(statName, missingOk);
+	}

 	ObjectAddressSet(*address, StatisticExtRelationId, statsOid);
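The AlterStatisticsSchemaStmtObjectAddress hunk above is the pattern the new isPostprocess flag exists for: the same statement resolves to different names depending on whether standard_ProcessUtility has run yet. The following condensed restatement of that logic is a sketch only (the helper name is hypothetical), shown to make the two phases easier to compare:

	/* sketch: phase-dependent lookup for ALTER STATISTICS ... SET SCHEMA */
	static Oid
	LookupStatsOidForPhase(AlterObjectSchemaStmt *stmt, bool missingOk,
						   bool isPostprocess)
	{
		List *statName = (List *) stmt->object;

		if (isPostprocess)
		{
			/* the object was already moved; look it up under the new schema */
			List *newStatName = list_make2(makeString(stmt->newschema),
										   llast(statName));
			return get_statistics_object_oid(newStatName, missingOk);
		}

		/* preprocess: the object still lives under its original name */
		return get_statistics_object_oid(statName, missingOk);
	}

For example, for ALTER STATISTICS s1.my_stats SET SCHEMA s2, the preprocess phase resolves {s1, my_stats} while the postprocess phase resolves {s2, my_stats}.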
diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c
index a1f9a685e..d8fb4f59d 100644
--- a/src/backend/distributed/commands/table.c
+++ b/src/backend/distributed/commands/table.c
@@ -649,7 +649,7 @@ PostprocessAlterTableSchemaStmt(Node *node, const char *queryString)
 	/*
 	 * We will let Postgres deal with missing_ok
 	 */
-	List *tableAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true);
+	List *tableAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true, true);

 	/* the code-path only supports a single object */
 	Assert(list_length(tableAddresses) == 1);
@@ -1786,7 +1786,7 @@ PreprocessAlterTableSchemaStmt(Node *node, const char *queryString,
 	}

 	List *addresses = GetObjectAddressListFromParseTree((Node *) stmt,
-														stmt->missing_ok);
+														stmt->missing_ok, false);

 	/* the code-path only supports a single object */
 	Assert(list_length(addresses) == 1);
@@ -3369,7 +3369,7 @@ ErrorIfUnsupportedAlterAddConstraintStmt(AlterTableStmt *alterTableStatement)
 * be found in either of the schemas.
 */
 List *
-AlterTableSchemaStmtObjectAddress(Node *node, bool missing_ok)
+AlterTableSchemaStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
 	Assert(stmt->objectType == OBJECT_TABLE || stmt->objectType == OBJECT_FOREIGN_TABLE);
diff --git a/src/backend/distributed/commands/text_search.c b/src/backend/distributed/commands/text_search.c
index 22ff5df2f..54dfdae85 100644
--- a/src/backend/distributed/commands/text_search.c
+++ b/src/backend/distributed/commands/text_search.c
@@ -570,7 +570,8 @@ get_ts_parser_namelist(Oid tsparserOid)
 * the text search configuration described in the statement doesn't exist.
 */
 List *
-CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok)
+CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok, bool
+										   isPostprocess)
 {
 	DefineStmt *stmt = castNode(DefineStmt, node);
 	Assert(stmt->kind == OBJECT_TSCONFIGURATION);
@@ -589,7 +590,7 @@ CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok)
 * the text search dictionary described in the statement doesn't exist.
 */
 List *
-CreateTextSearchDictObjectAddress(Node *node, bool missing_ok)
+CreateTextSearchDictObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	DefineStmt *stmt = castNode(DefineStmt, node);
 	Assert(stmt->kind == OBJECT_TSDICTIONARY);
@@ -608,7 +609,8 @@ CreateTextSearchDictObjectAddress(Node *node, bool missing_ok)
 * exist based on the missing_ok flag passed in by the caller.
 */
 List *
-RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
+RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok, bool
+											   isPostprocess)
 {
 	RenameStmt *stmt = castNode(RenameStmt, node);
 	Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
@@ -627,7 +629,8 @@ RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
 * exist based on the missing_ok flag passed in by the caller.
 */
 List *
-RenameTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok)
+RenameTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok, bool
+											isPostprocess)
 {
 	RenameStmt *stmt = castNode(RenameStmt, node);
 	Assert(stmt->renameType == OBJECT_TSDICTIONARY);
@@ -646,7 +649,8 @@ RenameTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok)
 * exist based on the missing_ok flag passed in by the caller.
 */
 List *
-AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
+AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok, bool
+											  isPostprocess)
 {
 	AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node);

@@ -664,7 +668,8 @@ AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
 * exist based on the missing_ok flag passed in by the caller.
 */
 List *
-AlterTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok)
+AlterTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok, bool
+										   isPostprocess)
 {
 	AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node);

@@ -686,7 +691,8 @@ AlterTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok)
 * in edgecases will be raised by postgres while executing the move.
 */
 List *
-AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok)
+AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok, bool
+													isPostprocess)
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
 	Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
@@ -739,7 +745,8 @@ AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok)
 * in edgecases will be raised by postgres while executing the move.
 */
 List *
-AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok)
+AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok, bool
+												 isPostprocess)
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
 	Assert(stmt->objectType == OBJECT_TSDICTIONARY);
@@ -788,7 +795,8 @@ AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok)
 * configuration does not exist based on the missing_ok flag passed in by the caller.
 */
 List *
-TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok)
+TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok, bool
+											isPostprocess)
 {
 	CommentStmt *stmt = castNode(CommentStmt, node);
 	Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
@@ -807,7 +815,7 @@ TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok)
 * exist based on the missing_ok flag passed in by the caller.
 */
 List *
-TextSearchDictCommentObjectAddress(Node *node, bool missing_ok)
+TextSearchDictCommentObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	CommentStmt *stmt = castNode(CommentStmt, node);
 	Assert(stmt->objtype == OBJECT_TSDICTIONARY);
@@ -826,7 +834,8 @@ TextSearchDictCommentObjectAddress(Node *node, bool missing_ok)
 * configuration does not exist based on the missing_ok flag passed in by the caller.
 */
 List *
-AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok)
+AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok, bool
+											   isPostprocess)
 {
 	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
 	Relation relation = NULL;
@@ -850,7 +859,7 @@ AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok)
 * configuration does not exist based on the missing_ok flag passed in by the caller.
 */
 List *
-AlterTextSearchDictOwnerObjectAddress(Node *node, bool missing_ok)
+AlterTextSearchDictOwnerObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
 	Relation relation = NULL;
diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c
index 9d9d62342..299ffcc32 100644
--- a/src/backend/distributed/commands/trigger.c
+++ b/src/backend/distributed/commands/trigger.c
@@ -224,7 +224,7 @@ PostprocessCreateTriggerStmt(Node *node, const char *queryString)
 	EnsureCoordinator();
 	ErrorOutForTriggerIfNotSupported(relationId);

-	List *objectAddresses = GetObjectAddressListFromParseTree(node, missingOk);
+	List *objectAddresses = GetObjectAddressListFromParseTree(node, missingOk, true);

 	/* the code-path only supports a single object */
 	Assert(list_length(objectAddresses) == 1);
@@ -246,7 +246,7 @@ PostprocessCreateTriggerStmt(Node *node, const char *queryString)
 * was set to true.
 */
 List *
-CreateTriggerStmtObjectAddress(Node *node, bool missingOk)
+CreateTriggerStmtObjectAddress(Node *node, bool missingOk, bool isPostprocess)
 {
 	CreateTrigStmt *createTriggerStmt = castNode(CreateTrigStmt, node);

diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c
index 3a10ab6a0..3e641fad0 100644
--- a/src/backend/distributed/commands/type.c
+++ b/src/backend/distributed/commands/type.c
@@ -117,7 +117,7 @@ PreprocessRenameTypeAttributeStmt(Node *node, const char *queryString,
 	Assert(stmt->renameType == OBJECT_ATTRIBUTE);
 	Assert(stmt->relationType == OBJECT_TYPE);

-	List *typeAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false);
+	List *typeAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false, false);

 	/* the code-path only supports a single object */
 	Assert(list_length(typeAddresses) == 1);
@@ -305,7 +305,7 @@ EnumValsList(Oid typeOid)
 * to true.
 */
 List *
-CompositeTypeStmtObjectAddress(Node *node, bool missing_ok)
+CompositeTypeStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	CompositeTypeStmt *stmt = castNode(CompositeTypeStmt, node);
 	TypeName *typeName = MakeTypeNameFromRangeVar(stmt->typevar);
@@ -326,7 +326,7 @@ CompositeTypeStmtObjectAddress(Node *node, bool missing_ok)
 * to true.
 */
 List *
-CreateEnumStmtObjectAddress(Node *node, bool missing_ok)
+CreateEnumStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	CreateEnumStmt *stmt = castNode(CreateEnumStmt, node);
 	TypeName *typeName = makeTypeNameFromNameList(stmt->typeName);
@@ -347,7 +347,7 @@ CreateEnumStmtObjectAddress(Node *node, bool missing_ok)
 * to true.
 */
 List *
-AlterTypeStmtObjectAddress(Node *node, bool missing_ok)
+AlterTypeStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
 	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE);
@@ -366,7 +366,7 @@ AlterTypeStmtObjectAddress(Node *node, bool missing_ok)
 * object of the AlterEnumStmt. Errors is missing_ok is false.
 */
 List *
-AlterEnumStmtObjectAddress(Node *node, bool missing_ok)
+AlterEnumStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterEnumStmt *stmt = castNode(AlterEnumStmt, node);
 	TypeName *typeName = makeTypeNameFromNameList(stmt->typeName);
@@ -383,7 +383,7 @@ AlterEnumStmtObjectAddress(Node *node, bool missing_ok)
 * of the RenameStmt. Errors if missing_ok is false.
 */
 List *
-RenameTypeStmtObjectAddress(Node *node, bool missing_ok)
+RenameTypeStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	RenameStmt *stmt = castNode(RenameStmt, node);
 	Assert(stmt->renameType == OBJECT_TYPE);
@@ -407,7 +407,7 @@ RenameTypeStmtObjectAddress(Node *node, bool missing_ok)
 * schemas.
 */
 List *
-AlterTypeSchemaStmtObjectAddress(Node *node, bool missing_ok)
+AlterTypeSchemaStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
 	Assert(stmt->objectType == OBJECT_TYPE || stmt->objectType == OBJECT_DOMAIN);
@@ -487,7 +487,7 @@ RenameTypeAttributeStmtObjectAddress(Node *node, bool missing_ok)
 * of the AlterOwnerStmt. Errors if missing_ok is false.
 */
 List *
-AlterTypeOwnerObjectAddress(Node *node, bool missing_ok)
+AlterTypeOwnerObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
 	Assert(stmt->objectType == OBJECT_TYPE);
diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c
index a6ae56afa..38ff4a379 100644
--- a/src/backend/distributed/commands/utility_hook.c
+++ b/src/backend/distributed/commands/utility_hook.c
@@ -552,8 +552,8 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 	 * prevent before-mentioned citus related messages. PG will complain about the
 	 * invalid address, so we are safe to not execute qualify and preprocess. Also
 	 * note that we should not guard any step after standardProcess_Utility with
-	 * the flag distOpsHasInvalidObject because PG would have already failed the
-	 * transaction.
+	 * the flag distOpsHasInvalidObject because PG would have already failed the
+	 * transaction.
 	 */
 	distOpsHasInvalidObject = DistOpsHasInvalidObject(parsetree, ops);
@@ -872,7 +872,7 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 	 */
 	if (ops && ops->markDistributed)
 	{
-		List *addresses = GetObjectAddressListFromParseTree(parsetree, false);
+		List *addresses = GetObjectAddressListFromParseTree(parsetree, false, true);
 		ObjectAddress *address = NULL;
 		foreach_ptr(address, addresses)
 		{
diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c
index ce7119875..8fc9c7dc0 100644
--- a/src/backend/distributed/commands/view.c
+++ b/src/backend/distributed/commands/view.c
@@ -94,7 +94,7 @@ PostprocessViewStmt(Node *node, const char *queryString)
 		return NIL;
 	}

-	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false);
+	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false, true);

 	/* the code-path only supports a single object */
 	Assert(list_length(viewAddresses) == 1);
@@ -158,7 +158,7 @@ PostprocessViewStmt(Node *node, const char *queryString)
 * CREATE [OR REPLACE] VIEW statement.
 */
 List *
-ViewStmtObjectAddress(Node *node, bool missing_ok)
+ViewStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	ViewStmt *stmt = castNode(ViewStmt, node);

@@ -226,7 +226,7 @@ PreprocessDropViewStmt(Node *node, const char *queryString, ProcessUtilityContex
 * statement.
 */
 List *
-DropViewStmtObjectAddress(Node *stmt, bool missing_ok)
+DropViewStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess)
 {
 	DropStmt *dropStmt = castNode(DropStmt, stmt);

@@ -489,7 +489,7 @@ PreprocessAlterViewStmt(Node *node, const char *queryString, ProcessUtilityConte
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);

-	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true);
+	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true, false);

 	/* the code-path only supports a single object */
 	Assert(list_length(viewAddresses) == 1);
@@ -531,7 +531,7 @@ PostprocessAlterViewStmt(Node *node, const char *queryString)
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
 	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_VIEW);

-	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true);
+	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true, true);

 	/* the code-path only supports a single object */
 	Assert(list_length(viewAddresses) == 1);
@@ -563,7 +563,7 @@ PostprocessAlterViewStmt(Node *node, const char *queryString)
 * ALTER VIEW statement.
 */
 List *
-AlterViewStmtObjectAddress(Node *node, bool missing_ok)
+AlterViewStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
 	Oid viewOid = RangeVarGetRelid(stmt->relation, NoLock, missing_ok);
@@ -583,7 +583,7 @@ List *
 PreprocessRenameViewStmt(Node *node, const char *queryString,
 						 ProcessUtilityContext processUtilityContext)
 {
-	List *viewAddresses = GetObjectAddressListFromParseTree(node, true);
+	List *viewAddresses = GetObjectAddressListFromParseTree(node, true, false);

 	/* the code-path only supports a single object */
 	Assert(list_length(viewAddresses) == 1);
@@ -622,7 +622,7 @@ PreprocessRenameViewStmt(Node *node, const char *queryString,
 * of the RenameStmt. Errors if missing_ok is false.
 */
 List *
-RenameViewStmtObjectAddress(Node *node, bool missing_ok)
+RenameViewStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	RenameStmt *stmt = castNode(RenameStmt, node);

@@ -645,7 +645,7 @@ PreprocessAlterViewSchemaStmt(Node *node, const char *queryString,
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);

-	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true);
+	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true, false);

 	/* the code-path only supports a single object */
 	Assert(list_length(viewAddresses) == 1);
@@ -687,7 +687,7 @@ PostprocessAlterViewSchemaStmt(Node *node, const char *queryString)
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);

-	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true);
+	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true, true);

 	/* the code-path only supports a single object */
 	Assert(list_length(viewAddresses) == 1);
@@ -709,7 +709,7 @@ PostprocessAlterViewSchemaStmt(Node *node, const char *queryString)
 * of the alter schema statement.
 */
 List *
-AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok)
+AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);

diff --git a/src/backend/distributed/deparser/objectaddress.c b/src/backend/distributed/deparser/objectaddress.c
index c6638b4e6..d835a3b1a 100644
--- a/src/backend/distributed/deparser/objectaddress.c
+++ b/src/backend/distributed/deparser/objectaddress.c
@@ -24,7 +24,7 @@
 * tree.
 */
 List *
-GetObjectAddressListFromParseTree(Node *parseTree, bool missing_ok)
+GetObjectAddressListFromParseTree(Node *parseTree, bool missing_ok, bool isPostprocess)
 {
 	const DistributeObjectOps *ops = GetDistributeObjectOps(parseTree);

@@ -33,12 +33,12 @@ GetObjectAddressListFromParseTree(Node *parseTree, bool missing_ok)
 		ereport(ERROR, (errmsg("unsupported statement to get object address for")));
 	}

-	return ops->address(parseTree, missing_ok);
+	return ops->address(parseTree, missing_ok, isPostprocess);
 }


 List *
-RenameAttributeStmtObjectAddress(Node *node, bool missing_ok)
+RenameAttributeStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	RenameStmt *stmt = castNode(RenameStmt, node);
 	Assert(stmt->renameType == OBJECT_ATTRIBUTE);
@@ -68,7 +68,7 @@ RenameAttributeStmtObjectAddress(Node *node, bool missing_ok)
 * to true.
 */
 List *
-CreateExtensionStmtObjectAddress(Node *node, bool missing_ok)
+CreateExtensionStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	CreateExtensionStmt *stmt = castNode(CreateExtensionStmt, node);
 	ObjectAddress *address = palloc0(sizeof(ObjectAddress));
diff --git a/src/backend/distributed/utils/citus_depended_object.c b/src/backend/distributed/utils/citus_depended_object.c
index 07e004f1c..101db1856 100644
--- a/src/backend/distributed/utils/citus_depended_object.c
+++ b/src/backend/distributed/utils/citus_depended_object.c
@@ -316,35 +316,43 @@ GetCitusDependedObjectArgs(int pgMetaTableVarno, int pgMetaTableOid)
 /*
  * DistOpsHasInvalidObject returns true if any address in the given node
  * is invalid; otherwise, returns false. If ops is null or it has no
- * implemented address method, we return false.
- *
- * We have some dist ops for which we should not validate.
- * 1) We should not validate CREATE statements because no address exists
- * here yet.
- * 2) We should not validate '[DROP|ALTER] IF EXISTS' statements because it is ok
- * by the semantics even if any object is invalid.
- * 3) We should not validate 'ALTER ROLE ALL [SET|UNSET] because for the role ALL
- * AlterRoleSetStmtObjectAddress returns an invalid address even though it should not.
+ * implemented address method, we return false. There are also some dist ops
+ * that we deliberately skip validating; for those we return false as well.
 */
 bool
 DistOpsHasInvalidObject(Node *node, const DistributeObjectOps *ops)
 {
 	if (ops && ops->operationType == DIST_OPS_CREATE)
 	{
+		/*
+		 * We should not validate CREATE statements because no address exists
+		 * yet at this point.
+		 */
 		return false;
 	}
 	else if (StatementContainsIfExist(node))
 	{
+		/*
+		 * We should not validate '[DROP|ALTER] IF EXISTS' statements: they are
+		 * semantically valid even if the referenced object is invalid.
+		 */
 		return false;
 	}
 	else if (AlterRoleSetStatementContainsAll(node))
 	{
+		/*
+		 * We should not validate 'ALTER ROLE ALL [SET|UNSET]' because, for the
+		 * role ALL, AlterRoleSetStmtObjectAddress returns an invalid address
+		 * even though it should not.
+		 */
 		return false;
 	}

 	if (ops && ops->address)
 	{
-		List *objectAddresses = ops->address(node, true);
+		bool missingOk = true;
+		bool isPostprocess = false;
+		List *objectAddresses = ops->address(node, missingOk, isPostprocess);
+
 		ObjectAddress *objectAddress = NULL;
 		foreach_ptr(objectAddress, objectAddresses)
 		{
diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c
index c6b749621..572cf1420 100644
--- a/src/backend/distributed/worker/worker_create_or_replace.c
+++ b/src/backend/distributed/worker/worker_create_or_replace.c
@@ -181,7 +181,7 @@ WorkerCreateOrReplaceObject(List *sqlStatements)
 	 * same subject.
 	 */
 	Node *parseTree = ParseTreeNode(linitial(sqlStatements));
-	List *addresses = GetObjectAddressListFromParseTree(parseTree, true);
+	List *addresses = GetObjectAddressListFromParseTree(parseTree, true, false);
 	Assert(list_length(addresses) == 1);

 	/* We have already asserted that we have exactly 1 address in the addresses.
 	 */
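DistOpsHasInvalidObject above is the main consumer of the widened address callback: it probes addresses with missing_ok semantics and treats any unresolved oid as an invalid object. A minimal sketch of that probing pattern follows (the wrapper name is hypothetical; it assumes an ops entry with an address implementation, and mirrors the loop in the hunk above):

	/* sketch: does any address resolved for this node point at a missing object? */
	static bool
	AddressListHasInvalidObject(const DistributeObjectOps *ops, Node *node)
	{
		bool missingOk = true;      /* do not error on unresolved names */
		bool isPostprocess = false; /* validation runs before standard_ProcessUtility */
		List *objectAddresses = ops->address(node, missingOk, isPostprocess);

		ObjectAddress *objectAddress = NULL;
		foreach_ptr(objectAddress, objectAddresses)
		{
			if (!OidIsValid(objectAddress->objectId))
			{
				return true; /* at least one referenced object does not exist */
			}
		}

		return false;
	}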
diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h
index ffd2782bb..e1d38ed3c 100644
--- a/src/include/distributed/commands.h
+++ b/src/include/distributed/commands.h
@@ -39,6 +39,7 @@ extern void SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(Oid
 													Oid partitionRelationId);

+/* DistOpsOperationType to be used in DistributeObjectOps */
 typedef enum DistOpsOperationType
 {
 	DIST_OPS_NONE,
@@ -60,7 +61,8 @@ typedef enum DistOpsOperationType
 * preprocess: executed before standard_ProcessUtility.
 * postprocess: executed after standard_ProcessUtility.
 * address: return an ObjectAddress for the subject of the statement.
- *          2nd parameter is missing_ok.
+ *          2nd parameter is missing_ok, and
+ *          3rd parameter is isPostprocess.
 * markDistributed: true if the object will be distributed.
 *
 * preprocess/postprocess return a List of DDLJobs.
@@ -71,7 +73,7 @@ typedef struct DistributeObjectOps
 	void (*qualify)(Node *);
 	List * (*preprocess)(Node *, const char *, ProcessUtilityContext);
 	List * (*postprocess)(Node *, const char *);
-	List * (*address)(Node *, bool);
+	List * (*address)(Node *, bool, bool);
 	bool markDistributed;

 	/* fields used by common implementations, omitted for specialized implementations */
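Given the widened address member in the struct above, every statement's ops entry now points at a three-parameter callback. The sketch below shows how such an entry is wired; the field values are illustrative and the field set is abbreviated (the concrete ops tables live in src/backend/distributed/commands/distribute_object_ops.c):

	/* sketch of an ops entry; values shown are illustrative */
	static DistributeObjectOps Sample_CreateFunction = {
		.qualify = NULL,
		.preprocess = PreprocessCreateFunctionStmt,
		.postprocess = PostprocessCreateFunctionStmt,
		.operationType = DIST_OPS_CREATE,
		.address = CreateFunctionStmtObjectAddress, /* (Node *, bool, bool) */
		.markDistributed = true,
	};

Because operationType is DIST_OPS_CREATE here, DistOpsHasInvalidObject skips validating this statement entirely, as shown earlier.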
@@ -158,8 +160,10 @@ extern List * PostprocessAlterDistributedObjectStmt(Node *stmt, const char *quer
 extern List * PreprocessDropDistributedObjectStmt(Node *node, const char *queryString,
 												  ProcessUtilityContext processUtilityContext);
-extern List * DropTextSearchConfigObjectAddress(Node *node, bool missing_ok);
-extern List * DropTextSearchDictObjectAddress(Node *node, bool missing_ok);
+extern List * DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, bool
+												isPostprocess);
+extern List * DropTextSearchDictObjectAddress(Node *node, bool missing_ok, bool
+											  isPostprocess);

 /* index.c */
 typedef void (*PGIndexProcessor)(Form_pg_index, List **, int);
@@ -172,24 +176,33 @@ extern bool CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *d
 /* collation.c - forward declarations */
 extern char * CreateCollationDDL(Oid collationId);
 extern List * CreateCollationDDLsIdempotent(Oid collationId);
-extern List * AlterCollationOwnerObjectAddress(Node *stmt, bool missing_ok);
-extern List * RenameCollationStmtObjectAddress(Node *stmt, bool missing_ok);
+extern List * AlterCollationOwnerObjectAddress(Node *stmt, bool missing_ok, bool
+											   isPostprocess);
+extern List * RenameCollationStmtObjectAddress(Node *stmt, bool missing_ok, bool
+											   isPostprocess);
 extern List * AlterCollationSchemaStmtObjectAddress(Node *stmt,
-													bool missing_ok);
+													bool missing_ok, bool isPostprocess);
 extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *address);
-extern List * DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok);
+extern List * DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok, bool
+											   isPostprocess);

 /* database.c - forward declarations */
-extern List * AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok);
+extern List * AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok, bool
+											  isPostprocess);
 extern List * DatabaseOwnerDDLCommands(const ObjectAddress *address);

 /* domain.c - forward declarations */
-extern List * CreateDomainStmtObjectAddress(Node *node, bool missing_ok);
-extern List * AlterDomainStmtObjectAddress(Node *node, bool missing_ok);
+extern List * CreateDomainStmtObjectAddress(Node *node, bool missing_ok, bool
+											isPostprocess);
+extern List * AlterDomainStmtObjectAddress(Node *node, bool
+										   missing_ok, bool
+										   isPostprocess);
 extern List * DomainRenameConstraintStmtObjectAddress(Node *node,
-													  bool missing_ok);
+													  bool missing_ok, bool
+													  isPostprocess);
-extern List * AlterDomainOwnerStmtObjectAddress(Node *node, bool missing_ok);
-extern List * RenameDomainStmtObjectAddress(Node *node, bool missing_ok);
+extern List * AlterDomainOwnerStmtObjectAddress(Node *node, bool missing_ok, bool
+												isPostprocess);
+extern List * RenameDomainStmtObjectAddress(Node *node, bool missing_ok, bool
+											isPostprocess);
 extern CreateDomainStmt * RecreateDomainStmt(Oid domainOid);
 extern Oid get_constraint_typid(Oid conoid);
@@ -222,9 +235,9 @@ extern List * PreprocessAlterExtensionContentsStmt(Node *node,
 												   processUtilityContext);
 extern List * CreateExtensionDDLCommand(const ObjectAddress *extensionAddress);
 extern List * AlterExtensionSchemaStmtObjectAddress(Node *stmt,
-													bool missing_ok);
+													bool missing_ok, bool isPostprocess);
 extern List * AlterExtensionUpdateStmtObjectAddress(Node *stmt,
-													bool missing_ok);
+													bool missing_ok, bool isPostprocess);
 extern void CreateExtensionWithVersion(char *extname, char *extVersion);
 extern void AlterExtensionUpdateStmt(char *extname, char *extVersion);
 extern int GetExtensionVersionNumber(char *extVersion);
@@ -276,11 +289,14 @@ extern Acl * GetPrivilegesForFDW(Oid FDWOid);
 extern List * PreprocessGrantOnForeignServerStmt(Node *node, const char *queryString,
 												 ProcessUtilityContext
 												 processUtilityContext);
-extern List * CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok);
-extern List * AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok);
-extern List * RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok);
+extern List * CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok, bool
+												   isPostprocess);
+extern List * AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok, bool
+												  isPostprocess);
+extern List * RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok, bool
+												   isPostprocess);
 extern List * AlterForeignServerOwnerStmtObjectAddress(Node *node, bool
-													   missing_ok);
+													   missing_ok, bool isPostprocess);
 extern List * GetForeignServerCreateDDLCommand(Oid serverId);

@@ -296,25 +312,25 @@ extern List * PreprocessCreateFunctionStmt(Node *stmt, const char *queryString,
 extern List * PostprocessCreateFunctionStmt(Node *stmt, const char *queryString);
 extern List * CreateFunctionStmtObjectAddress(Node *stmt,
-											  bool missing_ok);
+											  bool missing_ok, bool isPostprocess);
 extern List * DefineAggregateStmtObjectAddress(Node *stmt,
-											   bool missing_ok);
+											   bool missing_ok, bool isPostprocess);
 extern List * PreprocessAlterFunctionStmt(Node *stmt, const char *queryString,
 										  ProcessUtilityContext processUtilityContext);
 extern List * AlterFunctionStmtObjectAddress(Node *stmt,
-											 bool missing_ok);
+											 bool missing_ok, bool isPostprocess);
 extern List * RenameFunctionStmtObjectAddress(Node *stmt,
-											  bool missing_ok);
+											  bool missing_ok, bool isPostprocess);
 extern List * AlterFunctionOwnerObjectAddress(Node *stmt,
-											  bool missing_ok);
+											  bool missing_ok, bool isPostprocess);
 extern List * AlterFunctionSchemaStmtObjectAddress(Node *stmt,
-												   bool missing_ok);
+												   bool missing_ok, bool isPostprocess);
 extern List * PreprocessAlterFunctionDependsStmt(Node *stmt,
 												 const char *queryString,
 												 ProcessUtilityContext
 												 processUtilityContext);
 extern List * AlterFunctionDependsStmtObjectAddress(Node *stmt,
-													bool missing_ok);
+													bool missing_ok, bool isPostprocess);
 extern List * PreprocessGrantOnFunctionStmt(Node *node, const char *queryString,
 											ProcessUtilityContext processUtilityContext);
 extern List * PostprocessGrantOnFunctionStmt(Node *node, const char *queryString);
@@ -341,7 +357,7 @@ extern LOCKMODE GetCreateIndexRelationLockMode(IndexStmt *createIndexStatement);
 extern List * PreprocessReindexStmt(Node *ReindexStatement,
 									const char *ReindexCommand,
 									ProcessUtilityContext processUtilityContext);
-extern List * ReindexStmtObjectAddress(Node *stmt, bool missing_ok);
+extern List * ReindexStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess);
 extern List * PreprocessDropIndexStmt(Node *dropIndexStatement,
 									  const char *dropIndexCommand,
 									  ProcessUtilityContext processUtilityContext);
@@ -354,7 +370,8 @@ extern List * ExecuteFunctionOnEachTableIndex(Oid relationId, PGIndexProcessor
 extern bool IsReindexWithParam_compat(ReindexStmt *stmt, char *paramName);

 /* objectaddress.c - forward declarations */
-extern List * CreateExtensionStmtObjectAddress(Node *stmt, bool missing_ok);
+extern List * CreateExtensionStmtObjectAddress(Node *stmt, bool missing_ok, bool
+											   isPostprocess);


 /* policy.c - forward declarations */
@@ -391,9 +408,9 @@ extern List * PreprocessAlterRoleSetStmt(Node *stmt, const char *queryString,
 										 ProcessUtilityContext processUtilityContext);
 extern List * GenerateAlterRoleSetCommandForRole(Oid roleid);
 extern List * AlterRoleStmtObjectAddress(Node *node,
-										 bool missing_ok);
+										 bool missing_ok, bool isPostprocess);
 extern List * AlterRoleSetStmtObjectAddress(Node *node,
-											bool missing_ok);
+											bool missing_ok, bool isPostprocess);
 extern List * PreprocessCreateRoleStmt(Node *stmt, const char *queryString,
 									   ProcessUtilityContext processUtilityContext);
 extern List * PreprocessDropRoleStmt(Node *stmt, const char *queryString,
@@ -402,7 +419,8 @@ extern List * PreprocessGrantRoleStmt(Node *stmt, const char *queryString,
 									  ProcessUtilityContext processUtilityContext);
 extern List * PostprocessGrantRoleStmt(Node *stmt, const char *queryString);
 extern List * GenerateCreateOrAlterRoleCommand(Oid roleOid);
-extern List * CreateRoleStmtObjectAddress(Node *stmt, bool missing_ok);
+extern List * CreateRoleStmtObjectAddress(Node *stmt, bool missing_ok, bool
+										  isPostprocess);
 extern void UnmarkRolesDistributed(List *roles);
 extern List * FilterDistributedRoles(List *roles);

@@ -416,8 +434,10 @@ extern List * PreprocessAlterObjectSchemaStmt(Node *alterObjectSchemaStmt,
 											  const char *alterObjectSchemaCommand);
 extern List * PreprocessGrantOnSchemaStmt(Node *node, const char *queryString,
 										  ProcessUtilityContext processUtilityContext);
-extern List * CreateSchemaStmtObjectAddress(Node *node, bool missing_ok);
-extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok);
+extern List * CreateSchemaStmtObjectAddress(Node *node, bool missing_ok, bool
+											isPostprocess);
+extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok, bool
+												 isPostprocess);

 /* sequence.c - forward declarations */
 extern List * PreprocessAlterSequenceStmt(Node *node, const char *queryString,
@@ -431,16 +451,21 @@ extern List * PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryStri
 extern List * PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString);
 extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString,
 										 ProcessUtilityContext processUtilityContext);
-extern List * SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok);
+extern List * SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool
+											isPostprocess);
 extern List * PreprocessRenameSequenceStmt(Node *node,
 										   const char *queryString,
 										   ProcessUtilityContext processUtilityContext);
 extern List * PreprocessGrantOnSequenceStmt(Node *node, const char *queryString,
 											ProcessUtilityContext processUtilityContext);
 extern List * PostprocessGrantOnSequenceStmt(Node *node, const char *queryString);
-extern List * AlterSequenceStmtObjectAddress(Node *node, bool missing_ok);
-extern List * AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok);
-extern List * AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok);
-extern List * RenameSequenceStmtObjectAddress(Node *node, bool missing_ok);
+extern List * AlterSequenceStmtObjectAddress(Node *node, bool missing_ok, bool
+											 isPostprocess);
+extern List * AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok, bool
+												   isPostprocess);
+extern List * AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok, bool
+												  isPostprocess);
+extern List * RenameSequenceStmtObjectAddress(Node *node, bool missing_ok, bool
+											  isPostprocess);
 extern void ErrorIfUnsupportedSeqStmt(CreateSeqStmt *createSeqStmt);
 extern void ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt);
 extern char * GenerateBackupNameForSequenceCollision(const ObjectAddress *address);
@@ -451,10 +476,12 @@ extern void RenameExistingSequenceWithDifferentTypeIfExists(RangeVar *sequence,
 extern List * PreprocessCreateStatisticsStmt(Node *node, const char *queryString,
 											 ProcessUtilityContext processUtilityContext);
 extern List * PostprocessCreateStatisticsStmt(Node *node, const char *queryString);
-extern List * CreateStatisticsStmtObjectAddress(Node *node, bool missingOk);
+extern List * CreateStatisticsStmtObjectAddress(Node *node, bool missingOk, bool
+												isPostprocess);
 extern List * PreprocessDropStatisticsStmt(Node *node, const char *queryString,
 										   ProcessUtilityContext processUtilityContext);
-extern List * DropStatisticsObjectAddress(Node *node, bool missing_ok);
+extern List * DropStatisticsObjectAddress(Node *node, bool missing_ok, bool
+										  isPostprocess);
 extern List * PreprocessAlterStatisticsRenameStmt(Node *node, const char *queryString,
 												  ProcessUtilityContext
 												  processUtilityContext);
@@ -462,7 +489,8 @@ extern List * PreprocessAlterStatisticsSchemaStmt(Node *node, const char *queryS
 												  ProcessUtilityContext
 												  processUtilityContext);
 extern List * PostprocessAlterStatisticsSchemaStmt(Node *node, const char *queryString);
-extern List * AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk);
+extern List * AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk, bool
+													 isPostprocess);
 extern List * PreprocessAlterStatisticsStmt(Node *node, const char *queryString,
 											ProcessUtilityContext processUtilityContext);
 extern List * PreprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString,
@@ -504,7 +532,7 @@ extern void ErrorIfUnsupportedConstraint(Relation relation, char distributionMet
 												char referencingReplicationModel,
 												Var *distributionColumn,
 												uint32 colocationId);
 extern List * AlterTableSchemaStmtObjectAddress(Node *stmt,
-												bool missing_ok);
+												bool missing_ok, bool isPostprocess);
 extern List * MakeNameListFromRangeVar(const RangeVar *rel);
 extern Oid GetSequenceOid(Oid relationId, AttrNumber attnum);
 extern bool ConstrTypeUsesIndex(ConstrType constrType);
@@ -516,29 +544,38 @@ extern List * GetCreateTextSearchDictionaryStatements(const ObjectAddress *addre
 extern List * CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address);
 extern List * CreateTextSearchDictDDLCommandsIdempotent(const ObjectAddress *address);
 extern List * CreateTextSearchConfigurationObjectAddress(Node *node,
-														 bool missing_ok);
+														 bool missing_ok, bool
+														 isPostprocess);
 extern List * CreateTextSearchDictObjectAddress(Node *node,
-												bool missing_ok);
+												bool missing_ok, bool isPostprocess);
 extern List * RenameTextSearchConfigurationStmtObjectAddress(Node *node,
-															 bool missing_ok);
+															 bool missing_ok, bool
+															 isPostprocess);
 extern List * RenameTextSearchDictionaryStmtObjectAddress(Node *node,
-														  bool missing_ok);
+														  bool missing_ok, bool
+														  isPostprocess);
 extern List * AlterTextSearchConfigurationStmtObjectAddress(Node *node,
-															bool missing_ok);
+															bool missing_ok, bool
+															isPostprocess);
 extern List * AlterTextSearchDictionaryStmtObjectAddress(Node *node,
-														 bool missing_ok);
+														 bool missing_ok, bool
+														 isPostprocess);
 extern List * AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node,
-																  bool missing_ok);
+																  bool missing_ok, bool
+																  isPostprocess);
 extern List * AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node,
-															   bool missing_ok);
+															   bool missing_ok, bool
+															   isPostprocess);
 extern List * TextSearchConfigurationCommentObjectAddress(Node *node,
-														  bool missing_ok);
+														  bool missing_ok, bool
+														  isPostprocess);
 extern List * TextSearchDictCommentObjectAddress(Node *node,
-												 bool missing_ok);
+												 bool missing_ok, bool isPostprocess);
 extern List * AlterTextSearchConfigurationOwnerObjectAddress(Node *node,
-															 bool missing_ok);
+															 bool missing_ok, bool
+															 isPostprocess);
 extern List * AlterTextSearchDictOwnerObjectAddress(Node *node,
-													bool missing_ok);
+													bool missing_ok, bool isPostprocess);
 extern char * GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address);
 extern char * GenerateBackupNameForTextSearchDict(const ObjectAddress *address);
 extern List * get_ts_config_namelist(Oid tsconfigOid);
@@ -551,16 +588,20 @@ extern List * PreprocessRenameTypeAttributeStmt(Node *stmt, const char *queryStr
 												ProcessUtilityContext
 												processUtilityContext);
 extern Node * CreateTypeStmtByObjectAddress(const ObjectAddress *address);
-extern List * CompositeTypeStmtObjectAddress(Node *stmt, bool missing_ok);
-extern List * CreateEnumStmtObjectAddress(Node *stmt, bool missing_ok);
-extern List * AlterTypeStmtObjectAddress(Node *stmt, bool missing_ok);
-extern List * AlterEnumStmtObjectAddress(Node *stmt, bool missing_ok);
-extern List * RenameTypeStmtObjectAddress(Node *stmt, bool missing_ok);
+extern List * CompositeTypeStmtObjectAddress(Node *stmt, bool missing_ok, bool
+											 isPostprocess);
+extern List * CreateEnumStmtObjectAddress(Node *stmt, bool missing_ok, bool
+										  isPostprocess);
+extern List * AlterTypeStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess);
+extern List * AlterEnumStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess);
+extern List * RenameTypeStmtObjectAddress(Node *stmt, bool missing_ok, bool
+										  isPostprocess);
 extern List * AlterTypeSchemaStmtObjectAddress(Node *stmt,
-											   bool missing_ok);
+											   bool missing_ok, bool isPostprocess);
 extern List * RenameTypeAttributeStmtObjectAddress(Node *stmt,
 												   bool missing_ok);
-extern List * AlterTypeOwnerObjectAddress(Node *stmt, bool missing_ok);
+extern List * AlterTypeOwnerObjectAddress(Node *stmt, bool missing_ok, bool
+										  isPostprocess);
 extern List * CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress);
 extern char * GenerateBackupNameForTypeCollision(const ObjectAddress *address);

@@ -581,11 +622,11 @@ extern List * PostprocessVacuumStmt(Node *node, const char *vacuumCommand);
 extern List * PreprocessViewStmt(Node *node, const char *queryString,
 								 ProcessUtilityContext processUtilityContext);
 extern List * PostprocessViewStmt(Node *node, const char *queryString);
-extern List * ViewStmtObjectAddress(Node *node, bool missing_ok);
-extern List * AlterViewStmtObjectAddress(Node *node, bool missing_ok);
+extern List * ViewStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess);
+extern List * AlterViewStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess);
 extern List * PreprocessDropViewStmt(Node *node, const char *queryString,
 									 ProcessUtilityContext processUtilityContext);
-extern List * DropViewStmtObjectAddress(Node *node, bool missing_ok);
+extern List * DropViewStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess);
 extern char * CreateViewDDLCommand(Oid viewOid);
 extern List * GetViewCreationCommandsOfTable(Oid relationId);
 extern List * GetViewCreationTableDDLCommandsOfTable(Oid relationId);
@@ -599,11 +640,13 @@ extern List * PreprocessAlterViewStmt(Node *node, const char *queryString,
 extern List * PostprocessAlterViewStmt(Node *node, const char *queryString);
 extern List * PreprocessRenameViewStmt(Node *node, const char *queryString,
 									   ProcessUtilityContext processUtilityContext);
-extern List * RenameViewStmtObjectAddress(Node *node, bool missing_ok);
+extern List * RenameViewStmtObjectAddress(Node *node, bool missing_ok, bool
+										  isPostprocess);
 extern List * PreprocessAlterViewSchemaStmt(Node *node, const char *queryString,
 											ProcessUtilityContext processUtilityContext);
 extern List * PostprocessAlterViewSchemaStmt(Node *node, const char *queryString);
-extern List * AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok);
+extern List * AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok, bool
+											   isPostprocess);
 extern bool IsViewRenameStmt(RenameStmt *renameStmt);

 /* trigger.c - forward declarations */
@@ -611,7 +654,8 @@ extern List * GetExplicitTriggerCommandList(Oid relationId);
 extern HeapTuple GetTriggerTupleById(Oid triggerId, bool missingOk);
 extern List * GetExplicitTriggerIdList(Oid relationId);
 extern List * PostprocessCreateTriggerStmt(Node *node, const char *queryString);
-extern List * CreateTriggerStmtObjectAddress(Node *node, bool missingOk);
+extern List * CreateTriggerStmtObjectAddress(Node *node, bool missingOk, bool
+											 isPostprocess);
 extern void CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt,
 										  char *schemaName, uint64 shardId);
 extern List * PostprocessAlterTriggerRenameStmt(Node *node, const char *queryString);
diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h
index 23bfbae78..9ac15b6ac 100644
--- a/src/include/distributed/deparser.h
+++ b/src/include/distributed/deparser.h
@@ -148,8 +148,10 @@ extern void QualifyAlterTypeOwnerStmt(Node *stmt);
 extern char * GetTypeNamespaceNameByNameList(List *names);
 extern Oid TypeOidGetNamespaceOid(Oid typeOid);

-extern List * GetObjectAddressListFromParseTree(Node *parseTree, bool missing_ok);
-extern List * RenameAttributeStmtObjectAddress(Node *stmt, bool missing_ok);
+extern List * GetObjectAddressListFromParseTree(Node *parseTree, bool missing_ok, bool
+												isPostprocess);
+extern List * RenameAttributeStmtObjectAddress(Node *stmt, bool missing_ok, bool
+											   isPostprocess);

 /* forward declarations for deparse_view_stmts.c */
 extern void QualifyDropViewStmt(Node *node);
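To summarize the contract these headers now declare: every address callback receives the execution phase alongside missing_ok. The sketch below shows a conforming callback; the statement handler name is hypothetical and the catalog id is illustrative, mirroring the view callbacks changed earlier in this patch:

	#include "postgres.h"

	#include "catalog/namespace.h"
	#include "catalog/objectaddress.h"
	#include "catalog/pg_class_d.h"
	#include "nodes/parsenodes.h"
	#include "storage/lockdefs.h"

	/* hypothetical callback with the new (Node *, bool, bool) signature */
	static List *
	ExampleRenameStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
	{
		RenameStmt *stmt = castNode(RenameStmt, node);

		/*
		 * Most callbacks can ignore isPostprocess; only objects whose resolvable
		 * name changes during standard_ProcessUtility (e.g. ALTER ... SET SCHEMA,
		 * as in AlterStatisticsSchemaStmtObjectAddress) need to consult it.
		 */
		Oid relationId = RangeVarGetRelid(stmt->relation, NoLock, missing_ok);

		ObjectAddress *address = palloc0(sizeof(ObjectAddress));
		ObjectAddressSet(*address, RelationRelationId, relationId);

		return list_make1(address);
	}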