From 43e046ddc401e9eb073f282f5c35f862a0023000 Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Fri, 12 Aug 2022 10:00:34 +0200
Subject: [PATCH] Support Sequences owned by columns before distributing tables

There are three different ways in which a sequence can interact with
tables. (1) and (2) are already supported; this commit adds support
for (3).

(1) column DEFAULT nextval('seq'):

    The dependency is roughly like below, and
    ExpandCitusSupportedTypes() is responsible for finding the
    depending sequences.

    schema <--- table <--- column <---- default value
     ^                                       |
     |------------------ sequence <---------|

(2) serial columns (bigserial, smallserial, etc.):

    The dependency is roughly like below, and
    ExpandCitusSupportedTypes() is responsible for finding the
    depending sequences.

    schema <--- table <--- column <---- default value
                              ^              |
                              |              |
                              sequence <-----|

(3) Sequence OWNED BY table.column:

    This commit adds support for this type of resolution. The
    dependency is roughly like the following, and
    ExpandCitusSupportedTypes() is NOT responsible for finding the
    dependency.

    schema <--- table <--- column
                              ^
                              |
                           sequence

(cherry picked from commit 9ec8e627c1e4449d19f0aad0c907a2dc619978a4)
---
 .../commands/create_distributed_table.c       |  50 ++-
 src/backend/distributed/commands/sequence.c   |  55 ++-
 src/backend/distributed/commands/table.c      |   8 +-
 src/backend/distributed/metadata/dependency.c | 197 +++++++++++
 .../distributed/metadata/metadata_sync.c      |  33 +-
 src/include/distributed/metadata_sync.h       |  24 +-
 .../regress/expected/sequences_owned_by.out   | 314 ++++++++++++++++++
 .../expected/upgrade_post_11_before.out       | 296 +++++++++++++++++
 src/test/regress/multi_1_schedule             |   1 +
 src/test/regress/sql/sequences_owned_by.sql   | 161 +++++++++
 .../regress/sql/upgrade_post_11_before.sql    | 253 ++++++++++++++
 11 files changed, 1331 insertions(+), 61 deletions(-)
 create mode 100644 src/test/regress/expected/sequences_owned_by.out
 create mode 100644 src/test/regress/expected/upgrade_post_11_before.out
 create mode 100644 src/test/regress/sql/sequences_owned_by.sql
 create mode 100644 src/test/regress/sql/upgrade_post_11_before.sql

diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c
index 9d36b2af1..79f2c3a45 100644
--- a/src/backend/distributed/commands/create_distributed_table.c
+++ b/src/backend/distributed/commands/create_distributed_table.c
@@ -114,6 +114,8 @@ static void EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMe
 static bool ShouldLocalTableBeEmpty(Oid relationId, char distributionMethod,
 									bool viaDeprecatedAPI);
 static void EnsureCitusTableCanBeCreated(Oid relationOid);
+static void EnsureDistributedSequencesHaveOneType(Oid relationId,
+												  List *seqInfoList);
 static List * GetFKeyCreationCommandsRelationInvolvedWithTableType(Oid relationId,
 																   int tableTypeFlag);
 static Oid DropFKeysAndUndistributeTable(Oid relationId);
@@ -604,17 +606,23 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId)
 	Oid citusTableId = InvalidOid;
 	foreach_oid(citusTableId, citusTableIdList)
 	{
-		List *attnumList = NIL;
-		List *dependentSequenceList = NIL;
-		GetDependentSequencesWithRelation(citusTableId, &attnumList,
-										  &dependentSequenceList, 0);
-		ListCell *attnumCell = NULL;
-		ListCell *dependentSequenceCell = NULL;
-		forboth(attnumCell, attnumList, dependentSequenceCell,
-				dependentSequenceList)
+		List *seqInfoList = NIL;
+		GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0);
+
+		SequenceInfo *seqInfo = NULL;
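+		/*
+		 * Illustrative sketch only (the table and sequence names here are
+		 * made up, not taken from the change itself): the loop below now
+		 * also sees sequences that merely hang off a column, e.g.
+		 *
+		 *   CREATE SEQUENCE s;
+		 *   CREATE TABLE t (a int, b bigint);
+		 *   ALTER SEQUENCE s OWNED BY t.b;
+		 *
+		 * Such a sequence arrives with isNextValDefault = false and is
+		 * skipped by the checks below.
+		 */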
+		foreach_ptr(seqInfo, seqInfoList)
 		{
-			AttrNumber currentAttnum = lfirst_int(attnumCell);
-			Oid currentSeqOid = lfirst_oid(dependentSequenceCell);
+			AttrNumber currentAttnum = seqInfo->attributeNumber;
+			Oid currentSeqOid = seqInfo->sequenceOid;
+
+			if (!seqInfo->isNextValDefault)
+			{
+				/*
+				 * If the sequence is not used in a nextval() default, we
+				 * don't need any check; it is merely a dependent sequence
+				 * via ALTER SEQUENCE .. OWNED BY col.
+				 */
+				continue;
+			}
 
 			/*
 			 * If another distributed table is using the same sequence
@@ -702,21 +710,27 @@ MarkSequenceDistributedAndPropagateDependencies(Oid sequenceOid)
  * in which the sequence is used as default is supported for each sequence in input
  * dependentSequenceList, and then alters the sequence type if not the same with the column type.
  */
-void
-EnsureDistributedSequencesHaveOneType(Oid relationId, List *dependentSequenceList,
-									  List *attnumList)
+static void
+EnsureDistributedSequencesHaveOneType(Oid relationId, List *seqInfoList)
 {
-	ListCell *attnumCell = NULL;
-	ListCell *dependentSequenceCell = NULL;
-	forboth(attnumCell, attnumList, dependentSequenceCell, dependentSequenceList)
+	SequenceInfo *seqInfo = NULL;
+	foreach_ptr(seqInfo, seqInfoList)
 	{
-		AttrNumber attnum = lfirst_int(attnumCell);
-		Oid sequenceOid = lfirst_oid(dependentSequenceCell);
+		if (!seqInfo->isNextValDefault)
+		{
+			/*
+			 * If the sequence is not used in a nextval() default, we
+			 * don't need any check; it is merely a dependent sequence
+			 * via ALTER SEQUENCE .. OWNED BY col.
+			 */
+			continue;
+		}
 
 		/*
 		 * We should make sure that the type of the column that uses
 		 * that sequence is supported
 		 */
+		Oid sequenceOid = seqInfo->sequenceOid;
+		AttrNumber attnum = seqInfo->attributeNumber;
 		Oid attributeTypeId = GetAttributeTypeOid(relationId, attnum);
 		EnsureSequenceTypeSupported(sequenceOid, attributeTypeId);
diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c
index dd7390b45..fdbcf84cb 100644
--- a/src/backend/distributed/commands/sequence.c
+++ b/src/backend/distributed/commands/sequence.c
@@ -168,42 +168,41 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,
 		 attributeIndex++)
 	{
 		Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
-		if (attributeForm->attisdropped || !attributeForm->atthasdef)
-		{
-			/*
-			 * If this column has already been dropped or it has no DEFAULT
-			 * definition, skip it.
-			 */
-			continue;
-		}
-
-		if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
+		if (attributeForm->attisdropped ||
+			attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
 		{
-			/* skip columns with GENERATED AS ALWAYS expressions */
+			/* skip dropped columns and columns with GENERATED AS ALWAYS expressions */
 			continue;
 		}
 
 		char *columnName = NameStr(attributeForm->attname);
-		*columnNameList = lappend(*columnNameList, columnName);
-
 		List *columnOwnedSequences =
 			GetSequencesOwnedByColumn(relationId, attributeIndex + 1);
-		Oid ownedSequenceId = InvalidOid;
-		if (list_length(columnOwnedSequences) != 0)
+		if (attributeForm->atthasdef && list_length(columnOwnedSequences) == 0)
 		{
 			/*
-			 * A column might only own one sequence. We intentionally use
-			 * GetSequencesOwnedByColumn macro and pick initial oid from the
-			 * list instead of using getOwnedSequence. This is both because
-			 * getOwnedSequence is removed in pg13 and is also because it
-			 * errors out if column does not have any sequences.
+			 * Even if there are no owned sequences, the code path still
+			 * expects the columnName to be filled so that it can DROP
+			 * DEFAULT for the existing nextval('seq') columns.
 			 */
-			Assert(list_length(columnOwnedSequences) == 1);
-			ownedSequenceId = linitial_oid(columnOwnedSequences);
+			*ownedSequenceIdList = lappend_oid(*ownedSequenceIdList, InvalidOid);
+			*columnNameList = lappend(*columnNameList, columnName);
+
+			continue;
 		}
 
-		*ownedSequenceIdList = lappend_oid(*ownedSequenceIdList, ownedSequenceId);
+		Oid ownedSequenceId = InvalidOid;
+		foreach_oid(ownedSequenceId, columnOwnedSequences)
+		{
+			/*
+			 * A column might have multiple sequences: one via OWNED BY,
+			 * another via bigserial / DEFAULT nextval().
+			 */
+			*ownedSequenceIdList = lappend_oid(*ownedSequenceIdList, ownedSequenceId);
+			*columnNameList = lappend(*columnNameList, columnName);
+		}
 	}
 
 	relation_close(relation, NoLock);
@@ -443,17 +442,15 @@ SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress)
 	Oid citusTableId = InvalidOid;
 	foreach_oid(citusTableId, citusTableIdList)
 	{
-		List *attnumList = NIL;
-		List *dependentSequenceList = NIL;
-		GetDependentSequencesWithRelation(citusTableId, &attnumList,
-										  &dependentSequenceList, 0);
-		Oid currentSeqOid = InvalidOid;
-		foreach_oid(currentSeqOid, dependentSequenceList)
+		List *seqInfoList = NIL;
+		GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0);
+		SequenceInfo *seqInfo = NULL;
+		foreach_ptr(seqInfo, seqInfoList)
 		{
 			/*
 			 * This sequence is used in a distributed table
 			 */
-			if (currentSeqOid == sequenceAddress->objectId)
+			if (seqInfo->sequenceOid == sequenceAddress->objectId)
 			{
 				return citusTableId;
 			}
diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c
index e5a8101ea..fe396e7d9 100644
--- a/src/backend/distributed/commands/table.c
+++ b/src/backend/distributed/commands/table.c
@@ -2409,11 +2409,9 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 				 * changing the type of the column should not be allowed for now
 				 */
 				AttrNumber attnum = get_attnum(relationId, command->name);
-				List *attnumList = NIL;
-				List *dependentSequenceList = NIL;
-				GetDependentSequencesWithRelation(relationId, &attnumList,
-												  &dependentSequenceList, attnum);
-				if (dependentSequenceList != NIL)
+				List *seqInfoList = NIL;
+				GetDependentSequencesWithRelation(relationId, &seqInfoList, attnum);
+				if (seqInfoList != NIL)
 				{
 					ereport(ERROR, (errmsg("cannot execute ALTER COLUMN TYPE .. command "
 										   "because the column involves a default coming "
diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c
index a23e9ca33..397646bdf 100644
--- a/src/backend/distributed/metadata/dependency.c
+++ b/src/backend/distributed/metadata/dependency.c
@@ -965,6 +965,203 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
 }
 
 
+/*
+ * ExpandForPgVanilla expands only composite types, because other types
+ * will find their dependencies in pg_depend. The function should only be
+ * called by the is_citus_depended_object UDF.
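+ *
+ * Illustrative sketch (the type names are hypothetical, not part of the
+ * change): after
+ *
+ *   CREATE TYPE other_type AS (a int);
+ *   CREATE TYPE comp_type AS (x other_type);
+ *
+ * pg_depend holds no direct comp_type -> other_type edge; the dependency
+ * is recorded through the relation that describes comp_type, which is
+ * why composite types are expanded to their type relation below.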
+ */
+static List *
+ExpandForPgVanilla(ObjectAddressCollector *collector,
+				   ObjectAddress target)
+{
+	/* should only be called if GUC is enabled */
+	Assert(HideCitusDependentObjects == true);
+
+	List *result = NIL;
+
+	if (target.classId == TypeRelationId && get_typtype(target.objectId) ==
+		TYPTYPE_COMPOSITE)
+	{
+		/*
+		 * types depending on other types are not captured in pg_depend, instead
+		 * they are described with their dependencies by the relation that
+		 * describes the composite type.
+		 */
+		Oid typeRelationId = get_typ_typrelid(target.objectId);
+		DependencyDefinition *dependency =
+			CreateObjectAddressDependencyDef(RelationRelationId,
+											 typeRelationId);
+		result = lappend(result, dependency);
+	}
+
+	return result;
+}
+
+
+/*
+ * GetDependentRoleIdsFDW returns a list of role oids that have privileges on the
+ * FDW with the given object id.
+ */
+static List *
+GetDependentRoleIdsFDW(Oid FDWOid)
+{
+	List *roleIds = NIL;
+
+	Acl *aclEntry = GetPrivilegesForFDW(FDWOid);
+
+	if (aclEntry == NULL)
+	{
+		return NIL;
+	}
+
+	AclItem *privileges = ACL_DAT(aclEntry);
+	int numberOfPrivsGranted = ACL_NUM(aclEntry);
+
+	for (int i = 0; i < numberOfPrivsGranted; i++)
+	{
+		roleIds = lappend_oid(roleIds, privileges[i].ai_grantee);
+	}
+
+	return roleIds;
+}
+
+
+/*
+ * ExpandRolesToGroups returns a list of object addresses pointing to roles that
+ * roleid depends on.
+ */
+static List *
+ExpandRolesToGroups(Oid roleid)
+{
+	Relation pgAuthMembers = table_open(AuthMemRelationId, AccessShareLock);
+	HeapTuple tuple = NULL;
+
+	ScanKeyData scanKey[1];
+	const int scanKeyCount = 1;
+
+	/* scan pg_auth_members for member = $1 via index pg_auth_members_member_role_index */
+	ScanKeyInit(&scanKey[0], Anum_pg_auth_members_member, BTEqualStrategyNumber, F_OIDEQ,
+				ObjectIdGetDatum(roleid));
+
+	SysScanDesc scanDescriptor = systable_beginscan(pgAuthMembers, AuthMemMemRoleIndexId,
+													true, NULL, scanKeyCount, scanKey);
+
+	List *roles = NIL;
+	while ((tuple = systable_getnext(scanDescriptor)) != NULL)
+	{
+		Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple);
+
+		DependencyDefinition *definition = palloc0(sizeof(DependencyDefinition));
+		definition->mode = DependencyObjectAddress;
+		ObjectAddressSet(definition->data.address, AuthIdRelationId, membership->roleid);
+
+		roles = lappend(roles, definition);
+	}
+
+	systable_endscan(scanDescriptor);
+	table_close(pgAuthMembers, AccessShareLock);
+
+	return roles;
+}
+
+
+/*
+ * GetViewRuleReferenceDependencyList returns the dependencies of the view's
+ * internal rule objects.
+ */
+static List *
+GetViewRuleReferenceDependencyList(Oid viewId)
+{
+	List *dependencyTupleList = GetPgDependTuplesForDependingObjects(RelationRelationId,
+																	 viewId);
+	List *nonInternalDependenciesOfDependingRules = NIL;
+
+	HeapTuple depTup = NULL;
+	foreach_ptr(depTup, dependencyTupleList)
+	{
+		Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
+
+		/*
+		 * Dependencies of the internal rule should be handled as dependencies
+		 * of the referenced view object.
+		 *
+		 * PG doesn't keep the dependency relation between views and their
+		 * dependent objects directly; instead, it keeps an internal dependency
+		 * relation between the view and the rule, and then keeps the dependent
+		 * objects of the view as non-internal dependencies of the internally
+		 * dependent rule object.
+		 */
+		if (pg_depend->deptype == DEPENDENCY_INTERNAL && pg_depend->classid ==
+			RewriteRelationId)
+		{
+			ObjectAddress ruleAddress = { 0 };
+			ObjectAddressSet(ruleAddress, RewriteRelationId, pg_depend->objid);
+
+			/* expand the results with the rule's non-internal dependencies */
+			List *ruleDependencies = DependencyDefinitionFromPgDepend(ruleAddress);
+
+			DependencyDefinition *dependencyDef = NULL;
+			foreach_ptr(dependencyDef, ruleDependencies)
+			{
+				/*
+				 * Follow all dependencies of the internally dependent rule,
+				 * unless it is an internal dependency of the view itself.
+				 */
+				if (dependencyDef->data.pg_depend.deptype == DEPENDENCY_INTERNAL ||
+					(dependencyDef->data.pg_depend.refclassid == RelationRelationId &&
+					 dependencyDef->data.pg_depend.refobjid == viewId))
+				{
+					continue;
+				}
+
+				nonInternalDependenciesOfDependingRules =
+					lappend(nonInternalDependenciesOfDependingRules, dependencyDef);
+			}
+		}
+	}
+
+	return nonInternalDependenciesOfDependingRules;
+}
+
+
+/*
+ * GetRelationSequenceDependencyList returns the sequence dependency definition
+ * list for the given relation.
+ */
+static List *
+GetRelationSequenceDependencyList(Oid relationId)
+{
+	List *seqInfoList = NIL;
+	GetDependentSequencesWithRelation(relationId, &seqInfoList, 0);
+
+	List *seqIdList = NIL;
+	SequenceInfo *seqInfo = NULL;
+	foreach_ptr(seqInfo, seqInfoList)
+	{
+		seqIdList = lappend_oid(seqIdList, seqInfo->sequenceOid);
+	}
+
+	List *sequenceDependencyDefList =
+		CreateObjectAddressDependencyDefList(RelationRelationId, seqIdList);
+
+	return sequenceDependencyDefList;
+}
+
+
+/*
+ * GetRelationFunctionDependencyList returns the function dependency definition
+ * list for the given relation.
+ */
+static List *
+GetRelationFunctionDependencyList(Oid relationId)
+{
+	List *dependentFunctionOids = GetDependentFunctionsWithRelation(relationId);
+	List *functionDependencyDefList =
+		CreateObjectAddressDependencyDefList(ProcedureRelationId, dependentFunctionOids);
+
+	return functionDependencyDefList;
+}
+
+
 /*
  * GetRelationStatsSchemaDependencyList returns a list of DependencyDefinition
  * objects for the schemas that statistics' of the relation with relationId depends.
diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c
index 6ce5859f2..6ec1b5371 100644
--- a/src/backend/distributed/metadata/metadata_sync.c
+++ b/src/backend/distributed/metadata/metadata_sync.c
@@ -1226,10 +1226,10 @@ GetAttributeTypeOid(Oid relationId, AttrNumber attnum)
  * attribute of the relationId.
  */
 void
-GetDependentSequencesWithRelation(Oid relationId, List **attnumList,
-								  List **dependentSequenceList, AttrNumber attnum)
+GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
+								  AttrNumber attnum)
 {
-	Assert(*attnumList == NIL && *dependentSequenceList == NIL);
+	Assert(*seqInfoList == NIL);
 
 	List *attrdefResult = NIL;
 	List *attrdefAttnumResult = NIL;
@@ -1266,9 +1266,26 @@ GetDependentSequencesWithRelation(Oid relationId, List **attnumList,
 			deprec->refobjsubid != 0 &&
 			deprec->deptype == DEPENDENCY_AUTO)
 		{
+			/*
+			 * We are going to generate the corresponding SequenceInfo
+			 * in the following loop.
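+			 *
+			 * For orientation (an illustrative note, not asserted by this
+			 * code): a DEFAULT nextval('seq') column is represented by a
+			 * pg_depend row whose objid is the pg_attrdef entry (this
+			 * branch), whereas ALTER SEQUENCE seq OWNED BY tbl.col produces
+			 * an AUTO row whose objid is the sequence itself (the else-if
+			 * branch below).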
+			 */
 			attrdefResult = lappend_oid(attrdefResult, deprec->objid);
 			attrdefAttnumResult = lappend_int(attrdefAttnumResult,
 											  deprec->refobjsubid);
 		}
+		else if (deprec->deptype == DEPENDENCY_AUTO &&
+				 deprec->refobjsubid != 0 &&
+				 deprec->classid == RelationRelationId &&
+				 get_rel_relkind(deprec->objid) == RELKIND_SEQUENCE)
+		{
+			SequenceInfo *seqInfo = (SequenceInfo *) palloc(sizeof(SequenceInfo));
+
+			seqInfo->sequenceOid = deprec->objid;
+			seqInfo->attributeNumber = deprec->refobjsubid;
+			seqInfo->isNextValDefault = false;
+
+			*seqInfoList = lappend(*seqInfoList, seqInfo);
+		}
 	}
 
 	systable_endscan(scan);
@@ -1295,9 +1312,13 @@ GetDependentSequencesWithRelation(Oid relationId, List **attnumList,
 
 		if (list_length(sequencesFromAttrDef) == 1)
 		{
-			*dependentSequenceList = list_concat(*dependentSequenceList,
-												 sequencesFromAttrDef);
-			*attnumList = lappend_int(*attnumList, attrdefAttnum);
+			SequenceInfo *seqInfo = (SequenceInfo *) palloc(sizeof(SequenceInfo));
+
+			seqInfo->sequenceOid = linitial_oid(sequencesFromAttrDef);
+			seqInfo->attributeNumber = attrdefAttnum;
+			seqInfo->isNextValDefault = true;
+
+			*seqInfoList = lappend(*seqInfoList, seqInfo);
 		}
 	}
 }
diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h
index 89a00aa2b..26a9fed10 100644
--- a/src/include/distributed/metadata_sync.h
+++ b/src/include/distributed/metadata_sync.h
@@ -27,6 +27,24 @@ typedef enum
 	METADATA_SYNC_FAILED_SYNC = 2
 } MetadataSyncResult;
 
+/*
+ * Information about dependent sequences. We do not store the
+ * dependent relationId, as no caller needs it; it could be added
+ * here if needed.
+ */
+typedef struct SequenceInfo
+{
+	Oid sequenceOid;
+	int attributeNumber;
+
+	/*
+	 * true for nextval(seq) defaults -- which also covers serial columns;
+	 * false when the sequence is only OWNED BY the column
+	 */
+	bool isNextValDefault;
+} SequenceInfo;
+
+
 /* Functions declarations for metadata syncing */
 extern void StartMetadataSyncToNode(const char *nodeNameString, int32 nodePort);
 extern bool ClusterHasKnownMetadataWorkers(void);
@@ -55,9 +73,9 @@ extern bool ShouldInitiateMetadataSync(bool *lockFailure);
 extern List * SequenceDDLCommandsForTable(Oid relationId);
 extern List * GetSequencesFromAttrDef(Oid attrdefOid);
-extern void GetDependentSequencesWithRelation(Oid relationId, List **attnumList,
-											  List **dependentSequenceList, AttrNumber
-											  attnum);
+extern void GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
+											  AttrNumber attnum);
+extern List * GetDependentFunctionsWithRelation(Oid relationId);
 extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum);
 
 #define DELETE_ALL_NODES "TRUNCATE pg_dist_node CASCADE"
diff --git a/src/test/regress/expected/sequences_owned_by.out b/src/test/regress/expected/sequences_owned_by.out
new file mode 100644
index 000000000..1d9cd1136
--- /dev/null
+++ b/src/test/regress/expected/sequences_owned_by.out
@@ -0,0 +1,314 @@
+CREATE SCHEMA "sequence tests";
+SET search_path TO "sequence tests";
+CREATE SEQUENCE "sc 1";
+-- the same sequence is on nextval and owned by the same column
+CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'));
+ALTER SEQUENCE "sc 1" OWNED BY test.b;
+SELECT create_distributed_table('test','a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- show that "sc 1" is distributed
+SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass$$);
+          result
+---------------------------------------------------------------------
+ "sequence
tests"."sc 1" +(1 row) + +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND objid = '"sequence tests"."sc 1"'::regclass; + pg_identify_object_as_address +--------------------------------------------------------------------- + (sequence,"{""sequence tests"",""sc 1""}",{}) +(1 row) + +-- this is not supported for already distributed tables, which we might relax in the future +ALTER SEQUENCE "sc 1" OWNED BY test.b; +ERROR: Altering a distributed sequence is currently not supported. +-- drop cascades into the sequence as well +DROP TABLE test CASCADE; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass$$); + result +--------------------------------------------------------------------- + ERROR: relation "sequence tests.sc 1" does not exist +(1 row) + +CREATE SEQUENCE "sc 1"; +-- a sequence is on nextval and owned by another column +CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'), c bigint); +ALTER SEQUENCE "sc 1" OWNED BY test.c; +SELECT create_distributed_table('test','a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- show that "sc 1" is distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass$$); + result +--------------------------------------------------------------------- + "sequence tests"."sc 1" +(1 row) + +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND objid = '"sequence tests"."sc 1"'::regclass; + pg_identify_object_as_address +--------------------------------------------------------------------- + (sequence,"{""sequence tests"",""sc 1""}",{}) +(1 row) + +-- drop cascades into the schema as well +DROP TABLE test CASCADE; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass$$); + result +--------------------------------------------------------------------- + ERROR: relation "sequence tests.sc 1" does not exist +(1 row) + +CREATE SEQUENCE "sc 1"; +CREATE SEQUENCE "sc 2"; +-- a different sequence is on nextval and owned by another column +CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'), c bigint); +ALTER SEQUENCE "sc 2" OWNED BY test.c; +SELECT create_distributed_table('test','a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- show that "sc 1" and "sc 2" are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text; $$); + result +--------------------------------------------------------------------- + "sequence tests"."sc 1","sequence tests"."sc 2" +(1 row) + +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid = '"sequence tests"."sc 1"'::regclass OR objid = '"sequence tests"."sc 2"'::regclass) ORDER BY 1; + pg_identify_object_as_address +--------------------------------------------------------------------- + (sequence,"{""sequence tests"",""sc 1""}",{}) + (sequence,"{""sequence tests"",""sc 2""}",{}) +(2 rows) + +-- drop cascades into the sc2 as well as it is OWNED BY +DROP TABLE test CASCADE; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 2"'::regclass::text; $$); + result +--------------------------------------------------------------------- + ERROR: 
relation "sequence tests.sc 2" does not exist +(1 row) + +-- and, we manually drop sc1 +DROP SEQUENCE "sc 1"; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text; $$); + result +--------------------------------------------------------------------- + ERROR: relation "sequence tests.sc 1" does not exist +(1 row) + +CREATE SEQUENCE "sc 1"; +CREATE SEQUENCE "sc 2"; +-- a different sequence is on nextval, one different column owned by a sequence, and one bigserial +CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'), c bigint, d bigserial); +ALTER SEQUENCE "sc 2" OWNED BY test.c; +SELECT create_distributed_table('test','a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- show that "sc 1", "sc 2" and test_d_seq are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text || ',' || '"sequence tests"."test_d_seq"'::regclass::text; $$); + result +--------------------------------------------------------------------- + "sequence tests"."sc 1","sequence tests"."sc 2","sequence tests".test_d_seq +(1 row) + +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid = '"sequence tests"."sc 1"'::regclass OR objid = '"sequence tests"."sc 2"'::regclass OR objid = '"sequence tests"."test_d_seq"'::regclass) ORDER BY 1; + pg_identify_object_as_address +--------------------------------------------------------------------- + (sequence,"{""sequence tests"",""sc 1""}",{}) + (sequence,"{""sequence tests"",""sc 2""}",{}) + (sequence,"{""sequence tests"",test_d_seq}",{}) +(3 rows) + +-- drop cascades into the schema as well +DROP TABLE test CASCADE; +CREATE SEQUENCE "sc 1"; +ERROR: relation "sc 1" already exists +CREATE SEQUENCE "sc 2"; +-- a different sequence is on nextval, one column owned by a sequence and it is bigserial +CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'), c bigint, d bigserial); +ALTER SEQUENCE "sc 2" OWNED BY test.d; +SELECT create_distributed_table('test','a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- show that "sc 1" and "sc 2" are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text || ',' || '"sequence tests"."test_d_seq"'::regclass::text; $$); + result +--------------------------------------------------------------------- + "sequence tests"."sc 1","sequence tests"."sc 2","sequence tests".test_d_seq +(1 row) + +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid = '"sequence tests"."sc 1"'::regclass OR objid = '"sequence tests"."sc 2"'::regclass OR objid = '"sequence tests"."test_d_seq"'::regclass) ORDER BY 1; + pg_identify_object_as_address +--------------------------------------------------------------------- + (sequence,"{""sequence tests"",""sc 1""}",{}) + (sequence,"{""sequence tests"",""sc 2""}",{}) + (sequence,"{""sequence tests"",test_d_seq}",{}) +(3 rows) + +-- drop cascades into the sc2 and test_d_seq as well +DROP TABLE test CASCADE; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 2"'::regclass::text $$); + result +--------------------------------------------------------------------- + ERROR: 
relation "sequence tests.sc 2" does not exist +(1 row) + +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."test_d_seq"'::regclass::text $$); + result +--------------------------------------------------------------------- + ERROR: relation "sequence tests.test_d_seq" does not exist +(1 row) + +-- and, we manually drop sc1 +DROP SEQUENCE "sc 1"; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text; $$); + result +--------------------------------------------------------------------- + ERROR: relation "sequence tests.sc 1" does not exist +(1 row) + +CREATE SEQUENCE "sc 1"; +CREATE SEQUENCE "sc 2"; +-- a different sequence is on nextval, one column owned by multiple sequences and it is bigserial +CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'), c bigint, d bigserial); +ALTER SEQUENCE "sc 1" OWNED BY test.d; +ALTER SEQUENCE "sc 2" OWNED BY test.d; +SELECT create_distributed_table('test','a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- show that "sc 1", "sc 2" and test_d_seq are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text || ',' || '"sequence tests"."test_d_seq"'::regclass::text; $$); + result +--------------------------------------------------------------------- + "sequence tests"."sc 1","sequence tests"."sc 2","sequence tests".test_d_seq +(1 row) + +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid = '"sequence tests"."sc 1"'::regclass OR objid = '"sequence tests"."sc 2"'::regclass OR objid = '"sequence tests"."test_d_seq"'::regclass) ORDER BY 1; + pg_identify_object_as_address +--------------------------------------------------------------------- + (sequence,"{""sequence tests"",""sc 1""}",{}) + (sequence,"{""sequence tests"",""sc 2""}",{}) + (sequence,"{""sequence tests"",test_d_seq}",{}) +(3 rows) + +-- drop cascades into the all the sequences as well +DROP TABLE test CASCADE; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 2"'::regclass::text $$); + result +--------------------------------------------------------------------- + ERROR: relation "sequence tests.sc 2" does not exist +(1 row) + +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."test_d_seq"'::regclass::text $$); + result +--------------------------------------------------------------------- + ERROR: relation "sequence tests.test_d_seq" does not exist +(1 row) + +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text; $$); + result +--------------------------------------------------------------------- + ERROR: relation "sequence tests.sc 1" does not exist +(1 row) + +-- Citus local tables handles sequences slightly differently, so lets have one complex example +-- which is combination of all the examples above +CREATE SEQUENCE "sc 1"; +CREATE SEQUENCE "sc 2"; +CREATE SEQUENCE "sc 3"; +CREATE TABLE date_partitioned_citus_local_table_seq( measureid bigserial, col_a bigint, col_b bigserial, eventdate date, measure_data jsonb, PRIMARY KEY (measureid, eventdate)) PARTITION BY RANGE(eventdate); +SELECT create_time_partitions('date_partitioned_citus_local_table_seq', INTERVAL '1 month', '2022-01-01', '2021-01-01'); + create_time_partitions 
+--------------------------------------------------------------------- + t +(1 row) + +ALTER SEQUENCE "sc 1" OWNED BY date_partitioned_citus_local_table_seq.col_a; +ALTER SEQUENCE "sc 2" OWNED BY date_partitioned_citus_local_table_seq.col_a; +ALTER SEQUENCE "sc 3" OWNED BY date_partitioned_citus_local_table_seq.col_b; +ALTER SEQUENCE "sc 2" OWNED BY date_partitioned_citus_local_table_seq.col_b; +SELECT citus_add_local_table_to_metadata('date_partitioned_citus_local_table_seq'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +-- show that "sc 1", "sc 2" and test_d_seq are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text || ',' || '"sequence tests"."sc 3"'::regclass::text || ',' || '"sequence tests"."date_partitioned_citus_local_table_seq_col_b_seq"'::regclass::text; $$); + result +--------------------------------------------------------------------- + "sequence tests"."sc 1","sequence tests"."sc 2","sequence tests"."sc 3","sequence tests".date_partitioned_citus_local_table_seq_col_b_seq +(1 row) + +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid IN ('"sequence tests"."sc 1"'::regclass, '"sequence tests"."sc 2"'::regclass, '"sequence tests"."sc 3"'::regclass, '"sequence tests"."date_partitioned_citus_local_table_seq_col_b_seq"'::regclass)) ORDER BY 1; + pg_identify_object_as_address +--------------------------------------------------------------------- + (sequence,"{""sequence tests"",date_partitioned_citus_local_table_seq_col_b_seq}",{}) + (sequence,"{""sequence tests"",""sc 1""}",{}) + (sequence,"{""sequence tests"",""sc 2""}",{}) + (sequence,"{""sequence tests"",""sc 3""}",{}) +(4 rows) + +-- this is not supported for Citus local tables as well, one day we might relax +ALTER SEQUENCE "sc 2" OWNED BY date_partitioned_citus_local_table_seq.col_a; +ERROR: Altering a distributed sequence is currently not supported. 
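+-- Illustrative sketch (editorial, not produced by the test run): the OWNED BY
+-- links exercised above are plain AUTO dependency rows in pg_depend, e.g.
+--   SELECT refobjid::regclass, refobjsubid FROM pg_depend
+--   WHERE objid = '"sc 2"'::regclass AND deptype = 'a';
+-- which is the shape that GetDependentSequencesWithRelation() now collects
+-- with isNextValDefault = false.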
+-- drop cascades to all sequences
+DROP TABLE date_partitioned_citus_local_table_seq;
+SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass$$);
+          result
+---------------------------------------------------------------------
+ ERROR: relation "sequence tests.sc 1" does not exist
+(1 row)
+
+SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 2"'::regclass$$);
+          result
+---------------------------------------------------------------------
+ ERROR: relation "sequence tests.sc 2" does not exist
+(1 row)
+
+SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 3"'::regclass$$);
+          result
+---------------------------------------------------------------------
+ ERROR: relation "sequence tests.sc 3" does not exist
+(1 row)
+
+SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."date_partitioned_citus_local_table_seq_col_b_seq"'::regclass$$);
+          result
+---------------------------------------------------------------------
+ ERROR: relation "sequence tests.date_partitioned_citus_local_table_seq_col_b_seq" does not exist
+(1 row)
+
+DROP SCHEMA "sequence tests" CASCADE;
diff --git a/src/test/regress/expected/upgrade_post_11_before.out b/src/test/regress/expected/upgrade_post_11_before.out
new file mode 100644
index 000000000..05ff85578
--- /dev/null
+++ b/src/test/regress/expected/upgrade_post_11_before.out
@@ -0,0 +1,296 @@
+-- test cases for #3970
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+CREATE SCHEMA post_11_upgrade;
+SET search_path = post_11_upgrade;
+--1. create a partitioned table, and a vanilla table that will be colocated with this table
+CREATE TABLE part_table (
+    work_ymdt timestamp without time zone NOT NULL,
+    seq bigint NOT NULL,
+    my_seq bigint NOT NULL,
+    work_memo character varying(150),
+    CONSTRAINT work_memo_check CHECK ((octet_length((work_memo)::text) <= 150)),
+    PRIMARY KEY(seq, work_ymdt)
+)
+PARTITION BY RANGE (work_ymdt);
+CREATE TABLE dist(seq bigint UNIQUE);
+--2. perform create_distributed_table
+SELECT create_distributed_table('part_table', 'seq');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_distributed_table('dist','seq');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+--3. add partitions
+CREATE TABLE part_table_p202008 PARTITION OF part_table FOR VALUES FROM ('2020-08-01 00:00:00') TO ('2020-09-01 00:00:00');
+CREATE TABLE part_table_p202009 PARTITION OF part_table FOR VALUES FROM ('2020-09-01 00:00:00') TO ('2020-10-01 00:00:00');
+--4. 
create indexes +CREATE INDEX i_part_1 ON part_table(seq); +CREATE INDEX i_part_2 ON part_table(my_seq, seq); +CREATE INDEX i_part_3 ON part_table(work_memo, seq); +CREATE TABLE sensors( +measureid integer, +eventdatetime date, +measure_data jsonb, +PRIMARY KEY (measureid, eventdatetime, measure_data)) +PARTITION BY RANGE(eventdatetime); +CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01'); +CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +CREATE TABLE sensors_news PARTITION OF sensors FOR VALUES FROM ('2020-05-01') TO ('2025-01-01'); +CREATE INDEX index_on_parent ON sensors(lower(measureid::text)); +CREATE INDEX index_on_child ON sensors_2020_01_01(lower(measure_data::text)); +CREATE INDEX hash_index ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime); +CREATE STATISTICS s1 (dependencies) ON measureid, eventdatetime FROM sensors; +CREATE STATISTICS s2 (dependencies) ON measureid, eventdatetime FROM sensors_2020_01_01; +ALTER INDEX index_on_parent ALTER COLUMN 1 SET STATISTICS 1000; +ALTER INDEX index_on_child ALTER COLUMN 1 SET STATISTICS 1000; +CLUSTER sensors_2020_01_01 USING index_on_child; +SELECT create_distributed_table('sensors', 'measureid'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- create a colocated distributed tables and create foreign keys FROM/TO +-- the partitions +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +SELECT create_distributed_table('colocated_dist_table', 'measureid'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; +WARNING: not propagating CLUSTER command to worker nodes +CREATE TABLE colocated_partitioned_table( + measureid integer, + eventdatetime date, + PRIMARY KEY (measureid, eventdatetime)) +PARTITION BY RANGE(eventdatetime); +CREATE TABLE colocated_partitioned_table_2020_01_01 PARTITION OF colocated_partitioned_table FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +SELECT create_distributed_table('colocated_partitioned_table', 'measureid'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CLUSTER colocated_partitioned_table_2020_01_01 USING colocated_partitioned_table_2020_01_01_pkey; +WARNING: not propagating CLUSTER command to worker nodes +CREATE TABLE reference_table (measureid integer PRIMARY KEY); +SELECT create_reference_table('reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +-- this table is used to make sure that index backed +-- replica identites can have clustered indexes +-- and no index statistics +CREATE TABLE index_backed_rep_identity(key int NOT NULL); +CREATE UNIQUE INDEX uqx ON index_backed_rep_identity(key); +ALTER TABLE index_backed_rep_identity REPLICA IDENTITY USING INDEX uqx; +CLUSTER index_backed_rep_identity USING uqx; +SELECT create_distributed_table('index_backed_rep_identity', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- from parent to regular dist +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); +-- from 
parent to parent +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_parent FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table(measureid, eventdatetime); +-- from parent to child +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_child FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid, eventdatetime); +-- load some data +INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i; +INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; +-- table for recursive view +CREATE TABLE employees (employee_id int, manager_id int, full_name text); +SELECT create_distributed_table('employees', 'employee_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- table for owned_by_extension +-- note that tables owned by extension are +-- not added to the pg_dist_object, and assumed +-- to exists on all nodes via the extension +CREATE TABLE owned_by_extension_table (employee_id int, manager_id int, full_name text); +ALTER EXTENSION plpgsql ADD TABLE post_11_upgrade.owned_by_extension_table; +NOTICE: Citus does not propagate adding/dropping member objects +HINT: You can add/drop the member objects on the workers as well. +SELECT create_distributed_table('owned_by_extension_table', 'employee_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT run_command_on_workers($$CREATE TABLE post_11_upgrade.owned_by_extension_table (employee_id int, manager_id int, full_name text);$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57636,t,"CREATE TABLE") + (localhost,57637,t,"CREATE TABLE") +(2 rows) + +SELECT run_command_on_workers($$ALTER EXTENSION plpgsql ADD TABLE post_11_upgrade.owned_by_extension_table;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57636,t,"ALTER EXTENSION") + (localhost,57637,t,"ALTER EXTENSION") +(2 rows) + +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION post_11_upgrade.partial_index_test_config ( parser = default ); +SELECT 1 FROM run_command_on_workers($$CREATE TEXT SEARCH CONFIGURATION post_11_upgrade.partial_index_test_config ( parser = default );$$); + ?column? 
+--------------------------------------------------------------------- + 1 + 1 +(2 rows) + +CREATE OR REPLACE FUNCTION post_11_upgrade.func_in_transaction_def() +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +SELECT run_command_on_workers('SET citus.enable_ddl_propagation TO off; +CREATE OR REPLACE FUNCTION post_11_upgrade.func_in_transaction_def() +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$;'); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57636,t,SET) + (localhost,57637,t,SET) +(2 rows) + +CREATE TYPE post_11_upgrade.my_type AS (a int); +CREATE VIEW post_11_upgrade.view_for_upgrade_test AS SELECT * FROM sensors; +-- one normally would not need views on the workers pre-11, but still +-- nice test to have +SELECT run_command_on_workers('SET citus.enable_ddl_propagation TO off; +CREATE VIEW post_11_upgrade.view_for_upgrade_test AS SELECT * FROM sensors;'); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57636,t,SET) + (localhost,57637,t,SET) +(2 rows) + +-- a non-distributed type dependency to a view +-- both the view and the type should be distributed after the upgrade +CREATE TYPE post_11_upgrade.my_type_for_view AS (a int); +CREATE VIEW post_11_upgrade.view_for_upgrade_test_my_type (casted) AS SELECT row(measureid)::post_11_upgrade.my_type_for_view FROM sensors; +-- a local type, table and view, should not be distributed +-- after the upgrade +CREATE TYPE post_11_upgrade.local_type AS (a int); +CREATE TABLE post_11_upgrade.non_dist_table_for_view(a int, b post_11_upgrade.local_type); +CREATE VIEW post_11_upgrade.non_dist_upgrade_test_view AS SELECT * FROM non_dist_table_for_view; +-- a local table joined with a distributed table. 
In other words, the view has a local table dependency
+-- and should not be distributed after the upgrade
+CREATE TABLE post_11_upgrade.non_dist_dist_table_for_view(a int);
+CREATE VIEW post_11_upgrade.non_dist_upgrade_test_view_local_join AS SELECT * FROM non_dist_table_for_view JOIN sensors ON (true);
+-- a view selecting from multiple
+-- distributed/reference tables should be marked as distributed
+CREATE VIEW post_11_upgrade.non_dist_upgrade_multiple_dist_view AS SELECT colocated_dist_table.* FROM colocated_dist_table JOIN sensors ON (true) JOIN reference_table ON (true);
+-- a view selecting from a reference table should be fine
+CREATE VIEW post_11_upgrade.non_dist_upgrade_ref_view AS SELECT * FROM reference_table;
+-- a view selecting from another (distributed) view should also be distributed
+CREATE VIEW post_11_upgrade.non_dist_upgrade_ref_view_2 AS SELECT * FROM non_dist_upgrade_ref_view;
+-- materialized views never become distributed
+CREATE MATERIALIZED VIEW post_11_upgrade.materialized_view AS SELECT * FROM reference_table;
+CREATE VIEW post_11_upgrade.owned_by_extension_view AS SELECT * FROM reference_table;
+ALTER EXTENSION plpgsql ADD VIEW post_11_upgrade.owned_by_extension_view;
+-- temporary views should not be marked as distributed
+CREATE VIEW pg_temp.temp_view_1 AS SELECT * FROM reference_table;
+CREATE temporary VIEW temp_view_2 AS SELECT * FROM reference_table;
+-- we should be able to distribute recursive views
+CREATE OR REPLACE RECURSIVE VIEW reporting_line (employee_id, subordinates) AS
+SELECT employee_id,
+ full_name AS subordinates
+FROM employees
+WHERE manager_id IS NULL
+UNION ALL
+SELECT e.employee_id,
+ (rl.subordinates || ' > ' || e.full_name) AS subordinates
+FROM employees e
+INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id;
+-- v_test_1 and v_test_2 become circularly dependent views,
+-- so we should not try to distribute either of them
+CREATE VIEW post_11_upgrade.v_test_1 AS SELECT * FROM sensors;
+CREATE VIEW post_11_upgrade.v_test_2 AS SELECT * FROM sensors;
+CREATE OR REPLACE VIEW post_11_upgrade.v_test_1 AS SELECT sensors.* FROM sensors JOIN v_test_2 USING (measureid);
+CREATE OR REPLACE VIEW post_11_upgrade.v_test_2 AS SELECT sensors.* FROM sensors JOIN v_test_1 USING (measureid);
+-- views that do not depend on anything should be distributed
+CREATE VIEW post_11_upgrade.depends_on_nothing_1 AS SELECT * FROM (VALUES (1)) as values;
+CREATE VIEW post_11_upgrade.depends_on_nothing_2 AS SELECT 1;
+-- views depending on pg/citus objects should be distributed
+CREATE VIEW post_11_upgrade.depends_on_pg AS SELECT * FROM pg_class;
+CREATE VIEW post_11_upgrade.depends_on_citus AS SELECT * FROM pg_dist_partition;
+-- views that depend only on sequences should be distributed
+CREATE SEQUENCE post_11_upgrade.seq_bigint AS bigint INCREMENT BY 3 CACHE 10 CYCLE;
+CREATE VIEW post_11_upgrade.depends_on_seq AS SELECT nextval('post_11_upgrade.seq_bigint');
+-- views that depend on a sequence and a local table should not be distributed
+CREATE VIEW post_11_upgrade.depends_on_seq_and_no_support AS SELECT nextval('post_11_upgrade.seq_bigint') FROM post_11_upgrade.non_dist_table_for_view;
+RESET citus.enable_ddl_propagation;
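+-- Illustrative sketch (editorial, not produced by the test run): after the
+-- upgrade, distribution of any of the views above could be inspected with the
+-- same pattern used elsewhere in this file, e.g.
+--   SELECT pg_identify_object_as_address(classid, objid, objsubid)
+--   FROM pg_dist_object
+--   WHERE objid = 'post_11_upgrade.depends_on_seq'::regclass;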
+CREATE TABLE sensors_parser(
+    measureid integer,
+    eventdatetime date,
+    measure_data jsonb,
+    name text,
+    col_with_def int DEFAULT post_11_upgrade.func_in_transaction_def(),
+    col_with_type post_11_upgrade.my_type,
+    PRIMARY KEY (measureid, eventdatetime, measure_data)
+) PARTITION BY RANGE(eventdatetime);
+CREATE TABLE sensors_parser_a_partition PARTITION OF sensors_parser FOR VALUES FROM ('2000-01-01') TO ('2020-01-01');
+CREATE INDEX sensors_parser_search_name ON sensors_parser USING gin (to_tsvector('partial_index_test_config'::regconfig, (COALESCE(name, ''::character varying))::text));
+SELECT create_distributed_table('sensors_parser', 'measureid');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SET citus.enable_ddl_propagation TO off;
+CREATE COLLATION post_11_upgrade.german_phonebook_unpropagated (provider = icu, locale = 'de-u-co-phonebk');
+SELECT 1 FROM run_command_on_workers($$CREATE COLLATION post_11_upgrade.german_phonebook_unpropagated (provider = icu, locale = 'de-u-co-phonebk');$$);
+ ?column?
+---------------------------------------------------------------------
+        1
+        1
+(2 rows)
+
+SET citus.enable_ddl_propagation TO on;
+CREATE TABLE test_propagate_collate(id int, t2 text COLLATE german_phonebook_unpropagated);
+SELECT create_distributed_table('test_propagate_collate', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE SEQUENCE "SC1";
+CREATE SEQUENCE "unrelated_sequence";
+CREATE TABLE test(a int, b int default nextval ('"SC1"'));
+ALTER SEQUENCE "unrelated_sequence" OWNED BY test.b;
+SELECT create_distributed_table('test','a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule
index e49a22dd4..26c50e465 100644
--- a/src/test/regress/multi_1_schedule
+++ b/src/test/regress/multi_1_schedule
@@ -292,6 +292,7 @@ test: create_citus_local_table_cascade
 test: fkeys_between_local_ref
 test: auto_undist_citus_local
 test: mx_regular_user
+test: sequences_owned_by
 test: remove_coordinator
 
 # ----------
diff --git a/src/test/regress/sql/sequences_owned_by.sql b/src/test/regress/sql/sequences_owned_by.sql
new file mode 100644
index 000000000..d9604f9a4
--- /dev/null
+++ b/src/test/regress/sql/sequences_owned_by.sql
@@ -0,0 +1,161 @@
+CREATE SCHEMA "sequence tests";
+SET search_path TO "sequence tests";
+
+CREATE SEQUENCE "sc 1";
+
+-- the same sequence is on nextval and owned by the same column
+CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'));
+ALTER SEQUENCE "sc 1" OWNED BY test.b;
+SELECT create_distributed_table('test','a');
+
+-- show that "sc 1" is distributed
+SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass$$);
+SELECT
+    pg_identify_object_as_address(classid, objid, objsubid)
+FROM pg_dist_object WHERE classid
= 1259 AND objid = '"sequence tests"."sc 1"'::regclass; + +-- drop cascades into the schema as well +DROP TABLE test CASCADE; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass$$); + +CREATE SEQUENCE "sc 1"; +CREATE SEQUENCE "sc 2"; + +-- a different sequence is on nextval and owned by another column +CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'), c bigint); +ALTER SEQUENCE "sc 2" OWNED BY test.c; +SELECT create_distributed_table('test','a'); + +-- show that "sc 1" and "sc 2" are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text; $$); +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid = '"sequence tests"."sc 1"'::regclass OR objid = '"sequence tests"."sc 2"'::regclass) ORDER BY 1; + +-- drop cascades into the sc2 as well as it is OWNED BY +DROP TABLE test CASCADE; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 2"'::regclass::text; $$); + +-- and, we manually drop sc1 +DROP SEQUENCE "sc 1"; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text; $$); + +CREATE SEQUENCE "sc 1"; +CREATE SEQUENCE "sc 2"; + +-- a different sequence is on nextval, one different column owned by a sequence, and one bigserial +CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'), c bigint, d bigserial); +ALTER SEQUENCE "sc 2" OWNED BY test.c; +SELECT create_distributed_table('test','a'); + +-- show that "sc 1", "sc 2" and test_d_seq are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text || ',' || '"sequence tests"."test_d_seq"'::regclass::text; $$); +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid = '"sequence tests"."sc 1"'::regclass OR objid = '"sequence tests"."sc 2"'::regclass OR objid = '"sequence tests"."test_d_seq"'::regclass) ORDER BY 1; + +-- drop cascades into the schema as well +DROP TABLE test CASCADE; + + +CREATE SEQUENCE "sc 1"; +CREATE SEQUENCE "sc 2"; + +-- a different sequence is on nextval, one column owned by a sequence and it is bigserial +CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'), c bigint, d bigserial); +ALTER SEQUENCE "sc 2" OWNED BY test.d; +SELECT create_distributed_table('test','a'); + +-- show that "sc 1" and "sc 2" are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text || ',' || '"sequence tests"."test_d_seq"'::regclass::text; $$); +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid = '"sequence tests"."sc 1"'::regclass OR objid = '"sequence tests"."sc 2"'::regclass OR objid = '"sequence tests"."test_d_seq"'::regclass) ORDER BY 1; + +-- drop cascades into the sc2 and test_d_seq as well +DROP TABLE test CASCADE; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 2"'::regclass::text $$); +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."test_d_seq"'::regclass::text $$); + +-- and, we manually drop sc1 +DROP SEQUENCE "sc 1"; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 
1"'::regclass::text; $$); + +CREATE SEQUENCE "sc 1"; +CREATE SEQUENCE "sc 2"; + +-- a different sequence is on nextval, one column owned by multiple sequences and it is bigserial +CREATE TABLE test(a int, b bigint default nextval ('"sc 1"'), c bigint, d bigserial); +ALTER SEQUENCE "sc 1" OWNED BY test.d; +ALTER SEQUENCE "sc 2" OWNED BY test.d; + +SELECT create_distributed_table('test','a'); + +-- show that "sc 1", "sc 2" and test_d_seq are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text || ',' || '"sequence tests"."test_d_seq"'::regclass::text; $$); +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid = '"sequence tests"."sc 1"'::regclass OR objid = '"sequence tests"."sc 2"'::regclass OR objid = '"sequence tests"."test_d_seq"'::regclass) ORDER BY 1; + +-- drop cascades into the all the sequences as well +DROP TABLE test CASCADE; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 2"'::regclass::text $$); +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."test_d_seq"'::regclass::text $$); +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text; $$); + + +-- Citus local tables handles sequences slightly differently, so lets have one complex example +-- which is combination of all the examples above +CREATE SEQUENCE "sc 1"; +CREATE SEQUENCE "sc 2"; +CREATE SEQUENCE "sc 3"; + +CREATE TABLE date_partitioned_citus_local_table_seq( measureid bigserial, col_a bigint, col_b bigserial, eventdate date, measure_data jsonb, PRIMARY KEY (measureid, eventdate)) PARTITION BY RANGE(eventdate); +SELECT create_time_partitions('date_partitioned_citus_local_table_seq', INTERVAL '1 month', '2022-01-01', '2021-01-01'); + +ALTER SEQUENCE "sc 1" OWNED BY date_partitioned_citus_local_table_seq.col_a; +ALTER SEQUENCE "sc 2" OWNED BY date_partitioned_citus_local_table_seq.col_a; +ALTER SEQUENCE "sc 3" OWNED BY date_partitioned_citus_local_table_seq.col_b; +ALTER SEQUENCE "sc 2" OWNED BY date_partitioned_citus_local_table_seq.col_b; + +SELECT citus_add_local_table_to_metadata('date_partitioned_citus_local_table_seq'); + + +-- show that "sc 1", "sc 2" and test_d_seq are distributed +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass::text || ',' || '"sequence tests"."sc 2"'::regclass::text || ',' || '"sequence tests"."sc 3"'::regclass::text || ',' || '"sequence tests"."date_partitioned_citus_local_table_seq_col_b_seq"'::regclass::text; $$); +SELECT + pg_identify_object_as_address(classid, objid, objsubid) +FROM pg_dist_object WHERE classid = 1259 AND (objid IN ('"sequence tests"."sc 1"'::regclass, '"sequence tests"."sc 2"'::regclass, '"sequence tests"."sc 3"'::regclass, '"sequence tests"."date_partitioned_citus_local_table_seq_col_b_seq"'::regclass)) ORDER BY 1; + +-- this is not supported for Citus local tables as well, one day we might relax +ALTER SEQUENCE "sc 2" OWNED BY date_partitioned_citus_local_table_seq.col_a; + +-- drop cascades to all sequneces +DROP TABLE date_partitioned_citus_local_table_seq; +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 1"'::regclass$$); +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 2"'::regclass$$); +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."sc 
3"'::regclass$$); +SELECT DISTINCT result FROM run_command_on_workers($$SELECT '"sequence tests"."date_partitioned_citus_local_table_seq_col_b_seq"'::regclass$$); + +DROP SCHEMA "sequence tests" CASCADE; diff --git a/src/test/regress/sql/upgrade_post_11_before.sql b/src/test/regress/sql/upgrade_post_11_before.sql new file mode 100644 index 000000000..9d17c78ac --- /dev/null +++ b/src/test/regress/sql/upgrade_post_11_before.sql @@ -0,0 +1,253 @@ + +-- test cases for #3970 +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; + +CREATE SCHEMA post_11_upgrade; +SET search_path = post_11_upgrade; + +--1. create a partitioned table, and a vanilla table that will be colocated with this table +CREATE TABLE part_table ( + work_ymdt timestamp without time zone NOT NULL, + seq bigint NOT NULL, + my_seq bigint NOT NULL, + work_memo character varying(150), + CONSTRAINT work_memo_check CHECK ((octet_length((work_memo)::text) <= 150)), + PRIMARY KEY(seq, work_ymdt) +) +PARTITION BY RANGE (work_ymdt); + +CREATE TABLE dist(seq bigint UNIQUE); + +--2. perform create_distributed_table +SELECT create_distributed_table('part_table', 'seq'); +SELECT create_distributed_table('dist','seq'); + +--3. add a partitions +CREATE TABLE part_table_p202008 PARTITION OF part_table FOR VALUES FROM ('2020-08-01 00:00:00') TO ('2020-09-01 00:00:00'); +CREATE TABLE part_table_p202009 PARTITION OF part_table FOR VALUES FROM ('2020-09-01 00:00:00') TO ('2020-10-01 00:00:00'); + +--3. create indexes +CREATE INDEX i_part_1 ON part_table(seq); +CREATE INDEX i_part_2 ON part_table(my_seq, seq); +CREATE INDEX i_part_3 ON part_table(work_memo, seq); + + +CREATE TABLE sensors( +measureid integer, +eventdatetime date, +measure_data jsonb, +PRIMARY KEY (measureid, eventdatetime, measure_data)) +PARTITION BY RANGE(eventdatetime); + +CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01'); +CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +CREATE TABLE sensors_news PARTITION OF sensors FOR VALUES FROM ('2020-05-01') TO ('2025-01-01'); + +CREATE INDEX index_on_parent ON sensors(lower(measureid::text)); +CREATE INDEX index_on_child ON sensors_2020_01_01(lower(measure_data::text)); +CREATE INDEX hash_index ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime); + +CREATE STATISTICS s1 (dependencies) ON measureid, eventdatetime FROM sensors; +CREATE STATISTICS s2 (dependencies) ON measureid, eventdatetime FROM sensors_2020_01_01; + +ALTER INDEX index_on_parent ALTER COLUMN 1 SET STATISTICS 1000; +ALTER INDEX index_on_child ALTER COLUMN 1 SET STATISTICS 1000; + +CLUSTER sensors_2020_01_01 USING index_on_child; +SELECT create_distributed_table('sensors', 'measureid'); + + +-- create a colocated distributed tables and create foreign keys FROM/TO +-- the partitions +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +SELECT create_distributed_table('colocated_dist_table', 'measureid'); + +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; + +CREATE TABLE colocated_partitioned_table( + measureid integer, + eventdatetime date, + PRIMARY KEY (measureid, eventdatetime)) +PARTITION BY RANGE(eventdatetime); + +CREATE TABLE colocated_partitioned_table_2020_01_01 PARTITION OF colocated_partitioned_table FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +SELECT 
+
+CLUSTER colocated_partitioned_table_2020_01_01 USING colocated_partitioned_table_2020_01_01_pkey;
+
+CREATE TABLE reference_table (measureid integer PRIMARY KEY);
+SELECT create_reference_table('reference_table');
+
+-- this table is used to make sure that index-backed
+-- replica identities can have clustered indexes
+-- and no index statistics
+CREATE TABLE index_backed_rep_identity(key int NOT NULL);
+CREATE UNIQUE INDEX uqx ON index_backed_rep_identity(key);
+ALTER TABLE index_backed_rep_identity REPLICA IDENTITY USING INDEX uqx;
+CLUSTER index_backed_rep_identity USING uqx;
+SELECT create_distributed_table('index_backed_rep_identity', 'key');
+
+-- from parent to regular dist
+ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid);
+
+-- from parent to parent
+ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_parent FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table(measureid, eventdatetime);
+
+-- from parent to child
+ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_child FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid, eventdatetime);
+
+-- load some data
+INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
+INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
+INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i;
+INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i;
+
+-- table for recursive view
+CREATE TABLE employees (employee_id int, manager_id int, full_name text);
+SELECT create_distributed_table('employees', 'employee_id');
+
+-- table for owned_by_extension
+-- note that tables owned by an extension are
+-- not added to pg_dist_object, and are assumed
+-- to exist on all nodes via the extension
+CREATE TABLE owned_by_extension_table (employee_id int, manager_id int, full_name text);
+ALTER EXTENSION plpgsql ADD TABLE post_11_upgrade.owned_by_extension_table;
+SELECT create_distributed_table('owned_by_extension_table', 'employee_id');
+SELECT run_command_on_workers($$CREATE TABLE post_11_upgrade.owned_by_extension_table (employee_id int, manager_id int, full_name text);$$);
+SELECT run_command_on_workers($$ALTER EXTENSION plpgsql ADD TABLE post_11_upgrade.owned_by_extension_table;$$);
+
+SET citus.enable_ddl_propagation TO off;
+CREATE TEXT SEARCH CONFIGURATION post_11_upgrade.partial_index_test_config ( parser = default );
+SELECT 1 FROM run_command_on_workers($$CREATE TEXT SEARCH CONFIGURATION post_11_upgrade.partial_index_test_config ( parser = default );$$);
+
+CREATE OR REPLACE FUNCTION post_11_upgrade.func_in_transaction_def()
+RETURNS int
+LANGUAGE plpgsql AS
+$$
+BEGIN
+    return 1;
+END;
+$$;
+
+SELECT run_command_on_workers('SET citus.enable_ddl_propagation TO off;
+CREATE OR REPLACE FUNCTION post_11_upgrade.func_in_transaction_def()
+RETURNS int
+LANGUAGE plpgsql AS
+$$
+BEGIN
+    return 1;
+END;
+$$;');
+
+CREATE TYPE post_11_upgrade.my_type AS (a int);
+CREATE VIEW post_11_upgrade.view_for_upgrade_test AS SELECT * FROM sensors;
+
+-- one normally would not need views on the workers pre-11, but it is
+-- still a nice test to have
+SELECT run_command_on_workers('SET citus.enable_ddl_propagation TO off;
+CREATE VIEW post_11_upgrade.view_for_upgrade_test AS SELECT * FROM sensors;');
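+-- (illustrative aside: whether Citus tracks an object can be checked in
+-- pg_dist_object; the view above was created with DDL propagation off,
+-- so it should not appear there until the upgrade marks it distributed:)
+SELECT count(*)
+FROM pg_dist_object
+WHERE objid = 'post_11_upgrade.view_for_upgrade_test'::regclass;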
+
+
+-- a non-distributed type dependency of a view
+-- both the view and the type should be distributed after the upgrade
+CREATE TYPE post_11_upgrade.my_type_for_view AS (a int);
+CREATE VIEW post_11_upgrade.view_for_upgrade_test_my_type (casted) AS SELECT row(measureid)::post_11_upgrade.my_type_for_view FROM sensors;
+
+-- a local type, table and view; none of them should be distributed
+-- after the upgrade
+CREATE TYPE post_11_upgrade.local_type AS (a int);
+CREATE TABLE post_11_upgrade.non_dist_table_for_view(a int, b post_11_upgrade.local_type);
+CREATE VIEW post_11_upgrade.non_dist_upgrade_test_view AS SELECT * FROM non_dist_table_for_view;
+
+-- a local table joined with a distributed table. In other words, the view has a local table dependency
+-- and should not be distributed after the upgrade
+CREATE TABLE post_11_upgrade.non_dist_dist_table_for_view(a int);
+CREATE VIEW post_11_upgrade.non_dist_upgrade_test_view_local_join AS SELECT * FROM non_dist_table_for_view JOIN sensors ON (true);
+
+-- a view selecting from multiple
+-- distributed/reference tables should be marked as distributed
+CREATE VIEW post_11_upgrade.non_dist_upgrade_multiple_dist_view AS SELECT colocated_dist_table.* FROM colocated_dist_table JOIN sensors ON (true) JOIN reference_table ON (true);
+
+-- a view selecting from a reference table should be fine
+CREATE VIEW post_11_upgrade.non_dist_upgrade_ref_view AS SELECT * FROM reference_table;
+
+-- a view selecting from another (distributed) view should also be distributed
+CREATE VIEW post_11_upgrade.non_dist_upgrade_ref_view_2 AS SELECT * FROM non_dist_upgrade_ref_view;
+
+-- materialized views never become distributed
+CREATE MATERIALIZED VIEW post_11_upgrade.materialized_view AS SELECT * FROM reference_table;
+
+CREATE VIEW post_11_upgrade.owned_by_extension_view AS SELECT * FROM reference_table;
+ALTER EXTENSION plpgsql ADD VIEW post_11_upgrade.owned_by_extension_view;
+
+-- temporary views should not be marked as distributed
+CREATE VIEW pg_temp.temp_view_1 AS SELECT * FROM reference_table;
+CREATE TEMPORARY VIEW temp_view_2 AS SELECT * FROM reference_table;
+
+-- we should be able to distribute recursive views
+CREATE OR REPLACE RECURSIVE VIEW reporting_line (employee_id, subordinates) AS
+SELECT employee_id,
+    full_name AS subordinates
+FROM employees
+WHERE manager_id IS NULL
+UNION ALL
+SELECT e.employee_id,
+    (rl.subordinates || ' > ' || e.full_name) AS subordinates
+FROM employees e
+INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id;
+
+-- v_test_1 and v_test_2 become circularly dependent views,
+-- so we should not try to distribute either of them
+CREATE VIEW post_11_upgrade.v_test_1 AS SELECT * FROM sensors;
+CREATE VIEW post_11_upgrade.v_test_2 AS SELECT * FROM sensors;
+CREATE OR REPLACE VIEW post_11_upgrade.v_test_1 AS SELECT sensors.* FROM sensors JOIN v_test_2 USING (measureid);
+CREATE OR REPLACE VIEW post_11_upgrade.v_test_2 AS SELECT sensors.* FROM sensors JOIN v_test_1 USING (measureid);
+
+-- views that do not depend on anything should be distributed
+CREATE VIEW post_11_upgrade.depends_on_nothing_1 AS SELECT * FROM (VALUES (1)) as values;
+CREATE VIEW post_11_upgrade.depends_on_nothing_2 AS SELECT 1;
+
+-- views that depend on pg/citus objects should be distributed
+CREATE VIEW post_11_upgrade.depends_on_pg AS SELECT * FROM pg_class;
+CREATE VIEW post_11_upgrade.depends_on_citus AS SELECT * FROM pg_dist_partition;
+
+-- views that depend only on sequences should be distributed
+CREATE SEQUENCE post_11_upgrade.seq_bigint AS bigint INCREMENT BY 3 CACHE 10 CYCLE;
+CREATE VIEW post_11_upgrade.depends_on_seq AS SELECT nextval('post_11_upgrade.seq_bigint');
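+-- (illustrative aside: after the upgrade, the matching post-upgrade test
+-- can list which of these views were picked up with a query along these lines:)
+SELECT pg_identify_object_as_address(classid, objid, objsubid)
+FROM pg_dist_object
+WHERE objid IN ('post_11_upgrade.depends_on_nothing_1'::regclass,
+                'post_11_upgrade.depends_on_seq'::regclass,
+                'post_11_upgrade.v_test_1'::regclass)
+ORDER BY 1;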
+
+-- views that depend on a sequence and a local table should not be distributed
+CREATE VIEW post_11_upgrade.depends_on_seq_and_no_support AS SELECT nextval('post_11_upgrade.seq_bigint') FROM post_11_upgrade.non_dist_table_for_view;
+
+RESET citus.enable_ddl_propagation;
+
+CREATE TABLE sensors_parser(
+    measureid integer,
+    eventdatetime date,
+    measure_data jsonb,
+    name text,
+    col_with_def int DEFAULT post_11_upgrade.func_in_transaction_def(),
+    col_with_type post_11_upgrade.my_type,
+    PRIMARY KEY (measureid, eventdatetime, measure_data)
+) PARTITION BY RANGE(eventdatetime);
+CREATE TABLE sensors_parser_a_partition PARTITION OF sensors_parser FOR VALUES FROM ('2000-01-01') TO ('2020-01-01');
+CREATE INDEX sensors_parser_search_name ON sensors_parser USING gin (to_tsvector('partial_index_test_config'::regconfig, (COALESCE(name, ''::character varying))::text));
+SELECT create_distributed_table('sensors_parser', 'measureid');
+
+
+SET citus.enable_ddl_propagation TO off;
+CREATE COLLATION post_11_upgrade.german_phonebook_unpropagated (provider = icu, locale = 'de-u-co-phonebk');
+SELECT 1 FROM run_command_on_workers($$CREATE COLLATION post_11_upgrade.german_phonebook_unpropagated (provider = icu, locale = 'de-u-co-phonebk');$$);
+SET citus.enable_ddl_propagation TO on;
+
+CREATE TABLE test_propagate_collate(id int, t2 text COLLATE german_phonebook_unpropagated);
+SELECT create_distributed_table('test_propagate_collate', 'id');
+
+
+CREATE SEQUENCE "SC1";
+CREATE SEQUENCE "unrelated_sequence";
+
+-- "SC1" is on nextval, while "unrelated_sequence" is OWNED BY the same column
+CREATE TABLE test(a int, b int default nextval ('"SC1"'));
+ALTER SEQUENCE "unrelated_sequence" OWNED BY test.b;
+SELECT create_distributed_table('test','a');
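+-- (illustrative aside: both sequences should end up in pg_dist_object,
+-- "SC1" through the nextval() default and "unrelated_sequence" through the
+-- OWNED BY resolution this patch adds; a check in the style of the earlier
+-- tests would be:)
+SELECT pg_identify_object_as_address(classid, objid, objsubid)
+FROM pg_dist_object
+WHERE classid = 1259
+    AND objid IN ('post_11_upgrade."SC1"'::regclass, 'post_11_upgrade."unrelated_sequence"'::regclass)
+ORDER BY 1;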