diff --git a/.circleci/config.yml b/.circleci/config.yml index 2a0ac95c7..34ddf769e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -520,12 +520,6 @@ workflows: version: 2 build_and_test: jobs: - - check-merge-to-enterprise: - filters: - branches: - ignore: - - /release-[0-9]+\.[0-9]+.*/ # match with releaseX.Y.* - - build: name: build-13 pg_major: 13 @@ -618,6 +612,81 @@ workflows: make: check-failure requires: [build-13] + - test-citus: + name: 'test-13_check-enterprise' + pg_major: 13 + image_tag: '<< pipeline.parameters.pg13_version >>' + make: check-enterprise + requires: [build-13] + - test-citus: + name: 'test-13_check-enterprise-isolation' + pg_major: 13 + image_tag: '<< pipeline.parameters.pg13_version >>' + make: check-enterprise-isolation + requires: [build-13] + - test-citus: + name: 'test-13_check-enterprise-isolation-logicalrep-1' + pg_major: 13 + image_tag: '<< pipeline.parameters.pg13_version >>' + make: check-enterprise-isolation-logicalrep-1 + requires: [build-13] + - test-citus: + name: 'test-13_check-enterprise-isolation-logicalrep-2' + pg_major: 13 + image_tag: '<< pipeline.parameters.pg13_version >>' + make: check-enterprise-isolation-logicalrep-2 + requires: [build-13] + - test-citus: + name: 'test-13_check-enterprise-isolation-logicalrep-3' + pg_major: 13 + image_tag: '<< pipeline.parameters.pg13_version >>' + make: check-enterprise-isolation-logicalrep-3 + requires: [build-13] + - test-citus: + name: 'test-13_check-enterprise-failure' + pg_major: 13 + image: citus/failtester + image_tag: '<< pipeline.parameters.pg13_version >>' + make: check-enterprise-failure + requires: [build-13] + + - test-citus: + name: 'test-14_check-enterprise' + pg_major: 14 + image_tag: '<< pipeline.parameters.pg14_version >>' + make: check-enterprise + requires: [build-14] + - test-citus: + name: 'test-14_check-enterprise-isolation' + pg_major: 14 + image_tag: '<< pipeline.parameters.pg14_version >>' + make: check-enterprise-isolation + requires: [build-14] + - test-citus: + name: 'test-14_check-enterprise-isolation-logicalrep-1' + pg_major: 14 + image_tag: '<< pipeline.parameters.pg14_version >>' + make: check-enterprise-isolation-logicalrep-1 + requires: [build-14] + - test-citus: + name: 'test-14_check-enterprise-isolation-logicalrep-2' + pg_major: 14 + image_tag: '<< pipeline.parameters.pg14_version >>' + make: check-enterprise-isolation-logicalrep-2 + requires: [build-14] + - test-citus: + name: 'test-14_check-enterprise-isolation-logicalrep-3' + pg_major: 14 + image_tag: '<< pipeline.parameters.pg14_version >>' + make: check-enterprise-isolation-logicalrep-3 + requires: [build-14] + - test-citus: + name: 'test-14_check-enterprise-failure' + pg_major: 14 + image: citus/failtester + image_tag: '<< pipeline.parameters.pg14_version >>' + make: check-enterprise-failure + requires: [build-14] - test-citus: name: 'test-14_check-multi' pg_major: 14 diff --git a/Makefile b/Makefile index 7569fd6ce..a8a2cda88 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ endif include Makefile.global -all: extension +all: extension pg_send_cancellation # build extension extension: $(citus_top_builddir)/src/include/citus_version.h @@ -30,14 +30,24 @@ clean-extension: clean-full: $(MAKE) -C src/backend/distributed/ clean-full .PHONY: extension install-extension clean-extension clean-full -# Add to generic targets -install: install-extension install-headers + install-downgrades: $(MAKE) -C src/backend/distributed/ install-downgrades -install-all: install-headers +install-all: 
install-headers install-pg_send_cancellation $(MAKE) -C src/backend/distributed/ install-all -clean: clean-extension +# build citus_send_cancellation binary +pg_send_cancellation: + $(MAKE) -C src/bin/pg_send_cancellation/ all +install-pg_send_cancellation: pg_send_cancellation + $(MAKE) -C src/bin/pg_send_cancellation/ install +clean-pg_send_cancellation: + $(MAKE) -C src/bin/pg_send_cancellation/ clean +.PHONY: pg_send_cancellation install-pg_send_cancellation clean-pg_send_cancellation + +# Add to generic targets +install: install-extension install-headers install-pg_send_cancellation +clean: clean-extension clean-pg_send_cancellation # apply or check style reindent: diff --git a/src/backend/distributed/Makefile b/src/backend/distributed/Makefile index a2ffcc142..087f8520a 100644 --- a/src/backend/distributed/Makefile +++ b/src/backend/distributed/Makefile @@ -22,7 +22,7 @@ SUBDIRS = . commands connection ddl deparser executor metadata operations planne # columnar modules SUBDIRS += ../columnar # enterprise modules -SUBDIRS += +SUBDIRS += replication # Symlinks are not copied over to the build directory if a separete build # directory is used during configure (such as on CI) diff --git a/src/backend/distributed/commands/README.md b/src/backend/distributed/commands/README.md index 633bf56e5..e86771275 100644 --- a/src/backend/distributed/commands/README.md +++ b/src/backend/distributed/commands/README.md @@ -12,8 +12,8 @@ they are often moved to files that are named after the command. | `create_distributed_table.c` | Implementation of UDF's for creating distributed tables | | `drop_distributed_table.c` | Implementation for dropping metadata for partitions of distributed tables | | `extension.c` | Implementation of `CREATE EXTENSION` commands for citus specific checks | -| `foreign_constraint.c` | Implementation of helper functions for foreign key constraints | -| `grant.c` | Placeholder for code granting users access to relations, implemented as enterprise feature | +| `foreign_constraint.c` | Implementation of and helper functions for foreign key constraints | +| `grant.c` | Implementation of `GRANT` commands for roles/users on relations | | `index.c` | Implementation of commands specific to indices on distributed tables | | `multi_copy.c` | Implementation of `COPY` command. There are multiple different copy modes which are described in detail below | | `policy.c` | Implementation of `CREATE\ALTER POLICY` commands. | diff --git a/src/backend/distributed/commands/cluster.c b/src/backend/distributed/commands/cluster.c index 977409947..37fc7bf92 100644 --- a/src/backend/distributed/commands/cluster.c +++ b/src/backend/distributed/commands/cluster.c @@ -10,41 +10,105 @@ #include "postgres.h" +#include "distributed/pg_version_constants.h" + +#include "commands/defrem.h" + #include "catalog/namespace.h" #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/listutils.h" #include "distributed/metadata_cache.h" -/* placeholder for PreprocessClusterStmt */ +static bool IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt); + +/* + * PreprocessClusterStmt first determines whether a given cluster statement involves + * a distributed table. If so (and if it is supported, i.e. no verbose), it + * creates a DDLJob to encapsulate information needed during the worker node + * portion of DDL execution before returning that DDLJob in a List. If no + * distributed table is involved, this function returns NIL. 
+ */ List * PreprocessClusterStmt(Node *node, const char *clusterCommand, ProcessUtilityContext processUtilityContext) { ClusterStmt *clusterStmt = castNode(ClusterStmt, node); - bool showPropagationWarning = false; + bool missingOK = false; + DDLJob *ddlJob = NULL; - /* CLUSTER all */ if (clusterStmt->relation == NULL) { - showPropagationWarning = true; + ereport(WARNING, (errmsg("not propagating CLUSTER command to worker nodes"), + errhint("Provide a specific table in order to CLUSTER " + "distributed tables."))); + + return NIL; } - else + + /* PostgreSQL uses access exclusive lock for CLUSTER command */ + Oid relationId = RangeVarGetRelid(clusterStmt->relation, AccessExclusiveLock, + missingOK); + + /* + * If the table does not exist, don't do anything here to allow PostgreSQL + * to throw the appropriate error or notice message later. + */ + if (!OidIsValid(relationId)) { - bool missingOK = false; + return NIL; + } - Oid relationId = RangeVarGetRelid(clusterStmt->relation, AccessShareLock, - missingOK); + /* we have no planning to do unless the table is distributed */ + bool isCitusRelation = IsCitusTable(relationId); + if (!isCitusRelation) + { + return NIL; + } - if (OidIsValid(relationId)) + if (IsClusterStmtVerbose_compat(clusterStmt)) + { + ereport(ERROR, (errmsg("cannot run CLUSTER command"), + errdetail("VERBOSE option is currently unsupported " + "for distributed tables."))); + } + + ddlJob = palloc0(sizeof(DDLJob)); + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); + ddlJob->metadataSyncCommand = clusterCommand; + ddlJob->taskList = DDLTaskList(relationId, clusterCommand); + + return list_make1(ddlJob); +} + + +/* + * IsClusterStmtVerbose_compat returns true if the given statement + * is a cluster statement with verbose option. 
+ */ +static bool +IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt) +{ +#if PG_VERSION_NUM < PG_VERSION_14 + if (clusterStmt->options & CLUOPT_VERBOSE) + { + return true; + } + return false; +#else + DefElem *opt = NULL; + foreach_ptr(opt, clusterStmt->params) + { + if (strcmp(opt->defname, "verbose") == 0) { - showPropagationWarning = IsCitusTable(relationId); + return defGetBoolean(opt); } } - - if (showPropagationWarning) - { - ereport(WARNING, (errmsg("not propagating CLUSTER command to worker nodes"))); - } - - return NIL; + return false; +#endif } diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index db1834d69..ef80a5d6a 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -33,7 +33,7 @@ static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid); static Oid get_database_owner(Oid db_oid); /* controlled via GUC */ -bool EnableAlterDatabaseOwner = false; +bool EnableAlterDatabaseOwner = true; /* diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index e1e51fc5d..2f8182fe6 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -412,7 +412,10 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) case OCLASS_PROC: { - return CreateFunctionDDLCommandsIdempotent(dependency); + List *DDLCommands = CreateFunctionDDLCommandsIdempotent(dependency); + List *grantDDLCommands = GrantOnFunctionDDLCommands(dependency->objectId); + DDLCommands = list_concat(DDLCommands, grantDDLCommands); + return DDLCommands; } case OCLASS_ROLE: @@ -455,7 +458,13 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) case OCLASS_FOREIGN_SERVER: { - return GetForeignServerCreateDDLCommand(dependency->objectId); + Oid serverId = dependency->objectId; + + List *DDLCommands = GetForeignServerCreateDDLCommand(serverId); + List *grantDDLCommands = GrantOnForeignServerDDLCommands(serverId); + DDLCommands = list_concat(DDLCommands, grantDDLCommands); + + return DDLCommands; } default: diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 4e4c5aa82..d516ad777 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -208,7 +208,23 @@ static DistributeObjectOps Any_View = { static DistributeObjectOps Any_CreatePolicy = { .deparse = NULL, .qualify = NULL, - .preprocess = PreprocessCreatePolicyStmt, + .preprocess = NULL, + .postprocess = PostprocessCreatePolicyStmt, + .address = NULL, + .markDistributed = false, +}; +static DistributeObjectOps Any_CreateRole = { + .deparse = DeparseCreateRoleStmt, + .qualify = NULL, + .preprocess = PreprocessCreateRoleStmt, + .postprocess = NULL, + .address = CreateRoleStmtObjectAddress, + .markDistributed = true, +}; +static DistributeObjectOps Any_DropRole = { + .deparse = DeparseDropRoleStmt, + .qualify = NULL, + .preprocess = PreprocessDropRoleStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -254,6 +270,14 @@ static DistributeObjectOps Any_Grant = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps Any_GrantRole = { + .deparse = DeparseGrantRoleStmt, + .qualify = NULL, + .preprocess = PreprocessGrantRoleStmt, + .postprocess = PostprocessGrantRoleStmt, + .address = NULL, + .markDistributed = false, +}; static 
DistributeObjectOps Any_Index = { .deparse = NULL, .qualify = NULL, @@ -410,6 +434,14 @@ static DistributeObjectOps Extension_Drop = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps FDW_Grant = { + .deparse = DeparseGrantOnFDWStmt, + .qualify = NULL, + .preprocess = PreprocessGrantOnFDWStmt, + .postprocess = NULL, + .address = NULL, + .markDistributed = false, +}; static DistributeObjectOps ForeignServer_Drop = { .deparse = DeparseDropForeignServerStmt, .qualify = NULL, @@ -418,6 +450,14 @@ static DistributeObjectOps ForeignServer_Drop = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps ForeignServer_Grant = { + .deparse = DeparseGrantOnForeignServerStmt, + .qualify = NULL, + .preprocess = PreprocessGrantOnForeignServerStmt, + .postprocess = NULL, + .address = NULL, + .markDistributed = false, +}; static DistributeObjectOps ForeignServer_Rename = { .deparse = DeparseAlterForeignServerRenameStmt, .qualify = NULL, @@ -478,6 +518,14 @@ static DistributeObjectOps Function_Drop = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps Function_Grant = { + .deparse = DeparseGrantOnFunctionStmt, + .qualify = NULL, + .preprocess = PreprocessGrantOnFunctionStmt, + .postprocess = PostprocessGrantOnFunctionStmt, + .address = NULL, + .markDistributed = false, +}; static DistributeObjectOps View_Drop = { .deparse = DeparseDropViewStmt, .qualify = QualifyDropViewStmt, @@ -553,6 +601,14 @@ static DistributeObjectOps Procedure_Drop = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps Procedure_Grant = { + .deparse = DeparseGrantOnFunctionStmt, + .qualify = NULL, + .preprocess = PreprocessGrantOnFunctionStmt, + .postprocess = PostprocessGrantOnFunctionStmt, + .address = NULL, + .markDistributed = false, +}; static DistributeObjectOps Procedure_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, @@ -602,6 +658,14 @@ static DistributeObjectOps Sequence_Drop = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps Sequence_Grant = { + .deparse = DeparseGrantOnSequenceStmt, + .qualify = QualifyGrantOnSequenceStmt, + .preprocess = PreprocessGrantOnSequenceStmt, + .postprocess = PostprocessGrantOnSequenceStmt, + .address = NULL, + .markDistributed = false, +}; static DistributeObjectOps Sequence_Rename = { .deparse = DeparseRenameSequenceStmt, .qualify = QualifyRenameSequenceStmt, @@ -768,6 +832,14 @@ static DistributeObjectOps Routine_Drop = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps Routine_Grant = { + .deparse = DeparseGrantOnFunctionStmt, + .qualify = NULL, + .preprocess = PreprocessGrantOnFunctionStmt, + .postprocess = PostprocessGrantOnFunctionStmt, + .address = NULL, + .markDistributed = false, +}; static DistributeObjectOps Routine_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, @@ -1350,6 +1422,11 @@ GetDistributeObjectOps(Node *node) return &Any_CreatePolicy; } + case T_CreateRoleStmt: + { + return &Any_CreateRole; + } + case T_CreateSchemaStmt: { return &Any_CreateSchema; @@ -1397,6 +1474,11 @@ GetDistributeObjectOps(Node *node) } } + case T_DropRoleStmt: + { + return &Any_DropRole; + } + case T_DropStmt: { DropStmt *stmt = castNode(DropStmt, node); @@ -1504,6 +1586,11 @@ GetDistributeObjectOps(Node *node) } } + case T_GrantRoleStmt: + { + return &Any_GrantRole; + } + case T_GrantStmt: { GrantStmt *stmt = castNode(GrantStmt, node); @@ -1514,6 +1601,36 @@ 
GetDistributeObjectOps(Node *node) return &Schema_Grant; } + case OBJECT_SEQUENCE: + { + return &Sequence_Grant; + } + + case OBJECT_FDW: + { + return &FDW_Grant; + } + + case OBJECT_FOREIGN_SERVER: + { + return &ForeignServer_Grant; + } + + case OBJECT_FUNCTION: + { + return &Function_Grant; + } + + case OBJECT_PROCEDURE: + { + return &Procedure_Grant; + } + + case OBJECT_ROUTINE: + { + return &Routine_Grant; + } + default: { return &Any_Grant; diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 8738b8125..bdc8e6a0e 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -12,7 +12,10 @@ #include "access/genam.h" #include "citus_version.h" +#include "catalog/dependency.h" +#include "catalog/pg_depend.h" #include "catalog/pg_extension_d.h" +#include "catalog/pg_foreign_data_wrapper.h" #include "commands/defrem.h" #include "commands/extension.h" #include "distributed/citus_ruleutils.h" @@ -27,9 +30,12 @@ #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" #include "distributed/transaction_management.h" +#include "foreign/foreign.h" #include "nodes/makefuncs.h" #include "utils/lsyscache.h" #include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/syscache.h" /* Local functions forward declarations for helper functions */ @@ -42,6 +48,7 @@ static List * GetAllViews(void); static bool ShouldPropagateExtensionCommand(Node *parseTree); static bool IsAlterExtensionSetSchemaCitus(Node *parseTree); static Node * RecreateExtensionStmt(Oid extensionOid); +static List * GenerateGrantCommandsOnExtesionDependentFDWs(Oid extensionId); /* @@ -820,6 +827,12 @@ CreateExtensionDDLCommand(const ObjectAddress *extensionAddress) List *ddlCommands = list_make1((void *) ddlCommand); + /* any privilege granted on FDWs that belong to the extension should be included */ + List *FDWGrants = + GenerateGrantCommandsOnExtesionDependentFDWs(extensionAddress->objectId); + + ddlCommands = list_concat(ddlCommands, FDWGrants); + return ddlCommands; } @@ -878,6 +891,88 @@ RecreateExtensionStmt(Oid extensionOid) } +/* + * GenerateGrantCommandsOnExtesionDependentFDWs returns a list of commands that GRANTs + * the privileges on FDWs that are depending on the given extension. + */ +static List * +GenerateGrantCommandsOnExtesionDependentFDWs(Oid extensionId) +{ + List *commands = NIL; + List *FDWOids = GetDependentFDWsToExtension(extensionId); + + Oid FDWOid = InvalidOid; + foreach_oid(FDWOid, FDWOids) + { + Acl *aclEntry = GetPrivilegesForFDW(FDWOid); + + if (aclEntry == NULL) + { + continue; + } + + AclItem *privileges = ACL_DAT(aclEntry); + int numberOfPrivsGranted = ACL_NUM(aclEntry); + + for (int i = 0; i < numberOfPrivsGranted; i++) + { + commands = list_concat(commands, + GenerateGrantOnFDWQueriesFromAclItem(FDWOid, + &privileges[i])); + } + } + + return commands; +} + + +/* + * GetDependentFDWsToExtension gets an extension oid and returns the list of oids of FDWs + * that are depending on the given extension. 
+ */ +List * +GetDependentFDWsToExtension(Oid extensionId) +{ + List *extensionFDWs = NIL; + ScanKeyData key[3]; + int scanKeyCount = 3; + HeapTuple tup; + + Relation pgDepend = table_open(DependRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_depend_refclassid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(ExtensionRelationId)); + ScanKeyInit(&key[1], + Anum_pg_depend_refobjid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(extensionId)); + ScanKeyInit(&key[2], + Anum_pg_depend_classid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(ForeignDataWrapperRelationId)); + + SysScanDesc scan = systable_beginscan(pgDepend, InvalidOid, false, + NULL, scanKeyCount, key); + + while (HeapTupleIsValid(tup = systable_getnext(scan))) + { + Form_pg_depend pgDependEntry = (Form_pg_depend) GETSTRUCT(tup); + + if (pgDependEntry->deptype == DEPENDENCY_EXTENSION) + { + extensionFDWs = lappend_oid(extensionFDWs, pgDependEntry->objid); + } + } + + systable_endscan(scan); + table_close(pgDepend, AccessShareLock); + + return extensionFDWs; +} + + /* * AlterExtensionSchemaStmtObjectAddress returns the ObjectAddress of the extension that is * the subject of the AlterObjectSchemaStmt. Errors if missing_ok is false. diff --git a/src/backend/distributed/commands/foreign_data_wrapper.c b/src/backend/distributed/commands/foreign_data_wrapper.c new file mode 100644 index 000000000..b0553ecae --- /dev/null +++ b/src/backend/distributed/commands/foreign_data_wrapper.c @@ -0,0 +1,144 @@ +/*------------------------------------------------------------------------- + * + * foreign_data_wrapper.c + * Commands for FOREIGN DATA WRAPPER statements. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/pg_foreign_data_wrapper.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata/distobject.h" +#include "foreign/foreign.h" +#include "nodes/makefuncs.h" +#include "nodes/parsenodes.h" +#include "utils/syscache.h" + +static bool NameListHasFDWOwnedByDistributedExtension(List *FDWNames); +static ObjectAddress GetObjectAddressByFDWName(char *FDWName, bool missing_ok); + + +/* + * PreprocessGrantOnFDWStmt is executed before the statement is applied to the + * local postgres instance. + * + * In this stage we can prepare the commands that need to be run on all workers to grant + * on foreign data wrappers. + */ +List * +PreprocessGrantOnFDWStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(stmt->objtype == OBJECT_FDW); + + if (!NameListHasFDWOwnedByDistributedExtension(stmt->objects)) + { + /* + * We propagate granted privileges on a FDW only if it belongs to a distributed + * extension. For now, we skip for custom FDWs, as most of the users prefer + * extension FDWs. 
+ */ + return NIL; + } + + if (list_length(stmt->objects) > 1) + { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot grant on FDW with other FDWs"), + errhint("Try granting on each object in separate commands"))); + } + + if (!ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + + Assert(list_length(stmt->objects) == 1); + + char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * NameListHasFDWOwnedByDistributedExtension takes a namelist of FDWs and returns true + * if at least one of them depends on a distributed extension. Returns false otherwise. + */ +static bool +NameListHasFDWOwnedByDistributedExtension(List *FDWNames) +{ + Value *FDWValue = NULL; + foreach_ptr(FDWValue, FDWNames) + { + /* captures the extension address during lookup */ + ObjectAddress extensionAddress = { 0 }; + ObjectAddress FDWAddress = GetObjectAddressByFDWName(strVal(FDWValue), false); + + if (IsObjectAddressOwnedByExtension(&FDWAddress, &extensionAddress)) + { + if (IsObjectDistributed(&extensionAddress)) + { + return true; + } + } + } + + return false; +} + + +/* + * GetObjectAddressByFDWName takes a FDW name and returns the object address. + */ +static ObjectAddress +GetObjectAddressByFDWName(char *FDWName, bool missing_ok) +{ + ForeignDataWrapper *FDW = GetForeignDataWrapperByName(FDWName, missing_ok); + Oid FDWId = FDW->fdwid; + ObjectAddress address = { 0 }; + ObjectAddressSet(address, ForeignDataWrapperRelationId, FDWId); + + return address; +} + + +/* + * GetPrivilegesForFDW takes a FDW object id and returns the privileges granted + * on that FDW as a Acl object. Returns NULL if there is no privilege granted. + */ +Acl * +GetPrivilegesForFDW(Oid FDWOid) +{ + HeapTuple fdwtup = SearchSysCache1(FOREIGNDATAWRAPPEROID, ObjectIdGetDatum(FDWOid)); + + bool isNull = true; + Datum aclDatum = SysCacheGetAttr(FOREIGNDATAWRAPPEROID, fdwtup, + Anum_pg_foreign_data_wrapper_fdwacl, &isNull); + if (isNull) + { + ReleaseSysCache(fdwtup); + return NULL; + } + + Acl *aclEntry = DatumGetAclPCopy(aclDatum); + + ReleaseSysCache(fdwtup); + + return aclEntry; +} diff --git a/src/backend/distributed/commands/foreign_server.c b/src/backend/distributed/commands/foreign_server.c index a5fa21c0e..291ff565c 100644 --- a/src/backend/distributed/commands/foreign_server.c +++ b/src/backend/distributed/commands/foreign_server.c @@ -9,6 +9,7 @@ */ #include "postgres.h" +#include "miscadmin.h" #include "catalog/pg_foreign_server.h" #include "distributed/commands/utility_hook.h" @@ -23,8 +24,11 @@ #include "nodes/makefuncs.h" #include "nodes/parsenodes.h" #include "nodes/primnodes.h" +#include "utils/builtins.h" +static char * GetForeignServerAlterOwnerCommand(Oid serverId); static Node * RecreateForeignServerStmt(Oid serverId); +static bool NameListHasDistributedServer(List *serverNames); static ObjectAddress GetObjectAddressByServerName(char *serverName, bool missing_ok); @@ -62,6 +66,53 @@ AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok) } +/* + * PreprocessGrantOnForeignServerStmt is executed before the statement is applied to the + * local postgres instance. + * + * In this stage we can prepare the commands that need to be run on all workers to grant + * on servers. 
+ */ +List * +PreprocessGrantOnForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(stmt->objtype == OBJECT_FOREIGN_SERVER); + + bool includesDistributedServer = NameListHasDistributedServer(stmt->objects); + + if (!includesDistributedServer) + { + return NIL; + } + + if (list_length(stmt->objects) > 1) + { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot grant on distributed server with other servers"), + errhint("Try granting on each object in separate commands"))); + } + + if (!ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + + Assert(list_length(stmt->objects) == 1); + + char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + /* * RenameForeignServerStmtObjectAddress finds the ObjectAddress for the server that is * renamed by given RenmaeStmt. If missingOk is false and if the server does not exist, @@ -109,14 +160,37 @@ GetForeignServerCreateDDLCommand(Oid serverId) Node *stmt = RecreateForeignServerStmt(serverId); /* capture ddl command for the create statement */ - const char *ddlCommand = DeparseTreeNode(stmt); + const char *createCommand = DeparseTreeNode(stmt); + const char *alterOwnerCommand = GetForeignServerAlterOwnerCommand(serverId); - List *ddlCommands = list_make1((void *) ddlCommand); + List *ddlCommands = list_make2((void *) createCommand, + (void *) alterOwnerCommand); return ddlCommands; } +/* + * GetForeignServerAlterOwnerCommand returns "ALTER SERVER .. OWNER TO .." statement + * for the specified foreign server. + */ +static char * +GetForeignServerAlterOwnerCommand(Oid serverId) +{ + ForeignServer *server = GetForeignServer(serverId); + Oid ownerId = server->owner; + char *ownerName = GetUserNameFromId(ownerId, false); + + StringInfo alterCommand = makeStringInfo(); + + appendStringInfo(alterCommand, "ALTER SERVER %s OWNER TO %s;", + quote_identifier(server->servername), + quote_identifier(ownerName)); + + return alterCommand->data; +} + + /* * RecreateForeignServerStmt returns a parsetree for a CREATE SERVER statement * that would recreate the given server on a new node. @@ -161,6 +235,28 @@ RecreateForeignServerStmt(Oid serverId) } +/* + * NameListHasDistributedServer takes a namelist of servers and returns true if at least + * one of them is distributed. Returns false otherwise. + */ +static bool +NameListHasDistributedServer(List *serverNames) +{ + Value *serverValue = NULL; + foreach_ptr(serverValue, serverNames) + { + ObjectAddress address = GetObjectAddressByServerName(strVal(serverValue), false); + + if (IsObjectDistributed(&address)) + { + return true; + } + } + + return false; +} + + static ObjectAddress GetObjectAddressByServerName(char *serverName, bool missing_ok) { diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 43f5ba502..d12c703d2 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -6,7 +6,9 @@ * We currently support replicating function definitions on the * coordinator in all the worker nodes in the form of * - * CREATE OR REPLACE FUNCTION ... queries. + * CREATE OR REPLACE FUNCTION ... queries and + * GRANT ... ON FUNCTION queries + * * * ALTER or DROP operations are not yet propagated. 
* @@ -104,6 +106,7 @@ static void DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid functionAddress); static void DistributeFunctionColocatedWithReferenceTable(const ObjectAddress *functionAddress); +static List * FilterDistributedFunctions(GrantStmt *grantStmt); static void EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress, const ObjectAddress extensionAddress, @@ -239,8 +242,17 @@ create_distributed_function(PG_FUNCTION_ARGS) const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true); const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid); initStringInfo(&ddlCommand); - appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC, - createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC); + appendStringInfo(&ddlCommand, "%s;%s;%s", DISABLE_METADATA_SYNC, + createFunctionSQL, alterFunctionOwnerSQL); + List *grantDDLCommands = GrantOnFunctionDDLCommands(funcOid); + char *grantOnFunctionSQL = NULL; + foreach_ptr(grantOnFunctionSQL, grantDDLCommands) + { + appendStringInfo(&ddlCommand, ";%s", grantOnFunctionSQL); + } + + appendStringInfo(&ddlCommand, ";%s", ENABLE_METADATA_SYNC); + SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(), ddlCommand.data); } @@ -1920,3 +1932,162 @@ EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress, EnsureDependenciesExistOnAllNodes(&functionAddress); } + + +/* + * PreprocessGrantOnFunctionStmt is executed before the statement is applied to the local + * postgres instance. + * + * In this stage we can prepare the commands that need to be run on all workers to grant + * on distributed functions, procedures, routines. + */ +List * +PreprocessGrantOnFunctionStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(isFunction(stmt->objtype)); + + List *distributedFunctions = FilterDistributedFunctions(stmt); + + if (list_length(distributedFunctions) == 0 || !ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + + List *grantFunctionList = NIL; + ObjectAddress *functionAddress = NULL; + foreach_ptr(functionAddress, distributedFunctions) + { + ObjectWithArgs *distFunction = ObjectWithArgsFromOid( + functionAddress->objectId); + grantFunctionList = lappend(grantFunctionList, distFunction); + } + + List *originalObjects = stmt->objects; + GrantTargetType originalTargtype = stmt->targtype; + + stmt->objects = grantFunctionList; + stmt->targtype = ACL_TARGET_OBJECT; + + char *sql = DeparseTreeNode((Node *) stmt); + + stmt->objects = originalObjects; + stmt->targtype = originalTargtype; + + List *commandList = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList); +} + + +/* + * PostprocessGrantOnFunctionStmt makes sure dependencies of each + * distributed function in the statement exist on all nodes + */ +List * +PostprocessGrantOnFunctionStmt(Node *node, const char *queryString) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + + List *distributedFunctions = FilterDistributedFunctions(stmt); + + if (list_length(distributedFunctions) == 0) + { + return NIL; + } + + ObjectAddress *functionAddress = NULL; + foreach_ptr(functionAddress, distributedFunctions) + { + EnsureDependenciesExistOnAllNodes(functionAddress); + } + return NIL; +} + + +/* + * FilterDistributedFunctions determines and returns a list of distributed functions + * ObjectAddress-es from given 
grant statement. + */ +static List * +FilterDistributedFunctions(GrantStmt *grantStmt) +{ + List *grantFunctionList = NIL; + + bool grantOnFunctionCommand = (grantStmt->targtype == ACL_TARGET_OBJECT && + isFunction(grantStmt->objtype)); + bool grantAllFunctionsOnSchemaCommand = (grantStmt->targtype == + ACL_TARGET_ALL_IN_SCHEMA && + isFunction(grantStmt->objtype)); + + /* we are only interested in function/procedure/routine level grants */ + if (!grantOnFunctionCommand && !grantAllFunctionsOnSchemaCommand) + { + return NIL; + } + + if (grantAllFunctionsOnSchemaCommand) + { + List *distributedFunctionList = DistributedFunctionList(); + ObjectAddress *distributedFunction = NULL; + List *namespaceOidList = NIL; + + /* iterate over all namespace names provided to get their oid's */ + Value *namespaceValue = NULL; + foreach_ptr(namespaceValue, grantStmt->objects) + { + char *nspname = strVal(namespaceValue); + bool missing_ok = false; + Oid namespaceOid = get_namespace_oid(nspname, missing_ok); + namespaceOidList = list_append_unique_oid(namespaceOidList, namespaceOid); + } + + /* + * iterate over all distributed functions to filter the ones + * that belong to one of the namespaces from above + */ + foreach_ptr(distributedFunction, distributedFunctionList) + { + Oid namespaceOid = get_func_namespace(distributedFunction->objectId); + + /* + * if this distributed function's schema is one of the schemas + * specified in the GRANT .. ALL FUNCTIONS IN SCHEMA .. + * add it to the list + */ + if (list_member_oid(namespaceOidList, namespaceOid)) + { + grantFunctionList = lappend(grantFunctionList, distributedFunction); + } + } + } + else + { + bool missingOk = false; + ObjectWithArgs *objectWithArgs = NULL; + foreach_ptr(objectWithArgs, grantStmt->objects) + { + ObjectAddress *functionAddress = palloc0(sizeof(ObjectAddress)); + functionAddress->classId = ProcedureRelationId; + functionAddress->objectId = LookupFuncWithArgs(grantStmt->objtype, + objectWithArgs, + missingOk); + functionAddress->objectSubId = 0; + + /* + * if this function from GRANT .. ON FUNCTION .. is a distributed + * function, add it to the list + */ + if (IsObjectDistributed(functionAddress)) + { + grantFunctionList = lappend(grantFunctionList, functionAddress); + } + } + } + return grantFunctionList; +} diff --git a/src/backend/distributed/commands/grant.c b/src/backend/distributed/commands/grant.c index c40605e69..bf7c39b3b 100644 --- a/src/backend/distributed/commands/grant.c +++ b/src/backend/distributed/commands/grant.c @@ -8,13 +8,244 @@ *------------------------------------------------------------------------- */ +#include "postgres.h" + +#include "distributed/citus_ruleutils.h" #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/metadata/distobject.h" +#include "distributed/metadata_cache.h" +#include "distributed/version_compat.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "utils/lsyscache.h" -/* placeholder for PreprocessGrantStmt */ +/* Local functions forward declarations for helper functions */ +static List * CollectGrantTableIdList(GrantStmt *grantStmt); + + +/* + * PreprocessGrantStmt determines whether a given GRANT/REVOKE statement involves + * a distributed table. If so, it creates DDLJobs to encapsulate information + * needed during the worker node portion of DDL execution before returning the + * DDLJobs in a List. If no distributed table is involved, this returns NIL. + * + * NB: So far column level privileges are not supported. 
+ */ List * PreprocessGrantStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { - return NIL; + GrantStmt *grantStmt = castNode(GrantStmt, node); + StringInfoData privsString; + StringInfoData granteesString; + StringInfoData targetString; + StringInfoData ddlString; + ListCell *granteeCell = NULL; + ListCell *tableListCell = NULL; + bool isFirst = true; + List *ddlJobs = NIL; + + initStringInfo(&privsString); + initStringInfo(&granteesString); + initStringInfo(&targetString); + initStringInfo(&ddlString); + + /* + * So far only table level grants are supported. Most other types of + * grants aren't interesting anyway. + */ + if (grantStmt->objtype != OBJECT_TABLE) + { + return NIL; + } + + List *tableIdList = CollectGrantTableIdList(grantStmt); + + /* nothing to do if there is no distributed table in the grant list */ + if (tableIdList == NIL) + { + return NIL; + } + + /* deparse the privileges */ + if (grantStmt->privileges == NIL) + { + appendStringInfo(&privsString, "ALL"); + } + else + { + ListCell *privilegeCell = NULL; + + isFirst = true; + foreach(privilegeCell, grantStmt->privileges) + { + AccessPriv *priv = lfirst(privilegeCell); + + if (!isFirst) + { + appendStringInfoString(&privsString, ", "); + } + isFirst = false; + + if (priv->cols != NIL) + { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("grant/revoke on column list is currently " + "unsupported"))); + } + + Assert(priv->priv_name != NULL); + + appendStringInfo(&privsString, "%s", priv->priv_name); + } + } + + /* deparse the grantees */ + isFirst = true; + foreach(granteeCell, grantStmt->grantees) + { + RoleSpec *spec = lfirst(granteeCell); + + if (!isFirst) + { + appendStringInfoString(&granteesString, ", "); + } + isFirst = false; + + appendStringInfoString(&granteesString, RoleSpecString(spec, true)); + } + + /* + * Deparse the target objects, and issue the deparsed statements to + * workers, if applicable. That's so we easily can replicate statements + * only to distributed relations. + */ + isFirst = true; + foreach(tableListCell, tableIdList) + { + Oid relationId = lfirst_oid(tableListCell); + const char *grantOption = ""; + + resetStringInfo(&targetString); + appendStringInfo(&targetString, "%s", generate_relation_name(relationId, NIL)); + + if (grantStmt->is_grant) + { + if (grantStmt->grant_option) + { + grantOption = " WITH GRANT OPTION"; + } + + appendStringInfo(&ddlString, "GRANT %s ON %s TO %s%s", + privsString.data, targetString.data, granteesString.data, + grantOption); + } + else + { + if (grantStmt->grant_option) + { + grantOption = "GRANT OPTION FOR "; + } + + appendStringInfo(&ddlString, "REVOKE %s%s ON %s FROM %s", + grantOption, privsString.data, targetString.data, + granteesString.data); + } + + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); + ddlJob->metadataSyncCommand = pstrdup(ddlString.data); + ddlJob->taskList = NIL; + if (IsCitusTable(relationId)) + { + ddlJob->taskList = DDLTaskList(relationId, ddlString.data); + } + ddlJobs = lappend(ddlJobs, ddlJob); + + resetStringInfo(&ddlString); + } + + return ddlJobs; +} + + +/* + * CollectGrantTableIdList determines and returns a list of distributed table + * Oids from grant statement. + * Grant statement may appear in two forms + * 1 - grant on table: + * each distributed table oid in grant object list is added to returned list. 
+ * 2 - grant all tables in schema: + * Collect namespace oid list from grant statement + * Add each distributed table oid in the target namespace list to the returned list. + */ +static List * +CollectGrantTableIdList(GrantStmt *grantStmt) +{ + List *grantTableList = NIL; + + bool grantOnTableCommand = (grantStmt->targtype == ACL_TARGET_OBJECT && + grantStmt->objtype == OBJECT_TABLE); + bool grantAllTablesOnSchemaCommand = (grantStmt->targtype == + ACL_TARGET_ALL_IN_SCHEMA && + grantStmt->objtype == OBJECT_TABLE); + + /* we are only interested in table level grants */ + if (!grantOnTableCommand && !grantAllTablesOnSchemaCommand) + { + return NIL; + } + + if (grantAllTablesOnSchemaCommand) + { + List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); + ListCell *citusTableIdCell = NULL; + List *namespaceOidList = NIL; + + ListCell *objectCell = NULL; + foreach(objectCell, grantStmt->objects) + { + char *nspname = strVal(lfirst(objectCell)); + bool missing_ok = false; + Oid namespaceOid = get_namespace_oid(nspname, missing_ok); + Assert(namespaceOid != InvalidOid); + namespaceOidList = list_append_unique_oid(namespaceOidList, namespaceOid); + } + + foreach(citusTableIdCell, citusTableIdList) + { + Oid relationId = lfirst_oid(citusTableIdCell); + Oid namespaceOid = get_rel_namespace(relationId); + if (list_member_oid(namespaceOidList, namespaceOid)) + { + grantTableList = lappend_oid(grantTableList, relationId); + } + } + } + else + { + ListCell *objectCell = NULL; + foreach(objectCell, grantStmt->objects) + { + RangeVar *relvar = (RangeVar *) lfirst(objectCell); + Oid relationId = RangeVarGetRelid(relvar, NoLock, false); + if (IsCitusTable(relationId)) + { + grantTableList = lappend_oid(grantTableList, relationId); + continue; + } + + /* check for distributed sequences included in GRANT ON TABLE statement */ + ObjectAddress sequenceAddress = { 0 }; + ObjectAddressSet(sequenceAddress, RelationRelationId, relationId); + if (IsObjectDistributed(&sequenceAddress)) + { + grantTableList = lappend_oid(grantTableList, relationId); + } + } + } + + return grantTableList; } diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 4b5d2a72a..42a50c4a3 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -73,10 +73,12 @@ #include "distributed/commands/multi_copy.h" #include "distributed/commands/utility_hook.h" #include "distributed/intermediate_results.h" +#include "distributed/listutils.h" #include "distributed/local_executor.h" #include "distributed/log_utils.h" #include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" +#include "distributed/multi_executor.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_router_planner.h" @@ -102,6 +104,7 @@ #include "libpq/pqformat.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" +#include "parser/parse_func.h" #include "parser/parse_type.h" #if PG_VERSION_NUM >= PG_VERSION_13 #include "tcop/cmdtag.h" @@ -117,6 +120,9 @@ /* constant used in binary protocol */ static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0"; +/* if true, skip validation of JSONB columns during COPY */ +bool SkipJsonbValidationInCopy = true; + /* custom Citus option for appending to a shard */ #define APPEND_TO_SHARD_OPTION "append_to_shard" @@ -242,6 +248,9 @@ typedef enum LocalCopyStatus /* Local functions forward declarations */ static void 
CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag); +static bool IsCopyInBinaryFormat(CopyStmt *copyStatement); +static List * FindJsonbInputColumns(TupleDesc tupleDescriptor, + List *inputColumnNameList); static List * RemoveOptionFromList(List *optionList, char *optionName); static bool BinaryOutputFunctionDefined(Oid typeId); static bool BinaryInputFunctionDefined(Oid typeId); @@ -452,6 +461,7 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT List *columnNameList = NIL; int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX; + bool isInputFormatBinary = IsCopyInBinaryFormat(copyStatement); uint64 processedRowCount = 0; ErrorContextCallback errorCallback; @@ -543,6 +553,72 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT copiedDistributedRelationTuple->relkind = RELKIND_RELATION; } + /* + * We make an optimisation to skip JSON parsing for JSONB columns, because many + * Citus users have large objects in this column and parsing it on the coordinator + * causes significant CPU overhead. We do this by forcing BeginCopyFrom and + * NextCopyFrom to parse the column as text and then encoding it as JSON again + * by using citus_text_send_as_jsonb as the binary output function. + * + * The main downside of enabling this optimisation is that it defers validation + * until the object is parsed by the worker, which is unable to give an accurate + * line number. + */ + if (SkipJsonbValidationInCopy && !isInputFormatBinary) + { + CopyOutState copyOutState = copyDest->copyOutState; + ListCell *jsonbColumnIndexCell = NULL; + + /* get the column indices for all JSONB columns that appear in the input */ + List *jsonbColumnIndexList = FindJsonbInputColumns( + copiedDistributedRelation->rd_att, + copyStatement->attlist); + + foreach(jsonbColumnIndexCell, jsonbColumnIndexList) + { + int jsonbColumnIndex = lfirst_int(jsonbColumnIndexCell); + Form_pg_attribute currentColumn = + TupleDescAttr(copiedDistributedRelation->rd_att, jsonbColumnIndex); + + if (jsonbColumnIndex == partitionColumnIndex) + { + /* + * In the curious case of using a JSONB column as partition column, + * we leave it as is because we want to make sure the hashing works + * correctly. + */ + continue; + } + + ereport(DEBUG1, (errmsg("parsing JSONB column %s as text", + NameStr(currentColumn->attname)))); + + /* parse the column as text instead of JSONB */ + currentColumn->atttypid = TEXTOID; + + if (copyOutState->binary) + { + Oid textSendAsJsonbFunctionId = CitusTextSendAsJsonbFunctionId(); + + /* + * If we're using binary encoding between coordinator and workers + * then we should honour the format expected by jsonb_recv, which + * is a version number followed by text. We therefore use an output + * function which sends the text as if it were jsonb, namely by + * prepending a version number. + */ + fmgr_info(textSendAsJsonbFunctionId, + &copyDest->columnOutputFunctions[jsonbColumnIndex]); + } + else + { + Oid textoutFunctionId = TextOutFunctionId(); + fmgr_info(textoutFunctionId, + &copyDest->columnOutputFunctions[jsonbColumnIndex]); + } + } + } + /* initialize copy state to read from COPY data source */ CopyFromState copyState = BeginCopyFrom_compat(NULL, copiedDistributedRelation, @@ -610,6 +686,82 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT } +/* + * IsCopyInBinaryFormat determines whether the given COPY statement has the + * WITH (format binary) option. 
+ */ +static bool +IsCopyInBinaryFormat(CopyStmt *copyStatement) +{ + ListCell *optionCell = NULL; + + foreach(optionCell, copyStatement->options) + { + DefElem *defel = lfirst_node(DefElem, optionCell); + if (strcmp(defel->defname, "format") == 0 && + strcmp(defGetString(defel), "binary") == 0) + { + return true; + } + } + + return false; +} + + +/* + * FindJsonbInputColumns finds columns in the tuple descriptor that have + * the JSONB type and appear in inputColumnNameList. If the list is empty then + * all JSONB columns are returned. + */ +static List * +FindJsonbInputColumns(TupleDesc tupleDescriptor, List *inputColumnNameList) +{ + List *jsonbColumnIndexList = NIL; + int columnCount = tupleDescriptor->natts; + + for (int columnIndex = 0; columnIndex < columnCount; columnIndex++) + { + Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex); + if (currentColumn->attisdropped) + { + continue; + } + + if (currentColumn->atttypid != JSONBOID) + { + continue; + } + + if (inputColumnNameList != NIL) + { + ListCell *inputColumnCell = NULL; + bool isInputColumn = false; + + foreach(inputColumnCell, inputColumnNameList) + { + char *inputColumnName = strVal(lfirst(inputColumnCell)); + + if (namestrcmp(&currentColumn->attname, inputColumnName) == 0) + { + isInputColumn = true; + break; + } + } + + if (!isInputColumn) + { + continue; + } + } + + jsonbColumnIndexList = lappend_int(jsonbColumnIndexList, columnIndex); + } + + return jsonbColumnIndexList; +} + + static void CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 processedRowCount) { diff --git a/src/backend/distributed/commands/policy.c b/src/backend/distributed/commands/policy.c index 0956a0268..e41d33451 100644 --- a/src/backend/distributed/commands/policy.c +++ b/src/backend/distributed/commands/policy.c @@ -12,112 +12,650 @@ #include "catalog/namespace.h" #include "commands/policy.h" +#include "distributed/citus_ruleutils.h" #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/listutils.h" #include "distributed/metadata_cache.h" +#include "miscadmin.h" +#include "nodes/makefuncs.h" +#include "parser/parse_clause.h" +#include "parser/parse_relation.h" +#include "rewrite/rewriteManip.h" +#include "rewrite/rowsecurity.h" #include "utils/builtins.h" +#include "utils/ruleutils.h" -/* placeholder for CreatePolicyCommands */ +static const char * unparse_policy_command(const char aclchar); +static void AddRangeTableEntryToQueryCompat(ParseState *parseState, Relation relation); +static RowSecurityPolicy * GetPolicyByName(Oid relationId, const char *policyName); +static List * GetPolicyListForRelation(Oid relationId); +static char * CreatePolicyCommandForPolicy(Oid relationId, RowSecurityPolicy *policy); + + +/* + * CreatePolicyCommands takes in a relationId, and returns the list of create policy + * commands needed to reconstruct the policies of that table. 
+ */ List * CreatePolicyCommands(Oid relationId) { - /* placeholder for future implementation */ - return NIL; -} + List *commands = NIL; + List *policyList = GetPolicyListForRelation(relationId); -/* placeholder for PreprocessCreatePolicyStmt */ -List * -PreprocessCreatePolicyStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - CreatePolicyStmt *stmt = castNode(CreatePolicyStmt, node); - Oid relationId = RangeVarGetRelid(stmt->table, - AccessExclusiveLock, - false); - if (IsCitusTable(relationId)) + RowSecurityPolicy *policy; + foreach_ptr(policy, policyList) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("policies on distributed tables are only supported in " - "Citus Enterprise"))); + char *createPolicyCommand = CreatePolicyCommandForPolicy(relationId, policy); + commands = lappend(commands, makeTableDDLCommandString(createPolicyCommand)); } - /* placeholder for future implementation */ - return NIL; + return commands; } -/* placeholder for PreprocessAlterPolicyStmt */ +/* + * GetPolicyListForRelation returns a list of RowSecurityPolicy objects identifying + * the policies on the relation with relationId. Note that this function acquires + * AccessShareLock on relation and does not release it in the end to make sure that + * caller will process valid policies through the transaction. + */ +static List * +GetPolicyListForRelation(Oid relationId) +{ + Relation relation = table_open(relationId, AccessShareLock); + + if (!relation_has_policies(relation)) + { + table_close(relation, NoLock); + + return NIL; + } + + if (relation->rd_rsdesc == NULL) + { + /* + * there are policies, but since RLS is not enabled they are not loaded into + * cache, we will do so here for us to access + */ + RelationBuildRowSecurity(relation); + } + + List *policyList = NIL; + + RowSecurityPolicy *policy; + foreach_ptr(policy, relation->rd_rsdesc->policies) + { + policyList = lappend(policyList, policy); + } + + table_close(relation, NoLock); + + return policyList; +} + + +/* + * CreatePolicyCommandForPolicy takes a relationId and a policy, returns + * the CREATE POLICY command needed to reconstruct the policy identified + * by the "policy" object on the relation with relationId. 
+ */ +static char * +CreatePolicyCommandForPolicy(Oid relationId, RowSecurityPolicy *policy) +{ + char *relationName = generate_qualified_relation_name(relationId); + List *relationContext = deparse_context_for(relationName, relationId); + + StringInfo createPolicyCommand = makeStringInfo(); + + appendStringInfo(createPolicyCommand, "CREATE POLICY %s ON %s FOR %s", + quote_identifier(policy->policy_name), + relationName, + unparse_policy_command(policy->polcmd)); + + + appendStringInfoString(createPolicyCommand, " TO "); + + /* + * iterate over all roles and append them to the ddl command with commas + * separating the role names + */ + Oid *roles = (Oid *) ARR_DATA_PTR(policy->roles); + for (int roleIndex = 0; roleIndex < ARR_DIMS(policy->roles)[0]; roleIndex++) + { + const char *roleName; + + if (roleIndex > 0) + { + appendStringInfoString(createPolicyCommand, ", "); + } + + if (roles[roleIndex] == ACL_ID_PUBLIC) + { + roleName = "PUBLIC"; + } + else + { + roleName = quote_identifier(GetUserNameFromId(roles[roleIndex], false)); + } + + appendStringInfoString(createPolicyCommand, roleName); + } + + if (policy->qual) + { + char *qualString = deparse_expression((Node *) (policy->qual), + relationContext, false, false); + appendStringInfo(createPolicyCommand, " USING (%s)", qualString); + } + + if (policy->with_check_qual) + { + char *withCheckQualString = deparse_expression( + (Node *) (policy->with_check_qual), relationContext, false, false); + appendStringInfo(createPolicyCommand, " WITH CHECK (%s)", + withCheckQualString); + } + + return createPolicyCommand->data; +} + + +/* + * unparse_policy_command takes the type of a policy command and converts it to its full + * command string. This function is the exact inverse of parse_policy_command that is in + * postgres. + */ +static const char * +unparse_policy_command(const char aclchar) +{ + switch (aclchar) + { + case '*': + { + return "ALL"; + } + + case ACL_SELECT_CHR: + { + return "SELECT"; + } + + case ACL_INSERT_CHR: + { + return "INSERT"; + } + + case ACL_UPDATE_CHR: + { + return "UPDATE"; + } + + case ACL_DELETE_CHR: + { + return "DELETE"; + } + + default: + { + elog(ERROR, "unrecognized aclchar: %d", aclchar); + return NULL; + } + } +} + + +/* + * PostprocessCreatePolicyStmt determines when a CREATE POLICY statement involves + * a distributed table. If so, it creates DDLJobs to encapsulate information + * needed during the worker node portion of DDL execution before returning the + * DDLJobs in a List. If no distributed table is involved, this returns NIL. 
+ */ +List * +PostprocessCreatePolicyStmt(Node *node, const char *queryString) +{ + CreatePolicyStmt *stmt = castNode(CreatePolicyStmt, node); + + /* load relation information */ + RangeVar *relvar = stmt->table; + Oid relationId = RangeVarGetRelid(relvar, NoLock, false); + if (!IsCitusTable(relationId)) + { + return NIL; + } + + Relation relation = table_open(relationId, AccessShareLock); + + + ParseState *qual_pstate = make_parsestate(NULL); + AddRangeTableEntryToQueryCompat(qual_pstate, relation); + Node *qual = transformWhereClause(qual_pstate, + copyObject(stmt->qual), + EXPR_KIND_POLICY, + "POLICY"); + if (qual) + { + ErrorIfUnsupportedPolicyExpr(qual); + } + + ParseState *with_check_pstate = make_parsestate(NULL); + AddRangeTableEntryToQueryCompat(with_check_pstate, relation); + Node *with_check_qual = transformWhereClause(with_check_pstate, + copyObject(stmt->with_check), + EXPR_KIND_POLICY, + "POLICY"); + if (with_check_qual) + { + ErrorIfUnsupportedPolicyExpr(with_check_qual); + } + + RowSecurityPolicy *policy = GetPolicyByName(relationId, stmt->policy_name); + + if (policy == NULL) + { + /* + * As this function is executed after standard process utility created the + * policy, we should be able to find & deparse the policy with policy_name. + * But to be more safe, error out here. + */ + ereport(ERROR, (errmsg("cannot create policy, policy does not exist."))); + } + + EnsureCoordinator(); + + char *ddlCommand = CreatePolicyCommandForPolicy(relationId, policy); + + /* + * create the DDLJob that needs to be executed both on the local relation and all its + * placements. + */ + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); + ddlJob->metadataSyncCommand = pstrdup(ddlCommand); + ddlJob->taskList = DDLTaskList(relationId, ddlCommand); + + relation_close(relation, NoLock); + + return list_make1(ddlJob); +} + + +/* + * AddRangeTableEntryToQueryCompat adds the given relation to query. + * This method is a compatibility wrapper. + */ +static void +AddRangeTableEntryToQueryCompat(ParseState *parseState, Relation relation) +{ +#if PG_VERSION_NUM >= PG_VERSION_13 + ParseNamespaceItem *rte = NULL; +#else + RangeTblEntry *rte = NULL; +#endif + + rte = addRangeTableEntryForRelation(parseState, relation, +#if PG_VERSION_NUM >= PG_VERSION_12 + AccessShareLock, +#endif + NULL, false, false); +#if PG_VERSION_NUM >= PG_VERSION_13 + addNSItemToQuery(parseState, rte, false, true, true); +#else + addRTEtoQuery(parseState, rte, false, true, true); +#endif +} + + +/* + * GetPolicyByName takes a relationId and a policyName, returns RowSecurityPolicy + * object which identifies the policy with name "policyName" on the relation + * with relationId. If there does not exist such a policy, then this function + * returns NULL. + */ +static RowSecurityPolicy * +GetPolicyByName(Oid relationId, const char *policyName) +{ + List *policyList = GetPolicyListForRelation(relationId); + + RowSecurityPolicy *policy = NULL; + foreach_ptr(policy, policyList) + { + if (strncmp(policy->policy_name, policyName, NAMEDATALEN) == 0) + { + return policy; + } + } + + return NULL; +} + + +/* + * PreprocessAlterPolicyStmt determines whether a given ALTER POLICY statement involves a + * distributed table. If so, it creates DDLJobs to encapsulate information needed during + * the worker node portion of DDL execution before returning the DDLJobs in a list. If no + * distributed table is involved this returns NIL. 
+ */ List * PreprocessAlterPolicyStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { - /* placeholder for future implementation */ - return NIL; + AlterPolicyStmt *stmt = castNode(AlterPolicyStmt, node); + StringInfoData ddlString; + ListCell *roleCell = NULL; + + /* load relation information */ + RangeVar *relvar = stmt->table; + Oid relOid = RangeVarGetRelid(relvar, NoLock, false); + if (!IsCitusTable(relOid)) + { + return NIL; + } + + initStringInfo(&ddlString); + + Relation relation = relation_open(relOid, AccessShareLock); + char *relationName = generate_relation_name(relOid, NIL); + + appendStringInfo(&ddlString, "ALTER POLICY %s ON %s", + quote_identifier(stmt->policy_name), + relationName + ); + + if (stmt->roles) + { + appendStringInfoString(&ddlString, " TO "); + foreach(roleCell, stmt->roles) + { + RoleSpec *roleSpec = (RoleSpec *) lfirst(roleCell); + + appendStringInfoString(&ddlString, RoleSpecString(roleSpec, true)); + + if (lnext_compat(stmt->roles, roleCell) != NULL) + { + appendStringInfoString(&ddlString, ", "); + } + } + } + + List *relationContext = deparse_context_for(relationName, relOid); + + ParseState *qual_pstate = make_parsestate(NULL); + AddRangeTableEntryToQueryCompat(qual_pstate, relation); + Node *qual = transformWhereClause(qual_pstate, + copyObject(stmt->qual), + EXPR_KIND_POLICY, + "POLICY"); + if (qual) + { + ErrorIfUnsupportedPolicyExpr(qual); + + char *qualString = deparse_expression(qual, relationContext, false, false); + appendStringInfo(&ddlString, " USING (%s)", qualString); + } + + ParseState *with_check_pstate = make_parsestate(NULL); + AddRangeTableEntryToQueryCompat(with_check_pstate, relation); + Node *with_check_qual = transformWhereClause(with_check_pstate, + copyObject(stmt->with_check), + EXPR_KIND_POLICY, + "POLICY"); + if (with_check_qual) + { + ErrorIfUnsupportedPolicyExpr(with_check_qual); + + char *withCheckString = deparse_expression(with_check_qual, relationContext, + false, + false); + appendStringInfo(&ddlString, " WITH CHECK (%s)", withCheckString); + } + + /* + * create the DDLJob that needs to be executed both on the local relation and all its + * placements. + */ + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relOid); + ddlJob->metadataSyncCommand = pstrdup(ddlString.data); + ddlJob->taskList = DDLTaskList(relOid, ddlString.data); + + relation_close(relation, NoLock); + + return list_make1(ddlJob); } -/* placeholder for ErrorIfUnsupportedPolicy */ +/* + * ErrorIfUnsupportedPolicy runs checks related to a Relation their Policies and errors + * out if it is not possible to create one of the policies in a distributed environment. + * + * To support policies we require that: + * - Policy expressions do not contain subqueries. + */ void ErrorIfUnsupportedPolicy(Relation relation) { - if (relation_has_policies(relation)) + ListCell *policyCell = NULL; + + if (!relation_has_policies(relation)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("policies on distributed tables are only supported in " - "Citus Enterprise"), - errhint("Remove any policies on a table before distributing"))); + return; + } + + /* + * even if a relation has policies they might not be loaded on the Relation yet. This + * happens if policies are on a Relation without Row Level Security enabled. We need + * to make sure the policies installed are valid for distribution if RLS gets enabled + * after the table has been distributed. 
Therefore we force a build of the policies on + * the cached Relation + */ + if (relation->rd_rsdesc == NULL) + { + RelationBuildRowSecurity(relation); + } + + foreach(policyCell, relation->rd_rsdesc->policies) + { + RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(policyCell); + + ErrorIfUnsupportedPolicyExpr((Node *) policy->qual); + ErrorIfUnsupportedPolicyExpr((Node *) policy->with_check_qual); } } -/* placeholder for PreprocessDropPolicyStmt */ +/* + * ErrorIfUnsupportedPolicyExpr tests if the provided expression for a policy is + * supported on a distributed table. + */ +void +ErrorIfUnsupportedPolicyExpr(Node *expr) +{ + /* + * We do not allow any sublink to prevent expressions with subqueries to be used as an + * expression in policies on distributed tables. + */ + if (checkExprHasSubLink(expr)) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot create policy"), + errdetail("Subqueries are not supported in policies on distributed " + "tables"))); + } +} + + +/* + * PreprocessDropPolicyStmt determines whether a given DROP POLICY statement involves a + * distributed table. If so it creates DDLJobs to encapsulate information needed during + * the worker node portion of DDL execution before returning the DDLJobs in a List. If no + * distributed table is involved this returns NIL. + */ List * PreprocessDropPolicyStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { - /* placeholder for future implementation */ - return NIL; + DropStmt *stmt = castNode(DropStmt, node); + List *ddlJobs = NIL; + ListCell *dropObjectCell = NULL; + + Assert(stmt->removeType == OBJECT_POLICY); + + foreach(dropObjectCell, stmt->objects) + { + List *names = (List *) lfirst(dropObjectCell); + + /* + * the last element in the list of names is the name of the policy. The ones + * before are describing the relation. By removing the last item from the list we + * can use makeRangeVarFromNameList to get to the relation. As list_truncate + * changes the list in place we make a copy before. + */ + names = list_copy(names); + names = list_truncate(names, list_length(names) - 1); + RangeVar *relation = makeRangeVarFromNameList(names); + + Oid relOid = RangeVarGetRelid(relation, NoLock, false); + if (!IsCitusTable(relOid)) + { + continue; + } + + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relOid); + ddlJob->metadataSyncCommand = queryString; + ddlJob->taskList = DDLTaskList(relOid, queryString); + + ddlJobs = lappend(ddlJobs, ddlJob); + } + + return ddlJobs; } -/* placeholder for IsPolicyRenameStmt */ +/* + * IsPolicyRenameStmt returns wherher the passed-in RenameStmt is one of the following + * forms: + * + * - ALTER POLICY ... ON ... RENAME TO ... + */ bool IsPolicyRenameStmt(RenameStmt *stmt) { - /* placeholder for future implementation */ - return false; + return stmt->renameType == OBJECT_POLICY; } -/* placeholder for CreatePolicyEventExtendNames */ +/* + * CreatePolicyEventExtendNames extends relation names in the given CreatePolicyStmt tree. + * This function has side effects on the tree as the names are replaced inplace. 
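+ *
+ * For a hypothetical shardId of 102008, a policy on public.dist_table is
+ * rewritten to target public.dist_table_102008 before the command is relayed
+ * to the worker placement.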
+ */ void CreatePolicyEventExtendNames(CreatePolicyStmt *stmt, const char *schemaName, uint64 shardId) { - /* placeholder for future implementation */ + RangeVar *relation = stmt->table; + char **relationName = &(relation->relname); + char **relationSchemaName = &(relation->schemaname); + + /* prefix with schema name if it is not added already */ + SetSchemaNameIfNotExist(relationSchemaName, schemaName); + + AppendShardIdToName(relationName, shardId); } -/* placeholder for AlterPolicyEventExtendNames */ +/* + * AlterPolicyEventExtendNames extends relation names in the given AlterPolicyStatement + * tree. This function has side effects on the tree as the names are replaced inplace. + */ void AlterPolicyEventExtendNames(AlterPolicyStmt *stmt, const char *schemaName, uint64 shardId) { - /* placeholder for future implementation */ + RangeVar *relation = stmt->table; + char **relationName = &(relation->relname); + char **relationSchemaName = &(relation->schemaname); + + /* prefix with schema name if it is not added already */ + SetSchemaNameIfNotExist(relationSchemaName, schemaName); + + AppendShardIdToName(relationName, shardId); } -/* placeholder for RenamePolicyEventExtendNames */ +/* + * RenamePolicyEventExtendNames extends relation names in the given RenameStmt tree. This + * function has side effects on the tree as the names are replaced inline. + */ void RenamePolicyEventExtendNames(RenameStmt *stmt, const char *schemaName, uint64 shardId) { - /* placeholder for future implementation */ + char **relationName = &(stmt->relation->relname); + char **objectSchemaName = &(stmt->relation->schemaname); + + /* prefix with schema name if it is not added already */ + SetSchemaNameIfNotExist(objectSchemaName, schemaName); + + AppendShardIdToName(relationName, shardId); } -/* placeholder for DropPolicyEventExtendNames */ +/* + * DropPolicyEventExtendNames extends relation names in the given DropStmt tree specific + * to policies. This function has side effects on the tree as the names are replaced + * inplace. 
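+ *
+ * For example (hypothetical names), DROP POLICY p1 ON public.dist_table is
+ * rewritten to DROP POLICY p1 ON public.dist_table_102008 for a shard with
+ * id 102008.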
+ */ void DropPolicyEventExtendNames(DropStmt *dropStmt, const char *schemaName, uint64 shardId) { - /* placeholder for future implementation */ + Value *relationSchemaNameValue = NULL; + Value *relationNameValue = NULL; + + uint32 dropCount = list_length(dropStmt->objects); + if (dropCount > 1) + { + ereport(ERROR, (errmsg("cannot extend name for multiple drop objects"))); + } + + List *relationNameList = (List *) linitial(dropStmt->objects); + int relationNameListLength = list_length(relationNameList); + + switch (relationNameListLength) + { + case 2: + { + relationNameValue = linitial(relationNameList); + break; + } + + case 3: + { + relationSchemaNameValue = linitial(relationNameList); + relationNameValue = lsecond(relationNameList); + break; + } + + default: + { + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("improper policy name: \"%s\"", + NameListToString(relationNameList)))); + break; + } + } + + /* prefix with schema name if it is not added already */ + if (relationSchemaNameValue == NULL) + { + Value *schemaNameValue = makeString(pstrdup(schemaName)); + relationNameList = lcons(schemaNameValue, relationNameList); + } + + char **relationName = &(relationNameValue->val.str); + AppendShardIdToName(relationName, shardId); } diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 9d1c9d1fe..4108d8e4f 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -14,7 +14,9 @@ #include "access/heapam.h" #include "access/htup_details.h" +#include "access/genam.h" #include "access/table.h" +#include "access/xact.h" #include "catalog/catalog.h" #include "catalog/pg_auth_members.h" #include "catalog/pg_authid.h" @@ -31,6 +33,9 @@ #include "distributed/coordinator_protocol.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata/distobject.h" +#include "distributed/multi_executor.h" +#include "distributed/relation_access_tracking.h" #include "distributed/version_compat.h" #include "distributed/worker_transaction.h" #include "miscadmin.h" @@ -40,6 +45,7 @@ #include "parser/scansup.h" #include "utils/acl.h" #include "utils/builtins.h" +#include "utils/fmgroids.h" #include "utils/guc_tables.h" #include "utils/guc.h" #include "utils/rel.h" @@ -54,6 +60,9 @@ static char * CreateCreateOrAlterRoleCommand(const char *roleName, AlterRoleStmt *alterRoleStmt); static DefElem * makeDefElemInt(char *name, int value); static List * GenerateRoleOptionsList(HeapTuple tuple); +static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options); +static List * GenerateGrantRoleStmtsOfRole(Oid roleid); +static void EnsureSequentialModeForRoleDDL(void); static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple, TupleDesc DbRoleSettingDescription); @@ -68,6 +77,7 @@ static int ConfigGenericNameCompare(const void *lhs, const void *rhs); static ObjectAddress RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok); /* controlled via GUC */ +bool EnableCreateRolePropagation = true; bool EnableAlterRolePropagation = true; bool EnableAlterRoleSetPropagation = true; @@ -133,11 +143,13 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString) return NIL; } - if (!EnableAlterRolePropagation || !IsCoordinator()) + if (!EnableAlterRolePropagation) { return NIL; } + EnsureCoordinator(); + AlterRoleStmt *stmt = castNode(AlterRoleStmt, node); DefElem *option = NULL; @@ -161,7 +173,9 @@ PostprocessAlterRoleStmt(Node *node, const char 
*queryString) break; } } - List *commands = list_make1((void *) CreateAlterRoleIfExistsCommand(stmt)); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) CreateAlterRoleIfExistsCommand(stmt), + ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -206,14 +220,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString, return NIL; } - /* - * Since roles need to be handled manually on community, we need to support such queries - * by handling them locally on worker nodes - */ - if (!IsCoordinator()) - { - return NIL; - } + EnsureCoordinator(); QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); @@ -493,6 +500,14 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple)); CreateRoleStmt *createRoleStmt = NULL; + if (EnableCreateRolePropagation) + { + createRoleStmt = makeNode(CreateRoleStmt); + createRoleStmt->stmt_type = ROLESTMT_ROLE; + createRoleStmt->role = pstrdup(NameStr(role->rolname)); + createRoleStmt->options = GenerateRoleOptionsList(roleTuple); + } + AlterRoleStmt *alterRoleStmt = NULL; if (EnableAlterRolePropagation) { @@ -526,6 +541,16 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) completeRoleList = list_concat(completeRoleList, alterRoleSetCommands); } + if (EnableCreateRolePropagation) + { + List *grantRoleStmts = GenerateGrantRoleStmtsOfRole(roleOid); + Node *stmt = NULL; + foreach_ptr(stmt, grantRoleStmts) + { + completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); + } + } + return completeRoleList; } @@ -732,6 +757,157 @@ MakeSetStatementArguments(char *configurationName, char *configurationValue) } +/* + * GenerateGrantRoleStmtsFromOptions gets a RoleSpec of a role that is being + * created and a list of options of CreateRoleStmt to generate GrantRoleStmts + * for the role's memberships. + */ +static List * +GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options) +{ + List *stmts = NIL; + + DefElem *option = NULL; + foreach_ptr(option, options) + { + if (strcmp(option->defname, "adminmembers") != 0 && + strcmp(option->defname, "rolemembers") != 0 && + strcmp(option->defname, "addroleto") != 0) + { + continue; + } + + GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt); + grantRoleStmt->is_grant = true; + + if (strcmp(option->defname, "adminmembers") == 0 || strcmp(option->defname, + "rolemembers") == 0) + { + grantRoleStmt->granted_roles = list_make1(roleSpec); + grantRoleStmt->grantee_roles = (List *) option->arg; + } + else + { + grantRoleStmt->granted_roles = (List *) option->arg; + grantRoleStmt->grantee_roles = list_make1(roleSpec); + } + + if (strcmp(option->defname, "adminmembers") == 0) + { + grantRoleStmt->admin_opt = true; + } + + stmts = lappend(stmts, grantRoleStmt); + } + return stmts; +} + + +/* + * GenerateGrantRoleStmtsOfRole generates the GrantRoleStmts for the memberships + * of the role whose oid is roleid. 
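+ *
+ * As a hypothetical example, if the role is a member of admin_role with the
+ * admin option, the generated statement deparses to
+ *   GRANT admin_role TO the_role WITH ADMIN OPTION;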
+ */ +static List * +GenerateGrantRoleStmtsOfRole(Oid roleid) +{ + Relation pgAuthMembers = table_open(AuthMemRelationId, AccessShareLock); + HeapTuple tuple = NULL; + List *stmts = NIL; + + ScanKeyData skey[1]; + + ScanKeyInit(&skey[0], Anum_pg_auth_members_member, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(roleid)); + SysScanDesc scan = systable_beginscan(pgAuthMembers, AuthMemMemRoleIndexId, true, + NULL, 1, &skey[0]); + + while (HeapTupleIsValid(tuple = systable_getnext(scan))) + { + Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple); + + GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt); + grantRoleStmt->is_grant = true; + + RoleSpec *grantedRole = makeNode(RoleSpec); + grantedRole->roletype = ROLESPEC_CSTRING; + grantedRole->location = -1; + grantedRole->rolename = GetUserNameFromId(membership->roleid, true); + grantRoleStmt->granted_roles = list_make1(grantedRole); + + RoleSpec *granteeRole = makeNode(RoleSpec); + granteeRole->roletype = ROLESPEC_CSTRING; + granteeRole->location = -1; + granteeRole->rolename = GetUserNameFromId(membership->member, true); + grantRoleStmt->grantee_roles = list_make1(granteeRole); + + grantRoleStmt->grantor = NULL; + + grantRoleStmt->admin_opt = membership->admin_option; + + stmts = lappend(stmts, grantRoleStmt); + } + + systable_endscan(scan); + table_close(pgAuthMembers, AccessShareLock); + + return stmts; +} + + +/* + * PreprocessCreateRoleStmt creates a worker_create_or_alter_role query for the + * role that is being created. With that query we can create the role in the + * workers or if they exist we alter them to the way they are being created + * right now. + */ +List * +PreprocessCreateRoleStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + if (!EnableCreateRolePropagation || !ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialModeForRoleDDL(); + + LockRelationOid(DistNodeRelationId(), RowShareLock); + + CreateRoleStmt *createRoleStmt = castNode(CreateRoleStmt, node); + + AlterRoleStmt *alterRoleStmt = makeNode(AlterRoleStmt); + alterRoleStmt->role = makeNode(RoleSpec); + alterRoleStmt->role->roletype = ROLESPEC_CSTRING; + alterRoleStmt->role->location = -1; + alterRoleStmt->role->rolename = pstrdup(createRoleStmt->role); + alterRoleStmt->action = 1; + alterRoleStmt->options = createRoleStmt->options; + + List *grantRoleStmts = GenerateGrantRoleStmtsFromOptions(alterRoleStmt->role, + createRoleStmt->options); + + char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand(createRoleStmt->role, + createRoleStmt, + alterRoleStmt); + + List *commands = NIL; + commands = lappend(commands, DISABLE_DDL_PROPAGATION); + commands = lappend(commands, createOrAlterRoleQuery); + + /* deparse all grant statements and add them to the to commands list */ + Node *stmt = NULL; + foreach_ptr(stmt, grantRoleStmts) + { + commands = lappend(commands, DeparseTreeNode(stmt)); + } + + commands = lappend(commands, ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + /* * makeStringConst creates a Const Node that stores a given string * @@ -786,6 +962,178 @@ makeFloatConst(char *str, int location) } +/* + * PreprocessDropRoleStmt finds the distributed role out of the ones + * being dropped and unmarks them distributed and creates the drop statements + * for the workers. 
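+ *
+ * For example (hypothetical roles), DROP ROLE local_role, dist_role is
+ * propagated to the workers as
+ *   DROP ROLE dist_role;
+ * wrapped in DISABLE_DDL_PROPAGATION / ENABLE_DDL_PROPAGATION, while local_role
+ * is only dropped locally.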
+ */ +List * +PreprocessDropRoleStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + DropRoleStmt *stmt = castNode(DropRoleStmt, node); + List *allDropRoles = stmt->roles; + + List *distributedDropRoles = FilterDistributedRoles(allDropRoles); + if (list_length(distributedDropRoles) <= 0) + { + return NIL; + } + + if (!EnableCreateRolePropagation || !ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialModeForRoleDDL(); + + + stmt->roles = distributedDropRoles; + char *sql = DeparseTreeNode((Node *) stmt); + stmt->roles = allDropRoles; + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * UnmarkRolesDistributed unmarks the roles in the RoleSpec list distributed. + */ +void +UnmarkRolesDistributed(List *roles) +{ + Node *roleNode = NULL; + foreach_ptr(roleNode, roles) + { + RoleSpec *role = castNode(RoleSpec, roleNode); + ObjectAddress roleAddress = { 0 }; + Oid roleOid = get_rolespec_oid(role, true); + + if (roleOid == InvalidOid) + { + /* + * If the role is dropped (concurrently), we might get an inactive oid for the + * role. If it is invalid oid, skip. + */ + continue; + } + + ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid); + UnmarkObjectDistributed(&roleAddress); + } +} + + +/* + * FilterDistributedRoles filters the list of RoleSpecs and returns the ones + * that are distributed. + */ +List * +FilterDistributedRoles(List *roles) +{ + List *distributedRoles = NIL; + Node *roleNode = NULL; + foreach_ptr(roleNode, roles) + { + RoleSpec *role = castNode(RoleSpec, roleNode); + ObjectAddress roleAddress = { 0 }; + Oid roleOid = get_rolespec_oid(role, true); + if (roleOid == InvalidOid) + { + /* + * Non-existing roles are ignored silently here. Postgres will + * handle to give an error or not for these roles. + */ + continue; + } + ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid); + if (IsObjectDistributed(&roleAddress)) + { + distributedRoles = lappend(distributedRoles, role); + } + } + return distributedRoles; +} + + +/* + * PreprocessGrantRoleStmt finds the distributed grantee roles and creates the + * query to run on the workers. + */ +List * +PreprocessGrantRoleStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + if (!EnableCreateRolePropagation || !ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + + GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); + List *allGranteeRoles = stmt->grantee_roles; + RoleSpec *grantor = stmt->grantor; + + List *distributedGranteeRoles = FilterDistributedRoles(allGranteeRoles); + if (list_length(distributedGranteeRoles) <= 0) + { + return NIL; + } + + /* + * Postgres don't seem to use the grantor. Even dropping the grantor doesn't + * seem to affect the membership. If this changes, we might need to add grantors + * to the dependency resolution too. For now we just don't propagate it. + */ + stmt->grantor = NULL; + stmt->grantee_roles = distributedGranteeRoles; + char *sql = DeparseTreeNode((Node *) stmt); + stmt->grantee_roles = allGranteeRoles; + stmt->grantor = grantor; + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PostprocessGrantRoleStmt actually creates the plan we need to execute for grant + * role statement. 
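+ *
+ * Note that the GRANT itself is propagated by PreprocessGrantRoleStmt; this
+ * postprocess step only makes sure that dependencies of the distributed
+ * grantee roles exist on all nodes.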
+ */ +List * +PostprocessGrantRoleStmt(Node *node, const char *queryString) +{ + if (!EnableCreateRolePropagation || !IsCoordinator() || !ShouldPropagate()) + { + return NIL; + } + + GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); + + RoleSpec *role = NULL; + foreach_ptr(role, stmt->grantee_roles) + { + ObjectAddress roleAddress = { 0 }; + Oid roleOid = get_rolespec_oid(role, false); + ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid); + if (IsObjectDistributed(&roleAddress)) + { + EnsureDependenciesExistOnAllNodes(&roleAddress); + } + } + return NIL; +} + + /* * ConfigGenericNameCompare compares two config_generic structs based on their * name fields. If the name fields contain the same strings two structs are @@ -806,3 +1154,64 @@ ConfigGenericNameCompare(const void *a, const void *b) */ return pg_strcasecmp(confa->name, confb->name); } + + +/* + * CreateRoleStmtObjectAddress finds the ObjectAddress for the role described + * by the CreateRoleStmt. If missing_ok is false this function throws an error if the + * role does not exist. + * + * Never returns NULL, but the objid in the address could be invalid if missing_ok was set + * to true. + */ +ObjectAddress +CreateRoleStmtObjectAddress(Node *node, bool missing_ok) +{ + CreateRoleStmt *stmt = castNode(CreateRoleStmt, node); + Oid roleOid = get_role_oid(stmt->role, missing_ok); + ObjectAddress roleAddress = { 0 }; + ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid); + + return roleAddress; +} + + +/* + * EnsureSequentialModeForRoleDDL makes sure that the current transaction is already in + * sequential mode, or can still safely be put in sequential mode, it errors if that is + * not possible. The error contains information for the user to retry the transaction with + * sequential mode set from the begining. + * + * As roles are node scoped objects there exists only 1 instance of the role used by + * potentially multiple shards. To make sure all shards in the transaction can interact + * with the role the role needs to be visible on all connections used by the transaction, + * meaning we can only use 1 connection per node. + */ +static void +EnsureSequentialModeForRoleDDL(void) +{ + if (!IsTransactionBlock()) + { + /* we do not need to switch to sequential mode if we are not in a transaction */ + return; + } + + if (ParallelQueryExecutedInTransaction()) + { + ereport(ERROR, (errmsg("cannot create or modify role because there was a " + "parallel operation on a distributed table in the " + "transaction"), + errdetail("When creating or altering a role, Citus needs to " + "perform all operations over a single connection per " + "node to ensure consistency."), + errhint("Try re-running the transaction with " + "\"SET LOCAL citus.multi_shard_modify_mode TO " + "\'sequential\';\""))); + } + + ereport(DEBUG1, (errmsg("switching to sequential query execution mode"), + errdetail("Role is created or altered. 
To make sure subsequent " + "commands see the role correctly we need to make sure to " + "use only one connection for all future commands"))); + SetLocalMultiShardModifyModeToSequential(); +} diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index e75f4527d..76daafce3 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -161,14 +161,7 @@ PreprocessGrantOnSchemaStmt(Node *node, const char *queryString, return NIL; } - /* - * Since access control needs to be handled manually on community, we need to support - * such queries by handling them locally on worker nodes. - */ - if (!IsCoordinator()) - { - return NIL; - } + EnsureCoordinator(); List *originalObjects = stmt->objects; @@ -178,7 +171,11 @@ PreprocessGrantOnSchemaStmt(Node *node, const char *queryString, stmt->objects = originalObjects; - return NodeDDLTaskList(NON_COORDINATOR_NODES, list_make1(sql)); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index b97ca4215..fcf47deac 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -24,6 +24,7 @@ #include "distributed/metadata/distobject.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" +#include "nodes/makefuncs.h" #include "distributed/worker_create_or_replace.h" #include "nodes/parsenodes.h" #include "utils/builtins.h" @@ -32,6 +33,7 @@ /* Local functions forward declarations for helper functions */ static bool OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId); static Oid SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress); +static List * FilterDistributedSequences(GrantStmt *stmt); /* @@ -660,6 +662,97 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString) } +/* + * PreprocessGrantOnSequenceStmt is executed before the statement is applied to the local + * postgres instance. + * + * In this stage we can prepare the commands that need to be run on all workers to grant + * on distributed sequences. + */ +List * +PreprocessGrantOnSequenceStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(stmt->objtype == OBJECT_SEQUENCE); + + if (creating_extension) + { + /* + * extensions should be created separately on the workers, sequences cascading + * from an extension should therefore not be propagated here. + */ + return NIL; + } + + if (!EnableMetadataSync) + { + /* + * we are configured to disable object propagation, should not propagate anything + */ + return NIL; + } + + List *distributedSequences = FilterDistributedSequences(stmt); + + if (list_length(distributedSequences) == 0) + { + return NIL; + } + + EnsureCoordinator(); + + GrantStmt *stmtCopy = copyObject(stmt); + stmtCopy->objects = distributedSequences; + + /* + * if the original command was targeting schemas, we have expanded to the distributed + * sequences in these schemas through FilterDistributedSequences. 
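+ * Setting targtype to ACL_TARGET_OBJECT therefore makes the deparsed command
+ * list those sequences explicitly, e.g. (hypothetical objects)
+ *   GRANT USAGE ON SEQUENCE app.seq_a, app.seq_b TO reporting;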
+ */ + stmtCopy->targtype = ACL_TARGET_OBJECT; + + QualifyTreeNode((Node *) stmtCopy); + + char *sql = DeparseTreeNode((Node *) stmtCopy); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * PostprocessGrantOnSequenceStmt makes sure dependencies of each + * distributed sequence in the statement exist on all nodes + */ +List * +PostprocessGrantOnSequenceStmt(Node *node, const char *queryString) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(stmt->objtype == OBJECT_SEQUENCE); + + List *distributedSequences = FilterDistributedSequences(stmt); + + if (list_length(distributedSequences) == 0) + { + return NIL; + } + + EnsureCoordinator(); + + RangeVar *sequence = NULL; + foreach_ptr(sequence, distributedSequences) + { + ObjectAddress sequenceAddress = { 0 }; + Oid sequenceOid = RangeVarGetRelid(sequence, NoLock, false); + ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid); + EnsureDependenciesExistOnAllNodes(&sequenceAddress); + } + return NIL; +} + + /* * GenerateBackupNameForSequenceCollision generates a new sequence name for an existing * sequence. The name is generated in such a way that the new name doesn't overlap with @@ -702,6 +795,96 @@ GenerateBackupNameForSequenceCollision(const ObjectAddress *address) } +/* + * FilterDistributedSequences determines and returns a list of distributed sequences + * RangeVar-s from given grant statement. + * - If the stmt's targtype is ACL_TARGET_OBJECT, i.e. of the form GRANT ON SEQUENCE ... + * it returns the distributed sequences in the list of sequences in the statement + * - If targtype is ACL_TARGET_ALL_IN_SCHEMA, i.e. GRANT ON ALL SEQUENCES IN SCHEMA ... + * it expands the ALL IN SCHEMA to the actual sequences, and returns the distributed + * sequences from those. + */ +static List * +FilterDistributedSequences(GrantStmt *stmt) +{ + bool grantOnSequenceCommand = (stmt->targtype == ACL_TARGET_OBJECT && + stmt->objtype == OBJECT_SEQUENCE); + bool grantOnAllSequencesInSchemaCommand = (stmt->targtype == + ACL_TARGET_ALL_IN_SCHEMA && + stmt->objtype == OBJECT_SEQUENCE); + + /* we are only interested in sequence level grants */ + if (!grantOnSequenceCommand && !grantOnAllSequencesInSchemaCommand) + { + return NIL; + } + + List *grantSequenceList = NIL; + + if (grantOnAllSequencesInSchemaCommand) + { + /* iterate over all namespace names provided to get their oid's */ + List *namespaceOidList = NIL; + Value *namespaceValue = NULL; + foreach_ptr(namespaceValue, stmt->objects) + { + char *nspname = strVal(namespaceValue); + bool missing_ok = false; + Oid namespaceOid = get_namespace_oid(nspname, missing_ok); + namespaceOidList = list_append_unique_oid(namespaceOidList, namespaceOid); + } + + /* + * iterate over all distributed sequences to filter the ones + * that belong to one of the namespaces from above + */ + List *distributedSequenceList = DistributedSequenceList(); + ObjectAddress *sequenceAddress = NULL; + foreach_ptr(sequenceAddress, distributedSequenceList) + { + Oid namespaceOid = get_rel_namespace(sequenceAddress->objectId); + + /* + * if this distributed sequence's schema is one of the schemas + * specified in the GRANT .. ALL SEQUENCES IN SCHEMA .. 
+ * add it to the list + */ + if (list_member_oid(namespaceOidList, namespaceOid)) + { + RangeVar *distributedSequence = makeRangeVar(get_namespace_name( + namespaceOid), + get_rel_name( + sequenceAddress->objectId), + -1); + grantSequenceList = lappend(grantSequenceList, distributedSequence); + } + } + } + else + { + bool missing_ok = false; + RangeVar *sequenceRangeVar = NULL; + foreach_ptr(sequenceRangeVar, stmt->objects) + { + ObjectAddress sequenceAddress = { 0 }; + Oid sequenceOid = RangeVarGetRelid(sequenceRangeVar, NoLock, missing_ok); + ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid); + + /* + * if this sequence from GRANT .. ON SEQUENCE .. is a distributed + * sequence, add it to the list + */ + if (IsObjectDistributed(&sequenceAddress)) + { + grantSequenceList = lappend(grantSequenceList, sequenceRangeVar); + } + } + } + + return grantSequenceList; +} + + /* * RenameExistingSequenceWithDifferentTypeIfExists renames the sequence's type if * that sequence exists and the desired sequence type is different than it's type. diff --git a/src/backend/distributed/commands/subscription.c b/src/backend/distributed/commands/subscription.c index de6a4f676..3d32c78af 100644 --- a/src/backend/distributed/commands/subscription.c +++ b/src/backend/distributed/commands/subscription.c @@ -10,13 +10,129 @@ #include "postgres.h" +#include "safe_lib.h" + +#include + +#include "commands/defrem.h" #include "distributed/commands.h" +#include "distributed/connection_management.h" +#include "distributed/pg_version_constants.h" +#include "distributed/version_compat.h" +#include "libpq-fe.h" #include "nodes/parsenodes.h" +#include "utils/builtins.h" -/* placeholder for ProcessCreateSubscriptionStmt */ +static char * GenerateConninfoWithAuth(char *conninfo); + +/* + * ProcessCreateSubscriptionStmt looks for a special citus_use_authinfo option. + * If it is set to true, then we'll expand the node's authinfo into the create + * statement (see GenerateConninfoWithAuth). + */ Node * ProcessCreateSubscriptionStmt(CreateSubscriptionStmt *createSubStmt) { + ListCell *currCell = NULL; + #if PG_VERSION_NUM < PG_VERSION_13 + ListCell *prevCell = NULL; + #endif + bool useAuthinfo = false; + + foreach(currCell, createSubStmt->options) + { + DefElem *defElem = (DefElem *) lfirst(currCell); + + if (strcmp(defElem->defname, "citus_use_authinfo") == 0) + { + useAuthinfo = defGetBoolean(defElem); + + createSubStmt->options = list_delete_cell_compat(createSubStmt->options, + currCell, + prevCell); + + break; + } + #if PG_VERSION_NUM < PG_VERSION_13 + prevCell = currCell; + #endif + } + + if (useAuthinfo) + { + createSubStmt->conninfo = GenerateConninfoWithAuth(createSubStmt->conninfo); + } + return (Node *) createSubStmt; } + + +/* + * GenerateConninfoWithAuth extracts the host and port from the provided libpq + * conninfo string, using them to find an appropriate authinfo for the target + * host. If such an authinfo is found, it is added to the (repalloc'd) string, + * which is then returned. 
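+ *
+ * As a rough, hypothetical example, a conninfo of "host=10.0.0.5 port=5432"
+ * comes back as "host=10.0.0.5 port=5432 <citus.node_conninfo> <authinfo>",
+ * where the authinfo looked up from pg_dist_authinfo typically supplies a
+ * password.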
+ */ +static char * +GenerateConninfoWithAuth(char *conninfo) +{ + StringInfo connInfoWithAuth = makeStringInfo(); + char *host = NULL, *user = NULL; + int32 port = -1; + PQconninfoOption *option = NULL, *optionArray = NULL; + + optionArray = PQconninfoParse(conninfo, NULL); + if (optionArray == NULL) + { + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("not a valid libpq connection info string: %s", + conninfo))); + } + + for (option = optionArray; option->keyword != NULL; option++) + { + if (option->val == NULL || option->val[0] == '\0') + { + continue; + } + + if (strcmp(option->keyword, "host") == 0) + { + host = option->val; + } + else if (strcmp(option->keyword, "port") == 0) + { + port = pg_atoi(option->val, 4, 0); + } + else if (strcmp(option->keyword, "user") == 0) + { + user = option->val; + } + } + + /* + * In case of repetition of parameters in connection strings, last value + * wins. So first add the provided connection string, then global + * connection parameters, then node specific ones. + * + * Note that currently lists of parameters in pg_dist_authnode and + * citus.node_conninfo do not overlap. + * + * The only overlapping parameter between these three lists is + * connect_timeout, which is assigned in conninfo (generated + * by CreateShardMoveSubscription) and is also allowed in + * citus.node_conninfo. Prioritizing the value in citus.node_conninfo + * over conninfo gives user the power to control this value. + */ + appendStringInfo(connInfoWithAuth, "%s %s", conninfo, NodeConninfo); + if (host != NULL && port > 0 && user != NULL) + { + char *nodeAuthInfo = GetAuthinfo(host, port, user); + appendStringInfo(connInfoWithAuth, " %s", nodeAuthInfo); + } + + PQconninfoFree(optionArray); + + return connInfoWithAuth->data; +} diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 72d761433..8a73f1471 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -2600,6 +2600,7 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod, * ALTER TABLE ADD|DROP CONSTRAINT * ALTER TABLE REPLICA IDENTITY * ALTER TABLE SET () + * ALTER TABLE ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY * ALTER TABLE RESET () * ALTER TABLE ENABLE/DISABLE TRIGGER (if enable_unsafe_triggers is not set, we only support triggers for citus local tables) */ @@ -2943,6 +2944,10 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) case AT_SetNotNull: case AT_ReplicaIdentity: case AT_ChangeOwner: + case AT_EnableRowSecurity: + case AT_DisableRowSecurity: + case AT_ForceRowSecurity: + case AT_NoForceRowSecurity: case AT_ValidateConstraint: case AT_DropConstraint: /* we do the check for invalidation in AlterTableDropsForeignKey */ #if PG_VERSION_NUM >= PG_VERSION_14 @@ -2987,6 +2992,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) errdetail("Only ADD|DROP COLUMN, SET|DROP NOT NULL, " "SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, " "SET (), RESET (), " + "ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, " "ATTACH|DETACH PARTITION and TYPE subcommands " "are supported."))); } diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index be08337d9..5b72770e6 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -54,6 +54,7 @@ #include "distributed/listutils.h" #include "distributed/local_executor.h" #include 
"distributed/maintenanced.h" +#include "distributed/multi_logical_replication.h" #include "distributed/multi_partitioning_utils.h" #if PG_VERSION_NUM < 140000 #include "distributed/metadata_cache.h" @@ -65,6 +66,7 @@ #include "distributed/multi_physical_planner.h" #include "distributed/reference_table_utils.h" #include "distributed/resource_lock.h" +#include "distributed/string_utils.h" #include "distributed/transmit.h" #include "distributed/version_compat.h" #include "distributed/worker_shard_visibility.h" @@ -78,6 +80,7 @@ #include "utils/lsyscache.h" #include "utils/syscache.h" + bool EnableDDLPropagation = true; /* ddl propagation is enabled */ int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE; PropSetCmdBehavior PropagateSetCommands = PROPSETCMD_NONE; /* SET prop off */ @@ -409,6 +412,31 @@ ProcessUtilityInternal(PlannedStmt *pstmt, parsetree = ProcessCreateSubscriptionStmt(createSubStmt); } + if (IsA(parsetree, AlterSubscriptionStmt)) + { + AlterSubscriptionStmt *alterSubStmt = (AlterSubscriptionStmt *) parsetree; + if (!superuser() && + StringStartsWith(alterSubStmt->subname, + SHARD_MOVE_SUBSCRIPTION_PREFIX)) + { + ereport(ERROR, ( + errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("Only superusers can alter shard move subscriptions"))); + } + } + + if (IsA(parsetree, DropSubscriptionStmt)) + { + DropSubscriptionStmt *dropSubStmt = (DropSubscriptionStmt *) parsetree; + if (!superuser() && + StringStartsWith(dropSubStmt->subname, SHARD_MOVE_SUBSCRIPTION_PREFIX)) + { + ereport(ERROR, ( + errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("Only superusers can drop shard move subscriptions"))); + } + } + /* * Process SET LOCAL and SET TRANSACTION statements in multi-statement * transactions. @@ -601,7 +629,7 @@ ProcessUtilityInternal(PlannedStmt *pstmt, errhint("You can manually create a database and its " "extensions on workers."))); } - else if (IsA(parsetree, CreateRoleStmt)) + else if (IsA(parsetree, CreateRoleStmt) && !EnableCreateRolePropagation) { ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to worker" " nodes"), @@ -631,6 +659,24 @@ ProcessUtilityInternal(PlannedStmt *pstmt, StopMaintenanceDaemon(MyDatabaseId); } + /* + * Make sure that dropping the role deletes the pg_dist_object entries. There is a + * separate logic for roles, since roles are not included as dropped objects in the + * drop event trigger. To handle it both on worker and coordinator nodes, it is not + * implemented as a part of process functions but here. + */ + if (IsA(parsetree, DropRoleStmt)) + { + DropRoleStmt *stmt = castNode(DropRoleStmt, parsetree); + List *allDropRoles = stmt->roles; + + List *distributedDropRoles = FilterDistributedRoles(allDropRoles); + if (list_length(distributedDropRoles) > 0) + { + UnmarkRolesDistributed(distributedDropRoles); + } + } + pstmt->utilityStmt = parsetree; PG_TRY(); @@ -740,6 +786,21 @@ ProcessUtilityInternal(PlannedStmt *pstmt, { PostprocessAlterTableStmt(castNode(AlterTableStmt, parsetree)); } + if (IsA(parsetree, GrantStmt)) + { + GrantStmt *grantStmt = (GrantStmt *) parsetree; + if (grantStmt->targtype == ACL_TARGET_ALL_IN_SCHEMA) + { + /* + * Grant .. IN SCHEMA causes a deadlock if we don't use local execution + * because standard process utility processes the shard placements as well + * and the row-level locks in pg_class will not be released until the current + * transaction commits. We could skip the local shard placements after standard + * process utility, but for simplicity we just prefer using local execution. 
+ */ + SetLocalExecutionStatus(LOCAL_EXECUTION_REQUIRED); + } + } DDLJob *ddlJob = NULL; foreach_ptr(ddlJob, ddlJobs) @@ -1625,7 +1686,6 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands) ddlJob->targetObjectAddress = InvalidObjectAddress; ddlJob->metadataSyncCommand = NULL; ddlJob->taskList = list_make1(task); - return list_make1(ddlJob); } diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c index 41017fab9..88828d4cb 100644 --- a/src/backend/distributed/connection/connection_configuration.c +++ b/src/backend/distributed/connection/connection_configuration.c @@ -10,9 +10,12 @@ #include "postgres.h" +#include "access/transam.h" +#include "access/xact.h" #include "distributed/backend_data.h" #include "distributed/citus_safe_lib.h" #include "distributed/connection_management.h" +#include "distributed/intermediate_result_pruning.h" #include "distributed/metadata_cache.h" #include "distributed/worker_manager.h" @@ -40,6 +43,7 @@ typedef struct ConnParamsInfo static ConnParamsInfo ConnParams; /* helper functions for processing connection info */ +static ConnectionHashKey * GetEffectiveConnKey(ConnectionHashKey *key); static Size CalculateMaxSize(void); static int uri_prefix_length(const char *connstr); @@ -232,6 +236,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, * already we can add a pointer to the runtimeValues. */ char nodePortString[12] = ""; + ConnectionHashKey *effectiveKey = GetEffectiveConnKey(key); StringInfo applicationName = makeStringInfo(); appendStringInfo(applicationName, "%s%ld", CITUS_APPLICATION_NAME_PREFIX, @@ -260,10 +265,10 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, "application_name" }; const char *runtimeValues[] = { - key->hostname, + effectiveKey->hostname, nodePortString, - key->database, - key->user, + effectiveKey->database, + effectiveKey->user, GetDatabaseEncodingName(), applicationName->data }; @@ -300,7 +305,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, errmsg("too many connParams entries"))); } - pg_ltoa(key->port, nodePortString); /* populate node port string with port */ + pg_ltoa(effectiveKey->port, nodePortString); /* populate node port string with port */ /* first step: copy global parameters to beginning of array */ for (Size paramIndex = 0; paramIndex < ConnParams.size; paramIndex++) @@ -322,6 +327,58 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, MemoryContextStrdup(context, runtimeValues[runtimeParamIndex]); } + /* we look up authinfo by original key, not effective one */ + char *authinfo = GetAuthinfo(key->hostname, key->port, key->user); + char *pqerr = NULL; + PQconninfoOption *optionArray = PQconninfoParse(authinfo, &pqerr); + if (optionArray == NULL) + { + /* PQconninfoParse failed, it's unsafe to continue as this has caused segfaults in production */ + if (pqerr == NULL) + { + /* parse failed without an error message, treat as OOM error */ + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"), + errdetail("Failed to parse authentication information via libpq"))); + } + else + { + /* + * Parse error, should not be possible as the validity is checked upon insert into pg_dist_authinfo, + * however, better safe than sorry + */ + + /* + * errmsg is populated by PQconninfoParse which requires us to free the message. 
Since we want to + * incorporate the parse error into the detail of our message we need to copy the error message before + * freeing it. Not freeing the message will leak memory. + */ + char *pqerrcopy = pstrdup(pqerr); + PQfreemem(pqerr); + + ereport(ERROR, (errmsg( + "failed to parse node authentication information for %s@%s:%d", + key->user, key->hostname, key->port), + errdetail("%s", pqerrcopy))); + } + } + + for (PQconninfoOption *option = optionArray; option->keyword != NULL; option++) + { + if (option->val == NULL || option->val[0] == '\0') + { + continue; + } + + connKeywords[authParamsIdx] = MemoryContextStrdup(context, option->keyword); + connValues[authParamsIdx] = MemoryContextStrdup(context, option->val); + + authParamsIdx++; + } + + PQconninfoFree(optionArray); + /* final step: add terminal NULL, required by libpq */ connKeywords[authParamsIdx] = connValues[authParamsIdx] = NULL; } @@ -346,6 +403,116 @@ GetConnParam(const char *keyword) } +/* + * GetEffectiveConnKey checks whether there is any pooler configuration for the + * provided key (host/port combination). The one case where this logic is not + * applied is for loopback connections originating within the task tracker. If + * a corresponding row is found in the poolinfo table, a modified (effective) + * key is returned with the node, port, and dbname overridden, as applicable, + * otherwise, the original key is returned unmodified. + */ +ConnectionHashKey * +GetEffectiveConnKey(ConnectionHashKey *key) +{ + PQconninfoOption *option = NULL, *optionArray = NULL; + + if (!IsTransactionState()) + { + /* we're in the task tracker, so should only see loopback */ + Assert(strncmp(LOCAL_HOST_NAME, key->hostname, MAX_NODE_LENGTH) == 0 && + PostPortNumber == key->port); + return key; + } + + WorkerNode *worker = FindWorkerNode(key->hostname, key->port); + if (worker == NULL) + { + /* this can be hit when the key references an unknown node */ + return key; + } + + char *poolinfo = GetPoolinfoViaCatalog(worker->nodeId); + if (poolinfo == NULL) + { + return key; + } + + /* copy the key to provide defaults for all fields */ + ConnectionHashKey *effectiveKey = palloc(sizeof(ConnectionHashKey)); + *effectiveKey = *key; + + optionArray = PQconninfoParse(poolinfo, NULL); + for (option = optionArray; option->keyword != NULL; option++) + { + if (option->val == NULL || option->val[0] == '\0') + { + continue; + } + + if (strcmp(option->keyword, "host") == 0) + { + strlcpy(effectiveKey->hostname, option->val, MAX_NODE_LENGTH); + } + else if (strcmp(option->keyword, "port") == 0) + { + effectiveKey->port = pg_atoi(option->val, 4, 0); + } + else if (strcmp(option->keyword, "dbname") == 0) + { + /* permit dbname for poolers which can key pools based on dbname */ + strlcpy(effectiveKey->database, option->val, NAMEDATALEN); + } + else + { + ereport(FATAL, (errmsg("unrecognized poolinfo keyword"))); + } + } + + PQconninfoFree(optionArray); + + return effectiveKey; +} + + +/* + * GetAuthinfo simply returns the string representation of authentication info + * for a specified hostname/port/user combination. If the current transaction + * is valid, then we use the catalog, otherwise a shared memory hash is used, + * a mode that is currently only useful for getting authentication information + * to the Task Tracker, which lacks a database connection and transaction. 
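+ *
+ * The returned value is a libpq keyword/value fragment (commonly just a
+ * password=... setting) or the empty string when no authinfo applies.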
+ */ +char * +GetAuthinfo(char *hostname, int32 port, char *user) +{ + char *authinfo = NULL; + bool isLoopback = (strncmp(LOCAL_HOST_NAME, hostname, MAX_NODE_LENGTH) == 0 && + PostPortNumber == port); + + if (IsTransactionState()) + { + int64 nodeId = WILDCARD_NODE_ID; + + /* -1 is a special value for loopback connections (task tracker) */ + if (isLoopback) + { + nodeId = LOCALHOST_NODE_ID; + } + else + { + WorkerNode *worker = FindWorkerNode(hostname, port); + if (worker != NULL) + { + nodeId = worker->nodeId; + } + } + + authinfo = GetAuthinfoViaCatalog(user, nodeId); + } + + return (authinfo != NULL) ? authinfo : ""; +} + + /* * CalculateMaxSize simply counts the number of elements returned by * PQconnDefaults, including the final NULL. This helps us know how space would diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index 2445a69f3..a7e96efed 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -18,6 +18,7 @@ #include "distributed/listutils.h" #include "distributed/log_utils.h" #include "distributed/remote_commands.h" +#include "distributed/errormessage.h" #include "distributed/cancel_utils.h" #include "lib/stringinfo.h" #include "miscadmin.h" diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index ff8da5991..d3434d775 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -79,6 +79,7 @@ static void deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparseContext); static void AppendStorageParametersToString(StringInfo stringBuffer, List *optionList); +static const char * convert_aclright_to_string(int aclright); static void simple_quote_literal(StringInfo buf, const char *val); static void AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer); @@ -1062,6 +1063,138 @@ pg_get_indexclusterdef_string(Oid indexRelationId) } +/* + * pg_get_table_grants returns a list of sql statements which recreate the + * permissions for a specific table. + * + * This function is modeled after aclexplode(), don't change too heavily. + */ +List * +pg_get_table_grants(Oid relationId) +{ + /* *INDENT-OFF* */ + StringInfoData buffer; + List *defs = NIL; + bool isNull = false; + + Relation relation = relation_open(relationId, AccessShareLock); + char *relationName = generate_relation_name(relationId, NIL); + + initStringInfo(&buffer); + + /* lookup all table level grants */ + HeapTuple classTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); + if (!HeapTupleIsValid(classTuple)) + { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("relation with OID %u does not exist", + relationId))); + } + + Datum aclDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_relacl, + &isNull); + + ReleaseSysCache(classTuple); + + if (!isNull) + { + + /* + * First revoke all default permissions, so we can start adding the + * exact permissions from the master. Note that we only do so if there + * are any actual grants; an empty grant set signals default + * permissions. + * + * Note: This doesn't work correctly if default permissions have been + * changed with ALTER DEFAULT PRIVILEGES - but that's hard to fix + * properly currently. 
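+	 *
+	 * The emitted list therefore starts with a blanket
+	 *   REVOKE ALL ON <relation> FROM PUBLIC
+	 * followed by one GRANT per (grantee, privilege) pair found in the ACL.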
+ */ + appendStringInfo(&buffer, "REVOKE ALL ON %s FROM PUBLIC", + relationName); + defs = lappend(defs, pstrdup(buffer.data)); + resetStringInfo(&buffer); + + /* iterate through the acl datastructure, emit GRANTs */ + + Acl *acl = DatumGetAclP(aclDatum); + AclItem *aidat = ACL_DAT(acl); + + int offtype = -1; + int i = 0; + while (i < ACL_NUM(acl)) + { + AclItem *aidata = NULL; + AclMode priv_bit = 0; + + offtype++; + + if (offtype == N_ACL_RIGHTS) + { + offtype = 0; + i++; + if (i >= ACL_NUM(acl)) /* done */ + { + break; + } + } + + aidata = &aidat[i]; + priv_bit = 1 << offtype; + + if (ACLITEM_GET_PRIVS(*aidata) & priv_bit) + { + const char *roleName = NULL; + const char *withGrant = ""; + + if (aidata->ai_grantee != 0) + { + + HeapTuple htup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(aidata->ai_grantee)); + if (HeapTupleIsValid(htup)) + { + Form_pg_authid authForm = ((Form_pg_authid) GETSTRUCT(htup)); + + roleName = quote_identifier(NameStr(authForm->rolname)); + + ReleaseSysCache(htup); + } + else + { + elog(ERROR, "cache lookup failed for role %u", aidata->ai_grantee); + } + } + else + { + roleName = "PUBLIC"; + } + + if ((ACLITEM_GET_GOPTIONS(*aidata) & priv_bit) != 0) + { + withGrant = " WITH GRANT OPTION"; + } + + appendStringInfo(&buffer, "GRANT %s ON %s TO %s%s", + convert_aclright_to_string(priv_bit), + relationName, + roleName, + withGrant); + + defs = lappend(defs, pstrdup(buffer.data)); + + resetStringInfo(&buffer); + } + } + } + + resetStringInfo(&buffer); + + relation_close(relation, NoLock); + return defs; + /* *INDENT-ON* */ +} + + /* * generate_qualified_relation_name computes the schema-qualified name to display for a * relation specified by OID. @@ -1156,6 +1289,45 @@ AppendStorageParametersToString(StringInfo stringBuffer, List *optionList) } +/* copy of postgresql's function, which is static as well */ +static const char * +convert_aclright_to_string(int aclright) +{ + /* *INDENT-OFF* */ + switch (aclright) + { + case ACL_INSERT: + return "INSERT"; + case ACL_SELECT: + return "SELECT"; + case ACL_UPDATE: + return "UPDATE"; + case ACL_DELETE: + return "DELETE"; + case ACL_TRUNCATE: + return "TRUNCATE"; + case ACL_REFERENCES: + return "REFERENCES"; + case ACL_TRIGGER: + return "TRIGGER"; + case ACL_EXECUTE: + return "EXECUTE"; + case ACL_USAGE: + return "USAGE"; + case ACL_CREATE: + return "CREATE"; + case ACL_CREATE_TEMP: + return "TEMPORARY"; + case ACL_CONNECT: + return "CONNECT"; + default: + elog(ERROR, "unrecognized aclright: %d", aclright); + return NULL; + } + /* *INDENT-ON* */ +} + + /* * contain_nextval_expression_walker walks over expression tree and returns * true if it contains call to 'nextval' function. @@ -1224,6 +1396,46 @@ pg_get_replica_identity_command(Oid tableRelationId) } +/* + * pg_get_row_level_security_commands function returns the required ALTER .. TABLE + * commands to define the row level security settings for a relation. 
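+ *
+ * For a hypothetical relation with both flags set, the returned list is
+ *   ALTER TABLE public.dist_table ENABLE ROW LEVEL SECURITY
+ *   ALTER TABLE public.dist_table FORCE ROW LEVEL SECURITY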
+ */ +List * +pg_get_row_level_security_commands(Oid relationId) +{ + StringInfoData buffer; + List *commands = NIL; + + initStringInfo(&buffer); + + Relation relation = table_open(relationId, AccessShareLock); + + if (relation->rd_rel->relrowsecurity) + { + char *relationName = generate_qualified_relation_name(relationId); + + appendStringInfo(&buffer, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY", + relationName); + commands = lappend(commands, pstrdup(buffer.data)); + resetStringInfo(&buffer); + } + + if (relation->rd_rel->relforcerowsecurity) + { + char *relationName = generate_qualified_relation_name(relationId); + + appendStringInfo(&buffer, "ALTER TABLE %s FORCE ROW LEVEL SECURITY", + relationName); + commands = lappend(commands, pstrdup(buffer.data)); + resetStringInfo(&buffer); + } + + table_close(relation, AccessShareLock); + + return commands; +} + + /* * Generate a C string representing a relation's reloptions, or NULL if none. * diff --git a/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c b/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c new file mode 100644 index 000000000..b8f2574a4 --- /dev/null +++ b/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c @@ -0,0 +1,93 @@ +/*------------------------------------------------------------------------- + * + * deparse_foreign_data_wrapper_stmts.c + * All routines to deparse foreign data wrapper statements. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "commands/defrem.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/relay_utility.h" +#include "lib/stringinfo.h" +#include "nodes/nodes.h" +#include "utils/builtins.h" + +static void AppendGrantOnFDWStmt(StringInfo buf, GrantStmt *stmt); +static void AppendGrantOnFDWNames(StringInfo buf, GrantStmt *stmt); + + +char * +DeparseGrantOnFDWStmt(Node *node) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(stmt->objtype == OBJECT_FDW); + + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendGrantOnFDWStmt(&str, stmt); + + return str.data; +} + + +static void +AppendGrantOnFDWStmt(StringInfo buf, GrantStmt *stmt) +{ + Assert(stmt->objtype == OBJECT_FDW); + + appendStringInfo(buf, "%s ", stmt->is_grant ? 
"GRANT" : "REVOKE"); + + if (!stmt->is_grant && stmt->grant_option) + { + appendStringInfo(buf, "GRANT OPTION FOR "); + } + + AppendGrantPrivileges(buf, stmt); + + AppendGrantOnFDWNames(buf, stmt); + + AppendGrantGrantees(buf, stmt); + + if (stmt->is_grant && stmt->grant_option) + { + appendStringInfo(buf, " WITH GRANT OPTION"); + } + if (!stmt->is_grant) + { + if (stmt->behavior == DROP_RESTRICT) + { + appendStringInfo(buf, " RESTRICT"); + } + else if (stmt->behavior == DROP_CASCADE) + { + appendStringInfo(buf, " CASCADE"); + } + } + appendStringInfo(buf, ";"); +} + + +static void +AppendGrantOnFDWNames(StringInfo buf, GrantStmt *stmt) +{ + ListCell *cell = NULL; + appendStringInfo(buf, " ON FOREIGN DATA WRAPPER "); + + foreach(cell, stmt->objects) + { + char *fdwname = strVal(lfirst(cell)); + + appendStringInfoString(buf, quote_identifier(fdwname)); + if (cell != list_tail(stmt->objects)) + { + appendStringInfo(buf, ", "); + } + } +} diff --git a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c index 62c5f98c8..de5c60f79 100644 --- a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c +++ b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c @@ -27,6 +27,8 @@ static void AppendDropForeignServerStmt(StringInfo buf, DropStmt *stmt); static void AppendServerNames(StringInfo buf, DropStmt *stmt); static void AppendBehavior(StringInfo buf, DropStmt *stmt); static char * GetDefElemActionString(DefElemAction action); +static void AppendGrantOnForeignServerStmt(StringInfo buf, GrantStmt *stmt); +static void AppendGrantOnForeignServerServers(StringInfo buf, GrantStmt *stmt); char * DeparseCreateForeignServerStmt(Node *node) @@ -104,6 +106,21 @@ DeparseDropForeignServerStmt(Node *node) } +char * +DeparseGrantOnForeignServerStmt(Node *node) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(stmt->objtype == OBJECT_FOREIGN_SERVER); + + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendGrantOnForeignServerStmt(&str, stmt); + + return str.data; +} + + static void AppendCreateForeignServerStmt(StringInfo buf, CreateForeignServerStmt *stmt) { @@ -275,3 +292,58 @@ GetDefElemActionString(DefElemAction action) return ""; } } + + +static void +AppendGrantOnForeignServerStmt(StringInfo buf, GrantStmt *stmt) +{ + Assert(stmt->objtype == OBJECT_FOREIGN_SERVER); + + appendStringInfo(buf, "%s ", stmt->is_grant ? 
"GRANT" : "REVOKE"); + + if (!stmt->is_grant && stmt->grant_option) + { + appendStringInfo(buf, "GRANT OPTION FOR "); + } + + AppendGrantPrivileges(buf, stmt); + + AppendGrantOnForeignServerServers(buf, stmt); + + AppendGrantGrantees(buf, stmt); + + if (stmt->is_grant && stmt->grant_option) + { + appendStringInfo(buf, " WITH GRANT OPTION"); + } + if (!stmt->is_grant) + { + if (stmt->behavior == DROP_RESTRICT) + { + appendStringInfo(buf, " RESTRICT"); + } + else if (stmt->behavior == DROP_CASCADE) + { + appendStringInfo(buf, " CASCADE"); + } + } + appendStringInfo(buf, ";"); +} + + +static void +AppendGrantOnForeignServerServers(StringInfo buf, GrantStmt *stmt) +{ + ListCell *cell = NULL; + appendStringInfo(buf, " ON FOREIGN SERVER "); + + foreach(cell, stmt->objects) + { + char *servername = strVal(lfirst(cell)); + appendStringInfoString(buf, quote_identifier(servername)); + if (cell != list_tail(stmt->objects)) + { + appendStringInfo(buf, ", "); + } + } +} diff --git a/src/backend/distributed/deparser/deparse_function_stmts.c b/src/backend/distributed/deparser/deparse_function_stmts.c index d58faabfb..f7a44eec8 100644 --- a/src/backend/distributed/deparser/deparse_function_stmts.c +++ b/src/backend/distributed/deparser/deparse_function_stmts.c @@ -67,6 +67,9 @@ static void AppendAlterFunctionSchemaStmt(StringInfo buf, AlterObjectSchemaStmt static void AppendAlterFunctionOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt); static void AppendAlterFunctionDependsStmt(StringInfo buf, AlterObjectDependsStmt *stmt); +static void AppendGrantOnFunctionStmt(StringInfo buf, GrantStmt *stmt); +static void AppendGrantOnFunctionFunctions(StringInfo buf, GrantStmt *stmt); + static char * CopyAndConvertToUpperCase(const char *str); /* @@ -711,3 +714,113 @@ CopyAndConvertToUpperCase(const char *str) return result; } + + +/* + * DeparseGrantOnFunctionStmt builds and returns a string representing the GrantOnFunctionStmt + */ +char * +DeparseGrantOnFunctionStmt(Node *node) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(isFunction(stmt->objtype)); + + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendGrantOnFunctionStmt(&str, stmt); + + return str.data; +} + + +/* + * AppendGrantOnFunctionStmt builds and returns an SQL command representing a + * GRANT .. ON FUNCTION command from given GrantStmt object. + */ +static void +AppendGrantOnFunctionStmt(StringInfo buf, GrantStmt *stmt) +{ + Assert(isFunction(stmt->objtype)); + + if (stmt->targtype == ACL_TARGET_ALL_IN_SCHEMA) + { + elog(ERROR, + "GRANT .. ALL FUNCTIONS/PROCEDURES IN SCHEMA is not supported for formatting."); + } + + appendStringInfoString(buf, stmt->is_grant ? 
"GRANT " : "REVOKE "); + + if (!stmt->is_grant && stmt->grant_option) + { + appendStringInfoString(buf, "GRANT OPTION FOR "); + } + + AppendGrantPrivileges(buf, stmt); + + AppendGrantOnFunctionFunctions(buf, stmt); + + AppendGrantGrantees(buf, stmt); + + if (stmt->is_grant && stmt->grant_option) + { + appendStringInfoString(buf, " WITH GRANT OPTION"); + } + if (!stmt->is_grant) + { + if (stmt->behavior == DROP_RESTRICT) + { + appendStringInfoString(buf, " RESTRICT"); + } + else if (stmt->behavior == DROP_CASCADE) + { + appendStringInfoString(buf, " CASCADE"); + } + } + appendStringInfoString(buf, ";"); +} + + +/* + * AppendGrantOnFunctionFunctions appends the function names along with their arguments + * to the given StringInfo from the given GrantStmt + */ +static void +AppendGrantOnFunctionFunctions(StringInfo buf, GrantStmt *stmt) +{ + ListCell *cell = NULL; + appendStringInfo(buf, " ON %s ", ObjectTypeToKeyword(stmt->objtype)); + + foreach(cell, stmt->objects) + { + /* + * GrantOnFunction statement keeps its objects (functions) as + * a list of ObjectWithArgs + */ + ObjectWithArgs *function = (ObjectWithArgs *) lfirst(cell); + + appendStringInfoString(buf, NameListToString(function->objname)); + if (!function->args_unspecified) + { + /* if args are specified, we should append "(arg1, arg2, ...)" to the function name */ + const char *args = TypeNameListToString(function->objargs); + appendStringInfo(buf, "(%s)", args); + } + if (cell != list_tail(stmt->objects)) + { + appendStringInfoString(buf, ", "); + } + } +} + + +/* + * isFunction returns true if the given ObjectType is a function, a procedure or a routine + * otherwise returns false + */ +bool +isFunction(ObjectType objectType) +{ + return (objectType == OBJECT_FUNCTION || objectType == OBJECT_PROCEDURE || + objectType == OBJECT_ROUTINE); +} diff --git a/src/backend/distributed/deparser/deparse_role_stmts.c b/src/backend/distributed/deparser/deparse_role_stmts.c index 047bdbb60..302b9cb0f 100644 --- a/src/backend/distributed/deparser/deparse_role_stmts.c +++ b/src/backend/distributed/deparser/deparse_role_stmts.c @@ -21,7 +21,11 @@ static void AppendAlterRoleStmt(StringInfo buf, AlterRoleStmt *stmt); static void AppendAlterRoleSetStmt(StringInfo buf, AlterRoleSetStmt *stmt); +static void AppendCreateRoleStmt(StringInfo buf, CreateRoleStmt *stmt); static void AppendRoleOption(StringInfo buf, ListCell *optionCell); +static void AppendRoleList(StringInfo buf, List *roleList); +static void AppendDropRoleStmt(StringInfo buf, DropRoleStmt *stmt); +static void AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt); /* @@ -173,6 +177,213 @@ AppendRoleOption(StringInfo buf, ListCell *optionCell) } +/* + * DeparseCreateRoleStmt builds and returns a string representing of the + * CreateRoleStmt for application on a remote server. + */ +char * +DeparseCreateRoleStmt(Node *node) +{ + CreateRoleStmt *stmt = castNode(CreateRoleStmt, node); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + AppendCreateRoleStmt(&buf, stmt); + + return buf.data; +} + + +/* + * AppendCreateRoleStmt generates the string representation of the + * CreateRoleStmt and appends it to the buffer. 
+ */ +static void +AppendCreateRoleStmt(StringInfo buf, CreateRoleStmt *stmt) +{ + ListCell *optionCell = NULL; + + appendStringInfo(buf, "CREATE "); + + switch (stmt->stmt_type) + { + case ROLESTMT_ROLE: + { + appendStringInfo(buf, "ROLE "); + break; + } + + case ROLESTMT_USER: + { + appendStringInfo(buf, "USER "); + break; + } + + case ROLESTMT_GROUP: + { + appendStringInfo(buf, "GROUP "); + break; + } + } + + appendStringInfo(buf, "%s", quote_identifier(stmt->role)); + + foreach(optionCell, stmt->options) + { + AppendRoleOption(buf, optionCell); + + DefElem *option = (DefElem *) lfirst(optionCell); + + if (strcmp(option->defname, "sysid") == 0) + { + appendStringInfo(buf, " SYSID %s", quote_literal_cstr(strVal(option->arg))); + } + else if (strcmp(option->defname, "adminmembers") == 0) + { + appendStringInfo(buf, " ADMIN "); + AppendRoleList(buf, (List *) option->arg); + } + else if (strcmp(option->defname, "rolemembers") == 0) + { + appendStringInfo(buf, " ROLE "); + AppendRoleList(buf, (List *) option->arg); + } + else if (strcmp(option->defname, "addroleto") == 0) + { + appendStringInfo(buf, " IN ROLE "); + AppendRoleList(buf, (List *) option->arg); + } + } +} + + +/* + * DeparseDropRoleStmt builds and returns a string representing of the + * DropRoleStmt for application on a remote server. + */ +char * +DeparseDropRoleStmt(Node *node) +{ + DropRoleStmt *stmt = castNode(DropRoleStmt, node); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + AppendDropRoleStmt(&buf, stmt); + + return buf.data; +} + + +/* + * AppendDropRoleStmt generates the string representation of the + * DropRoleStmt and appends it to the buffer. + */ +static void +AppendDropRoleStmt(StringInfo buf, DropRoleStmt *stmt) +{ + appendStringInfo(buf, "DROP ROLE "); + + if (stmt->missing_ok) + { + appendStringInfo(buf, "IF EXISTS "); + } + + AppendRoleList(buf, stmt->roles); +} + + +static void +AppendRoleList(StringInfo buf, List *roleList) +{ + ListCell *cell = NULL; + foreach(cell, roleList) + { + Node *roleNode = (Node *) lfirst(cell); + Assert(IsA(roleNode, RoleSpec) || IsA(roleNode, AccessPriv)); + char const *rolename = NULL; + if (IsA(roleNode, RoleSpec)) + { + rolename = RoleSpecString((RoleSpec *) roleNode, true); + } + if (IsA(roleNode, AccessPriv)) + { + rolename = quote_identifier(((AccessPriv *) roleNode)->priv_name); + } + appendStringInfoString(buf, rolename); + if (cell != list_tail(roleList)) + { + appendStringInfo(buf, ", "); + } + } +} + + +/* + * DeparseGrantRoleStmt builds and returns a string representing of the + * GrantRoleStmt for application on a remote server. + */ +char * +DeparseGrantRoleStmt(Node *node) +{ + GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + AppendGrantRoleStmt(&buf, stmt); + + return buf.data; +} + + +/* + * AppendGrantRoleStmt generates the string representation of the + * GrantRoleStmt and appends it to the buffer. + */ +static void +AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt) +{ + appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE"); + + if (!stmt->is_grant && stmt->admin_opt) + { + appendStringInfo(buf, "ADMIN OPTION FOR "); + } + + AppendRoleList(buf, stmt->granted_roles); + + appendStringInfo(buf, "%s ", stmt->is_grant ? 
" TO " : " FROM "); + + AppendRoleList(buf, stmt->grantee_roles); + + if (stmt->is_grant) + { + if (stmt->admin_opt) + { + appendStringInfo(buf, " WITH ADMIN OPTION"); + } + + if (stmt->grantor) + { + appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(stmt->grantor, true)); + } + } + else + { + if (stmt->behavior == DROP_RESTRICT) + { + appendStringInfo(buf, " RESTRICT"); + } + else if (stmt->behavior == DROP_CASCADE) + { + appendStringInfo(buf, " CASCADE"); + } + } +} + + /* * AppendAlterRoleSetStmt generates the string representation of the * AlterRoleSetStmt and appends it to the buffer. diff --git a/src/backend/distributed/deparser/deparse_schema_stmts.c b/src/backend/distributed/deparser/deparse_schema_stmts.c index ebc76d5e8..f401421a2 100644 --- a/src/backend/distributed/deparser/deparse_schema_stmts.c +++ b/src/backend/distributed/deparser/deparse_schema_stmts.c @@ -22,9 +22,7 @@ static void AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt); static void AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt); static void AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt); -static void AppendGrantOnSchemaPrivileges(StringInfo buf, GrantStmt *stmt); static void AppendGrantOnSchemaSchemas(StringInfo buf, GrantStmt *stmt); -static void AppendGrantOnSchemaGrantees(StringInfo buf, GrantStmt *stmt); static void AppendAlterSchemaRenameStmt(StringInfo buf, RenameStmt *stmt); char * @@ -161,11 +159,11 @@ AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt) appendStringInfo(buf, "GRANT OPTION FOR "); } - AppendGrantOnSchemaPrivileges(buf, stmt); + AppendGrantPrivileges(buf, stmt); AppendGrantOnSchemaSchemas(buf, stmt); - AppendGrantOnSchemaGrantees(buf, stmt); + AppendGrantGrantees(buf, stmt); if (stmt->is_grant && stmt->grant_option) { @@ -186,8 +184,8 @@ AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt) } -static void -AppendGrantOnSchemaPrivileges(StringInfo buf, GrantStmt *stmt) +void +AppendGrantPrivileges(StringInfo buf, GrantStmt *stmt) { if (list_length(stmt->privileges) == 0) { @@ -227,8 +225,8 @@ AppendGrantOnSchemaSchemas(StringInfo buf, GrantStmt *stmt) } -static void -AppendGrantOnSchemaGrantees(StringInfo buf, GrantStmt *stmt) +void +AppendGrantGrantees(StringInfo buf, GrantStmt *stmt) { ListCell *cell = NULL; appendStringInfo(buf, " %s ", stmt->is_grant ? 
"TO" : "FROM"); diff --git a/src/backend/distributed/deparser/deparse_sequence_stmts.c b/src/backend/distributed/deparser/deparse_sequence_stmts.c index 0680e7a20..0a615d741 100644 --- a/src/backend/distributed/deparser/deparse_sequence_stmts.c +++ b/src/backend/distributed/deparser/deparse_sequence_stmts.c @@ -27,6 +27,8 @@ static void AppendSequenceNameList(StringInfo buf, List *objects, ObjectType obj static void AppendRenameSequenceStmt(StringInfo buf, RenameStmt *stmt); static void AppendAlterSequenceSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt); static void AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt); +static void AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt); +static void AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt); /* * DeparseDropSequenceStmt builds and returns a string representing the DropStmt @@ -254,3 +256,107 @@ AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt) } } } + + +/* + * DeparseGrantOnSequenceStmt builds and returns a string representing the GrantOnSequenceStmt + */ +char * +DeparseGrantOnSequenceStmt(Node *node) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(stmt->objtype == OBJECT_SEQUENCE); + + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendGrantOnSequenceStmt(&str, stmt); + + return str.data; +} + + +/* + * AppendGrantOnSequenceStmt builds and returns an SQL command representing a + * GRANT .. ON SEQUENCE command from given GrantStmt object. + */ +static void +AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt) +{ + Assert(stmt->objtype == OBJECT_SEQUENCE); + + if (stmt->targtype == ACL_TARGET_ALL_IN_SCHEMA) + { + /* + * Normally we shouldn't reach this + * We deparse a GrantStmt with OBJECT_SEQUENCE after setting targtype + * to ACL_TARGET_OBJECT + */ + elog(ERROR, + "GRANT .. ALL SEQUENCES IN SCHEMA is not supported for formatting."); + } + + appendStringInfoString(buf, stmt->is_grant ? 
"GRANT " : "REVOKE "); + + if (!stmt->is_grant && stmt->grant_option) + { + appendStringInfoString(buf, "GRANT OPTION FOR "); + } + + AppendGrantPrivileges(buf, stmt); + + AppendGrantOnSequenceSequences(buf, stmt); + + AppendGrantGrantees(buf, stmt); + + if (stmt->is_grant && stmt->grant_option) + { + appendStringInfoString(buf, " WITH GRANT OPTION"); + } + if (!stmt->is_grant) + { + if (stmt->behavior == DROP_RESTRICT) + { + appendStringInfoString(buf, " RESTRICT"); + } + else if (stmt->behavior == DROP_CASCADE) + { + appendStringInfoString(buf, " CASCADE"); + } + } + appendStringInfoString(buf, ";"); +} + + +/* + * AppendGrantOnSequenceSequences appends the sequence names along with their arguments + * to the given StringInfo from the given GrantStmt + */ +static void +AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt) +{ + Assert(stmt->objtype == OBJECT_SEQUENCE); + + appendStringInfoString(buf, " ON SEQUENCE "); + + ListCell *cell = NULL; + foreach(cell, stmt->objects) + { + /* + * GrantOnSequence statement keeps its objects (sequences) as + * a list of RangeVar-s + */ + RangeVar *sequence = (RangeVar *) lfirst(cell); + + /* + * We have qualified the statement beforehand + */ + appendStringInfoString(buf, quote_qualified_identifier(sequence->schemaname, + sequence->relname)); + + if (cell != list_tail(stmt->objects)) + { + appendStringInfoString(buf, ", "); + } + } +} diff --git a/src/backend/distributed/deparser/qualify_sequence_stmt.c b/src/backend/distributed/deparser/qualify_sequence_stmt.c index ac7d72ed8..a3f23fc62 100644 --- a/src/backend/distributed/deparser/qualify_sequence_stmt.c +++ b/src/backend/distributed/deparser/qualify_sequence_stmt.c @@ -132,3 +132,41 @@ QualifyRenameSequenceStmt(Node *node) seq->schemaname = get_namespace_name(schemaOid); } } + + +/* + * QualifyGrantOnSequenceStmt transforms a + * GRANT ON SEQUENCE ... + * statement in place and makes the sequence names fully qualified. + */ +void +QualifyGrantOnSequenceStmt(Node *node) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + Assert(stmt->objtype == OBJECT_SEQUENCE); + + /* + * The other option would be GRANT ALL SEQUENCES ON SCHEMA ... 
+ * For that we don't need to qualify anything.
+ */
+ if (stmt->targtype != ACL_TARGET_OBJECT)
+ {
+ return;
+ }
+ List *qualifiedSequenceRangeVars = NIL;
+ RangeVar *sequenceRangeVar = NULL;
+ foreach_ptr(sequenceRangeVar, stmt->objects)
+ {
+ if (sequenceRangeVar->schemaname == NULL)
+ {
+ Oid seqOid = RangeVarGetRelid(sequenceRangeVar, NoLock, false);
+ Oid schemaOid = get_rel_namespace(seqOid);
+ sequenceRangeVar->schemaname = get_namespace_name(schemaOid);
+ }
+
+ qualifiedSequenceRangeVars = lappend(qualifiedSequenceRangeVars,
+ sequenceRangeVar);
+ }
+
+ stmt->objects = qualifiedSequenceRangeVars;
+}
diff --git a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c
index 92bfbd715..202709b3a 100644
--- a/src/backend/distributed/executor/citus_custom_scan.c
+++ b/src/backend/distributed/executor/citus_custom_scan.c
@@ -40,6 +40,7 @@
 #include "nodes/makefuncs.h"
 #include "optimizer/optimizer.h"
 #include "optimizer/clauses.h"
+#include "utils/lsyscache.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 #include "utils/datum.h"
@@ -674,7 +675,10 @@ CitusEndScan(CustomScanState *node)
 partitionKeyConst = workerJob->partitionKeyValue;
 }
- /* queryId is not set if pg_stat_statements is not installed */
+ /*
+ * queryId is not set if pg_stat_statements is not installed;
+ * as of pg14 it can be enabled by setting compute_query_id to on.
+ */
 if (queryId != 0)
 {
 if (partitionKeyConst != NULL && executorType == MULTI_EXECUTOR_ADAPTIVE)
diff --git a/src/backend/distributed/executor/query_stats.c b/src/backend/distributed/executor/query_stats.c
index ee361a7c5..ecca89b77 100644
--- a/src/backend/distributed/executor/query_stats.c
+++ b/src/backend/distributed/executor/query_stats.c
@@ -2,6 +2,8 @@
 *
 * query_stats.c
 * Statement-level statistics for distributed queries.
+ * Code is mostly taken from postgres/contrib/pg_stat_statements
+ * and adapted to Citus.
 *
 * Copyright (c) Citus Data, Inc. 
*------------------------------------------------------------------------- @@ -9,11 +11,112 @@ #include "postgres.h" -#include "fmgr.h" +#include "safe_lib.h" +#include "miscadmin.h" + +#include "distributed/pg_version_constants.h" + +#include "access/hash.h" +#include "catalog/pg_authid.h" +#include "distributed/citus_safe_lib.h" +#include "distributed/function_utils.h" +#include "distributed/multi_executor.h" +#include "distributed/multi_server_executor.h" +#include "distributed/version_compat.h" #include "distributed/query_stats.h" +#include "distributed/tuplestore.h" +#include "funcapi.h" +#include "storage/ipc.h" +#include "storage/fd.h" +#include "storage/spin.h" +#include "tcop/utility.h" #include "utils/builtins.h" +#include + +#define CITUS_STATS_DUMP_FILE "pg_stat/citus_query_stats.stat" +#define CITUS_STAT_STATEMENTS_COLS 6 +#define CITUS_STAT_STATAMENTS_QUERY_ID 0 +#define CITUS_STAT_STATAMENTS_USER_ID 1 +#define CITUS_STAT_STATAMENTS_DB_ID 2 +#define CITUS_STAT_STATAMENTS_EXECUTOR_TYPE 3 +#define CITUS_STAT_STATAMENTS_PARTITION_KEY 4 +#define CITUS_STAT_STATAMENTS_CALLS 5 + + +#define USAGE_DECREASE_FACTOR (0.99) /* decreased every CitusQueryStatsEntryDealloc */ +#define STICKY_DECREASE_FACTOR (0.50) /* factor for sticky entries */ +#define USAGE_DEALLOC_PERCENT 5 /* free this % of entries at once */ +#define USAGE_INIT (1.0) /* including initial planning */ +#define STATS_SHARED_MEM_NAME "citus_query_stats" + +#define MAX_KEY_LENGTH NAMEDATALEN + +static const uint32 CITUS_QUERY_STATS_FILE_HEADER = 0x0d756e0f; + +/* time interval in seconds for maintenance daemon to call CitusQueryStatsSynchronizeEntries */ +int StatStatementsPurgeInterval = 10; + +/* maximum number of entries in queryStats hash, controlled by GUC citus.stat_statements_max */ +int StatStatementsMax = 50000; + +/* tracking all or none, for citus_stat_statements, controlled by GUC citus.stat_statements_track */ +int StatStatementsTrack = STAT_STATEMENTS_TRACK_NONE; + +/* + * Hashtable key that defines the identity of a hashtable entry. 
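+ * It consists of the user oid, database oid, query id, executor type and,
+ * optionally, a (truncated) partition key value.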
We use the + * same hash as pg_stat_statements + */ +typedef struct QueryStatsHashKey +{ + Oid userid; /* user OID */ + Oid dbid; /* database OID */ + uint64 queryid; /* query identifier */ + MultiExecutorType executorType; /* executor type */ + char partitionKey[MAX_KEY_LENGTH]; +} QueryStatsHashKey; + +/* + * Statistics per query and executor type + */ +typedef struct queryStatsEntry +{ + QueryStatsHashKey key; /* hash key of entry - MUST BE FIRST */ + int64 calls; /* # of times executed */ + double usage; /* hashtable usage factor */ + slock_t mutex; /* protects the counters only */ +} QueryStatsEntry; + +/* + * Global shared state + */ +typedef struct QueryStatsSharedState +{ + LWLockId lock; /* protects hashtable search/modification */ + double cur_median_usage; /* current median usage in hashtable */ +} QueryStatsSharedState; + +/* lookup table for existing pg_stat_statements entries */ +typedef struct ExistingStatsHashKey +{ + Oid userid; /* user OID */ + Oid dbid; /* database OID */ + uint64 queryid; /* query identifier */ +} ExistingStatsHashKey; + +/* saved hook address in case of unload */ +static shmem_startup_hook_type prev_shmem_startup_hook = NULL; + +/* Links to shared memory state */ +static QueryStatsSharedState *queryStats = NULL; +static HTAB *queryStatsHash = NULL; + +/*--- Functions --- */ + +Datum citus_query_stats_reset(PG_FUNCTION_ARGS); +Datum citus_query_stats(PG_FUNCTION_ARGS); + PG_FUNCTION_INFO_V1(citus_stat_statements_reset); PG_FUNCTION_INFO_V1(citus_query_stats); PG_FUNCTION_INFO_V1(citus_executor_name); @@ -21,47 +124,894 @@ PG_FUNCTION_INFO_V1(citus_executor_name); static char * CitusExecutorName(MultiExecutorType executorType); +static Size CitusQueryStatsSharedMemSize(void); + +static void CitusQueryStatsShmemStartup(void); +static void CitusQueryStatsShmemShutdown(int code, Datum arg); +static QueryStatsEntry * CitusQueryStatsEntryAlloc(QueryStatsHashKey *key, bool sticky); +static void CitusQueryStatsEntryDealloc(void); +static void CitusQueryStatsEntryReset(void); +static uint32 CitusQuerysStatsHashFn(const void *key, Size keysize); +static int CitusQuerysStatsMatchFn(const void *key1, const void *key2, Size keysize); +static uint32 ExistingStatsHashFn(const void *key, Size keysize); +static int ExistingStatsMatchFn(const void *key1, const void *key2, Size keysize); + +static HTAB * BuildExistingQueryIdHash(void); +static int GetPGStatStatementsMax(void); +static void CitusQueryStatsRemoveExpiredEntries(HTAB *existingQueryIdHash); -/* placeholder for InitializeCitusQueryStats */ void InitializeCitusQueryStats(void) { - /* placeholder for future implementation */ + RequestAddinShmemSpace(CitusQueryStatsSharedMemSize()); + + elog(LOG, "requesting named LWLockTranch for %s", STATS_SHARED_MEM_NAME); + RequestNamedLWLockTranche(STATS_SHARED_MEM_NAME, 1); + + /* Install hook */ + prev_shmem_startup_hook = shmem_startup_hook; + shmem_startup_hook = CitusQueryStatsShmemStartup; } -/* placeholder for CitusQueryStatsExecutorsEntry */ +static void +CitusQueryStatsShmemStartup(void) +{ + bool found; + HASHCTL info; + uint32 header; + int32 num; + QueryStatsEntry *buffer = NULL; + + if (prev_shmem_startup_hook) + { + prev_shmem_startup_hook(); + } + + /* Create or attach to the shared memory state */ + LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); + + /* global access lock */ + queryStats = ShmemInitStruct(STATS_SHARED_MEM_NAME, + sizeof(QueryStatsSharedState), + &found); + + if (!found) + { + /* First time through ... 
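+ * (no other backend has initialized the shared state yet, so set up the
+ * lock from the named LWLock tranche requested in InitializeCitusQueryStats)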
*/ + queryStats->lock = &(GetNamedLWLockTranche(STATS_SHARED_MEM_NAME))->lock; + } + + memset(&info, 0, sizeof(info)); + info.keysize = sizeof(QueryStatsHashKey); + info.entrysize = sizeof(QueryStatsEntry); + info.hash = CitusQuerysStatsHashFn; + info.match = CitusQuerysStatsMatchFn; + + /* allocate stats shared memory hash */ + queryStatsHash = ShmemInitHash("citus_query_stats hash", + StatStatementsMax, StatStatementsMax, + &info, + HASH_ELEM | HASH_FUNCTION | HASH_COMPARE); + + LWLockRelease(AddinShmemInitLock); + + if (!IsUnderPostmaster) + { + on_shmem_exit(CitusQueryStatsShmemShutdown, (Datum) 0); + } + + /* + * Done if some other process already completed our initialization. + */ + if (found) + { + return; + } + + /* Load stat file, don't care about locking */ + FILE *file = AllocateFile(CITUS_STATS_DUMP_FILE, PG_BINARY_R); + if (file == NULL) + { + if (errno == ENOENT) + { + return; /* ignore not-found error */ + } + goto error; + } + + /* check is header is valid */ + if (fread(&header, sizeof(uint32), 1, file) != 1 || + header != CITUS_QUERY_STATS_FILE_HEADER) + { + goto error; + } + + /* get number of entries */ + if (fread(&num, sizeof(int32), 1, file) != 1) + { + goto error; + } + + for (int i = 0; i < num; i++) + { + QueryStatsEntry temp; + + if (fread(&temp, sizeof(QueryStatsEntry), 1, file) != 1) + { + goto error; + } + + /* Skip loading "sticky" entries */ + if (temp.calls == 0) + { + continue; + } + + QueryStatsEntry *entry = CitusQueryStatsEntryAlloc(&temp.key, false); + + /* copy in the actual stats */ + entry->calls = temp.calls; + entry->usage = temp.usage; + + /* don't initialize spinlock, already done */ + } + + FreeFile(file); + + /* + * Remove the file so it's not included in backups/replication slaves, + * etc. A new file will be written on next shutdown. + */ + unlink(CITUS_STATS_DUMP_FILE); + + return; + +error: + ereport(LOG, + (errcode_for_file_access(), + errmsg("could not read citus_query_stats file \"%s\": %m", + CITUS_STATS_DUMP_FILE))); + if (buffer) + { + pfree(buffer); + } + if (file) + { + FreeFile(file); + } + + /* delete bogus file, don't care of errors in this case */ + unlink(CITUS_STATS_DUMP_FILE); +} + + +/* + * CitusQueryStatsShmemShutdown is a shmem_shutdown hook, + * it dumps statistics into file. + */ +static void +CitusQueryStatsShmemShutdown(int code, Datum arg) +{ + HASH_SEQ_STATUS hash_seq; + QueryStatsEntry *entry; + + /* Don't try to dump during a crash. 
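+ * (a non-zero exit code means we are not shutting down cleanly, so skip
+ * writing the dump file)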
*/ + if (code) + { + return; + } + + if (!queryStats) + { + return; + } + + FILE *file = AllocateFile(CITUS_STATS_DUMP_FILE ".tmp", PG_BINARY_W); + if (file == NULL) + { + goto error; + } + + if (fwrite(&CITUS_QUERY_STATS_FILE_HEADER, sizeof(uint32), 1, file) != 1) + { + goto error; + } + + int32 num_entries = hash_get_num_entries(queryStatsHash); + + if (fwrite(&num_entries, sizeof(int32), 1, file) != 1) + { + goto error; + } + + hash_seq_init(&hash_seq, queryStatsHash); + while ((entry = hash_seq_search(&hash_seq)) != NULL) + { + if (fwrite(entry, sizeof(QueryStatsEntry), 1, file) != 1) + { + /* note: we assume hash_seq_term won't change errno */ + hash_seq_term(&hash_seq); + goto error; + } + } + + if (FreeFile(file)) + { + file = NULL; + goto error; + } + + /* + * Rename file inplace + */ + if (rename(CITUS_STATS_DUMP_FILE ".tmp", CITUS_STATS_DUMP_FILE) != 0) + { + ereport(LOG, + (errcode_for_file_access(), + errmsg("could not rename citus_query_stats file \"%s\": %m", + CITUS_STATS_DUMP_FILE ".tmp"))); + } + + return; + +error: + ereport(LOG, + (errcode_for_file_access(), + errmsg("could not read citus_query_stats file \"%s\": %m", + CITUS_STATS_DUMP_FILE))); + + if (file) + { + FreeFile(file); + } + unlink(CITUS_STATS_DUMP_FILE); +} + + +/* + * CitusQueryStatsSharedMemSize calculates and returns shared memory size + * required to keep query statistics. + */ +static Size +CitusQueryStatsSharedMemSize(void) +{ + Assert(StatStatementsMax >= 0); + + Size size = MAXALIGN(sizeof(QueryStatsSharedState)); + size = add_size(size, hash_estimate_size(StatStatementsMax, sizeof(QueryStatsEntry))); + + return size; +} + + +/* + * CitusQueryStatsExecutorsEntry is the function to update statistics + * for a given query id. + */ void CitusQueryStatsExecutorsEntry(uint64 queryId, MultiExecutorType executorType, char *partitionKey) { - /* placeholder for future implementation */ + QueryStatsHashKey key; + + /* Safety check... */ + if (!queryStats || !queryStatsHash) + { + return; + } + + /* early return if tracking is disabled */ + if (!StatStatementsTrack) + { + return; + } + + /* Set up key for hashtable search */ + key.userid = GetUserId(); + key.dbid = MyDatabaseId; + key.queryid = queryId; + key.executorType = executorType; + memset(key.partitionKey, 0, MAX_KEY_LENGTH); + if (partitionKey != NULL) + { + strlcpy(key.partitionKey, partitionKey, MAX_KEY_LENGTH); + } + + /* Lookup the hash table entry with shared lock. */ + LWLockAcquire(queryStats->lock, LW_SHARED); + + QueryStatsEntry *entry = (QueryStatsEntry *) hash_search(queryStatsHash, &key, + HASH_FIND, NULL); + + /* Create new entry, if not present */ + if (!entry) + { + /* Need exclusive lock to make a new hashtable entry - promote */ + LWLockRelease(queryStats->lock); + LWLockAcquire(queryStats->lock, LW_EXCLUSIVE); + + /* OK to create a new hashtable entry */ + entry = CitusQueryStatsEntryAlloc(&key, false); + } + + /* + * Grab the spinlock while updating the counters (see comment about + * locking rules at the head of the pg_stat_statements file) + */ + volatile QueryStatsEntry *e = (volatile QueryStatsEntry *) entry; + + SpinLockAcquire(&e->mutex); + + /* "Unstick" entry if it was previously sticky */ + if (e->calls == 0) + { + e->usage = USAGE_INIT; + } + + e->calls += 1; + + SpinLockRelease(&e->mutex); + + LWLockRelease(queryStats->lock); } /* - * placeholder function for citus_stat_statements_reset + * Allocate a new hashtable entry. 
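+ * If the hash is already at citus.stat_statements_max entries, the least
+ * used entries are evicted first via CitusQueryStatsEntryDealloc. The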
+ * caller must hold an exclusive lock on queryStats->lock + */ +static QueryStatsEntry * +CitusQueryStatsEntryAlloc(QueryStatsHashKey *key, bool sticky) +{ + bool found; + long StatStatementsMaxLong = StatStatementsMax; + + /* Make space if needed */ + while (hash_get_num_entries(queryStatsHash) >= StatStatementsMaxLong) + { + CitusQueryStatsEntryDealloc(); + } + + /* Find or create an entry with desired hash code */ + QueryStatsEntry *entry = (QueryStatsEntry *) hash_search(queryStatsHash, key, + HASH_ENTER, &found); + + if (!found) + { + /* New entry, initialize it */ + + /* set the appropriate initial usage count */ + entry->usage = sticky ? queryStats->cur_median_usage : USAGE_INIT; + + /* re-initialize the mutex each time ... we assume no one using it */ + SpinLockInit(&entry->mutex); + } + + entry->calls = 0; + entry->usage = (0.0); + + return entry; +} + + +/* + * entry_cmp is qsort comparator for sorting into increasing usage order + */ +static int +entry_cmp(const void *lhs, const void *rhs) +{ + double l_usage = (*(QueryStatsEntry *const *) lhs)->usage; + double r_usage = (*(QueryStatsEntry *const *) rhs)->usage; + + if (l_usage < r_usage) + { + return -1; + } + else if (l_usage > r_usage) + { + return +1; + } + else + { + return 0; + } +} + + +/* + * CitusQueryStatsEntryDealloc deallocates least used entries. + * Caller must hold an exclusive lock on queryStats->lock. + */ +static void +CitusQueryStatsEntryDealloc(void) +{ + HASH_SEQ_STATUS hash_seq; + QueryStatsEntry *entry; + + /* + * Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them. + * While we're scanning the table, apply the decay factor to the usage + * values. + */ + QueryStatsEntry **entries = palloc(hash_get_num_entries(queryStatsHash) * + sizeof(QueryStatsEntry *)); + + int i = 0; + hash_seq_init(&hash_seq, queryStatsHash); + while ((entry = hash_seq_search(&hash_seq)) != NULL) + { + entries[i++] = entry; + + /* "Sticky" entries get a different usage decay rate. */ + if (entry->calls == 0) + { + entry->usage *= STICKY_DECREASE_FACTOR; + } + else + { + entry->usage *= USAGE_DECREASE_FACTOR; + } + } + + SafeQsort(entries, i, sizeof(QueryStatsEntry *), entry_cmp); + + if (i > 0) + { + /* Record the (approximate) median usage */ + queryStats->cur_median_usage = entries[i / 2]->usage; + } + + int nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100); + nvictims = Min(nvictims, i); + + for (i = 0; i < nvictims; i++) + { + hash_search(queryStatsHash, &entries[i]->key, HASH_REMOVE, NULL); + } + + pfree(entries); +} + + +/* + * CitusQueryStatsEntryReset resets statistics. 
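+ * It removes every entry from the shared hash while holding queryStats->lock
+ * exclusively.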
+ */ +static void +CitusQueryStatsEntryReset(void) +{ + HASH_SEQ_STATUS hash_seq; + QueryStatsEntry *entry; + + LWLockAcquire(queryStats->lock, LW_EXCLUSIVE); + + hash_seq_init(&hash_seq, queryStatsHash); + while ((entry = hash_seq_search(&hash_seq)) != NULL) + { + hash_search(queryStatsHash, &entry->key, HASH_REMOVE, NULL); + } + + LWLockRelease(queryStats->lock); +} + + +/* + * CitusQuerysStatsHashFn calculates and returns hash value for a key + */ +static uint32 +CitusQuerysStatsHashFn(const void *key, Size keysize) +{ + const QueryStatsHashKey *k = (const QueryStatsHashKey *) key; + + if (k->partitionKey[0] != '\0') + { + return hash_uint32((uint32) k->userid) ^ + hash_uint32((uint32) k->dbid) ^ + hash_any((const unsigned char *) &(k->queryid), sizeof(uint64)) ^ + hash_uint32((uint32) k->executorType) ^ + hash_any((const unsigned char *) (k->partitionKey), strlen( + k->partitionKey)); + } + else + { + return hash_uint32((uint32) k->userid) ^ + hash_uint32((uint32) k->dbid) ^ + hash_any((const unsigned char *) &(k->queryid), sizeof(uint64)) ^ + hash_uint32((uint32) k->executorType); + } +} + + +/* + * CitusQuerysStatsMatchFn compares two keys - zero means match. + * See definition of HashCompareFunc in hsearch.h for more info. + */ +static int +CitusQuerysStatsMatchFn(const void *key1, const void *key2, Size keysize) +{ + const QueryStatsHashKey *k1 = (const QueryStatsHashKey *) key1; + const QueryStatsHashKey *k2 = (const QueryStatsHashKey *) key2; + + if (k1->userid == k2->userid && + k1->dbid == k2->dbid && + k1->queryid == k2->queryid && + k1->executorType == k2->executorType) + { + return 0; + } + return 1; +} + + +/* + * ExistingStatsHashFn calculates and returns hash value for ExistingStatsHashKey + */ +static uint32 +ExistingStatsHashFn(const void *key, Size keysize) +{ + const ExistingStatsHashKey *k = (const ExistingStatsHashKey *) key; + + return hash_uint32((uint32) k->userid) ^ + hash_uint32((uint32) k->dbid) ^ + hash_any((const unsigned char *) &(k->queryid), sizeof(uint64)); +} + + +/* + * ExistingStatsMatchFn compares two keys of type ExistingStatsHashKey - zero + * means match. See definition of HashCompareFunc in hsearch.h for more info. + */ +static int +ExistingStatsMatchFn(const void *key1, const void *key2, Size keysize) +{ + const ExistingStatsHashKey *k1 = (const ExistingStatsHashKey *) key1; + const ExistingStatsHashKey *k2 = (const ExistingStatsHashKey *) key2; + + + if (k1->userid == k2->userid && + k1->dbid == k2->dbid && + k1->queryid == k2->queryid) + { + return 0; + } + + return 1; +} + + +/* + * Reset statistics. */ Datum citus_stat_statements_reset(PG_FUNCTION_ARGS) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("citus_stat_statements_reset() is only supported on " - "Citus Enterprise"))); + CitusQueryStatsEntryReset(); PG_RETURN_VOID(); } /* - * placeholder function for citus_query_stats + * citus_query_stats returns query stats kept in memory. 
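+ * Each returned row follows the CITUS_STAT_STATEMENTS_COLS layout above:
+ * (queryid, userid, dbid, executor type, partition key, calls). Only
+ * superusers, members of pg_read_all_stats and the role recorded in an
+ * entry can see that entry.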
*/ Datum citus_query_stats(PG_FUNCTION_ARGS) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("citus_query_stats() is only supported on " - "Citus Enterprise"))); - PG_RETURN_VOID(); + TupleDesc tupdesc; + HASH_SEQ_STATUS hash_seq; + QueryStatsEntry *entry; + Oid currentUserId = GetUserId(); + bool canSeeStats = superuser(); + + if (!queryStats) + { + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("citus_query_stats: shared memory not initialized"))); + } + + if (is_member_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS)) + { + canSeeStats = true; + } + + Tuplestorestate *tupstore = SetupTuplestore(fcinfo, &tupdesc); + + + /* exclusive lock on queryStats->lock is acquired and released inside the function */ + CitusQueryStatsSynchronizeEntries(); + + LWLockAcquire(queryStats->lock, LW_SHARED); + + hash_seq_init(&hash_seq, queryStatsHash); + while ((entry = hash_seq_search(&hash_seq)) != NULL) + { + Datum values[CITUS_STAT_STATEMENTS_COLS]; + bool nulls[CITUS_STAT_STATEMENTS_COLS]; + + /* following vars are to keep data for processing after spinlock release */ + uint64 queryid = 0; + Oid userid = InvalidOid; + Oid dbid = InvalidOid; + MultiExecutorType executorType = MULTI_EXECUTOR_INVALID_FIRST; + char partitionKey[MAX_KEY_LENGTH]; + int64 calls = 0; + + memset(values, 0, sizeof(values)); + memset(nulls, 0, sizeof(nulls)); + memset(partitionKey, 0, MAX_KEY_LENGTH); + + SpinLockAcquire(&entry->mutex); + + /* + * Skip entry if unexecuted (ie, it's a pending "sticky" entry) or + * the user does not have permission to view it. + */ + if (entry->calls == 0 || !(currentUserId == entry->key.userid || canSeeStats)) + { + SpinLockRelease(&entry->mutex); + continue; + } + + queryid = entry->key.queryid; + userid = entry->key.userid; + dbid = entry->key.dbid; + executorType = entry->key.executorType; + + if (entry->key.partitionKey[0] != '\0') + { + memcpy_s(partitionKey, sizeof(partitionKey), entry->key.partitionKey, + sizeof(entry->key.partitionKey)); + } + + calls = entry->calls; + + SpinLockRelease(&entry->mutex); + + values[CITUS_STAT_STATAMENTS_QUERY_ID] = UInt64GetDatum(queryid); + values[CITUS_STAT_STATAMENTS_USER_ID] = ObjectIdGetDatum(userid); + values[CITUS_STAT_STATAMENTS_DB_ID] = ObjectIdGetDatum(dbid); + values[CITUS_STAT_STATAMENTS_EXECUTOR_TYPE] = UInt32GetDatum( + (uint32) executorType); + + if (partitionKey[0] != '\0') + { + values[CITUS_STAT_STATAMENTS_PARTITION_KEY] = CStringGetTextDatum( + partitionKey); + } + else + { + nulls[CITUS_STAT_STATAMENTS_PARTITION_KEY] = true; + } + + values[CITUS_STAT_STATAMENTS_CALLS] = Int64GetDatumFast(calls); + + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + + LWLockRelease(queryStats->lock); + + /* clean up and return the tuplestore */ + tuplestore_donestoring(tupstore); + + return (Datum) 0; +} + + +/* + * CitusQueryStatsSynchronizeEntries removes all entries in queryStats hash + * that does not have matching queryId in pg_stat_statements. + * + * A function called inside (CitusQueryStatsRemoveExpiredEntries) acquires + * an exclusive lock on queryStats->lock. + */ +void +CitusQueryStatsSynchronizeEntries(void) +{ + HTAB *existingQueryIdHash = BuildExistingQueryIdHash(); + if (existingQueryIdHash != NULL) + { + CitusQueryStatsRemoveExpiredEntries(existingQueryIdHash); + hash_destroy(existingQueryIdHash); + } +} + + +/* + * BuildExistingQueryIdHash goes over entries in pg_stat_statements and prepare + * a hash table of queryId's. 
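+ * The hash is keyed on (userid, dbid, queryid) so its entries can later be
+ * matched against our own queryStats entries.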
The function returns null if + * public.pg_stat_statements(bool) function is not available. Returned hash + * table is allocated on the CurrentMemoryContext, and caller is responsible + * for deallocation. + */ +static HTAB * +BuildExistingQueryIdHash(void) +{ + const int userIdAttributeNumber = 1; + const int dbIdAttributeNumber = 2; +#if PG_VERSION_NUM >= PG_VERSION_14 + const int queryIdAttributeNumber = 4; +#else + const int queryIdAttributeNumber = 3; +#endif + Datum commandTypeDatum = (Datum) 0; + HASHCTL info; + bool missingOK = true; + + Oid pgStatStatementsOid = FunctionOidExtended("public", "pg_stat_statements", 1, + missingOK); + if (!OidIsValid(pgStatStatementsOid)) + { + return NULL; + } + + + /* fetch pg_stat_statements.max, it is expected to be available, if not bail out */ + int pgStatStatementsMax = GetPGStatStatementsMax(); + if (pgStatStatementsMax == 0) + { + ereport(DEBUG1, (errmsg("Cannot access pg_stat_statements.max"))); + return NULL; + } + + FmgrInfo *fmgrPGStatStatements = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); + commandTypeDatum = BoolGetDatum(false); + + fmgr_info(pgStatStatementsOid, fmgrPGStatStatements); + + ReturnSetInfo *statStatementsReturnSet = FunctionCallGetTupleStore1( + fmgrPGStatStatements->fn_addr, + pgStatStatementsOid, + commandTypeDatum); + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( + statStatementsReturnSet->setDesc, + &TTSOpsMinimalTuple); + + info.keysize = sizeof(ExistingStatsHashKey); + info.entrysize = sizeof(ExistingStatsHashKey); + info.hcxt = CurrentMemoryContext; + info.hash = ExistingStatsHashFn; + info.match = ExistingStatsMatchFn; + + int hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION | HASH_COMPARE); + + /* + * Allocate more hash slots (twice as much) than necessary to minimize + * collisions. + */ + HTAB *queryIdHashTable = hash_create("pg_stats_statements queryId hash", + pgStatStatementsMax * 2, &info, hashFlags); + + /* iterate over tuples in tuple store, and add queryIds to hash table */ + while (true) + { + bool isNull = false; + + bool tuplePresent = tuplestore_gettupleslot(statStatementsReturnSet->setResult, + true, + false, + tupleTableSlot); + + if (!tuplePresent) + { + break; + } + + Datum userIdDatum = slot_getattr(tupleTableSlot, userIdAttributeNumber, &isNull); + Datum dbIdDatum = slot_getattr(tupleTableSlot, dbIdAttributeNumber, &isNull); + Datum queryIdDatum = slot_getattr(tupleTableSlot, queryIdAttributeNumber, + &isNull); + + + /* + * queryId may be returned as NULL when current user is not authorized to see other + * users' stats. + */ + if (!isNull) + { + ExistingStatsHashKey key; + key.userid = DatumGetInt32(userIdDatum); + key.dbid = DatumGetInt32(dbIdDatum); + key.queryid = DatumGetInt64(queryIdDatum); + hash_search(queryIdHashTable, (void *) &key, HASH_ENTER, NULL); + } + + ExecClearTuple(tupleTableSlot); + } + + ExecDropSingleTupleTableSlot(tupleTableSlot); + + tuplestore_end(statStatementsReturnSet->setResult); + + pfree(fmgrPGStatStatements); + + return queryIdHashTable; +} + + +/* + * GetPGStatStatementsMax returns GUC value pg_stat_statements.max. The + * function returns 0 if for some reason it can not access + * pg_stat_statements.max value. + */ +static int +GetPGStatStatementsMax(void) +{ + const char *name = "pg_stat_statements.max"; + int maxValue = 0; + + const char *pgssMax = GetConfigOption(name, true, false); + + /* + * Retrieving pg_stat_statements.max can fail if the extension is loaded + * after citus in shared_preload_libraries, or not at all. 
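+ * In that case GetConfigOption() returns NULL and we simply report 0.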
+ */ + if (pgssMax) + { + maxValue = pg_atoi(pgssMax, 4, 0); + } + + return maxValue; +} + + +/* + * CitusQueryStatsRemoveExpiredEntries iterates over queryStats hash entries + * and removes entries with keys that do not exists in the provided hash of + * queryIds. + * + * Acquires and releases exclusive lock on queryStats->lock. + */ +static void +CitusQueryStatsRemoveExpiredEntries(HTAB *existingQueryIdHash) +{ + HASH_SEQ_STATUS hash_seq; + QueryStatsEntry *entry; + int removedCount = 0; + bool canSeeStats = superuser(); + Oid currentUserId = GetUserId(); + + if (is_member_of_role(currentUserId, ROLE_PG_READ_ALL_STATS)) + { + canSeeStats = true; + } + + LWLockAcquire(queryStats->lock, LW_EXCLUSIVE); + + hash_seq_init(&hash_seq, queryStatsHash); + while ((entry = hash_seq_search(&hash_seq)) != NULL) + { + bool found = false; + ExistingStatsHashKey existingStatsKey = { 0, 0, 0 }; + + /* + * pg_stat_statements returns NULL in the queryId field for queries + * belonging to other users. Those queries are therefore not reflected + * in the existingQueryIdHash, but that does not mean that we should + * remove them as they are relevant to other users. + */ + if (!(currentUserId == entry->key.userid || canSeeStats)) + { + continue; + } + + existingStatsKey.userid = entry->key.userid; + existingStatsKey.dbid = entry->key.dbid; + existingStatsKey.queryid = entry->key.queryid; + + hash_search(existingQueryIdHash, (void *) &existingStatsKey, HASH_FIND, &found); + if (!found) + { + hash_search(queryStatsHash, &entry->key, HASH_REMOVE, NULL); + removedCount++; + } + } + + LWLockRelease(queryStats->lock); + + if (removedCount > 0) + { + elog(DEBUG2, "citus_stat_statements removed %d expired entries", removedCount); + } } diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 12d977fbf..99bcfc240 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -21,9 +21,13 @@ #include "catalog/catalog.h" #include "catalog/dependency.h" #include "catalog/indexing.h" +#include "catalog/pg_auth_members.h" +#include "catalog/pg_authid_d.h" #include "catalog/pg_class.h" #include "catalog/pg_constraint.h" #include "catalog/pg_depend.h" +#include "catalog/pg_extension_d.h" +#include "catalog/pg_foreign_data_wrapper_d.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc_d.h" #include "catalog/pg_rewrite.h" @@ -45,6 +49,7 @@ #include "utils/fmgroids.h" #include "utils/hsearch.h" #include "utils/lsyscache.h" +#include "utils/syscache.h" /* * ObjectAddressCollector keeps track of collected ObjectAddresses. 
This can be used @@ -168,6 +173,8 @@ static void ApplyAddToDependencyList(ObjectAddressCollector *collector, static List * GetViewRuleReferenceDependencyList(Oid relationId); static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress target); +static List * GetDependentRoleIdsFDW(Oid FDWOid); +static List * ExpandRolesToGroups(Oid roleid); static ViewDependencyNode * BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap); @@ -670,16 +677,13 @@ SupportedDependencyByCitus(const ObjectAddress *address) case OCLASS_ROLE: { - /* - * Community only supports the extension owner as a distributed object to - * propagate alter statements for this user - */ - if (address->objectId == CitusExtensionOwner()) + /* if it is a reserved role do not propagate */ + if (IsReservedName(GetUserNameFromId(address->objectId, false))) { - return true; + return false; } - return false; + return true; } case OCLASS_EXTENSION: @@ -1275,6 +1279,42 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe switch (target.classId) { + case AuthIdRelationId: + { + /* + * Roles are members of other roles. These relations are not recorded directly + * but can be deduced from pg_auth_members + */ + return ExpandRolesToGroups(target.objectId); + } + + case ExtensionRelationId: + { + /* + * FDWs get propagated along with the extensions they belong to. + * In case there are GRANTed privileges on FDWs to roles, those + * GRANT statements will be propagated to. In order to make sure + * that those GRANT statements work, the privileged roles should + * exist on the worker nodes. Hence, here we find these dependent + * roles and add them as dependencies. + */ + + Oid extensionId = target.objectId; + List *FDWOids = GetDependentFDWsToExtension(extensionId); + + Oid FDWOid = InvalidOid; + foreach_oid(FDWOid, FDWOids) + { + List *dependentRoleIds = GetDependentRoleIdsFDW(FDWOid); + List *dependencies = + CreateObjectAddressDependencyDefList(AuthIdRelationId, + dependentRoleIds); + result = list_concat(result, dependencies); + } + + break; + } + case TypeRelationId: { switch (get_typtype(target.objectId)) @@ -1407,6 +1447,73 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe } +/* + * GetDependentRoleIdsFDW returns a list of role oids that has privileges on the + * FDW with the given object id. + */ +static List * +GetDependentRoleIdsFDW(Oid FDWOid) +{ + List *roleIds = NIL; + + Acl *aclEntry = GetPrivilegesForFDW(FDWOid); + + if (aclEntry == NULL) + { + return NIL; + } + + AclItem *privileges = ACL_DAT(aclEntry); + int numberOfPrivsGranted = ACL_NUM(aclEntry); + + for (int i = 0; i < numberOfPrivsGranted; i++) + { + roleIds = lappend_oid(roleIds, privileges[i].ai_grantee); + } + + return roleIds; +} + + +/* + * ExpandRolesToGroups returns a list of object addresses pointing to roles that roleid + * depends on. 
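+ * In practice these are the roles that roleid is a direct member of, looked
+ * up through pg_auth_members.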
+ */ +static List * +ExpandRolesToGroups(Oid roleid) +{ + Relation pgAuthMembers = table_open(AuthMemRelationId, AccessShareLock); + HeapTuple tuple = NULL; + + ScanKeyData scanKey[1]; + const int scanKeyCount = 1; + + /* scan pg_auth_members for member = $1 via index pg_auth_members_member_role_index */ + ScanKeyInit(&scanKey[0], Anum_pg_auth_members_member, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(roleid)); + + SysScanDesc scanDescriptor = systable_beginscan(pgAuthMembers, AuthMemMemRoleIndexId, + true, NULL, scanKeyCount, scanKey); + + List *roles = NIL; + while ((tuple = systable_getnext(scanDescriptor)) != NULL) + { + Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple); + + DependencyDefinition *definition = palloc0(sizeof(DependencyDefinition)); + definition->mode = DependencyObjectAddress; + ObjectAddressSet(definition->data.address, AuthIdRelationId, membership->roleid); + + roles = lappend(roles, definition); + } + + systable_endscan(scanDescriptor); + table_close(pgAuthMembers, AccessShareLock); + + return roles; +} + + /* * GetViewRuleReferenceDependencyList returns the dependencies of the view's * internal rule dependencies. diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index 7265f8685..0581e54b3 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -28,6 +28,7 @@ #include "catalog/pg_type.h" #include "citus_version.h" #include "commands/extension.h" +#include "distributed/listutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" @@ -44,6 +45,7 @@ #include "parser/parse_type.h" #include "utils/builtins.h" #include "utils/fmgroids.h" +#include "utils/lsyscache.h" #include "utils/regproc.h" #include "utils/rel.h" @@ -519,3 +521,82 @@ UpdateDistributedObjectColocationId(uint32 oldColocationId, table_close(pgDistObjectRel, NoLock); CommandCounterIncrement(); } + + +/* + * DistributedFunctionList returns the list of ObjectAddress-es of all the + * distributed functions found in pg_dist_object + */ +List * +DistributedFunctionList(void) +{ + List *distributedFunctionList = NIL; + + ScanKeyData key[1]; + Relation pgDistObjectRel = table_open(DistObjectRelationId(), AccessShareLock); + + /* scan pg_dist_object for classid = ProcedureRelationId via index */ + ScanKeyInit(&key[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(ProcedureRelationId)); + SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel, + DistObjectPrimaryKeyIndexId(), + true, NULL, 1, key); + + HeapTuple pgDistObjectTup = NULL; + while (HeapTupleIsValid(pgDistObjectTup = systable_getnext(pgDistObjectScan))) + { + Form_pg_dist_object pg_dist_object = + (Form_pg_dist_object) GETSTRUCT(pgDistObjectTup); + + ObjectAddress *functionAddress = palloc0(sizeof(ObjectAddress)); + functionAddress->classId = ProcedureRelationId; + functionAddress->objectId = pg_dist_object->objid; + functionAddress->objectSubId = pg_dist_object->objsubid; + distributedFunctionList = lappend(distributedFunctionList, functionAddress); + } + + systable_endscan(pgDistObjectScan); + relation_close(pgDistObjectRel, AccessShareLock); + return distributedFunctionList; +} + + +/* + * DistributedSequenceList returns the list of ObjectAddress-es of all the + * distributed sequences found in pg_dist_object + */ +List * +DistributedSequenceList(void) +{ + List *distributedSequenceList = 
NIL; + + ScanKeyData key[1]; + Relation pgDistObjectRel = table_open(DistObjectRelationId(), AccessShareLock); + + /* scan pg_dist_object for classid = RelationRelationId via index */ + ScanKeyInit(&key[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel, + DistObjectPrimaryKeyIndexId(), + true, NULL, 1, key); + + HeapTuple pgDistObjectTup = NULL; + while (HeapTupleIsValid(pgDistObjectTup = systable_getnext(pgDistObjectScan))) + { + Form_pg_dist_object pg_dist_object = + (Form_pg_dist_object) GETSTRUCT(pgDistObjectTup); + + if (get_rel_relkind(pg_dist_object->objid) == RELKIND_SEQUENCE) + { + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + sequenceAddress->classId = RelationRelationId; + sequenceAddress->objectId = pg_dist_object->objid; + sequenceAddress->objectSubId = pg_dist_object->objsubid; + distributedSequenceList = lappend(distributedSequenceList, sequenceAddress); + } + } + + systable_endscan(pgDistObjectScan); + relation_close(pgDistObjectRel, AccessShareLock); + return distributedSequenceList; +} diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index d668ee693..2d23109df 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -20,6 +20,7 @@ #include "access/nbtree.h" #include "access/xact.h" #include "access/sysattr.h" +#include "catalog/index.h" #include "catalog/indexing.h" #include "catalog/pg_am.h" #include "catalog/pg_collation.h" @@ -37,6 +38,7 @@ #include "distributed/citus_ruleutils.h" #include "distributed/multi_executor.h" #include "distributed/function_utils.h" +#include "distributed/listutils.h" #include "distributed/foreign_key_relationship.h" #include "distributed/listutils.h" #include "distributed/metadata_utility.h" @@ -62,6 +64,7 @@ #include "parser/parse_func.h" #include "parser/parse_type.h" #include "storage/lmgr.h" +#include "utils/array.h" #include "utils/builtins.h" #include "utils/catcache.h" #include "utils/datum.h" @@ -162,6 +165,7 @@ typedef struct MetadataCacheData Oid workerHashFunctionId; Oid anyValueFunctionId; Oid textSendAsJsonbFunctionId; + Oid textoutFunctionId; Oid extensionOwner; Oid binaryCopyFormatId; Oid textCopyFormatId; @@ -169,6 +173,10 @@ typedef struct MetadataCacheData Oid secondaryNodeRoleId; Oid pgTableIsVisibleFuncId; Oid citusTableIsVisibleFuncId; + Oid distAuthinfoRelationId; + Oid distAuthinfoIndexId; + Oid distPoolinfoRelationId; + Oid distPoolinfoIndexId; Oid relationIsAKnownShardFuncId; Oid jsonbExtractPathFuncId; Oid jsonbExtractPathTextFuncId; @@ -231,6 +239,7 @@ static void InitializeWorkerNodeCache(void); static void RegisterForeignKeyGraphCacheCallbacks(void); static void RegisterWorkerNodeCacheCallbacks(void); static void RegisterLocalGroupIdCacheCallbacks(void); +static void RegisterAuthinfoCacheCallbacks(void); static void RegisterCitusTableCacheEntryReleaseCallbacks(void); static uint32 WorkerNodeHashCode(const void *key, Size keySize); static void ResetCitusTableCacheEntry(CitusTableCacheEntry *cacheEntry); @@ -242,6 +251,7 @@ static void InvalidateForeignRelationGraphCacheCallback(Datum argument, Oid rela static void InvalidateDistRelationCacheCallback(Datum argument, Oid relationId); static void InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId); static void InvalidateLocalGroupIdRelationCacheCallback(Datum argument, Oid 
relationId); +static void InvalidateConnParamsCacheCallback(Datum argument, Oid relationId); static void CitusTableCacheEntryReleaseCallback(ResourceReleasePhase phase, bool isCommit, bool isTopLevel, void *arg); static HeapTuple LookupDistPartitionTuple(Relation pgDistPartition, Oid relationId); @@ -269,6 +279,10 @@ static bool IsCitusTableTypeInternal(char partitionMethod, char replicationModel CitusTableType tableType); static bool RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry); +static Oid DistAuthinfoRelationId(void); +static Oid DistAuthinfoIndexId(void); +static Oid DistPoolinfoRelationId(void); +static Oid DistPoolinfoIndexId(void); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(citus_dist_partition_cache_invalidate); @@ -2567,6 +2581,50 @@ DistPlacementGroupidIndexId(void) } +/* return oid of pg_dist_authinfo relation */ +static Oid +DistAuthinfoRelationId(void) +{ + CachedRelationLookup("pg_dist_authinfo", + &MetadataCache.distAuthinfoRelationId); + + return MetadataCache.distAuthinfoRelationId; +} + + +/* return oid of pg_dist_authinfo identification index */ +static Oid +DistAuthinfoIndexId(void) +{ + CachedRelationLookup("pg_dist_authinfo_identification_index", + &MetadataCache.distAuthinfoIndexId); + + return MetadataCache.distAuthinfoIndexId; +} + + +/* return oid of pg_dist_poolinfo relation */ +static Oid +DistPoolinfoRelationId(void) +{ + CachedRelationLookup("pg_dist_poolinfo", + &MetadataCache.distPoolinfoRelationId); + + return MetadataCache.distPoolinfoRelationId; +} + + +/* return oid of pg_dist_poolinfo primary key index */ +static Oid +DistPoolinfoIndexId(void) +{ + CachedRelationLookup("pg_dist_poolinfo_pkey", + &MetadataCache.distPoolinfoIndexId); + + return MetadataCache.distPoolinfoIndexId; +} + + /* return oid of the read_intermediate_result(text,citus_copy_format) function */ Oid CitusReadIntermediateResultFuncId(void) @@ -2686,6 +2744,42 @@ CitusAnyValueFunctionId(void) } +/* return oid of the citus_text_send_as_jsonb(text) function */ +Oid +CitusTextSendAsJsonbFunctionId(void) +{ + if (MetadataCache.textSendAsJsonbFunctionId == InvalidOid) + { + List *nameList = list_make2(makeString("pg_catalog"), + makeString("citus_text_send_as_jsonb")); + Oid paramOids[1] = { TEXTOID }; + + MetadataCache.textSendAsJsonbFunctionId = + LookupFuncName(nameList, 1, paramOids, false); + } + + return MetadataCache.textSendAsJsonbFunctionId; +} + + +/* return oid of the textout(text) function */ +Oid +TextOutFunctionId(void) +{ + if (MetadataCache.textoutFunctionId == InvalidOid) + { + List *nameList = list_make2(makeString("pg_catalog"), + makeString("textout")); + Oid paramOids[1] = { TEXTOID }; + + MetadataCache.textoutFunctionId = + LookupFuncName(nameList, 1, paramOids, false); + } + + return MetadataCache.textoutFunctionId; +} + + /* * PgTableVisibleFuncId returns oid of the pg_table_is_visible function. 
*/ @@ -3274,7 +3368,7 @@ citus_conninfo_cache_invalidate(PG_FUNCTION_ARGS) errmsg("must be called as trigger"))); } - /* no-op in community edition */ + CitusInvalidateRelcacheByRelid(DistAuthinfoRelationId()); PG_RETURN_DATUM(PointerGetDatum(NULL)); } @@ -3402,6 +3496,7 @@ InitializeCaches(void) RegisterForeignKeyGraphCacheCallbacks(); RegisterWorkerNodeCacheCallbacks(); RegisterLocalGroupIdCacheCallbacks(); + RegisterAuthinfoCacheCallbacks(); RegisterCitusTableCacheEntryReleaseCallbacks(); } PG_CATCH(); @@ -3807,6 +3902,18 @@ RegisterLocalGroupIdCacheCallbacks(void) } +/* + * RegisterAuthinfoCacheCallbacks registers the callbacks required to + * maintain cached connection parameters at fresh values. + */ +static void +RegisterAuthinfoCacheCallbacks(void) +{ + /* Watch for invalidation events. */ + CacheRegisterRelcacheCallback(InvalidateConnParamsCacheCallback, (Datum) 0); +} + + /* * WorkerNodeHashCode computes the hash code for a worker node from the node's * host name and port number. Nodes that only differ by their rack locations @@ -4305,6 +4412,30 @@ InvalidateLocalGroupIdRelationCacheCallback(Datum argument, Oid relationId) } +/* + * InvalidateConnParamsCacheCallback sets isValid flag to false for all entries + * in ConnParamsHash, a cache used during connection establishment. + */ +static void +InvalidateConnParamsCacheCallback(Datum argument, Oid relationId) +{ + if (relationId == MetadataCache.distAuthinfoRelationId || + relationId == MetadataCache.distPoolinfoRelationId || + relationId == InvalidOid) + { + ConnParamsHashEntry *entry = NULL; + HASH_SEQ_STATUS status; + + hash_seq_init(&status, ConnParamsHash); + + while ((entry = (ConnParamsHashEntry *) hash_seq_search(&status)) != NULL) + { + entry->isValid = false; + } + } +} + + /* * CitusTableCacheFlushInvalidatedEntries frees invalidated cache entries. * Invalidated entries aren't freed immediately as callers expect their lifetime @@ -4912,37 +5043,164 @@ role_exists(PG_FUNCTION_ARGS) /* - * authinfo_valid is a check constraint which errors on all rows, intended for - * use in prohibiting writes to pg_dist_authinfo in Citus Community. + * GetPoolinfoViaCatalog searches the pg_dist_poolinfo table for a row matching + * the provided nodeId and returns the poolinfo field of this row if found. + * Otherwise, this function returns NULL. 
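For context, pg_dist_poolinfo rows map a node id to a libpq-style connection fragment that is substituted when connecting to that node; the nodeid/poolinfo column order matches the attribute numbers used in the scan above. A minimal sketch, assuming a worker already registered with nodeid 1 and a pooler listening on port 6432 (both hypothetical):

    -- hypothetical pooler row for worker node 1
    INSERT INTO pg_dist_poolinfo (nodeid, poolinfo)
    VALUES (1, 'host=10.0.0.10 port=6432');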
*/ -Datum -authinfo_valid(PG_FUNCTION_ARGS) +char * +GetPoolinfoViaCatalog(int64 nodeId) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot write to pg_dist_authinfo"), - errdetail( - "Citus Community Edition does not support the use of " - "custom authentication options."), - errhint( - "To learn more about using advanced authentication schemes " - "with Citus, please contact us at " - "https://citusdata.com/about/contact_us"))); + ScanKeyData scanKey[1]; + const int scanKeyCount = 1; + const AttrNumber nodeIdIdx = 1, poolinfoIdx = 2; + Relation pgDistPoolinfo = table_open(DistPoolinfoRelationId(), AccessShareLock); + bool indexOK = true; + char *poolinfo = NULL; + + /* set scan arguments */ + ScanKeyInit(&scanKey[0], nodeIdIdx, BTEqualStrategyNumber, F_INT4EQ, + Int32GetDatum(nodeId)); + + SysScanDesc scanDescriptor = systable_beginscan(pgDistPoolinfo, DistPoolinfoIndexId(), + indexOK, + NULL, scanKeyCount, scanKey); + + HeapTuple heapTuple = systable_getnext(scanDescriptor); + if (HeapTupleIsValid(heapTuple)) + { + TupleDesc tupleDescriptor = RelationGetDescr(pgDistPoolinfo); + bool isNull = false; + + Datum poolinfoDatum = heap_getattr(heapTuple, poolinfoIdx, tupleDescriptor, + &isNull); + + Assert(!isNull); + + poolinfo = TextDatumGetCString(poolinfoDatum); + } + + systable_endscan(scanDescriptor); + table_close(pgDistPoolinfo, AccessShareLock); + + return poolinfo; } /* - * poolinfo_valid is a check constraint which errors on all rows, intended for - * use in prohibiting writes to pg_dist_poolinfo in Citus Community. + * GetAuthinfoViaCatalog searches pg_dist_authinfo for a row matching a pro- + * vided role and node id. Three types of rules are currently permitted: those + * matching a specific node (non-zero nodeid), those matching all nodes (a + * nodeid of zero), and those denoting a loopback connection (nodeid of -1). + * Rolename must always be specified. If both types of rules exist for a given + * user/host, the more specific (host-specific) rule wins. This means that when + * both a zero and non-zero row exist for a given rolename, the non-zero row + * has precedence. + * + * In short, this function will return a rule matching nodeId, or if that's + * absent the rule for 0, or if that's absent, an empty string. Callers can + * just use the returned authinfo and know the precedence has been honored. + */ +char * +GetAuthinfoViaCatalog(const char *roleName, int64 nodeId) +{ + char *authinfo = ""; + Datum nodeIdDatumArray[2] = { + Int32GetDatum(nodeId), + Int32GetDatum(WILDCARD_NODE_ID) + }; + ArrayType *nodeIdArrayType = DatumArrayToArrayType(nodeIdDatumArray, + lengthof(nodeIdDatumArray), + INT4OID); + ScanKeyData scanKey[2]; + const AttrNumber nodeIdIdx = 1, roleIdx = 2, authinfoIdx = 3; + + /* + * Our index's definition ensures correct precedence for positive nodeIds, + * but when handling a negative value we need to traverse backwards to keep + * the invariant that the zero rule has lowest precedence. + */ + ScanDirection direction = (nodeId < 0) ? BackwardScanDirection : ForwardScanDirection; + + if (ReindexIsProcessingIndex(DistAuthinfoIndexId())) + { + ereport(ERROR, (errmsg("authinfo is being reindexed; try again"))); + } + + memset(&scanKey, 0, sizeof(scanKey)); + + /* first column in index is rolename, need exact match there ... */ + ScanKeyInit(&scanKey[0], roleIdx, BTEqualStrategyNumber, + F_NAMEEQ, CStringGetDatum(roleName)); + + /* second column is nodeId, match against array of nodeid and zero (any node) ... 
*/ + ScanKeyInit(&scanKey[1], nodeIdIdx, BTEqualStrategyNumber, + F_INT4EQ, PointerGetDatum(nodeIdArrayType)); + scanKey[1].sk_flags |= SK_SEARCHARRAY; + + /* + * It's important that we traverse the index in order: we need to ensure + * that rules with nodeid 0 are encountered last. We'll use the first tuple + * we find. This ordering defines the precedence order of authinfo rules. + */ + Relation pgDistAuthinfo = table_open(DistAuthinfoRelationId(), AccessShareLock); + Relation pgDistAuthinfoIdx = index_open(DistAuthinfoIndexId(), AccessShareLock); + SysScanDesc scanDescriptor = systable_beginscan_ordered(pgDistAuthinfo, + pgDistAuthinfoIdx, + NULL, lengthof(scanKey), + scanKey); + + /* first tuple represents highest-precedence rule for this node */ + HeapTuple authinfoTuple = systable_getnext_ordered(scanDescriptor, direction); + if (HeapTupleIsValid(authinfoTuple)) + { + TupleDesc tupleDescriptor = RelationGetDescr(pgDistAuthinfo); + bool isNull = false; + + Datum authinfoDatum = heap_getattr(authinfoTuple, authinfoIdx, + tupleDescriptor, &isNull); + + Assert(!isNull); + + authinfo = TextDatumGetCString(authinfoDatum); + } + + systable_endscan_ordered(scanDescriptor); + index_close(pgDistAuthinfoIdx, AccessShareLock); + table_close(pgDistAuthinfo, AccessShareLock); + + return authinfo; +} + + +/* + * authinfo_valid is a check constraint to verify that an inserted authinfo row + * uses only permitted libpq parameters. + */ +Datum +authinfo_valid(PG_FUNCTION_ARGS) +{ + char *authinfo = TextDatumGetCString(PG_GETARG_DATUM(0)); + + /* this array _must_ be kept in an order usable by bsearch */ + const char *allowList[] = { "password", "sslcert", "sslkey" }; + bool authinfoValid = CheckConninfo(authinfo, allowList, lengthof(allowList), NULL); + + PG_RETURN_BOOL(authinfoValid); +} + + +/* + * poolinfo_valid is a check constraint to verify that an inserted poolinfo row + * uses only permitted libpq parameters. 
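With authinfo_valid above delegating to CheckConninfo, rows are accepted as long as they use only the allow-listed keys (password, sslcert, sslkey). A minimal sketch with a hypothetical role; nodeid 0 is the wildcard rule described earlier, and the node-specific rule takes precedence over it:

    -- node-specific rule for node 1 wins over the wildcard (nodeid 0) rule
    INSERT INTO pg_dist_authinfo (nodeid, rolename, authinfo) VALUES
        (1, 'app_user', 'password=node1secret'),
        (0, 'app_user', 'password=defaultsecret');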
*/ Datum poolinfo_valid(PG_FUNCTION_ARGS) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot write to pg_dist_poolinfo"), - errdetail( - "Citus Community Edition does not support the use of " - "pooler options."), - errhint("To learn more about using advanced pooling schemes " - "with Citus, please contact us at " - "https://citusdata.com/about/contact_us"))); + char *poolinfo = TextDatumGetCString(PG_GETARG_DATUM(0)); + + /* this array _must_ be kept in an order usable by bsearch */ + const char *allowList[] = { "dbname", "host", "port" }; + bool poolinfoValid = CheckConninfo(poolinfo, allowList, lengthof(allowList), NULL); + + PG_RETURN_BOOL(poolinfoValid); } diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 732325365..f11a66f53 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -71,6 +71,7 @@ #include "executor/spi.h" #include "foreign/foreign.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" #include "nodes/pg_list.h" #include "pgstat.h" #include "postmaster/bgworker.h" @@ -113,6 +114,11 @@ static List * GetObjectsForGrantStmt(ObjectType objectType, Oid objectId); static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission); static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid, AclItem *aclItem); +static List * GenerateGrantOnFunctionQueriesFromAclItem(Oid schemaOid, + AclItem *aclItem); +static List * GrantOnSequenceDDLCommands(Oid sequenceOid); +static List * GenerateGrantOnSequenceQueriesFromAclItem(Oid sequenceOid, + AclItem *aclItem); static void SetLocalReplicateReferenceTablesOnActivate(bool state); static char * GenerateSetRoleQuery(Oid roleOid); static void MetadataSyncSigTermHandler(SIGNAL_ARGS); @@ -511,6 +517,7 @@ ShouldSyncUserCommandForObject(ObjectAddress objectAddress) { Oid relOid = objectAddress.objectId; return ShouldSyncTableMetadata(relOid) || + ShouldSyncSequenceMetadata(relOid) || get_rel_relkind(relOid) == RELKIND_VIEW; } @@ -583,6 +590,26 @@ ShouldSyncTableMetadataInternal(bool hashDistributed, bool citusTableWithNoDistK /* + * ShouldSyncSequenceMetadata checks if the metadata of a sequence should be + * propagated to metadata workers, i.e. the sequence is marked as distributed + */ +bool +ShouldSyncSequenceMetadata(Oid relationId) +{ + if (!OidIsValid(relationId) || !(get_rel_relkind(relationId) == RELKIND_SEQUENCE)) + { + return false; + } + + ObjectAddress sequenceAddress = { 0 }; + ObjectAddressSet(sequenceAddress, RelationRelationId, relationId); + + return IsObjectDistributed(&sequenceAddress); +} + + +/* + * SyncMetadataSnapshotToNode does the following: * SyncNodeMetadataSnapshotToNode does the following: * 1. Sets the localGroupId on the worker so the worker knows which tuple in * pg_dist_node represents itself. @@ -1344,6 +1371,23 @@ ShardListInsertCommand(List *shardIntervalList) } +/* + * ShardListDeleteCommand generates a command list that can be executed to delete + * shard and shard placement metadata for the given shard. + */ +List * +ShardDeleteCommandList(ShardInterval *shardInterval) +{ + uint64 shardId = shardInterval->shardId; + + StringInfo deleteShardCommand = makeStringInfo(); + appendStringInfo(deleteShardCommand, + "SELECT citus_internal_delete_shard_metadata(%ld);", shardId); + + return list_make1(deleteShardCommand->data); +} + + /* * NodeDeleteCommand generate a command that can be * executed to delete the metadata for a worker node. 
@@ -1478,6 +1522,8 @@ DDLCommandsForSequence(Oid sequenceOid, char *ownerName) sequenceDDLList = lappend(sequenceDDLList, wrappedSequenceDef->data); sequenceDDLList = lappend(sequenceDDLList, sequenceGrantStmt->data); + sequenceDDLList = list_concat(sequenceDDLList, GrantOnSequenceDDLCommands( + sequenceOid)); return sequenceDDLList; } @@ -1937,7 +1983,7 @@ GrantOnSchemaDDLCommands(Oid schemaOid) /* - * GenerateGrantOnSchemaQueryFromACL generates a query string for replicating a users permissions + * GenerateGrantOnSchemaQueryFromACLItem generates a query string for replicating a users permissions * on a schema. */ List * @@ -2021,6 +2067,34 @@ GetObjectsForGrantStmt(ObjectType objectType, Oid objectId) return list_make1(makeString(get_namespace_name(objectId))); } + /* enterprise supported object types */ + case OBJECT_FUNCTION: + case OBJECT_PROCEDURE: + { + ObjectWithArgs *owa = ObjectWithArgsFromOid(objectId); + return list_make1(owa); + } + + case OBJECT_FDW: + { + ForeignDataWrapper *fdw = GetForeignDataWrapper(objectId); + return list_make1(makeString(fdw->fdwname)); + } + + case OBJECT_FOREIGN_SERVER: + { + ForeignServer *server = GetForeignServer(objectId); + return list_make1(makeString(server->servername)); + } + + case OBJECT_SEQUENCE: + { + Oid namespaceOid = get_rel_namespace(objectId); + RangeVar *sequence = makeRangeVar(get_namespace_name(namespaceOid), + get_rel_name(objectId), -1); + return list_make1(sequence); + } + default: { elog(ERROR, "unsupported object type for GRANT"); @@ -2031,6 +2105,211 @@ GetObjectsForGrantStmt(ObjectType objectType, Oid objectId) } +/* + * GrantOnFunctionDDLCommands creates a list of ddl command for replicating the permissions + * of roles on distributed functions. + */ +List * +GrantOnFunctionDDLCommands(Oid functionOid) +{ + HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionOid)); + + bool isNull = true; + Datum aclDatum = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_proacl, + &isNull); + if (isNull) + { + ReleaseSysCache(proctup); + return NIL; + } + + Acl *acl = DatumGetAclPCopy(aclDatum); + AclItem *aclDat = ACL_DAT(acl); + int aclNum = ACL_NUM(acl); + List *commands = NIL; + + ReleaseSysCache(proctup); + + for (int i = 0; i < aclNum; i++) + { + commands = list_concat(commands, + GenerateGrantOnFunctionQueriesFromAclItem( + functionOid, + &aclDat[i])); + } + + return commands; +} + + +/* + * GrantOnForeignServerDDLCommands creates a list of ddl command for replicating the + * permissions of roles on distributed foreign servers. + */ +List * +GrantOnForeignServerDDLCommands(Oid serverId) +{ + HeapTuple servertup = SearchSysCache1(FOREIGNSERVEROID, ObjectIdGetDatum(serverId)); + + bool isNull = true; + Datum aclDatum = SysCacheGetAttr(FOREIGNSERVEROID, servertup, + Anum_pg_foreign_server_srvacl, &isNull); + if (isNull) + { + ReleaseSysCache(servertup); + return NIL; + } + + Acl *aclEntry = DatumGetAclPCopy(aclDatum); + AclItem *privileges = ACL_DAT(aclEntry); + int numberOfPrivsGranted = ACL_NUM(aclEntry); + List *commands = NIL; + + ReleaseSysCache(servertup); + + for (int i = 0; i < numberOfPrivsGranted; i++) + { + commands = list_concat(commands, + GenerateGrantOnForeignServerQueriesFromAclItem( + serverId, + &privileges[i])); + } + + return commands; +} + + +/* + * GenerateGrantOnForeignServerQueriesFromAclItem generates a query string for + * replicating a users permissions on a foreign server. 
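The statements generated here are ordinary GRANT commands replayed on metadata workers, wrapped in SET ROLE/RESET ROLE so they run as the original grantor. A sketch of the kind of statement that gets deparsed, with made-up server and role names:

    SET ROLE grantor_role;
    GRANT USAGE ON FOREIGN SERVER remote_pg_server TO analytics_role;
    RESET ROLE;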
+ */ +List * +GenerateGrantOnForeignServerQueriesFromAclItem(Oid serverId, AclItem *aclItem) +{ + /* privileges to be granted */ + AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_FOREIGN_SERVER; + + /* WITH GRANT OPTION clause */ + AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_FOREIGN_SERVER; + + /* + * seems unlikely but we check if there is a grant option in the list without the actual permission + */ + Assert(!(grants & ACL_USAGE) || (permissions & ACL_USAGE)); + + Oid granteeOid = aclItem->ai_grantee; + List *queries = NIL; + + /* switch to the role which had granted acl */ + queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor)); + + /* generate the GRANT stmt that will be executed by the grantor role */ + if (permissions & ACL_USAGE) + { + char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights( + OBJECT_FOREIGN_SERVER, granteeOid, serverId, + "USAGE", grants & ACL_USAGE)); + queries = lappend(queries, query); + } + + /* reset the role back */ + queries = lappend(queries, "RESET ROLE"); + + return queries; +} + + +/* + * GenerateGrantOnFunctionQueryFromACLItem generates a query string for replicating a users permissions + * on a distributed function. + */ +List * +GenerateGrantOnFunctionQueriesFromAclItem(Oid functionOid, AclItem *aclItem) +{ + AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_FUNCTION; + AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_FUNCTION; + + /* + * seems unlikely but we check if there is a grant option in the list without the actual permission + */ + Assert(!(grants & ACL_EXECUTE) || (permissions & ACL_EXECUTE)); + Oid granteeOid = aclItem->ai_grantee; + List *queries = NIL; + + queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor)); + + if (permissions & ACL_EXECUTE) + { + char prokind = get_func_prokind(functionOid); + ObjectType objectType; + + if (prokind == PROKIND_FUNCTION) + { + objectType = OBJECT_FUNCTION; + } + else if (prokind == PROKIND_PROCEDURE) + { + objectType = OBJECT_PROCEDURE; + } + else + { + ereport(ERROR, (errmsg("unsupported prokind"), + errdetail("GRANT commands on procedures are propagated only " + "for procedures and functions."))); + } + + char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights( + objectType, granteeOid, functionOid, "EXECUTE", + grants & ACL_EXECUTE)); + queries = lappend(queries, query); + } + + queries = lappend(queries, "RESET ROLE"); + + return queries; +} + + +/* + * GenerateGrantOnFDWQueriesFromAclItem generates a query string for + * replicating a users permissions on a foreign data wrapper. 
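The foreign data wrapper path mirrors the foreign server and function/procedure paths above: only USAGE (or EXECUTE for functions and procedures) is deparsed, optionally WITH GRANT OPTION. A sketch of a user-issued grant that would be replicated this way, with assumed names:

    GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO analytics_role WITH GRANT OPTION;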
+ */ +List * +GenerateGrantOnFDWQueriesFromAclItem(Oid FDWId, AclItem *aclItem) +{ + /* privileges to be granted */ + AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_FDW; + + /* WITH GRANT OPTION clause */ + AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_FDW; + + /* + * seems unlikely but we check if there is a grant option in the list without the actual permission + */ + Assert(!(grants & ACL_USAGE) || (permissions & ACL_USAGE)); + + Oid granteeOid = aclItem->ai_grantee; + List *queries = NIL; + + /* switch to the role which had granted acl */ + queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor)); + + /* generate the GRANT stmt that will be executed by the grantor role */ + if (permissions & ACL_USAGE) + { + char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights( + OBJECT_FDW, granteeOid, FDWId, "USAGE", + grants & ACL_USAGE)); + queries = lappend(queries, query); + } + + /* reset the role back */ + queries = lappend(queries, "RESET ROLE"); + + return queries; +} + + /* * GetAccessPrivObjectForGrantStmt creates an AccessPriv object for the given permission. * It will be used when creating GrantStmt objects. @@ -2046,6 +2325,93 @@ GetAccessPrivObjectForGrantStmt(char *permission) } +/* + * GrantOnSequenceDDLCommands creates a list of ddl command for replicating the permissions + * of roles on distributed sequences. + */ +static List * +GrantOnSequenceDDLCommands(Oid sequenceOid) +{ + HeapTuple seqtup = SearchSysCache1(RELOID, ObjectIdGetDatum(sequenceOid)); + bool isNull = false; + Datum aclDatum = SysCacheGetAttr(RELOID, seqtup, Anum_pg_class_relacl, + &isNull); + if (isNull) + { + ReleaseSysCache(seqtup); + return NIL; + } + + Acl *acl = DatumGetAclPCopy(aclDatum); + AclItem *aclDat = ACL_DAT(acl); + int aclNum = ACL_NUM(acl); + List *commands = NIL; + + ReleaseSysCache(seqtup); + + for (int i = 0; i < aclNum; i++) + { + commands = list_concat(commands, + GenerateGrantOnSequenceQueriesFromAclItem( + sequenceOid, + &aclDat[i])); + } + + return commands; +} + + +/* + * GenerateGrantOnSequenceQueriesFromAclItem generates a query string for replicating a users permissions + * on a distributed sequence. 
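For sequences, the USAGE, SELECT, and UPDATE privileges are each deparsed into their own GRANT statement. A sketch of a user-issued grant that these functions would then replicate to metadata workers (sequence and role names are hypothetical):

    GRANT USAGE, SELECT ON SEQUENCE public.order_id_seq TO reporting_role;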
+ */ +static List * +GenerateGrantOnSequenceQueriesFromAclItem(Oid sequenceOid, AclItem *aclItem) +{ + AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_SEQUENCE; + AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_SEQUENCE; + + /* + * seems unlikely but we check if there is a grant option in the list without the actual permission + */ + Assert(!(grants & ACL_USAGE) || (permissions & ACL_USAGE)); + Assert(!(grants & ACL_SELECT) || (permissions & ACL_SELECT)); + Assert(!(grants & ACL_UPDATE) || (permissions & ACL_UPDATE)); + + Oid granteeOid = aclItem->ai_grantee; + List *queries = NIL; + queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor)); + + if (permissions & ACL_USAGE) + { + char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights( + OBJECT_SEQUENCE, granteeOid, sequenceOid, + "USAGE", grants & ACL_USAGE)); + queries = lappend(queries, query); + } + + if (permissions & ACL_SELECT) + { + char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights( + OBJECT_SEQUENCE, granteeOid, sequenceOid, + "SELECT", grants & ACL_SELECT)); + queries = lappend(queries, query); + } + + if (permissions & ACL_UPDATE) + { + char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights( + OBJECT_SEQUENCE, granteeOid, sequenceOid, + "UPDATE", grants & ACL_UPDATE)); + queries = lappend(queries, query); + } + + queries = lappend(queries, "RESET ROLE"); + + return queries; +} + + /* * SetLocalEnableMetadataSync sets the enable_metadata_sync locally */ diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index 4ad3f9e48..2fd84d239 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -66,6 +66,9 @@ #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" +#if PG_VERSION_NUM < 120000 +#include "utils/tqual.h" +#endif #define DISK_SPACE_FIELDS 2 @@ -2175,11 +2178,8 @@ EnsureSuperUser(void) } -/* - * Return a table's owner as a string. - */ -char * -TableOwner(Oid relationId) +Oid +TableOwnerOid(Oid relationId) { HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); if (!HeapTupleIsValid(tuple)) @@ -2191,8 +2191,17 @@ TableOwner(Oid relationId) Oid userId = ((Form_pg_class) GETSTRUCT(tuple))->relowner; ReleaseSysCache(tuple); + return userId; +} - return GetUserNameFromId(userId, false); + +/* + * Return a table's owner as a string. 
+ */ +char * +TableOwner(Oid relationId) +{ + return GetUserNameFromId(TableOwnerOid(relationId), false); } diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c index 47e401499..d33e4b319 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c +++ b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c @@ -420,20 +420,9 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, case OBJECT_EXTENSION: case OBJECT_COLLATION: case OBJECT_VIEW: - { - check_object_ownership(userId, type, *addr, node, *relation); - break; - } - case OBJECT_ROLE: { - /* Support only extension owner role with community */ - if (addr->objectId != CitusExtensionOwner()) - { - ereport(ERROR, (errmsg("Current user does not have required " - "access privileges on role %d with type %d", - addr->objectId, type))); - } + check_object_ownership(userId, type, *addr, node, *relation); break; } diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index d18ef749c..a8926d2b3 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -622,6 +622,17 @@ GetPreLoadTableCreationCommands(Oid relationId, } } + List *tableACLList = pg_get_table_grants(relationId); + if (tableACLList != NIL) + { + char *tableACLCommand = NULL; + foreach_ptr(tableACLCommand, tableACLList) + { + tableDDLEventList = lappend(tableDDLEventList, + makeTableDDLCommandString(tableACLCommand)); + } + } + char *tableOwnerDef = TableOwnerResetCommand(relationId); if (tableOwnerDef != NULL) { @@ -629,6 +640,9 @@ GetPreLoadTableCreationCommands(Oid relationId, tableOwnerDef)); } + List *tableRowLevelSecurityCommands = GetTableRowLevelSecurityCommands(relationId); + tableDDLEventList = list_concat(tableDDLEventList, tableRowLevelSecurityCommands); + List *policyCommands = CreatePolicyCommands(relationId); tableDDLEventList = list_concat(tableDDLEventList, policyCommands); @@ -777,6 +791,29 @@ GatherIndexAndConstraintDefinitionList(Form_pg_index indexForm, List **indexDDLE } +/* + * GetTableRowLevelSecurityCommands takes in a relationId, and returns the list of + * commands needed to reconstruct the row level security policy. + */ +List * +GetTableRowLevelSecurityCommands(Oid relationId) +{ + List *rowLevelSecurityCommandList = NIL; + + List *rowLevelSecurityEnableCommands = pg_get_row_level_security_commands(relationId); + + char *rowLevelSecurityCommand = NULL; + foreach_ptr(rowLevelSecurityCommand, rowLevelSecurityEnableCommands) + { + rowLevelSecurityCommandList = lappend( + rowLevelSecurityCommandList, + makeTableDDLCommandString(rowLevelSecurityCommand)); + } + + return rowLevelSecurityCommandList; +} + + /* * IndexImpliedByAConstraint is a helper function to be used while scanning * pg_index. 
It returns true if the index identified by the given indexForm is diff --git a/src/backend/distributed/operations/repair_shards.c b/src/backend/distributed/operations/repair_shards.c index 1efec3193..f29f0a75a 100644 --- a/src/backend/distributed/operations/repair_shards.c +++ b/src/backend/distributed/operations/repair_shards.c @@ -32,6 +32,7 @@ #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_join_order.h" +#include "distributed/multi_logical_replication.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/reference_table_utils.h" #include "distributed/remote_commands.h" @@ -53,6 +54,9 @@ #include "utils/syscache.h" /* local function forward declarations */ +static void VerifyTablesHaveReplicaIdentity(List *colocatedTableList); +static bool RelationCanPublishAllModifications(Oid relationId); +static bool CanUseLogicalReplication(Oid relationId, char shardReplicationMode); static void ErrorIfTableCannotBeReplicated(Oid relationId); static void RepairShardPlacement(int64 shardId, const char *sourceNodeName, int32 sourceNodePort, const char *targetNodeName, @@ -64,6 +68,12 @@ static void ReplicateColocatedShardPlacement(int64 shardId, char *sourceNodeName static void CopyShardTables(List *shardIntervalList, char *sourceNodeName, int32 sourceNodePort, char *targetNodeName, int32 targetNodePort, bool useLogicalReplication); +static void CopyShardTablesViaLogicalReplication(List *shardIntervalList, + char *sourceNodeName, + int32 sourceNodePort, + char *targetNodeName, + int32 targetNodePort); + static void CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, int32 sourceNodePort, char *targetNodeName, int32 targetNodePort); @@ -146,11 +156,10 @@ citus_copy_shard_placement(PG_FUNCTION_ARGS) char *targetNodeName = text_to_cstring(targetNodeNameText); char shardReplicationMode = LookupShardTransferMode(shardReplicationModeOid); - if (shardReplicationMode == TRANSFER_MODE_FORCE_LOGICAL) + if (doRepair && shardReplicationMode == TRANSFER_MODE_FORCE_LOGICAL) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("the force_logical transfer mode is currently " - "unsupported"))); + errmsg("logical replication cannot be used for repairs"))); } ShardInterval *shardInterval = LoadShardInterval(shardId); @@ -282,8 +291,7 @@ CheckSpaceConstraints(MultiConnection *connection, uint64 colocationSizeInBytes) * After that, there are two different paths. First one is blocking shard move in the * sense that during shard move all modifications are paused to the shard. The second * one relies on logical replication meaning that the writes blocked only for a very - * short duration almost only when the metadata is actually being updated. This option - * is currently only available in Citus Enterprise. + * short duration almost only when the metadata is actually being updated. * * After successful move operation, shards in the source node gets deleted. 
If the move * fails at any point, this function throws an error, leaving the cluster without doing @@ -354,23 +362,52 @@ citus_move_shard_placement(PG_FUNCTION_ARGS) } char shardReplicationMode = LookupShardTransferMode(shardReplicationModeOid); - if (shardReplicationMode == TRANSFER_MODE_FORCE_LOGICAL) + if (shardReplicationMode == TRANSFER_MODE_AUTOMATIC) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("the force_logical transfer mode is currently " - "unsupported"))); + VerifyTablesHaveReplicaIdentity(colocatedTableList); } EnsureEnoughDiskSpaceForShardMove(colocatedShardList, sourceNodeName, sourceNodePort, targetNodeName, targetNodePort); - BlockWritesToShardList(colocatedShardList); + /* + * At this point of the shard moves, we don't need to block the writes to + * shards when logical replication is used. + */ + bool useLogicalReplication = CanUseLogicalReplication(distributedTableId, + shardReplicationMode); + if (!useLogicalReplication) + { + BlockWritesToShardList(colocatedShardList); + } + else + { + /* + * We prevent multiple shard moves in a transaction that use logical + * replication. That's because the first call opens a transaction block + * on the worker to drop the old shard placement and replication slot + * creation waits for pending transactions to finish, which will not + * happen ever. In other words, we prevent a self-deadlock if both + * source shard placements are on the same node. + */ + if (PlacementMovedUsingLogicalReplicationInTX) + { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("moving multiple shard placements via logical " + "replication in the same transaction is currently " + "not supported"), + errhint("If you wish to move multiple shard placements " + "in a single transaction set the shard_transfer_mode " + "to 'block_writes'."))); + } + + PlacementMovedUsingLogicalReplicationInTX = true; + } /* * CopyColocatedShardPlacement function copies given shard with its co-located * shards. */ - bool useLogicalReplication = false; CopyShardTables(colocatedShardList, sourceNodeName, sourceNodePort, targetNodeName, targetNodePort, useLogicalReplication); @@ -522,6 +559,74 @@ ErrorIfMoveUnsupportedTableType(Oid relationId) } +/* + * VerifyTablesHaveReplicaIdentity throws an error if any of the tables + * do not have a replica identity, which is required for logical replication + * to replicate UPDATE and DELETE commands. + */ +static void +VerifyTablesHaveReplicaIdentity(List *colocatedTableList) +{ + ListCell *colocatedTableCell = NULL; + + foreach(colocatedTableCell, colocatedTableList) + { + Oid colocatedTableId = lfirst_oid(colocatedTableCell); + + if (!RelationCanPublishAllModifications(colocatedTableId)) + { + char *colocatedRelationName = get_rel_name(colocatedTableId); + + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot use logical replication to transfer shards of " + "the relation %s since it doesn't have a REPLICA " + "IDENTITY or PRIMARY KEY", colocatedRelationName), + errdetail("UPDATE and DELETE commands on the shard will " + "error out during logical replication unless " + "there is a REPLICA IDENTITY or PRIMARY KEY."), + errhint("If you wish to continue without a replica " + "identity set the shard_transfer_mode to " + "'force_logical' or 'block_writes'."))); + } + } +} + + +/* + * RelationCanPublishAllModifications returns true if the relation is safe to publish + * all modification while being replicated via logical replication. 
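The check above is what the error message refers to: without a primary key or replica identity, logical replication cannot publish UPDATE and DELETE. A sketch of how a user could satisfy it before a transfer, with hypothetical table and index names:

    -- either a primary key that includes the distribution column ...
    ALTER TABLE events ADD PRIMARY KEY (tenant_id, event_id);
    -- ... or an explicit replica identity on a suitable unique index
    ALTER TABLE events REPLICA IDENTITY USING INDEX events_tenant_event_key;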
+ */ +static bool +RelationCanPublishAllModifications(Oid relationId) +{ + Relation relation = RelationIdGetRelation(relationId); + bool canPublish = false; + + if (relation == NULL) + { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not open relation with OID %u", relationId))); + } + + /* if relation has replica identity we are always good */ + if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL || + OidIsValid(RelationGetReplicaIndex(relation))) + { + canPublish = true; + } + + /* partitioned tables do not contain any data themselves, can always replicate */ + if (PartitionedTable(relationId)) + { + canPublish = true; + } + + RelationClose(relation); + + return canPublish; +} + + /* * BlockWritesToShardList blocks writes to all shards in the given shard * list. The function assumes that all the shards in the list are colocated. @@ -567,6 +672,49 @@ BlockWritesToShardList(List *shardList) } +/* + * CanUseLogicalReplication returns true if the given table can be logically replicated. + */ +static bool +CanUseLogicalReplication(Oid relationId, char shardReplicationMode) +{ + if (shardReplicationMode == TRANSFER_MODE_BLOCK_WRITES) + { + /* user explicitly chose not to use logical replication */ + return false; + } + + /* + * Logical replication doesn't support replicating foreign tables and views. + */ + if (!RegularTable(relationId)) + { + ereport(LOG, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Cannot use logical replication for " + "shard move since the relation %s is not " + "a regular relation", + get_rel_name(relationId)))); + + + return false; + } + + /* Logical replication doesn't support inherited tables */ + if (IsParentTable(relationId)) + { + ereport(LOG, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Cannot use logical replication for " + "shard move since the relation %s is an " + "inherited relation", + get_rel_name(relationId)))); + + return false; + } + + return true; +} + + /* * ErrorIfTableCannotBeReplicated function errors out if the given table is not suitable * for its shard being replicated. There are 2 cases in which shard replication is not @@ -790,7 +938,16 @@ ReplicateColocatedShardPlacement(int64 shardId, char *sourceNodeName, */ colocatedShardList = SortList(colocatedShardList, CompareShardIntervalsById); - BlockWritesToShardList(colocatedShardList); + /* + * At this point of the shard replication, we don't need to block the writes to + * shards when logical replication is used. 
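These branches back the shard transfer UDFs; when the transfer mode allows it, writes stay open while the data is copied via logical replication. A usage sketch with a placeholder shard id and node names:

    SELECT citus_move_shard_placement(102008,
                                      'worker-1', 5432,
                                      'worker-2', 5432,
                                      shard_transfer_mode := 'force_logical');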
+ */ + bool useLogicalReplication = CanUseLogicalReplication(distributedTableId, + shardReplicationMode); + if (!useLogicalReplication) + { + BlockWritesToShardList(colocatedShardList); + } ShardInterval *colocatedShard = NULL; foreach_ptr(colocatedShard, colocatedShardList) @@ -805,6 +962,11 @@ ReplicateColocatedShardPlacement(int64 shardId, char *sourceNodeName, targetNodeName, targetNodePort); } + if (shardReplicationMode == TRANSFER_MODE_AUTOMATIC) + { + VerifyTablesHaveReplicaIdentity(colocatedTableList); + } + if (!IsCitusTableType(distributedTableId, REFERENCE_TABLE)) { /* @@ -818,7 +980,6 @@ ReplicateColocatedShardPlacement(int64 shardId, char *sourceNodeName, EnsureReferenceTablesExistOnAllNodesExtended(shardReplicationMode); } - bool useLogicalReplication = false; CopyShardTables(colocatedShardList, sourceNodeName, sourceNodePort, targetNodeName, targetNodePort, useLogicalReplication); @@ -912,7 +1073,9 @@ CopyShardTables(List *shardIntervalList, char *sourceNodeName, int32 sourceNodeP if (useLogicalReplication) { - /* only supported in Citus enterprise */ + CopyShardTablesViaLogicalReplication(shardIntervalList, sourceNodeName, + sourceNodePort, targetNodeName, + targetNodePort); } else { @@ -922,6 +1085,50 @@ CopyShardTables(List *shardIntervalList, char *sourceNodeName, int32 sourceNodeP } +/* + * CopyShardTablesViaLogicalReplication copies a shard along with its co-located shards + * from a source node to target node via logical replication. + */ +static void +CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeName, + int32 sourceNodePort, char *targetNodeName, + int32 targetNodePort) +{ + MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext, + "CopyShardTablesViaLogicalReplication", + ALLOCSET_DEFAULT_SIZES); + MemoryContext oldContext = MemoryContextSwitchTo(localContext); + + /* + * Iterate through the colocated shards and create them on the + * target node. We do not create the indexes yet. + */ + ShardInterval *shardInterval = NULL; + foreach_ptr(shardInterval, shardIntervalList) + { + Oid relationId = shardInterval->relationId; + uint64 shardId = shardInterval->shardId; + List *tableRecreationCommandList = RecreateTableDDLCommandList(relationId); + tableRecreationCommandList = + WorkerApplyShardDDLCommandList(tableRecreationCommandList, shardId); + + char *tableOwner = TableOwner(shardInterval->relationId); + + SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, + tableOwner, + tableRecreationCommandList); + + MemoryContextReset(localContext); + } + + MemoryContextSwitchTo(oldContext); + + /* data copy is done seperately when logical replication is used */ + LogicallyReplicateShards(shardIntervalList, sourceNodeName, + sourceNodePort, targetNodeName, targetNodePort); +} + + /* * CopyShardTablesViaBlockWrites copies a shard along with its co-located shards * from a source node to target node via COPY command. 
While the command is in @@ -989,12 +1196,12 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, { List *shardForeignConstraintCommandList = NIL; List *referenceTableForeignConstraintList = NIL; - List *commandList = NIL; CopyShardForeignConstraintCommandListGrouped(shardInterval, &shardForeignConstraintCommandList, &referenceTableForeignConstraintList); + List *commandList = NIL; commandList = list_concat(commandList, shardForeignConstraintCommandList); commandList = list_concat(commandList, referenceTableForeignConstraintList); diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c index cb9f8ae20..0c9e7903c 100644 --- a/src/backend/distributed/operations/shard_cleaner.c +++ b/src/backend/distributed/operations/shard_cleaner.c @@ -5,7 +5,7 @@ * left around. Shards that are left around are marked as state 4 * (SHARD_STATE_TO_DELETE) in pg_dist_placement. * - * Copyright (c), Citus Data, Inc. + * Copyright (c) 2018, Citus Data, Inc. * *------------------------------------------------------------------------- */ diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index 43dd167b0..1c6866b32 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -6,6 +6,8 @@ * * Copyright (c) Citus Data, Inc. * + * $Id$ + * *------------------------------------------------------------------------- */ @@ -317,7 +319,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state) /* Check that utilization field is up to date. */ Assert(fillState->utilization == CalculateUtilization(fillState->totalCost, - fillState->capacity)); + fillState->capacity)); /* lgtm[cpp/equality-on-floats] */ /* * Check that fillState->totalCost is within 0.1% difference of @@ -698,14 +700,6 @@ ExecutePlacementUpdates(List *placementUpdateList, Oid shardReplicationModeOid, ListCell *placementUpdateCell = NULL; - char shardReplicationMode = LookupShardTransferMode(shardReplicationModeOid); - if (shardReplicationMode == TRANSFER_MODE_FORCE_LOGICAL) - { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("the force_logical transfer mode is currently " - "unsupported"))); - } - DropOrphanedShardsInSeparateTransaction(); foreach(placementUpdateCell, placementUpdateList) @@ -2341,7 +2335,7 @@ FindAndMoveShardCost(float4 utilizationLowerBound, } if (newTargetUtilization == sourceFillState->utilization && newSourceUtilization <= targetFillState->utilization - ) + ) /* lgtm[cpp/equality-on-floats] */ { /* * this can trigger when capacity of the nodes is not the diff --git a/src/backend/distributed/operations/split_shards.c b/src/backend/distributed/operations/split_shards.c index df95faded..680a3fdf3 100644 --- a/src/backend/distributed/operations/split_shards.c +++ b/src/backend/distributed/operations/split_shards.c @@ -13,6 +13,7 @@ #include "postgres.h" #include "c.h" #include "fmgr.h" +#include "libpq-fe.h" #include "catalog/pg_class.h" #include "distributed/colocation_utils.h" @@ -20,14 +21,17 @@ #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_join_order.h" +#include "distributed/multi_partitioning_utils.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/remote_commands.h" +#include "distributed/reference_table_utils.h" #include 
"distributed/resource_lock.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" +#include "distributed/version_compat.h" #include "nodes/pg_list.h" #include "storage/lock.h" #include "utils/builtins.h" @@ -42,6 +46,25 @@ PG_FUNCTION_INFO_V1(isolate_tenant_to_new_shard); PG_FUNCTION_INFO_V1(worker_hash); +/* local function forward declarations */ +static uint64 SplitShardByValue(ShardInterval *sourceShard, Datum distributionValueDatum); +static void ErrorIfCannotSplitShard(ShardInterval *sourceShard); +static void CreateSplitOffShards(ShardInterval *sourceShard, int hashedValue, + List **splitOffShardList, int *isolatedShardId); +static List * ShardTemplateList(ShardInterval *sourceShard, int hashedValue, + int *isolatedShardIndex); +static ShardInterval * CreateSplitOffShardFromTemplate(ShardInterval *shardTemplate, + Oid relationId); +static List * SplitOffCommandList(ShardInterval *sourceShard, + ShardInterval *splitOffShard); +static void ExecuteCommandListOnPlacements(List *commandList, List *placementList); +static void InsertSplitOffShardMetadata(List *splitOffShardList, + List *sourcePlacementList); +static void CreateForeignConstraints(List *splitOffShardList, List *sourcePlacementList); +static void ExecuteCommandListOnWorker(char *nodeName, int nodePort, List *commandList); +static void DropShardList(List *shardIntervalList); + + /* * isolate_tenant_to_new_shard isolates a tenant to its own shard by spliting * the current matching shard. @@ -49,9 +72,100 @@ PG_FUNCTION_INFO_V1(worker_hash); Datum isolate_tenant_to_new_shard(PG_FUNCTION_ARGS) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("isolate_tenant_to_new_shard() is only supported on " - "Citus Enterprise"))); + CheckCitusVersion(ERROR); + EnsureCoordinator(); + + Oid relationId = PG_GETARG_OID(0); + Datum inputDatum = PG_GETARG_DATUM(1); + text *cascadeOptionText = PG_GETARG_TEXT_P(2); + ListCell *colocatedTableCell = NULL; + + EnsureTableOwner(relationId); + + CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId); + + char partitionMethod = cacheEntry->partitionMethod; + if (partitionMethod != DISTRIBUTE_BY_HASH) + { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot isolate tenant because tenant isolation " + "is only support for hash distributed tables"))); + } + + if (PartitionedTable(relationId)) + { + char *sourceRelationName = get_rel_name(relationId); + + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot isolate shard placement of '%s', because it " + "is a partitioned table", sourceRelationName), + errdetail("Citus does not support isolating placements of " + "partitioned tables."))); + } + + List *colocatedTableList = ColocatedTableList(relationId); + int colocatedTableCount = list_length(colocatedTableList); + + foreach(colocatedTableCell, colocatedTableList) + { + Oid colocatedTableId = lfirst_oid(colocatedTableCell); + + /* + * At the moment, Citus does not support copying a shard if that shard's + * relation is in a colocation group with a partitioned table or partition. 
+ */ + if (colocatedTableId != relationId && + PartitionedTable(colocatedTableId)) + { + char *sourceRelationName = get_rel_name(relationId); + char *colocatedRelationName = get_rel_name(colocatedTableId); + + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot isolate shard placement of '%s', because it " + "is a partitioned table", colocatedRelationName), + errdetail("In colocation group of '%s', a partitioned " + "relation exists: '%s'. Citus does not support " + "isolating placements of partitioned tables.", + sourceRelationName, colocatedRelationName))); + } + } + + Oid inputDataType = get_fn_expr_argtype(fcinfo->flinfo, 1); + char *tenantIdString = DatumToString(inputDatum, inputDataType); + + char *cascadeOptionString = text_to_cstring(cascadeOptionText); + if (pg_strncasecmp(cascadeOptionString, "CASCADE", NAMEDATALEN) != 0 && + colocatedTableCount > 1) + { + char *relationName = get_rel_name(relationId); + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot isolate tenant because \"%s\" has colocated " + "tables", relationName), + errhint("Use CASCADE option to isolate tenants for the " + "colocated tables too. Example usage: " + "isolate_tenant_to_new_shard('%s', '%s', 'CASCADE')", + relationName, tenantIdString))); + } + + EnsureReferenceTablesExistOnAllNodes(); + + Var *distributionColumn = DistPartitionKey(relationId); + + /* earlier we checked that the table was hash partitioned, so there should be a distribution column */ + Assert(distributionColumn != NULL); + + Oid distributionColumnType = distributionColumn->vartype; + + Datum tenantIdDatum = StringToDatum(tenantIdString, distributionColumnType); + ShardInterval *sourceShard = FindShardInterval(tenantIdDatum, cacheEntry); + if (sourceShard == NULL) + { + ereport(ERROR, (errmsg("tenant does not have a shard"))); + } + + uint64 isolatedShardId = SplitShardByValue(sourceShard, tenantIdDatum); + + PG_RETURN_INT64(isolatedShardId); } @@ -86,3 +200,623 @@ worker_hash(PG_FUNCTION_ARGS) PG_RETURN_INT32(hashedValueDatum); } + + +/* + * SplitShardByValue gets a shard and a value which is in the range of + * distribution column of this shard. Then, it splits this shard and all its + * colocated shards into three; the lower range, the given value itself, and + * the upper range. Finally, it returns the id of the shard which is created + * for the given value. + */ +static uint64 +SplitShardByValue(ShardInterval *sourceShard, Datum distributionValueDatum) +{ + Oid relationId = sourceShard->relationId; + int isolatedShardId = 0; + List *splitOffShardList = NIL; + + if (XactModificationLevel > XACT_MODIFICATION_NONE) + { + ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("cannot isolate a tenant after other modifications " + "in the same transaction"))); + } + + /* sort the tables to avoid deadlocks */ + List *colocatedTableList = ColocatedTableList(relationId); + colocatedTableList = SortList(colocatedTableList, CompareOids); + + Oid colocatedTableId = InvalidOid; + foreach_oid(colocatedTableId, colocatedTableList) + { + /* + * Block concurrent DDL / TRUNCATE commands on the relation. Similarly, + * block concurrent citus_move_shard_placement()/isolate_tenant_to_new_shard() + * on any shard of the same relation. This is OK for now since + * we're executing shard moves/splits sequentially anyway. 
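As the hint above suggests, the CASCADE option extends tenant isolation to all colocated tables. A usage sketch for the isolate_tenant_to_new_shard() UDF added in this file, with a made-up table and tenant value:

    SELECT isolate_tenant_to_new_shard('public.orders', 'acme', 'CASCADE');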
+ */ + LockRelationOid(colocatedTableId, ShareUpdateExclusiveLock); + } + + /* get colocated shard list */ + List *colocatedShardList = ColocatedShardIntervalList(sourceShard); + + /* get locks */ + BlockWritesToShardList(colocatedShardList); + + ErrorIfCannotSplitShard(sourceShard); + + /* get hash function name */ + CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId); + FmgrInfo *hashFunction = cacheEntry->hashFunction; + + /* get hashed value of the distribution value */ + Datum hashedValueDatum = FunctionCall1(hashFunction, distributionValueDatum); + int hashedValue = DatumGetInt32(hashedValueDatum); + + /* create a list of nodes with source shard placements */ + List *sourcePlacementList = ActiveShardPlacementList(sourceShard->shardId); + + /* create new shards in a separate transaction and commit them */ + CreateSplitOffShards(sourceShard, hashedValue, &splitOffShardList, &isolatedShardId); + + /* + * Drop old shards and delete related metadata. Have to do that before + * creating the new shard metadata, because there are cross-checks + * preventing inconsistent metadata (like overlapping shards). + */ + DropShardList(colocatedShardList); + + /* insert new metadata */ + InsertSplitOffShardMetadata(splitOffShardList, sourcePlacementList); + + /* + * Create foreign keys, if any exist, after the metadata changes happening in + * DropShardList() and InsertSplitOffShardMetadata() because the foreign + * key creation depends on the new metadata. + */ + CreateForeignConstraints(splitOffShardList, sourcePlacementList); + + CitusInvalidateRelcacheByRelid(DistShardRelationId()); + + return isolatedShardId; +} + + +/* + * CreateForeignConstraints creates the foreign constraints on the newly + * created shards via the tenant isolation. + * + * The function treats foreign keys to reference tables and foreign keys to + * co-located distributed tables differently. The former one needs to be + * executed over a single connection to prevent self-deadlocks. The latter + * one can be executed in parallel if there are multiple replicas. + */ +static void +CreateForeignConstraints(List *splitOffShardList, List *sourcePlacementList) +{ + ListCell *splitOffShardCell = NULL; + + List *colocatedShardForeignConstraintCommandList = NIL; + List *referenceTableForeignConstraintList = NIL; + + foreach(splitOffShardCell, splitOffShardList) + { + ShardInterval *splitOffShard = (ShardInterval *) lfirst(splitOffShardCell); + + List *currentColocatedForeignKeyList = NIL; + List *currentReferenceForeignKeyList = NIL; + + CopyShardForeignConstraintCommandListGrouped(splitOffShard, + &currentColocatedForeignKeyList, + &currentReferenceForeignKeyList); + + colocatedShardForeignConstraintCommandList = + list_concat(colocatedShardForeignConstraintCommandList, + currentColocatedForeignKeyList); + referenceTableForeignConstraintList = + list_concat(referenceTableForeignConstraintList, + currentReferenceForeignKeyList); + } + + /* + * We can use parallel connections while creating the co-located foreign keys. + * However, foreign keys to reference tables need to be created using a single + * connection per worker to prevent self-deadlocks.
+ */ + if (colocatedShardForeignConstraintCommandList != NIL) + { + ExecuteCommandListOnPlacements(colocatedShardForeignConstraintCommandList, + sourcePlacementList); + } + + if (referenceTableForeignConstraintList != NIL) + { + ListCell *shardPlacementCell = NULL; + foreach(shardPlacementCell, sourcePlacementList) + { + ShardPlacement *shardPlacement = + (ShardPlacement *) lfirst(shardPlacementCell); + + char *nodeName = shardPlacement->nodeName; + int32 nodePort = shardPlacement->nodePort; + + /* + * We're using the connections that we've used for dropping the + * source placements within the same coordinated transaction. + */ + ExecuteCommandListOnWorker(nodeName, nodePort, + referenceTableForeignConstraintList); + } + } +} + + +/* + * ExecuteCommandListOnWorker executes the command on the given node within + * the coordinated 2PC. + */ +static void +ExecuteCommandListOnWorker(char *nodeName, int nodePort, List *commandList) +{ + ListCell *commandCell = NULL; + + foreach(commandCell, commandList) + { + char *command = (char *) lfirst(commandCell); + + SendCommandToWorker(nodeName, nodePort, command); + } +} + + +/* + * ErrorIfCannotSplitShard checks relation kind and invalid shards. It errors + * out if we are not able to split the given shard. + */ +static void +ErrorIfCannotSplitShard(ShardInterval *sourceShard) +{ + Oid relationId = sourceShard->relationId; + ListCell *colocatedTableCell = NULL; + ListCell *colocatedShardCell = NULL; + + /* checks for table ownership and foreign tables */ + List *colocatedTableList = ColocatedTableList(relationId); + foreach(colocatedTableCell, colocatedTableList) + { + Oid colocatedTableId = lfirst_oid(colocatedTableCell); + + /* check that user has owner rights in all co-located tables */ + EnsureTableOwner(colocatedTableId); + + char relationKind = get_rel_relkind(colocatedTableId); + if (relationKind == RELKIND_FOREIGN_TABLE) + { + char *relationName = get_rel_name(colocatedTableId); + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot isolate tenant because \"%s\" is a " + "foreign table", relationName), + errdetail("Isolating shards backed by foreign tables " + "is not supported."))); + } + } + + /* check shards with inactive placements */ + List *colocatedShardList = ColocatedShardIntervalList(sourceShard); + foreach(colocatedShardCell, colocatedShardList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(colocatedShardCell); + uint64 shardId = shardInterval->shardId; + ListCell *shardPlacementCell = NULL; + + List *shardPlacementList = ShardPlacementListWithoutOrphanedPlacements(shardId); + foreach(shardPlacementCell, shardPlacementList) + { + ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell); + if (placement->shardState != SHARD_STATE_ACTIVE) + { + char *relationName = get_rel_name(shardInterval->relationId); + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot isolate tenant because relation " + "\"%s\" has an inactive shard placement " + "for the shard %lu", relationName, shardId), + errhint("Use master_copy_shard_placement UDF to " + "repair the inactive shard placement."))); + } + } + } +} + + +/* + * CreateSplitOffShards gets a shard and a hashed value to pick the split point. + * First, it creates templates to create new shards. Then, for every colocated + * shard, it creates new split shards data and physically creates them on the + * worker nodes. 
This function returns newly created split off shards and the + * matching shard id for the source shard and hashed value via passed parameters. + */ +static void +CreateSplitOffShards(ShardInterval *sourceShard, int hashedValue, + List **splitOffShardList, int *isolatedShardId) +{ + List *nodeCommandList = NIL; + ListCell *sourceColocatedShardCell = NULL; + int isolatedShardIndex = 0; + + List *sourceColocatedShardList = ColocatedShardIntervalList(sourceShard); + List *shardTemplateList = ShardTemplateList(sourceShard, hashedValue, + &isolatedShardIndex); + + foreach(sourceColocatedShardCell, sourceColocatedShardList) + { + ShardInterval *sourceColocatedShard = + (ShardInterval *) lfirst(sourceColocatedShardCell); + Oid relationId = sourceColocatedShard->relationId; + ListCell *templateShardCell = NULL; + int currentShardIndex = 0; + + foreach(templateShardCell, shardTemplateList) + { + ShardInterval *templateShard = (ShardInterval *) lfirst(templateShardCell); + + ShardInterval *splitOffShard = CreateSplitOffShardFromTemplate(templateShard, + relationId); + List *splitOffCommandList = SplitOffCommandList(sourceColocatedShard, + splitOffShard); + nodeCommandList = list_concat(nodeCommandList, splitOffCommandList); + + /* check if this is the isolated shard for the given table */ + if (splitOffShard->relationId == sourceShard->relationId && + currentShardIndex == isolatedShardIndex) + { + (*isolatedShardId) = splitOffShard->shardId; + } + + /* add newly created split off shards to list */ + (*splitOffShardList) = lappend(*splitOffShardList, splitOffShard); + + currentShardIndex++; + } + } + + List *sourcePlacementList = ActiveShardPlacementList(sourceShard->shardId); + ExecuteCommandListOnPlacements(nodeCommandList, sourcePlacementList); +} + + +/* + * ShardTemplateList creates shard templates with new min and max values from + * the given shard and the split point which is the given hashed value. + * It returns the list of shard templates, and passes the isolated shard index + * via isolatedShardIndex parameter. 
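A small worked example of the resulting templates, assuming the tenant value hashes to 555 and the source shard covers the hash range [0, 1073741823]:

    SELECT worker_hash('acme'::text);  -- assume this returns 555
    -- the templates then cover [0, 554], [555, 555] (the isolated shard), [556, 1073741823]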
+ */ +static List * +ShardTemplateList(ShardInterval *sourceShard, int hashedValue, int *isolatedShardIndex) +{ + List *shardTemplateList = NIL; + + /* get min and max values of the source shard */ + int32 shardMinValue = DatumGetInt32(sourceShard->minValue); + int32 shardMaxValue = DatumGetInt32(sourceShard->maxValue); + + (*isolatedShardIndex) = 0; + + /* add a shard template for lower range if exists */ + if (shardMinValue < hashedValue) + { + ShardInterval *lowerRangeShard = CopyShardInterval(sourceShard); + + lowerRangeShard->minValue = Int32GetDatum(shardMinValue); + lowerRangeShard->maxValue = Int32GetDatum(hashedValue - 1); + + shardTemplateList = lappend(shardTemplateList, lowerRangeShard); + (*isolatedShardIndex) = 1; + } + + /* add shard template for the isolated value */ + ShardInterval *isolatedShard = CopyShardInterval(sourceShard); + + isolatedShard->minValue = Int32GetDatum(hashedValue); + isolatedShard->maxValue = Int32GetDatum(hashedValue); + + shardTemplateList = lappend(shardTemplateList, isolatedShard); + + /* add a shard template for upper range if exists */ + if (shardMaxValue > hashedValue) + { + ShardInterval *upperRangeShard = CopyShardInterval(sourceShard); + + upperRangeShard->minValue = Int32GetDatum(hashedValue + 1); + upperRangeShard->maxValue = Int32GetDatum(shardMaxValue); + + shardTemplateList = lappend(shardTemplateList, upperRangeShard); + } + + if (list_length(shardTemplateList) == 1) + { + char *tableName = get_rel_name(sourceShard->relationId); + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("table \"%s\" has already been isolated for the " + "given value", tableName))); + } + + return shardTemplateList; +} + + +/* + * CreateSplitOffShardFromTemplate creates a new split off shard from the given + * shard template by creating a new shard id and setting the relation id. + */ +static ShardInterval * +CreateSplitOffShardFromTemplate(ShardInterval *shardTemplate, Oid relationId) +{ + ShardInterval *splitOffShard = CopyShardInterval(shardTemplate); + + /* set new shard id and the relation id */ + splitOffShard->shardId = GetNextShardId(); + splitOffShard->relationId = relationId; + + return splitOffShard; +} + + +/* + * SplitOffCommandList creates a command list to run on worker nodes to create + * new split off shard from the source shard. 
+ */ +static List * +SplitOffCommandList(ShardInterval *sourceShard, ShardInterval *splitOffShard) +{ + List *splitOffCommandList = NIL; + bool includeSequenceDefaults = false; + + Oid relationId = sourceShard->relationId; + Var *partitionKey = DistPartitionKey(relationId); + Assert(partitionKey != NULL); + const char *partitionColumnName = get_attname(relationId, + partitionKey->varattno, false); + const char *quotedPartitionColumnName = quote_identifier(partitionColumnName); + + char *splitOffShardName = ConstructQualifiedShardName(splitOffShard); + char *sourceShardName = ConstructQualifiedShardName(sourceShard); + + int32 shardMinValue = DatumGetInt32(splitOffShard->minValue); + int32 shardMaxValue = DatumGetInt32(splitOffShard->maxValue); + + List *tableCreationCommandList = + GetPreLoadTableCreationCommands(relationId, includeSequenceDefaults, NULL); + tableCreationCommandList = WorkerApplyShardDDLCommandList(tableCreationCommandList, + splitOffShard->shardId); + + splitOffCommandList = list_concat(splitOffCommandList, tableCreationCommandList); + + StringInfo splitOffShardCommand = makeStringInfo(); + appendStringInfo(splitOffShardCommand, + "INSERT INTO %s SELECT * FROM %s WHERE " + "worker_hash(%s) >= %d AND worker_hash(%s) <= %d", + splitOffShardName, sourceShardName, quotedPartitionColumnName, + shardMinValue, quotedPartitionColumnName, shardMaxValue); + + splitOffCommandList = lappend(splitOffCommandList, splitOffShardCommand->data); + + List *indexCommandList = GetPostLoadTableCreationCommands(relationId, true, true); + indexCommandList = WorkerApplyShardDDLCommandList(indexCommandList, + splitOffShard->shardId); + + splitOffCommandList = list_concat(splitOffCommandList, indexCommandList); + + return splitOffCommandList; +} + + +/* + * ExecuteCommandListOnPlacements runs the given command list on the nodes of + * the given shard placement list. First, it creates connections. Then it sends + * commands one by one. For every command, first it send the command to all + * connections and then checks the results. This helps to run long running + * commands in parallel. Finally, it sends commit messages to all connections + * and close them. 
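The same dispatch pattern can be sketched with plain libpq, independent of Citus' connection layer: send the statement to every node first, and only then wait for the results, so that slow statements overlap across placements. This is only an illustration of the pattern (links against `-lpq`; error handling is reduced to printing a message):

```c
#include <stdio.h>
#include <libpq-fe.h>

/* run one command on every open connection, overlapping the work across nodes */
static void
RunCommandEverywhere(PGconn **connections, int connectionCount, const char *command)
{
	/* phase 1: dispatch the command without waiting for any result */
	for (int i = 0; i < connectionCount; i++)
	{
		if (!PQsendQuery(connections[i], command))
		{
			fprintf(stderr, "dispatch failed: %s", PQerrorMessage(connections[i]));
		}
	}

	/* phase 2: collect results, one connection at a time */
	for (int i = 0; i < connectionCount; i++)
	{
		PGresult *result = NULL;
		while ((result = PQgetResult(connections[i])) != NULL)
		{
			ExecStatusType status = PQresultStatus(result);
			if (status != PGRES_COMMAND_OK && status != PGRES_TUPLES_OK)
			{
				fprintf(stderr, "command failed: %s", PQresultErrorMessage(result));
			}
			PQclear(result);
		}
	}
}
```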
+ */ +static void +ExecuteCommandListOnPlacements(List *commandList, List *placementList) +{ + List *workerConnectionList = NIL; + ListCell *workerConnectionCell = NULL; + ListCell *shardPlacementCell = NULL; + ListCell *commandCell = NULL; + + /* create connections and start transactions */ + foreach(shardPlacementCell, placementList) + { + ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell); + char *nodeName = shardPlacement->nodeName; + int32 nodePort = shardPlacement->nodePort; + + int connectionFlags = FORCE_NEW_CONNECTION; + char *currentUser = CurrentUserName(); + + /* create a new connection */ + MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags, + nodeName, + nodePort, + currentUser, + NULL); + + /* mark connection as critical ans start transaction */ + MarkRemoteTransactionCritical(workerConnection); + RemoteTransactionBegin(workerConnection); + + /* add connection to the list */ + workerConnectionList = lappend(workerConnectionList, workerConnection); + } + + /* send and check results for every command one by one */ + foreach(commandCell, commandList) + { + char *command = lfirst(commandCell); + + /* first only send the command */ + foreach(workerConnectionCell, workerConnectionList) + { + MultiConnection *workerConnection = + (MultiConnection *) lfirst(workerConnectionCell); + + int querySent = SendRemoteCommand(workerConnection, command); + if (querySent == 0) + { + ReportConnectionError(workerConnection, ERROR); + } + } + + /* then check the result separately to run long running commands in parallel */ + foreach(workerConnectionCell, workerConnectionList) + { + MultiConnection *workerConnection = + (MultiConnection *) lfirst(workerConnectionCell); + bool raiseInterrupts = true; + + PGresult *result = GetRemoteCommandResult(workerConnection, raiseInterrupts); + if (!IsResponseOK(result)) + { + ReportResultError(workerConnection, result, ERROR); + } + + PQclear(result); + ForgetResults(workerConnection); + } + } + + /* finally commit each transaction and close connections */ + foreach(workerConnectionCell, workerConnectionList) + { + MultiConnection *workerConnection = + (MultiConnection *) lfirst(workerConnectionCell); + + RemoteTransactionCommit(workerConnection); + CloseConnection(workerConnection); + } +} + + +/* + * InsertSplitOffShardMetadata inserts new shard and shard placement data into + * catolog tables both the coordinator and mx nodes. 
+ */ +static void +InsertSplitOffShardMetadata(List *splitOffShardList, List *sourcePlacementList) +{ + List *syncedShardList = NIL; + ListCell *shardCell = NULL; + ListCell *commandCell = NULL; + + /* add new metadata */ + foreach(shardCell, splitOffShardList) + { + ShardInterval *splitOffShard = (ShardInterval *) lfirst(shardCell); + Oid relationId = splitOffShard->relationId; + uint64 shardId = splitOffShard->shardId; + char storageType = splitOffShard->storageType; + ListCell *shardPlacementCell = NULL; + + int32 shardMinValue = DatumGetInt32(splitOffShard->minValue); + int32 shardMaxValue = DatumGetInt32(splitOffShard->maxValue); + text *shardMinValueText = IntegerToText(shardMinValue); + text *shardMaxValueText = IntegerToText(shardMaxValue); + + InsertShardRow(relationId, shardId, storageType, shardMinValueText, + shardMaxValueText); + + /* split off shard placement metadata */ + foreach(shardPlacementCell, sourcePlacementList) + { + ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell); + uint64 shardSize = 0; + + InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID, SHARD_STATE_ACTIVE, + shardSize, placement->groupId); + } + + if (ShouldSyncTableMetadata(relationId)) + { + syncedShardList = lappend(syncedShardList, splitOffShard); + } + } + + /* send commands to synced nodes one by one */ + List *splitOffShardMetadataCommandList = ShardListInsertCommand(syncedShardList); + foreach(commandCell, splitOffShardMetadataCommandList) + { + char *command = (char *) lfirst(commandCell); + SendCommandToWorkersWithMetadata(command); + } +} + + +/* + * DropShardList drops shards and their metadata from both the coordinator and + * mx nodes. + */ +static void +DropShardList(List *shardIntervalList) +{ + ListCell *shardIntervalCell = NULL; + + foreach(shardIntervalCell, shardIntervalList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); + ListCell *shardPlacementCell = NULL; + Oid relationId = shardInterval->relationId; + uint64 oldShardId = shardInterval->shardId; + + /* delete metadata from synced nodes */ + if (ShouldSyncTableMetadata(relationId)) + { + ListCell *commandCell = NULL; + + /* send the commands one by one */ + List *shardMetadataDeleteCommandList = ShardDeleteCommandList(shardInterval); + foreach(commandCell, shardMetadataDeleteCommandList) + { + char *command = (char *) lfirst(commandCell); + SendCommandToWorkersWithMetadata(command); + } + } + + /* delete shard placements and drop shards */ + List *shardPlacementList = ActiveShardPlacementList(oldShardId); + foreach(shardPlacementCell, shardPlacementList) + { + ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell); + char *workerName = placement->nodeName; + uint32 workerPort = placement->nodePort; + StringInfo dropQuery = makeStringInfo(); + + DeleteShardPlacementRow(placement->placementId); + + /* get shard name */ + char *qualifiedShardName = ConstructQualifiedShardName(shardInterval); + + char storageType = shardInterval->storageType; + if (storageType == SHARD_STORAGE_TABLE) + { + appendStringInfo(dropQuery, DROP_REGULAR_TABLE_COMMAND, + qualifiedShardName); + } + else if (storageType == SHARD_STORAGE_FOREIGN) + { + appendStringInfo(dropQuery, DROP_FOREIGN_TABLE_COMMAND, + qualifiedShardName); + } + + /* drop old shard */ + SendCommandToWorker(workerName, workerPort, dropQuery->data); + } + + /* delete shard row */ + DeleteShardRow(oldShardId); + } +} diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c 
b/src/backend/distributed/planner/multi_logical_optimizer.c index cbd9abc43..57582ec97 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -40,6 +40,7 @@ #include "distributed/multi_physical_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/query_pushdown_planning.h" +#include "distributed/string_utils.h" #include "distributed/tdigest_extension.h" #include "distributed/worker_protocol.h" #include "distributed/version_compat.h" @@ -58,9 +59,6 @@ #include "utils/rel.h" #include "utils/syscache.h" -#define StartsWith(msg, prefix) \ - (strncmp(msg, prefix, strlen(prefix)) == 0) - /* Config variable managed via guc.c */ int LimitClauseRowFetchCount = -1; /* number of rows to fetch from each task */ double CountDistinctErrorRate = 0.0; /* precision of count(distinct) approximate */ @@ -3418,7 +3416,7 @@ GetAggregateType(Aggref *aggregateExpression) * perform these checks if there is some chance it will actually result in a positive * hit. */ - if (StartsWith(aggregateProcName, "tdigest")) + if (StringStartsWith(aggregateProcName, "tdigest")) { if (aggFunctionId == TDigestExtensionAggTDigest1()) { diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c new file mode 100644 index 000000000..2a63ad0bd --- /dev/null +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -0,0 +1,2063 @@ +/*------------------------------------------------------------------------- + * + * multi_logical_replication.c + * + * This file contains functions to use logical replication on the distributed + * tables for moving/replicating shards. + * + * Copyright (c) 2017, Citus Data, Inc. 
+ * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "miscadmin.h" +#include "fmgr.h" +#include "pgstat.h" +#include "libpq-fe.h" + +#include "distributed/pg_version_constants.h" + +#if PG_VERSION_NUM >= PG_VERSION_12 +#include "access/genam.h" +#endif + +#if PG_VERSION_NUM >= PG_VERSION_13 +#include "postmaster/interrupt.h" +#endif + +#include "access/htup_details.h" +#include "access/sysattr.h" +#include "access/xact.h" +#include "catalog/pg_subscription_rel.h" +#include "commands/dbcommands.h" +#include "catalog/namespace.h" +#include "catalog/pg_constraint.h" +#include "distributed/adaptive_executor.h" +#include "distributed/colocation_utils.h" +#include "distributed/connection_management.h" +#include "distributed/listutils.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" +#include "distributed/multi_join_order.h" +#include "distributed/multi_logical_replication.h" +#include "distributed/multi_partitioning_utils.h" +#include "distributed/distributed_planner.h" +#include "distributed/remote_commands.h" +#include "distributed/resource_lock.h" +#include "distributed/shard_rebalancer.h" +#include "distributed/version_compat.h" +#include "nodes/bitmapset.h" +#include "parser/scansup.h" +#include "storage/ipc.h" +#include "storage/latch.h" +#include "storage/lock.h" +#include "utils/guc.h" +#include "utils/builtins.h" +#include "utils/fmgrprotos.h" +#include "utils/fmgroids.h" +#include "utils/formatting.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/pg_lsn.h" +#include "utils/rel.h" +#include "utils/ruleutils.h" +#include "utils/syscache.h" + + +#define REPLICATION_SLOT_CATALOG_TABLE_NAME "pg_replication_slots" +#define CURRENT_LOG_POSITION_COMMAND "SELECT pg_current_wal_lsn()" + +/* decimal representation of Adler-16 hash value of citus_shard_move_publication */ +#define SHARD_MOVE_ADVISORY_LOCK_FIRST_KEY 44000 + +/* decimal representation of Adler-16 hash value of citus_shard_move_subscription */ +#define SHARD_MOVE_ADVISORY_LOCK_SECOND_KEY 55152 + + +/* GUC variable, defaults to 2 hours */ +int LogicalReplicationTimeout = 2 * 60 * 60 * 1000; + + +/* see the comment in master_move_shard_placement */ +bool PlacementMovedUsingLogicalReplicationInTX = false; + + +/* report in every 10 seconds */ +static int logicalReplicationProgressReportTimeout = 10 * 1000; + + +static void CreateForeignConstraintsToReferenceTable(List *shardList, + MultiConnection *targetConnection); +static List * PrepareReplicationSubscriptionList(List *shardList); +static Bitmapset * TableOwnerIds(List *shardList); +static void CreateReplicaIdentity(List *shardList, char *nodeName, int32 + nodePort); +static List * GetReplicaIdentityCommandListForShard(Oid relationId, uint64 shardId); +static List * GetIndexCommandListForShardBackingReplicaIdentity(Oid relationId, + uint64 shardId); +static void CreatePostLogicalReplicationDataLoadObjects(List *shardList, + char *targetNodeName, + int32 targetNodePort); +static void ExecuteCreateIndexCommands(List *shardList, char *targetNodeName, + int targetNodePort); +static void ExecuteCreateConstraintsBackedByIndexCommands(List *shardList, + char *targetNodeName, + int targetNodePort); +static List * ConvertNonExistingPlacementDDLCommandsToTasks(List *shardCommandList, + uint64 shardId, + char *targetNodeName, + int targetNodePort); +static void ExecuteClusterOnCommands(List *shardList, char 
*targetNodeName, + int targetNodePort); +static void ExecuteCreateIndexStatisticsCommands(List *shardList, char *targetNodeName, + int targetNodePort); +static void ExecuteRemainingPostLoadTableCommands(List *shardList, char *targetNodeName, + int targetNodePort); +static void CreatePartitioningHierarchy(List *shardList, char *targetNodeName, + int targetNodePort); +static void CreateColocatedForeignKeys(List *shardList, char *targetNodeName, + int targetNodePort); +static void ConflictOnlyWithIsolationTesting(void); +static void DropShardMovePublications(MultiConnection *connection, + Bitmapset *tableOwnerIds); +static void DropShardMoveSubscriptions(MultiConnection *connection, + Bitmapset *tableOwnerIds); +static void CreateShardMovePublications(MultiConnection *connection, List *shardList, + Bitmapset *tableOwnerIds); +static void CreateShardMoveSubscriptions(MultiConnection *connection, + char *sourceNodeName, + int sourceNodePort, char *userName, + char *databaseName, + Bitmapset *tableOwnerIds); +static char * escape_param_str(const char *str); +static XLogRecPtr GetRemoteLogPosition(MultiConnection *connection); +static XLogRecPtr GetRemoteLSN(MultiConnection *connection, char *command); +static void WaitForRelationSubscriptionsBecomeReady(MultiConnection *targetConnection, + Bitmapset *tableOwnerIds); +static uint64 TotalRelationSizeForSubscription(MultiConnection *connection, + char *command); +static bool RelationSubscriptionsAreReady(MultiConnection *targetConnection, + Bitmapset *tableOwnerIds); +static void WaitForShardMoveSubscription(MultiConnection *targetConnection, + XLogRecPtr sourcePosition, + Bitmapset *tableOwnerIds); +static void WaitForMiliseconds(long timeout); +static XLogRecPtr GetSubscriptionPosition(MultiConnection *connection, + Bitmapset *tableOwnerIds); +static char * ShardMovePublicationName(Oid ownerId); +static char * ShardMoveSubscriptionName(Oid ownerId); +static void AcquireLogicalReplicationLock(void); +static void DropAllShardMoveLeftovers(void); +static void DropAllShardMoveSubscriptions(MultiConnection *connection); +static void DropAllShardMoveReplicationSlots(MultiConnection *connection); +static void DropAllShardMovePublications(MultiConnection *connection); +static void DropAllShardMoveUsers(MultiConnection *connection); +static char * ShardMoveSubscriptionNamesValueList(Bitmapset *tableOwnerIds); +static void DropShardMoveSubscription(MultiConnection *connection, + char *subscriptionName); +static void DropShardMoveReplicationSlot(MultiConnection *connection, + char *publicationName); +static void DropShardMovePublication(MultiConnection *connection, char *publicationName); +static void DropShardMoveUser(MultiConnection *connection, char *username); + +/* + * LogicallyReplicateShards replicates a list of shards from one node to another + * using logical replication. Once replication is reasonably caught up, writes + * are blocked and then the publication and subscription are dropped. + * + * The caller of the function should ensure that logical replication is applicable + * for the given shards, source and target nodes. Also, the caller is responsible + * for ensuring that the input shard list consists of co-located distributed tables + * or a single shard. 
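Stripped of Citus' bookkeeping, the mechanism this function orchestrates is ordinary Postgres logical replication. A hedged sketch of the bare lifecycle using libpq (table names and connection strings are placeholders, not what Citus generates):

```c
#include <stdio.h>
#include <libpq-fe.h>

static void
ExecOrComplain(PGconn *connection, const char *sql)
{
	PGresult *result = PQexec(connection, sql);
	if (PQresultStatus(result) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "%s", PQresultErrorMessage(result));
	}
	PQclear(result);
}

int
main(void)
{
	/* placeholder connection strings for the source and target workers */
	PGconn *source = PQconnectdb("host=worker1 dbname=postgres");
	PGconn *target = PQconnectdb("host=worker2 dbname=postgres");

	/* publish the shard's table on the source node */
	ExecOrComplain(source, "CREATE PUBLICATION move_pub FOR TABLE orders_102008");

	/*
	 * Subscribe from the target; the existing table data is copied first,
	 * then changes are streamed. CREATE/DROP SUBSCRIPTION cannot run inside
	 * a transaction block, which is why the function above claims its
	 * connections exclusively.
	 */
	ExecOrComplain(target,
				   "CREATE SUBSCRIPTION move_sub "
				   "CONNECTION 'host=worker1 dbname=postgres' "
				   "PUBLICATION move_pub");

	/* ... wait for catch-up, block writes, wait again, then clean up ... */

	ExecOrComplain(target, "DROP SUBSCRIPTION move_sub");
	ExecOrComplain(source, "DROP PUBLICATION move_pub");

	PQfinish(source);
	PQfinish(target);
	return 0;
}
```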
+ */ +void +LogicallyReplicateShards(List *shardList, char *sourceNodeName, int sourceNodePort, + char *targetNodeName, int targetNodePort) +{ + AcquireLogicalReplicationLock(); + char *superUser = CitusExtensionOwnerName(); + char *databaseName = get_database_name(MyDatabaseId); + int connectionFlags = FORCE_NEW_CONNECTION; + List *replicationSubscriptionList = PrepareReplicationSubscriptionList(shardList); + + /* no shards to move */ + if (list_length(replicationSubscriptionList) == 0) + { + return; + } + + Bitmapset *tableOwnerIds = TableOwnerIds(replicationSubscriptionList); + + DropAllShardMoveLeftovers(); + + MultiConnection *sourceConnection = + GetNodeUserDatabaseConnection(connectionFlags, sourceNodeName, sourceNodePort, + superUser, databaseName); + MultiConnection *targetConnection = + GetNodeUserDatabaseConnection(connectionFlags, targetNodeName, targetNodePort, + superUser, databaseName); + + /* + * Operations on publications and subscriptions cannot run in a transaction + * block. Claim the connections exclusively to ensure they do not get used + * for metadata syncing, which does open a transaction block. + */ + ClaimConnectionExclusively(sourceConnection); + ClaimConnectionExclusively(targetConnection); + + PG_TRY(); + { + /* + * We have to create the primary key (or any other replica identity) + * before the initial COPY is done. This is necessary because as soon + * as the COPY command finishes, the update/delete operations that + * are queued will be replicated. And, if the replica identity does not + * exist on the target, the replication would fail. + */ + CreateReplicaIdentity(shardList, targetNodeName, targetNodePort); + + /* set up the publication on the source and subscription on the target */ + CreateShardMovePublications(sourceConnection, replicationSubscriptionList, + tableOwnerIds); + CreateShardMoveSubscriptions(targetConnection, sourceNodeName, sourceNodePort, + superUser, databaseName, tableOwnerIds); + + /* only useful for isolation testing, see the function comment for the details */ + ConflictOnlyWithIsolationTesting(); + + /* + * Logical replication starts with copying the existing data for each table in + * the publication. During the copy operation the state of the associated relation + * subscription is not ready. There is no point of locking the shards before the + * subscriptions for each relation becomes ready, so wait for it. + */ + WaitForRelationSubscriptionsBecomeReady(targetConnection, tableOwnerIds); + + /* + * Wait until the subscription is caught up to changes that has happened + * after the initial COPY on the shards. + */ + XLogRecPtr sourcePosition = GetRemoteLogPosition(sourceConnection); + WaitForShardMoveSubscription(targetConnection, sourcePosition, tableOwnerIds); + + /* + * Now lets create the post-load objects, such as the indexes, constraints + * and partitioning hierarchy. Once they are done, wait until the replication + * catches up again. So we don't block writes too long. + */ + CreatePostLogicalReplicationDataLoadObjects(shardList, targetNodeName, + targetNodePort); + sourcePosition = GetRemoteLogPosition(sourceConnection); + WaitForShardMoveSubscription(targetConnection, sourcePosition, tableOwnerIds); + + /* + * We're almost done, we'll block the writes to the shards that we're + * replicating and expect the subscription to catch up quickly afterwards. 
+ * + * Notice that although shards in partitioned relation are excluded from + * logical replication, they are still locked against modification, and + * foreign constraints are created on them too. + */ + BlockWritesToShardList(shardList); + + sourcePosition = GetRemoteLogPosition(sourceConnection); + WaitForShardMoveSubscription(targetConnection, sourcePosition, tableOwnerIds); + + /* + * We're creating the foreign constraints to reference tables after the + * data is already replicated and all the necessary locks are acquired. + * + * We prefer to do it here because the placements of reference tables + * are always valid, and any modification during the shard move would + * cascade to the hash distributed tables' shards if we had created + * the constraints earlier. + */ + CreateForeignConstraintsToReferenceTable(shardList, targetConnection); + + /* we're done, cleanup the publication and subscription */ + DropShardMoveSubscriptions(targetConnection, tableOwnerIds); + DropShardMovePublications(sourceConnection, tableOwnerIds); + + /* + * We use these connections exclusively for subscription management, + * because otherwise subsequent metadata changes may inadvertedly use + * these connections instead of the connections that were used to + * grab locks in BlockWritesToShardList. + */ + CloseConnection(targetConnection); + CloseConnection(sourceConnection); + } + PG_CATCH(); + { + /* + * Try our best not to leave any left-over subscription or publication. + * + * Although it is not very advisable to use code-paths that could throw + * new errors, we prefer to do it here since we expect the cost of leaving + * left-overs not be very low. + */ + + /* reconnect if the connection failed or is waiting for a command */ + if (PQstatus(targetConnection->pgConn) != CONNECTION_OK || + PQisBusy(targetConnection->pgConn)) + { + targetConnection = GetNodeUserDatabaseConnection(connectionFlags, + targetNodeName, + targetNodePort, + superUser, databaseName); + } + DropShardMoveSubscriptions(targetConnection, tableOwnerIds); + + /* reconnect if the connection failed or is waiting for a command */ + if (PQstatus(sourceConnection->pgConn) != CONNECTION_OK || + PQisBusy(sourceConnection->pgConn)) + { + sourceConnection = GetNodeUserDatabaseConnection(connectionFlags, + sourceNodeName, + sourceNodePort, superUser, + databaseName); + } + DropShardMovePublications(sourceConnection, tableOwnerIds); + + /* We don't need to UnclaimConnections since we're already erroring out */ + + PG_RE_THROW(); + } + PG_END_TRY(); +} + + +/* + * AcquireLogicalReplicationLock tries to acquire a lock for logical + * replication. We need this lock, because at the start of logical replication + * we clean up old subscriptions and publications. Because of this cleanup it's + * not safe to run multiple logical replication based shard moves at the same + * time. If multiple logical replication moves would run at the same time, the + * second move might clean up subscriptions and publications that are in use by + * another move. + */ +static void +AcquireLogicalReplicationLock(void) +{ + LOCKTAG tag; + SET_LOCKTAG_LOGICAL_REPLICATION(tag); + + LockAcquire(&tag, ExclusiveLock, false, false); +} + + +/* + * DropAllShardMoveLeftovers drops shard move subscriptions, publications, roles + * and replication slots on all nodes. These might have been left there after + * the coordinator crashed during a shard move. It's important to delete them + * for two reasons: + * 1. 
Starting new shard moves will fail when they exist, because it cannot + * create them. + * 2. Leftover replication slots that are not consumed from anymore make it + * impossible for WAL to be dropped. This can cause out-of-disk issues. + */ +static void +DropAllShardMoveLeftovers(void) +{ + char *superUser = CitusExtensionOwnerName(); + char *databaseName = get_database_name(MyDatabaseId); + + /* + * We open new connections to all nodes. The reason for this is that + * operations on subscriptions and publications cannot be run in a + * transaction. By forcing a new connection we make sure no transaction is + * active on the connection. + */ + int connectionFlags = FORCE_NEW_CONNECTION; + + List *workerNodeList = ActivePrimaryNodeList(AccessShareLock); + List *cleanupConnectionList = NIL; + WorkerNode *workerNode = NULL; + + /* + * First we try to remove the subscription, everywhere and only after + * having done that we try to remove the publication everywhere. This is + * needed, because the publication can only be removed if there's no active + * subscription on it. + */ + foreach_ptr(workerNode, workerNodeList) + { + MultiConnection *cleanupConnection = GetNodeUserDatabaseConnection( + connectionFlags, workerNode->workerName, workerNode->workerPort, + superUser, databaseName); + cleanupConnectionList = lappend(cleanupConnectionList, cleanupConnection); + + DropAllShardMoveSubscriptions(cleanupConnection); + DropAllShardMoveUsers(cleanupConnection); + } + + MultiConnection *cleanupConnection = NULL; + foreach_ptr(cleanupConnection, cleanupConnectionList) + { + /* + * If replication slot could not be dropped while dropping the + * subscriber, drop it here. + */ + DropAllShardMoveReplicationSlots(cleanupConnection); + DropAllShardMovePublications(cleanupConnection); + + /* + * We close all connections that we opened for the dropping here. That + * way we don't keep these connections open unnecessarily during the + * shard move (which can take a long time). + */ + CloseConnection(cleanupConnection); + } +} + + +/* + * PrepareReplicationSubscriptionList returns list of shards to be logically + * replicated from given shard list. This is needed because Postgres does not + * allow logical replication on partitioned tables, therefore shards belonging + * to a partitioned tables should be exluded from logical replication + * subscription list. + */ +static List * +PrepareReplicationSubscriptionList(List *shardList) +{ + List *replicationSubscriptionList = NIL; + ListCell *shardCell = NULL; + + foreach(shardCell, shardList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); + if (!PartitionedTable(shardInterval->relationId)) + { + /* only add regular and child tables to subscription */ + replicationSubscriptionList = lappend(replicationSubscriptionList, + shardInterval); + } + } + + return replicationSubscriptionList; +} + + +/* + * TableOwnerIds returns a bitmapset containing all the owners of the tables + * that the given shards belong to. + */ +static Bitmapset * +TableOwnerIds(List *shardList) +{ + ShardInterval *shardInterval = NULL; + Bitmapset *tableOwnerIds = NULL; + + foreach_ptr(shardInterval, shardList) + { + tableOwnerIds = bms_add_member(tableOwnerIds, TableOwnerOid( + shardInterval->relationId)); + } + + return tableOwnerIds; +} + + +/* + * CreateReplicaIdentity gets a shardList and creates all the replica identities + * on the shards in the given node. 
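For a single shard, the generated commands amount to recreating the index that backs the replica identity on the target and then pointing `REPLICA IDENTITY` at it; without this, the UPDATE/DELETE changes queued during the initial COPY could not be applied. A hedged sketch with made-up shard and index names (the real DDL comes from the helper functions below, not from these literals):

```c
#include <stdio.h>

int
main(void)
{
	/* illustrative shapes only; names are placeholders, not Citus-generated DDL */
	const char *commands[] = {
		"CREATE UNIQUE INDEX orders_pkey_102008 ON orders_102008 (order_id)",
		"ALTER TABLE orders_102008 REPLICA IDENTITY USING INDEX orders_pkey_102008",
	};

	for (int i = 0; i < 2; i++)
	{
		puts(commands[i]);
	}

	return 0;
}
```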
+ */ +static void +CreateReplicaIdentity(List *shardList, char *nodeName, int32 nodePort) +{ + MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext, + "CreateReplicaIdentity", + ALLOCSET_DEFAULT_SIZES); + MemoryContext oldContext = MemoryContextSwitchTo(localContext); + + ShardInterval *shardInterval; + foreach_ptr(shardInterval, shardList) + { + uint64 shardId = shardInterval->shardId; + Oid relationId = shardInterval->relationId; + + List *backingIndexCommandList = + GetIndexCommandListForShardBackingReplicaIdentity(relationId, shardId); + + List *replicaIdentityShardCommandList = + GetReplicaIdentityCommandListForShard(relationId, shardId); + + List *commandList = + list_concat(backingIndexCommandList, replicaIdentityShardCommandList); + + if (commandList != NIL) + { + ereport(DEBUG1, (errmsg("Creating replica identity for shard %ld on" + "target node %s:%d", shardId, nodeName, nodePort))); + + SendCommandListToWorkerOutsideTransaction(nodeName, nodePort, + TableOwner(relationId), + commandList); + } + + MemoryContextReset(localContext); + } + + MemoryContextSwitchTo(oldContext); +} + + +/* + * GetIndexCommandListForShardBackingReplicaIdentity returns all the create index + * commands that are needed to create replica identity. If the table doesn't have + * a replica identity, the function returns NIL. + */ +static List * +GetIndexCommandListForShardBackingReplicaIdentity(Oid relationId, uint64 shardId) +{ + List *commandList = NIL; + Relation relation = table_open(relationId, AccessShareLock); + Oid replicaIdentityIndex = GetRelationIdentityOrPK(relation); + table_close(relation, NoLock); + + if (OidIsValid(replicaIdentityIndex)) + { + /* + * The replica identity is backed by an index or primary key, + * so get the index/pkey definition first. + */ + HeapTuple indexTuple = + SearchSysCache1(INDEXRELID, ObjectIdGetDatum(replicaIdentityIndex)); + if (!HeapTupleIsValid(indexTuple)) + { + /* should not happen */ + elog(ERROR, "cache lookup failed for index %u", replicaIdentityIndex); + } + + Form_pg_index indexForm = ((Form_pg_index) GETSTRUCT(indexTuple)); + List *indexCommandTableDDLList = NIL; + int indexFlags = INCLUDE_INDEX_ALL_STATEMENTS; + GatherIndexAndConstraintDefinitionList(indexForm, &indexCommandTableDDLList, + indexFlags); + + List *indexCommandShardDDLList = + WorkerApplyShardDDLCommandList(indexCommandTableDDLList, shardId); + + commandList = list_concat(commandList, indexCommandShardDDLList); + + ReleaseSysCache(indexTuple); + } + + return commandList; +} + + +/* + * GetReplicaIdentityCommandListForShard returns the create replica identity + * command that are needed to create replica identity. If the table doesn't have + * a replica identity, the function returns NIL. + */ +static List * +GetReplicaIdentityCommandListForShard(Oid relationId, uint64 shardId) +{ + List *replicaIdentityTableDDLCommand = + GetTableReplicaIdentityCommand(relationId); + List *replicaIdentityShardCommandList = + WorkerApplyShardDDLCommandList(replicaIdentityTableDDLCommand, shardId); + + return replicaIdentityShardCommandList; +} + + +/* + * CreatePostLogicalReplicationDataLoadObjects gets a shardList and creates all + * the objects that can be created after the data is moved with logical replication. + */ +static void +CreatePostLogicalReplicationDataLoadObjects(List *shardList, char *targetNodeName, + int32 targetNodePort) +{ + /* + * We create indexes in 4 steps. 
+ * - CREATE INDEX statements + * - CREATE CONSTRAINT statements that are backed by + * indexes (unique and exclude constraints) + * - ALTER TABLE %s CLUSTER ON %s + * - ALTER INDEX %s ALTER COLUMN %d SET STATISTICS %d + * + * On each step, we execute can execute commands in parallel. For example, + * multiple indexes on the shard table or indexes for the colocated shards + * can be created in parallel. However, the latter two steps, clustering the + * table and setting the statistics of indexes, depends on the indexes being + * created. That's why the execution is divided into four distinct stages. + */ + ExecuteCreateIndexCommands(shardList, targetNodeName, targetNodePort); + ExecuteCreateConstraintsBackedByIndexCommands(shardList, targetNodeName, + targetNodePort); + ExecuteClusterOnCommands(shardList, targetNodeName, targetNodePort); + ExecuteCreateIndexStatisticsCommands(shardList, targetNodeName, targetNodePort); + + /* + * Once the indexes are created, there are few more objects like triggers and table + * statistics that should be created after the data move. + */ + ExecuteRemainingPostLoadTableCommands(shardList, targetNodeName, targetNodePort); + + /* create partitioning hierarchy, if any */ + CreatePartitioningHierarchy(shardList, targetNodeName, targetNodePort); + + /* create colocated foreign keys, if any */ + CreateColocatedForeignKeys(shardList, targetNodeName, targetNodePort); +} + + +/* + * ExecuteCreateIndexCommands gets a shardList and creates all the indexes + * for the given shardList in the given target node. + * + * The execution is done in parallel, and throws an error if any of the + * commands fail. + */ +static void +ExecuteCreateIndexCommands(List *shardList, char *targetNodeName, int targetNodePort) +{ + List *taskList = NIL; + ListCell *shardCell = NULL; + foreach(shardCell, shardList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); + Oid relationId = shardInterval->relationId; + + List *tableCreateIndexCommandList = + GetTableIndexAndConstraintCommandsExcludingReplicaIdentity(relationId, + INCLUDE_CREATE_INDEX_STATEMENTS); + + List *shardCreateIndexCommandList = + WorkerApplyShardDDLCommandList(tableCreateIndexCommandList, + shardInterval->shardId); + List *taskListForShard = + ConvertNonExistingPlacementDDLCommandsToTasks(shardCreateIndexCommandList, + shardInterval->shardId, + targetNodeName, targetNodePort); + taskList = list_concat(taskList, taskListForShard); + } + + /* + * We are going to create indexes and constraints using the current user. That is + * alright because an index/constraint always belongs to the owner of the table, + * and Citus already ensures that the current user owns all the tables that are + * moved. + * + * CREATE INDEX commands acquire ShareLock on a relation. So, it is + * allowed to run multiple CREATE INDEX commands concurrently on a table + * and across different tables (e.g., shards). + */ + + ereport(DEBUG1, (errmsg("Creating post logical replication objects " + "(indexes) on node %s:%d", targetNodeName, + targetNodePort))); + + ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, taskList, + MaxAdaptiveExecutorPoolSize, + NIL); +} + + +/* + * ExecuteCreateConstraintsBackedByIndexCommands gets a shardList and creates all the constraints + * that are backed by indexes for the given shardList in the given target node. + * + * The execution is done in sequential mode, and throws an error if any of the + * commands fail. 
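Concrete instances of the four statement shapes listed above, in stage order (table, index and constraint names are made up): stages one and two can be parallelized per shard, while the last two require the indexes to already exist.

```c
#include <stdio.h>

int
main(void)
{
	const char *stageCommands[] = {
		/* stage 1: plain CREATE INDEX */
		"CREATE INDEX orders_created_at_idx_102008 ON orders_102008 (created_at)",

		/* stage 2: constraints that are backed by an index */
		"ALTER TABLE orders_102008 ADD CONSTRAINT orders_pkey_102008 "
		"PRIMARY KEY USING INDEX orders_pkey_idx_102008",

		/* stage 3: remember the clustering index */
		"ALTER TABLE orders_102008 CLUSTER ON orders_created_at_idx_102008",

		/* stage 4: per-column statistics targets on an index */
		"ALTER INDEX orders_created_at_idx_102008 ALTER COLUMN 1 SET STATISTICS 1000",
	};

	for (int i = 0; i < 4; i++)
	{
		puts(stageCommands[i]);
	}

	return 0;
}
```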
+ */ +static void +ExecuteCreateConstraintsBackedByIndexCommands(List *shardList, char *targetNodeName, + int targetNodePort) +{ + ereport(DEBUG1, (errmsg("Creating post logical replication objects " + "(constraints backed by indexes) on node %s:%d", + targetNodeName, + targetNodePort))); + + MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext, + "CreateConstraintsBackedByIndexContext", + ALLOCSET_DEFAULT_SIZES); + MemoryContext oldContext = MemoryContextSwitchTo(localContext); + + ListCell *shardCell = NULL; + foreach(shardCell, shardList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); + Oid relationId = shardInterval->relationId; + + List *tableCreateConstraintCommandList = + GetTableIndexAndConstraintCommandsExcludingReplicaIdentity(relationId, + INCLUDE_CREATE_CONSTRAINT_STATEMENTS); + + if (tableCreateConstraintCommandList == NIL) + { + /* no constraints backed by indexes, skip */ + MemoryContextReset(localContext); + continue; + } + + List *shardCreateConstraintCommandList = + WorkerApplyShardDDLCommandList(tableCreateConstraintCommandList, + shardInterval->shardId); + + char *tableOwner = TableOwner(shardInterval->relationId); + SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, + tableOwner, + shardCreateConstraintCommandList); + MemoryContextReset(localContext); + } + + MemoryContextSwitchTo(oldContext); +} + + +/* + * ConvertNonExistingShardDDLCommandsToTasks generates one task per input + * element in shardCommandList. + * + * The generated tasks' placements do not exist (yet). We are generating + * fake placements for the tasks. + */ +static List * +ConvertNonExistingPlacementDDLCommandsToTasks(List *shardCommandList, + uint64 shardId, + char *targetNodeName, + int targetNodePort) +{ + WorkerNode *workerNode = FindWorkerNodeOrError(targetNodeName, targetNodePort); + + List *taskList = NIL; + uint64 jobId = INVALID_JOB_ID; + + ListCell *commandCell = NULL; + int taskId = 1; + foreach(commandCell, shardCommandList) + { + char *command = (char *) lfirst(commandCell); + Task *task = CreateBasicTask(jobId, taskId, DDL_TASK, command); + + /* this placement currently does not exist */ + ShardPlacement *taskPlacement = CitusMakeNode(ShardPlacement); + SetPlacementNodeMetadata(taskPlacement, workerNode); + + task->taskPlacementList = list_make1(taskPlacement); + task->anchorShardId = shardId; + + taskList = lappend(taskList, task); + taskId++; + } + + return taskList; +} + + +/* + * ExecuteClusterOnCommands gets a shardList and creates all the CLUSTER ON commands + * for the given shardList in the given target node. + * + * The execution is done in parallel, and in case of any failure, the transaction + * is aborted. 
+ */ +static void +ExecuteClusterOnCommands(List *shardList, char *targetNodeName, int targetNodePort) +{ + List *taskList = NIL; + ListCell *shardCell; + foreach(shardCell, shardList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); + Oid relationId = shardInterval->relationId; + + List *tableAlterTableClusterOnCommandList = + GetTableIndexAndConstraintCommandsExcludingReplicaIdentity(relationId, + INCLUDE_INDEX_CLUSTERED_STATEMENTS); + + List *shardAlterTableClusterOnCommandList = + WorkerApplyShardDDLCommandList(tableAlterTableClusterOnCommandList, + shardInterval->shardId); + + List *taskListForShard = + ConvertNonExistingPlacementDDLCommandsToTasks( + shardAlterTableClusterOnCommandList, + shardInterval->shardId, + targetNodeName, targetNodePort); + taskList = list_concat(taskList, taskListForShard); + } + + ereport(DEBUG1, (errmsg("Creating post logical replication objects " + "(CLUSTER ON) on node %s:%d", targetNodeName, + targetNodePort))); + + ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, taskList, + MaxAdaptiveExecutorPoolSize, + NIL); +} + + +/* + * ExecuteCreateIndexStatisticsCommands gets a shardList and creates + * all the statistics objects for the indexes in the given target node. + * + * The execution is done in sequentially, and in case of any failure, the transaction + * is aborted. + */ +static void +ExecuteCreateIndexStatisticsCommands(List *shardList, char *targetNodeName, int + targetNodePort) +{ + ereport(DEBUG1, (errmsg("Creating post logical replication objects " + "(index statistics) on node %s:%d", targetNodeName, + targetNodePort))); + + MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext, + "CreateIndexStatisticsContext", + ALLOCSET_DEFAULT_SIZES); + MemoryContext oldContext = MemoryContextSwitchTo(localContext); + + ListCell *shardCell; + foreach(shardCell, shardList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); + Oid relationId = shardInterval->relationId; + + List *tableAlterIndexSetStatisticsCommandList = + GetTableIndexAndConstraintCommandsExcludingReplicaIdentity(relationId, + INCLUDE_INDEX_STATISTICS_STATEMENTTS); + List *shardAlterIndexSetStatisticsCommandList = + WorkerApplyShardDDLCommandList(tableAlterIndexSetStatisticsCommandList, + shardInterval->shardId); + + if (shardAlterIndexSetStatisticsCommandList == NIL) + { + /* no index statistics exists, skip */ + MemoryContextReset(localContext); + continue; + } + + /* + * These remaining operations do not require significant resources, so no + * need to create them in parallel. + */ + char *tableOwner = TableOwner(shardInterval->relationId); + SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, + tableOwner, + shardAlterIndexSetStatisticsCommandList); + + MemoryContextReset(localContext); + } + + MemoryContextSwitchTo(oldContext); +} + + +/* + * ExecuteRemainingPostLoadTableCommands gets a shardList and creates + * all the remaining post load objects other than the indexes + * in the given target node. 
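As the debug message in this function notes, the "remaining" post-load objects are primarily triggers and table-level statistics. A hedged sketch of what such commands can look like for one shard (all names are hypothetical placeholders; the actual statements come from `GetPostLoadTableCreationCommands`):

```c
#include <stdio.h>

int
main(void)
{
	/* illustrative shapes only; names are placeholders */
	const char *commands[] = {
		"CREATE TRIGGER audit_insert_102008 AFTER INSERT ON orders_102008 "
		"FOR EACH ROW EXECUTE FUNCTION record_order_insert()",

		"CREATE STATISTICS orders_stats_102008 (dependencies) "
		"ON customer_id, created_at FROM orders_102008",

		"ALTER TABLE orders_102008 ALTER COLUMN customer_id SET STATISTICS 500",
	};

	for (int i = 0; i < 3; i++)
	{
		puts(commands[i]);
	}

	return 0;
}
```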
+ */ +static void +ExecuteRemainingPostLoadTableCommands(List *shardList, char *targetNodeName, int + targetNodePort) +{ + ereport(DEBUG1, (errmsg("Creating post logical replication objects " + "(triggers and table statistics) on node %s:%d", + targetNodeName, + targetNodePort))); + + MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext, + "CreateTableStatisticsContext", + ALLOCSET_DEFAULT_SIZES); + MemoryContext oldContext = MemoryContextSwitchTo(localContext); + + ListCell *shardCell = NULL; + foreach(shardCell, shardList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); + Oid relationId = shardInterval->relationId; + + bool includeIndexes = false; + bool includeReplicaIdentity = false; + + List *tablePostLoadTableCommandList = + GetPostLoadTableCreationCommands(relationId, includeIndexes, + includeReplicaIdentity); + + List *shardPostLoadTableCommandList = + WorkerApplyShardDDLCommandList(tablePostLoadTableCommandList, + shardInterval->shardId); + + if (shardPostLoadTableCommandList == NIL) + { + /* no index statistics exists, skip */ + continue; + } + + /* + * These remaining operations do not require significant resources, so no + * need to create them in parallel. + */ + char *tableOwner = TableOwner(shardInterval->relationId); + SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, + tableOwner, + shardPostLoadTableCommandList); + + MemoryContextReset(localContext); + } + + MemoryContextSwitchTo(oldContext); +} + + +/* + * CreatePartitioningHierarchy gets a shardList and creates the partitioning + * hierarchy between the shardList, if any, + */ +static void +CreatePartitioningHierarchy(List *shardList, char *targetNodeName, int targetNodePort) +{ + ereport(DEBUG1, (errmsg("Creating post logical replication objects " + "(partitioning hierarchy) on node %s:%d", targetNodeName, + targetNodePort))); + + MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext, + "CreatePartitioningHierarchy", + ALLOCSET_DEFAULT_SIZES); + MemoryContext oldContext = MemoryContextSwitchTo(localContext); + + ListCell *shardCell = NULL; + foreach(shardCell, shardList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); + + if (PartitionTable(shardInterval->relationId)) + { + char *attachPartitionCommand = + GenerateAttachShardPartitionCommand(shardInterval); + + char *tableOwner = TableOwner(shardInterval->relationId); + + /* + * Attaching partition may acquire conflicting locks when created in + * parallel, so create them sequentially. Also attaching partition + * is a quick operation, so it is fine to execute sequentially. 
+ */ + SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, + tableOwner, + list_make1( + attachPartitionCommand)); + MemoryContextReset(localContext); + } + } + + MemoryContextSwitchTo(oldContext); +} + + +/* + * CreateColocatedForeignKeys gets a shardList and creates the colocated foreign + * keys between the shardList, if any, + */ +static void +CreateColocatedForeignKeys(List *shardList, char *targetNodeName, int targetNodePort) +{ + ereport(DEBUG1, (errmsg("Creating post logical replication objects " + "(co-located foreign keys) on node %s:%d", targetNodeName, + targetNodePort))); + + MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext, + "CreateColocatedForeignKeys", + ALLOCSET_DEFAULT_SIZES); + MemoryContext oldContext = MemoryContextSwitchTo(localContext); + + ListCell *shardCell = NULL; + foreach(shardCell, shardList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); + + List *shardForeignConstraintCommandList = NIL; + List *referenceTableForeignConstraintList = NIL; + CopyShardForeignConstraintCommandListGrouped(shardInterval, + &shardForeignConstraintCommandList, + &referenceTableForeignConstraintList); + + if (shardForeignConstraintCommandList == NIL) + { + /* no colocated foreign keys, skip */ + continue; + } + + /* + * Creating foreign keys may acquire conflicting locks when done in + * parallel. Hence we create foreign keys one at a time. + * + */ + char *tableOwner = TableOwner(shardInterval->relationId); + SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, + tableOwner, + shardForeignConstraintCommandList); + MemoryContextReset(localContext); + } + + MemoryContextSwitchTo(oldContext); +} + + +/* + * CreateForeignConstraintsToReferenceTable is used to create the foreign constraints + * from distributed to reference tables in the newly created shard replicas. + */ +static void +CreateForeignConstraintsToReferenceTable(List *shardList, + MultiConnection *targetConnection) +{ + ereport(DEBUG1, (errmsg("Creating post logical replication objects " + "(foreign keys to reference tables) on node " + "%s:%d", targetConnection->hostname, + targetConnection->port))); + + MemoryContext localContext = + AllocSetContextCreate(CurrentMemoryContext, + "CreateForeignConstraintsToReferenceTable", + ALLOCSET_DEFAULT_SIZES); + MemoryContext oldContext = MemoryContextSwitchTo(localContext); + + + ListCell *shardCell = NULL; + foreach(shardCell, shardList) + { + ListCell *commandCell = NULL; + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); + List *commandList = GetForeignConstraintCommandsToReferenceTable(shardInterval); + + /* iterate over the commands and execute them in the same connection */ + foreach(commandCell, commandList) + { + char *commandString = lfirst(commandCell); + + ExecuteCriticalRemoteCommand(targetConnection, commandString); + } + MemoryContextReset(localContext); + } + + MemoryContextSwitchTo(oldContext); +} + + +/* + * ConflictOnlyWithIsolationTesting is only useful for testing and should + * not be called by any code-path except for LogicallyReplicateShards(). + * + * Since logically replicating shards does eventually block modifications, + * it becomes tricky to use isolation tester to show concurrent behaviour + * of online shard rebalancing and modification queries. + * + * Note that since the cost of calling this function is pretty low, we prefer + * to use it in non-assert builds as well not to diverge in the behaviour. 
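At the SQL level, the same lock can be taken with the two-integer form of the advisory lock functions, which is how an isolation test session can make the shard move block at exactly this point. A hedged sketch (the connection string is a placeholder; the keys are the SHARD_MOVE_ADVISORY_LOCK_* constants defined earlier in this file):

```c
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn *connection = PQconnectdb("dbname=postgres");

	/*
	 * pg_advisory_lock(int4, int4) builds the same lock tag
	 * (database oid, key1, key2, 2) as SET_LOCKTAG_ADVISORY above, so this
	 * session and the shard move contend on one lock.
	 */
	PGresult *result = PQexec(connection, "SELECT pg_advisory_lock(44000, 55152)");
	if (PQresultStatus(result) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "%s", PQresultErrorMessage(result));
	}
	PQclear(result);

	/* ... run the statements whose blocking behaviour is under test ... */

	result = PQexec(connection, "SELECT pg_advisory_unlock(44000, 55152)");
	PQclear(result);
	PQfinish(connection);
	return 0;
}
```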
+ */ +static void +ConflictOnlyWithIsolationTesting() +{ + LOCKTAG tag; + const bool sessionLock = false; + const bool dontWait = false; + + if (RunningUnderIsolationTest) + { + /* we've picked random keys */ + SET_LOCKTAG_ADVISORY(tag, MyDatabaseId, SHARD_MOVE_ADVISORY_LOCK_FIRST_KEY, + SHARD_MOVE_ADVISORY_LOCK_SECOND_KEY, 2); + + (void) LockAcquire(&tag, ExclusiveLock, sessionLock, dontWait); + } +} + + +/* + * DropShardMovePublication drops the publication used for shard moves over the given + * connection, if it exists. It also drops the replication slot if that slot was not + * dropped while dropping the subscription. + */ +static void +DropShardMovePublications(MultiConnection *connection, Bitmapset *tableOwnerIds) +{ + int ownerId = -1; + + while ((ownerId = bms_next_member(tableOwnerIds, ownerId)) >= 0) + { + /* + * If replication slot can not be dropped while dropping the subscriber, drop + * it here. + */ + DropShardMoveReplicationSlot(connection, ShardMoveSubscriptionName(ownerId)); + DropShardMovePublication(connection, ShardMovePublicationName(ownerId)); + } +} + + +/* + * DropShardMoveReplicationSlot drops the replication slot with the given name + * if it exists. + */ +static void +DropShardMoveReplicationSlot(MultiConnection *connection, char *replicationSlotName) +{ + ExecuteCriticalRemoteCommand( + connection, + psprintf( + "select pg_drop_replication_slot(slot_name) from " + REPLICATION_SLOT_CATALOG_TABLE_NAME + " where slot_name = %s", + quote_literal_cstr(replicationSlotName))); +} + + +/* + * DropShardMovePublication drops the publication with the given name if it + * exists. + */ +static void +DropShardMovePublication(MultiConnection *connection, char *publicationName) +{ + ExecuteCriticalRemoteCommand(connection, psprintf( + "DROP PUBLICATION IF EXISTS %s", + quote_identifier(publicationName))); +} + + +/* + * ShardMovePublicationName returns the name of the publication for the given + * table owner. + */ +static char * +ShardMovePublicationName(Oid ownerId) +{ + return psprintf("%s%i", SHARD_MOVE_PUBLICATION_PREFIX, ownerId); +} + + +/* + * ShardMoveSubscriptionName returns the name of the subscription for the given + * owner. If we're running the isolation tester the function also appends the + * process id normal subscription name. + * + * When it contains the PID of the current process it is used for block detection + * by the isolation test runner, since the replication process on the publishing + * node uses the name of the subscription as the application_name of the SQL session. + * This PID is then extracted from the application_name to find out which PID on the + * coordinator is blocked by the blocked replication process. + */ +static char * +ShardMoveSubscriptionName(Oid ownerId) +{ + if (RunningUnderIsolationTest) + { + return psprintf("%s%i_%i", SHARD_MOVE_SUBSCRIPTION_PREFIX, ownerId, MyProcPid); + } + else + { + return psprintf("%s%i", SHARD_MOVE_SUBSCRIPTION_PREFIX, ownerId); + } +} + + +/* + * ShardMoveSubscriptionRole returns the name of the role used by the + * subscription that subscribes to the tables of the given owner. + */ +static char * +ShardMoveSubscriptionRole(Oid ownerId) +{ + return psprintf("%s%i", SHARD_MOVE_SUBSCRIPTION_ROLE_PREFIX, ownerId); +} + + +/* + * GetQueryResultStringList expects a query that returns a single column of + * strings. This query is executed on the connection and the function then + * returns the results of the query in a List. 
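A plain-libpq analogue of this helper, without Citus' MultiConnection and List machinery, looks roughly like the sketch below (errors are collapsed into returning NULL and the caller owns the returned strings). The cleanup helpers that follow use exactly this shape of query, for example selecting `subname` from `pg_subscription` for names matching the shard move prefix.

```c
#include <stdlib.h>
#include <string.h>
#include <libpq-fe.h>

/*
 * Run a query expected to return a single text column and copy the values
 * out of the PGresult before it is cleared.
 */
static char **
GetStringColumn(PGconn *connection, const char *query, int *rowCount)
{
	PGresult *result = PQexec(connection, query);
	if (PQresultStatus(result) != PGRES_TUPLES_OK || PQnfields(result) != 1)
	{
		PQclear(result);
		return NULL;
	}

	*rowCount = PQntuples(result);

	char **values = malloc(sizeof(char *) * (size_t) *rowCount);
	for (int row = 0; row < *rowCount; row++)
	{
		/* PQgetvalue() points into the PGresult, so take a private copy */
		values[row] = strdup(PQgetvalue(result, row, 0));
	}

	PQclear(result);
	return values;
}
```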
+ */ +static List * +GetQueryResultStringList(MultiConnection *connection, char *query) +{ + bool raiseInterrupts = true; + + int querySent = SendRemoteCommand(connection, query); + if (querySent == 0) + { + ReportConnectionError(connection, ERROR); + } + + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); + if (!IsResponseOK(result)) + { + ReportResultError(connection, result, ERROR); + } + + int rowCount = PQntuples(result); + int columnCount = PQnfields(result); + + if (columnCount != 1) + { + ereport(ERROR, (errmsg("unexpected number of columns returned while reading "))); + } + + List *resultList = NIL; + for (int rowIndex = 0; rowIndex < rowCount; rowIndex++) + { + int columnIndex = 0; + StringInfo resultStringInfo = makeStringInfo(); + + char *resultString = PQgetvalue(result, rowIndex, columnIndex); + + /* we're using the stringinfo to copy the data into the current memory context */ + appendStringInfoString(resultStringInfo, resultString); + + resultList = lappend(resultList, resultStringInfo->data); + } + + PQclear(result); + ForgetResults(connection); + return resultList; +} + + +/* + * DropAllShardMoveSubscriptions drops all the existing subscriptions that + * match our shard move naming scheme on the node that the connection points + * to. + */ +static void +DropAllShardMoveSubscriptions(MultiConnection *connection) +{ + char *query = psprintf( + "SELECT subname FROM pg_subscription " + "WHERE subname LIKE %s || '%%'", + quote_literal_cstr(SHARD_MOVE_SUBSCRIPTION_PREFIX)); + List *subscriptionNameList = GetQueryResultStringList(connection, query); + char *subscriptionName; + foreach_ptr(subscriptionName, subscriptionNameList) + { + DropShardMoveSubscription(connection, subscriptionName); + } +} + + +/* + * DropAllShardMoveUsers drops all the users that match our shard move naming + * scheme for temporary shard move users on the node that the connection points + * to. + */ +static void +DropAllShardMoveUsers(MultiConnection *connection) +{ + char *query = psprintf( + "SELECT rolname FROM pg_roles " + "WHERE rolname LIKE %s || '%%'", + quote_literal_cstr(SHARD_MOVE_SUBSCRIPTION_ROLE_PREFIX)); + List *usernameList = GetQueryResultStringList(connection, query); + char *username; + foreach_ptr(username, usernameList) + { + DropShardMoveUser(connection, username); + } +} + + +/* + * DropAllShardMoveReplicationSlots drops all the existing replication slots + * that match our shard move naming scheme on the node that the connection + * points to. + */ +static void +DropAllShardMoveReplicationSlots(MultiConnection *connection) +{ + char *query = psprintf( + "SELECT slot_name FROM pg_replication_slots " + "WHERE slot_name LIKE %s || '%%'", + quote_literal_cstr(SHARD_MOVE_SUBSCRIPTION_PREFIX)); + List *slotNameList = GetQueryResultStringList(connection, query); + char *slotName; + foreach_ptr(slotName, slotNameList) + { + DropShardMoveReplicationSlot(connection, slotName); + } +} + + +/* + * DropAllShardMovePublications drops all the existing publications that + * match our shard move naming scheme on the node that the connection points + * to. 
+ */ +static void +DropAllShardMovePublications(MultiConnection *connection) +{ + char *query = psprintf( + "SELECT pubname FROM pg_publication " + "WHERE pubname LIKE %s || '%%'", + quote_literal_cstr(SHARD_MOVE_PUBLICATION_PREFIX)); + List *publicationNameList = GetQueryResultStringList(connection, query); + char *publicationName; + foreach_ptr(publicationName, publicationNameList) + { + DropShardMovePublication(connection, publicationName); + } +} + + +/* + * DropShardMoveSubscriptions drops subscriptions from the subscriber node that + * are used to move shards for the given table owners. Note that, it drops the + * replication slots on the publisher node if it can drop the slots as well + * with the DROP SUBSCRIPTION command. Otherwise, only the subscriptions will + * be deleted with DROP SUBSCRIPTION via the connection. In the latter case, + * replication slots will be dropped while cleaning the publisher node when + * calling DropShardMovePublications. + */ +static void +DropShardMoveSubscriptions(MultiConnection *connection, Bitmapset *tableOwnerIds) +{ + int ownerId = -1; + while ((ownerId = bms_next_member(tableOwnerIds, ownerId)) >= 0) + { + DropShardMoveSubscription(connection, ShardMoveSubscriptionName(ownerId)); + DropShardMoveUser(connection, ShardMoveSubscriptionRole(ownerId)); + } +} + + +/* + * DropShardMoveSubscription drops subscription with the given name on the + * subscriber node. Note that, it also drops the replication slot on the + * publisher node if it can drop the slot as well with the DROP SUBSCRIPTION + * command. Otherwise, only the subscription will be deleted with DROP + * SUBSCRIPTION via the connection. + */ +static void +DropShardMoveSubscription(MultiConnection *connection, char *subscriptionName) +{ + PGresult *result = NULL; + + /* + * Instead of ExecuteCriticalRemoteCommand, we use the + * ExecuteOptionalRemoteCommand to fall back into the logic inside the + * if block below in case of any error while sending the command. + */ + int dropCommandResult = ExecuteOptionalRemoteCommand( + connection, + psprintf( + "DROP SUBSCRIPTION IF EXISTS %s", + quote_identifier(subscriptionName)), + &result); + + if (PQstatus(connection->pgConn) != CONNECTION_OK) + { + ReportConnectionError(connection, ERROR); + } + + PQclear(result); + ForgetResults(connection); + + /* + * If we can not drop the replication slot using the DROP SUBSCRIPTION command + * then we need to alter the subscription to drop the subscriber only and drop + * the replication slot separately. + */ + if (dropCommandResult != 0) + { + StringInfo alterSubscriptionSlotCommand = makeStringInfo(); + StringInfo alterSubscriptionDisableCommand = makeStringInfo(); + + appendStringInfo(alterSubscriptionDisableCommand, + "ALTER SUBSCRIPTION %s DISABLE", + quote_identifier(subscriptionName)); + ExecuteCriticalRemoteCommand(connection, + alterSubscriptionDisableCommand->data); + + appendStringInfo(alterSubscriptionSlotCommand, + "ALTER SUBSCRIPTION %s SET (slot_name = NONE)", + quote_identifier(subscriptionName)); + ExecuteCriticalRemoteCommand(connection, alterSubscriptionSlotCommand->data); + + ExecuteCriticalRemoteCommand(connection, psprintf( + "DROP SUBSCRIPTION %s", + quote_identifier(subscriptionName))); + } +} + + +/* + * DropShardMoveUser drops the user with the given name if it exists. + */ +static void +DropShardMoveUser(MultiConnection *connection, char *username) +{ + /* + * The DROP USER command should not propagate, so we temporarily disable + * DDL propagation. 
+ */ + SendCommandListToWorkerOutsideTransaction( + connection->hostname, connection->port, connection->user, + list_make2( + "SET LOCAL citus.enable_ddl_propagation TO OFF;", + psprintf("DROP USER IF EXISTS %s", + quote_identifier(username)))); +} + + +/* + * CreateShardMovePublications creates a set of publications for moving a list + * of shards over the given connection. One publication is created for each of + * the table owners in tableOwnerIds. Each of those publications only contains + * shards that the respective table owner owns. + */ +static void +CreateShardMovePublications(MultiConnection *connection, List *shardList, + Bitmapset *tableOwnerIds) +{ + int ownerId = -1; + + while ((ownerId = bms_next_member(tableOwnerIds, ownerId)) >= 0) + { + StringInfo createPublicationCommand = makeStringInfo(); + bool prefixWithComma = false; + + appendStringInfo(createPublicationCommand, "CREATE PUBLICATION %s FOR TABLE ", + ShardMovePublicationName(ownerId)); + + ShardInterval *shard = NULL; + foreach_ptr(shard, shardList) + { + if (TableOwnerOid(shard->relationId) != ownerId) + { + continue; + } + + char *shardName = ConstructQualifiedShardName(shard); + + if (prefixWithComma) + { + appendStringInfoString(createPublicationCommand, ","); + } + + appendStringInfoString(createPublicationCommand, shardName); + prefixWithComma = true; + } + + ExecuteCriticalRemoteCommand(connection, createPublicationCommand->data); + pfree(createPublicationCommand->data); + pfree(createPublicationCommand); + } +} + + +/* + * CreateShardMoveSubscriptions creates the subscriptions used for shard moves + * over the given connection. One subscription is created for each of the table + * owners in tableOwnerIds. The remote node needs to have appropriate + * pg_dist_authinfo rows for the user such that the apply process can connect. + * Because the generated CREATE SUBSCRIPTION statements uses the host and port + * names directly (rather than looking up any relevant pg_dist_poolinfo rows), + * all such connections remain direct and will not route through any configured + * poolers. + */ +static void +CreateShardMoveSubscriptions(MultiConnection *connection, char *sourceNodeName, + int sourceNodePort, char *userName, char *databaseName, + Bitmapset *tableOwnerIds) +{ + int ownerId = -1; + while ((ownerId = bms_next_member(tableOwnerIds, ownerId)) >= 0) + { + StringInfo createSubscriptionCommand = makeStringInfo(); + StringInfo conninfo = makeStringInfo(); + + /* + * The CREATE USER command should not propagate, so we temporarily + * disable DDL propagation. 
+ */ + SendCommandListToWorkerOutsideTransaction( + connection->hostname, connection->port, connection->user, + list_make2( + "SET LOCAL citus.enable_ddl_propagation TO OFF;", + psprintf( + "CREATE USER %s SUPERUSER IN ROLE %s", + ShardMoveSubscriptionRole(ownerId), + GetUserNameFromId(ownerId, false) + ))); + + appendStringInfo(conninfo, "host='%s' port=%d user='%s' dbname='%s' " + "connect_timeout=20", + escape_param_str(sourceNodeName), sourceNodePort, + escape_param_str(userName), escape_param_str(databaseName)); + + appendStringInfo(createSubscriptionCommand, + "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s " + "WITH (citus_use_authinfo=true, enabled=false)", + quote_identifier(ShardMoveSubscriptionName(ownerId)), + quote_literal_cstr(conninfo->data), + quote_identifier(ShardMovePublicationName(ownerId))); + + ExecuteCriticalRemoteCommand(connection, createSubscriptionCommand->data); + pfree(createSubscriptionCommand->data); + pfree(createSubscriptionCommand); + ExecuteCriticalRemoteCommand(connection, psprintf( + "ALTER SUBSCRIPTION %s OWNER TO %s", + ShardMoveSubscriptionName(ownerId), + ShardMoveSubscriptionRole(ownerId) + )); + + /* + * The ALTER ROLE command should not propagate, so we temporarily + * disable DDL propagation. + */ + SendCommandListToWorkerOutsideTransaction( + connection->hostname, connection->port, connection->user, + list_make2( + "SET LOCAL citus.enable_ddl_propagation TO OFF;", + psprintf( + "ALTER ROLE %s NOSUPERUSER", + ShardMoveSubscriptionRole(ownerId) + ))); + + ExecuteCriticalRemoteCommand(connection, psprintf( + "ALTER SUBSCRIPTION %s ENABLE", + ShardMoveSubscriptionName(ownerId) + )); + } +} + + +/* *INDENT-OFF* */ +/* + * Escaping libpq connect parameter strings. + * + * Replaces "'" with "\'" and "\" with "\\". + * + * Copied from dblink.c to escape libpq params + */ +static char * +escape_param_str(const char *str) +{ + StringInfoData buf; + + initStringInfo(&buf); + + for (const char *cp = str; *cp; cp++) + { + if (*cp == '\\' || *cp == '\'') + appendStringInfoChar(&buf, '\\'); + appendStringInfoChar(&buf, *cp); + } + + return buf.data; +} + +/* *INDENT-ON* */ + + +/* + * GetRemoteLogPosition gets the current WAL log position over the given connection. + */ +static XLogRecPtr +GetRemoteLogPosition(MultiConnection *connection) +{ + return GetRemoteLSN(connection, CURRENT_LOG_POSITION_COMMAND); +} + + +/* + * GetRemoteLSN executes a command that returns a single LSN over the given connection + * and returns it as an XLogRecPtr (uint64). 
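As an illustrative aside, not part of the patch above: the same round trip can be reproduced with plain libpq by running a query that returns a single `pg_lsn` value and decoding its textual `X/Y` form into a 64-bit integer. The connection string and the `pg_current_wal_lsn()` query in the sketch below are assumptions; inside Citus the equivalent work goes through `SendRemoteCommand` and `pg_lsn_in`, as shown in this file.

```c
/* Illustrative stand-alone sketch; compile with: cc lsn.c -lpq */
#include <stdint.h>
#include <stdio.h>
#include <libpq-fe.h>

/* decode the textual "X/Y" form of a pg_lsn into a single 64-bit value */
static uint64_t
ParseLsnText(const char *lsnText)
{
	unsigned int high = 0;
	unsigned int low = 0;

	if (sscanf(lsnText, "%X/%X", &high, &low) != 2)
	{
		fprintf(stderr, "unexpected LSN format: %s\n", lsnText);
		return 0;
	}

	return ((uint64_t) high << 32) | low;
}

int
main(void)
{
	/* assumed connection string, adjust for your node */
	PGconn *conn = PQconnectdb("host=localhost port=5432 dbname=postgres");
	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "%s", PQerrorMessage(conn));
		return 1;
	}

	PGresult *result = PQexec(conn, "SELECT pg_current_wal_lsn()");
	if (PQresultStatus(result) != PGRES_TUPLES_OK || PQntuples(result) != 1 ||
		PQnfields(result) != 1)
	{
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQclear(result);
		PQfinish(conn);
		return 1;
	}

	uint64_t lsn = ParseLsnText(PQgetvalue(result, 0, 0));
	printf("current WAL position: %X/%X\n",
		   (unsigned int) (lsn >> 32), (unsigned int) lsn);

	PQclear(result);
	PQfinish(conn);
	return 0;
}
```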
+ */ +static XLogRecPtr +GetRemoteLSN(MultiConnection *connection, char *command) +{ + bool raiseInterrupts = false; + XLogRecPtr remoteLogPosition = InvalidXLogRecPtr; + + int querySent = SendRemoteCommand(connection, command); + if (querySent == 0) + { + ReportConnectionError(connection, ERROR); + } + + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); + if (!IsResponseOK(result)) + { + ReportResultError(connection, result, ERROR); + } + + int rowCount = PQntuples(result); + if (rowCount != 1) + { + PQclear(result); + ForgetResults(connection); + return InvalidXLogRecPtr; + } + + int colCount = PQnfields(result); + if (colCount != 1) + { + ereport(ERROR, (errmsg("unexpected number of columns returned by: %s", + command))); + } + + if (!PQgetisnull(result, 0, 0)) + { + char *resultString = PQgetvalue(result, 0, 0); + Datum remoteLogPositionDatum = DirectFunctionCall1Coll(pg_lsn_in, InvalidOid, + CStringGetDatum( + resultString)); + remoteLogPosition = DatumGetLSN(remoteLogPositionDatum); + } + + PQclear(result); + ForgetResults(connection); + + return remoteLogPosition; +} + + +/* + * WaitForRelationSubscriptionsBecomeReady waits until the states of the subsriptions + * for each shard becomes ready. This indicates that the initial COPY is finished + * on the shards. + * + * The function errors if the total size of the relations that belong to the subscription + * on the target node doesn't change within LogicalReplicationErrorTimeout. The + * function also reports its progress in every logicalReplicationProgressReportTimeout. + */ +static void +WaitForRelationSubscriptionsBecomeReady(MultiConnection *targetConnection, + Bitmapset *tableOwnerIds) +{ + uint64 previousTotalRelationSizeForSubscription = 0; + TimestampTz previousSizeChangeTime = GetCurrentTimestamp(); + + /* report in the first iteration as well */ + TimestampTz previousReportTime = 0; + + uint64 previousReportedTotalSize = 0; + + + /* + * We might be in the loop for a while. Since we don't need to preserve + * any memory beyond this function, we can simply switch to a child context + * and reset it on every iteration to make sure we don't slowly build up + * a lot of memory. + */ + MemoryContext loopContext = AllocSetContextCreateExtended(CurrentMemoryContext, + "WaitForRelationSubscriptionsBecomeReady", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + MemoryContext oldContext = MemoryContextSwitchTo(loopContext); + + while (true) + { + /* we're done, all relations are ready */ + if (RelationSubscriptionsAreReady(targetConnection, tableOwnerIds)) + { + ereport(LOG, (errmsg("The states of the relations belonging to the " + "subscriptions became READY on the " + "target node %s:%d", + targetConnection->hostname, + targetConnection->port))); + + break; + } + char *subscriptionValueList = ShardMoveSubscriptionNamesValueList(tableOwnerIds); + + /* Get the current total size of tables belonging to the subscriber */ + uint64 currentTotalRelationSize = + TotalRelationSizeForSubscription(targetConnection, psprintf( + "SELECT sum(pg_total_relation_size(srrelid)) " + "FROM pg_subscription_rel, pg_stat_subscription " + "WHERE srsubid = subid AND subname IN %s", + subscriptionValueList + ) + ); + + /* + * The size has not been changed within the last iteration. If necessary + * log a messages. If size does not change over a given replication timeout + * error out. 
+ */ + if (currentTotalRelationSize == previousTotalRelationSizeForSubscription) + { + /* log the progress if necessary */ + if (TimestampDifferenceExceeds(previousReportTime, + GetCurrentTimestamp(), + logicalReplicationProgressReportTimeout)) + { + ereport(LOG, (errmsg("Subscription size has been staying same for the " + "last %d msec", + logicalReplicationProgressReportTimeout))); + + previousReportTime = GetCurrentTimestamp(); + } + + /* Error out if the size does not change within the given time threshold */ + if (TimestampDifferenceExceeds(previousSizeChangeTime, + GetCurrentTimestamp(), + LogicalReplicationTimeout)) + { + ereport(ERROR, (errmsg("The logical replication waiting timeout " + "%d msec exceeded", + LogicalReplicationTimeout), + errdetail("The subscribed relations haven't become " + "ready on the target node %s:%d", + targetConnection->hostname, + targetConnection->port), + errhint( + "There might have occurred problems on the target " + "node. If not, consider using higher values for " + "citus.logical_replication_timeout"))); + } + } + else + { + /* first, record that there is some change in the size */ + previousSizeChangeTime = GetCurrentTimestamp(); + + /* + * Subscription size may decrease or increase. + * + * Subscription size may decrease in case of VACUUM operation, which + * may get fired with autovacuum, on it. + * + * Increase of the relation's size belonging to subscriber means a successful + * copy from publisher to subscriber. + */ + bool sizeIncreased = currentTotalRelationSize > + previousTotalRelationSizeForSubscription; + + if (TimestampDifferenceExceeds(previousReportTime, + GetCurrentTimestamp(), + logicalReplicationProgressReportTimeout)) + { + ereport(LOG, ((errmsg("The total size of the relations belonging to " + "subscriptions %s from %ld to %ld at %s " + "on the target node %s:%d", + sizeIncreased ? "increased" : "decreased", + previousReportedTotalSize, + currentTotalRelationSize, + timestamptz_to_str(previousSizeChangeTime), + targetConnection->hostname, + targetConnection->port)))); + + previousReportedTotalSize = currentTotalRelationSize; + previousReportTime = GetCurrentTimestamp(); + } + } + + previousTotalRelationSizeForSubscription = currentTotalRelationSize; + + /* wait for 1 second (1000 miliseconds) and try again */ + WaitForMiliseconds(1000); + + MemoryContextReset(loopContext); + } + + MemoryContextSwitchTo(oldContext); +} + + +/* + * TotalRelationSizeForSubscription is a helper function which returns the total + * size of the shards that are replicated via the subscription. Note that the + * function returns the total size including indexes. 
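The waiting logic above juggles two clocks: progress is logged every `logicalReplicationProgressReportTimeout` milliseconds, and the function errors out if the total size of the subscribed relations has not changed within `citus.logical_replication_timeout`. The stand-alone sketch below shows only that control flow; `SubscriptionsAreReady` and `FetchSubscribedRelationSize` are hypothetical stand-ins for the SQL probes used in this file, and the interval values are placeholders.

```c
/* Control-flow sketch only; the two probe functions are fake stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* stand-in for the readiness check against pg_subscription_rel */
static bool
SubscriptionsAreReady(void)
{
	static int calls = 0;
	return ++calls > 5;
}

/* stand-in for SELECT sum(pg_total_relation_size(srrelid)) ... */
static uint64_t
FetchSubscribedRelationSize(void)
{
	static uint64_t size = 0;
	return size += 8192;
}

static int64_t
MillisSince(struct timespec start)
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start.tv_sec) * 1000 +
		   (now.tv_nsec - start.tv_nsec) / 1000000;
}

int
main(void)
{
	/* placeholder values for illustration only */
	const int64_t reportIntervalMs = 10 * 1000;
	const int64_t errorTimeoutMs = 2 * 60 * 60 * 1000;

	uint64_t previousSize = 0;
	struct timespec lastChange, lastReport;
	clock_gettime(CLOCK_MONOTONIC, &lastChange);
	clock_gettime(CLOCK_MONOTONIC, &lastReport);

	while (!SubscriptionsAreReady())
	{
		uint64_t currentSize = FetchSubscribedRelationSize();

		if (currentSize != previousSize)
		{
			/* the copy made progress, reset the error timer */
			clock_gettime(CLOCK_MONOTONIC, &lastChange);
			previousSize = currentSize;
		}
		else if (MillisSince(lastChange) > errorTimeoutMs)
		{
			fprintf(stderr, "no progress within the timeout, giving up\n");
			return 1;
		}

		if (MillisSince(lastReport) > reportIntervalMs)
		{
			printf("copied %llu bytes so far\n",
				   (unsigned long long) currentSize);
			clock_gettime(CLOCK_MONOTONIC, &lastReport);
		}

		sleep(1); /* the real code waits on a latch for 1000 ms */
	}

	printf("all subscribed relations are in the 'ready' state\n");
	return 0;
}
```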
+ */ +static uint64 +TotalRelationSizeForSubscription(MultiConnection *connection, char *command) +{ + bool raiseInterrupts = false; + uint64 remoteTotalSize = 0; + + int querySent = SendRemoteCommand(connection, command); + if (querySent == 0) + { + ReportConnectionError(connection, ERROR); + } + + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); + if (!IsResponseOK(result)) + { + ReportResultError(connection, result, ERROR); + } + + int rowCount = PQntuples(result); + if (rowCount != 1) + { + ereport(ERROR, (errmsg("unexpected number of rows returned by: %s", + command))); + } + + int colCount = PQnfields(result); + if (colCount != 1) + { + ereport(ERROR, (errmsg("unexpected number of columns returned by: %s", + command))); + } + + if (!PQgetisnull(result, 0, 0)) + { + char *resultString = PQgetvalue(result, 0, 0); + + remoteTotalSize = pg_strtouint64(resultString, NULL, 10); + } + else + { + ereport(ERROR, (errmsg("unexpected value returned by: %s", + command))); + } + + PQclear(result); + ForgetResults(connection); + + return remoteTotalSize; +} + + +/* + * ShardMoveSubscriptionNamesValueList returns a SQL value list containing the + * subscription names for all of the given table owner ids. This value list can + * be used in a query by using the IN operator. + */ +static char * +ShardMoveSubscriptionNamesValueList(Bitmapset *tableOwnerIds) +{ + StringInfo subscriptionValueList = makeStringInfo(); + appendStringInfoString(subscriptionValueList, "("); + int ownerId = -1; + bool first = true; + + while ((ownerId = bms_next_member(tableOwnerIds, ownerId)) >= 0) + { + if (!first) + { + appendStringInfoString(subscriptionValueList, ","); + } + else + { + first = false; + } + appendStringInfoString(subscriptionValueList, + quote_literal_cstr(ShardMoveSubscriptionName(ownerId))); + } + appendStringInfoString(subscriptionValueList, ")"); + return subscriptionValueList->data; +} + + +/* + * RelationSubscriptionsAreReady gets the subscription status for each + * shard and returns false if at least one of them is not ready. 
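`ShardMoveSubscriptionNamesValueList` above assembles a parenthesized list of quoted subscription names so that the readiness and LSN queries can filter with `IN (...)`. A rough libpq equivalent of that pattern follows; it is not part of this patch, the subscription names and connection string are invented, and quoting is done with `PQescapeLiteral` rather than `quote_literal_cstr`.

```c
/* Illustrative only: build a quoted IN-list and run the readiness count query. */
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int
main(void)
{
	/* assumed connection string for the target node */
	PGconn *conn = PQconnectdb("host=localhost port=5432 dbname=postgres");
	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "%s", PQerrorMessage(conn));
		return 1;
	}

	/* invented subscription names, one per table owner */
	const char *subscriptionNames[] = {
		"citus_shard_move_subscription_10",
		"citus_shard_move_subscription_16385",
	};
	int subscriptionCount = 2;

	/* build "('name1','name2')" with every element quoted as a literal */
	char valueList[1024] = "(";
	for (int i = 0; i < subscriptionCount; i++)
	{
		char *quoted = PQescapeLiteral(conn, subscriptionNames[i],
									   strlen(subscriptionNames[i]));
		if (quoted == NULL)
		{
			fprintf(stderr, "%s", PQerrorMessage(conn));
			return 1;
		}
		if (i > 0)
		{
			strcat(valueList, ",");
		}
		strcat(valueList, quoted);
		PQfreemem(quoted);
	}
	strcat(valueList, ")");

	/* same shape as the readiness query used in this file */
	char query[2048];
	snprintf(query, sizeof(query),
			 "SELECT count(*) FROM pg_subscription_rel, pg_stat_subscription "
			 "WHERE srsubid = subid AND srsubstate != 'r' AND subname IN %s",
			 valueList);

	PGresult *result = PQexec(conn, query);
	if (PQresultStatus(result) == PGRES_TUPLES_OK && PQntuples(result) == 1)
	{
		printf("relations not yet in 'ready' state: %s\n",
			   PQgetvalue(result, 0, 0));
	}

	PQclear(result);
	PQfinish(conn);
	return 0;
}
```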
+ */ +static bool +RelationSubscriptionsAreReady(MultiConnection *targetConnection, + Bitmapset *tableOwnerIds) +{ + bool raiseInterrupts = false; + + char *subscriptionValueList = ShardMoveSubscriptionNamesValueList(tableOwnerIds); + char *query = psprintf( + "SELECT count(*) FROM pg_subscription_rel, pg_stat_subscription " + "WHERE srsubid = subid AND srsubstate != 'r' AND subname IN %s", + subscriptionValueList); + int querySent = SendRemoteCommand(targetConnection, query); + if (querySent == 0) + { + ReportConnectionError(targetConnection, ERROR); + } + + PGresult *result = GetRemoteCommandResult(targetConnection, raiseInterrupts); + if (!IsResponseOK(result)) + { + ReportResultError(targetConnection, result, ERROR); + } + + int rowCount = PQntuples(result); + int columnCount = PQnfields(result); + + if (columnCount != 1) + { + ereport(ERROR, (errmsg("unexpected number of columns returned while reading "))); + } + if (rowCount != 1) + { + ereport(ERROR, (errmsg("unexpected number of rows returned while reading "))); + } + + int columnIndex = 0; + int rowIndex = 0; + + /* we're using the pstrdup to copy the data into the current memory context */ + char *resultString = pstrdup(PQgetvalue(result, rowIndex, columnIndex)); + + PQclear(result); + ForgetResults(targetConnection); + + int64 resultInt = SafeStringToInt64(resultString); + + return resultInt == 0; +} + + +/* + * WaitForShardMoveSubscription waits until the last LSN reported by the subscription. + * + * The function errors if the target LSN doesn't increase within LogicalReplicationErrorTimeout. + * The function also reports its progress in every logicalReplicationProgressReportTimeout. + */ +static void +WaitForShardMoveSubscription(MultiConnection *targetConnection, XLogRecPtr sourcePosition, + Bitmapset *tableOwnerIds) +{ + XLogRecPtr previousTargetPosition = 0; + TimestampTz previousLSNIncrementTime = GetCurrentTimestamp(); + + /* report in the first iteration as well */ + TimestampTz previousReportTime = 0; + + + /* + * We might be in the loop for a while. Since we don't need to preserve + * any memory beyond this function, we can simply switch to a child context + * and reset it on every iteration to make sure we don't slowly build up + * a lot of memory. + */ + MemoryContext loopContext = AllocSetContextCreateExtended(CurrentMemoryContext, + "WaitForShardMoveSubscription", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + MemoryContext oldContext = MemoryContextSwitchTo(loopContext); + + while (true) + { + XLogRecPtr targetPosition = GetSubscriptionPosition(targetConnection, + tableOwnerIds); + if (targetPosition >= sourcePosition) + { + ereport(LOG, (errmsg( + "The LSN of the target subscriptions on node %s:%d have " + "caught up with the source LSN ", + targetConnection->hostname, + targetConnection->port))); + + break; + } + + /* + * The following logic ensures that the subsription continues to grow withing + * LogicalReplicationErrorTimeout duration. Otherwise, we error out since we + * suspect that there is a problem on the target. It also handles the progess + * reporting. 
+ */ + if (targetPosition > previousTargetPosition) + { + /* variable is only used for the log message */ + uint64 previousTargetBeforeThisLoop = previousTargetPosition; + + previousTargetPosition = targetPosition; + previousLSNIncrementTime = GetCurrentTimestamp(); + + if (TimestampDifferenceExceeds(previousReportTime, + GetCurrentTimestamp(), + logicalReplicationProgressReportTimeout)) + { + ereport(LOG, (errmsg( + "The LSN of the target subscriptions on node %s:%d have " + "increased from %ld to %ld at %s where the source LSN is %ld ", + targetConnection->hostname, + targetConnection->port, previousTargetBeforeThisLoop, + targetPosition, + timestamptz_to_str(previousLSNIncrementTime), + sourcePosition))); + + previousReportTime = GetCurrentTimestamp(); + } + } + else + { + if (TimestampDifferenceExceeds(previousLSNIncrementTime, + GetCurrentTimestamp(), + LogicalReplicationTimeout)) + { + ereport(ERROR, (errmsg("The logical replication waiting timeout " + "%d msec exceeded", + LogicalReplicationTimeout), + errdetail("The LSN on the target subscription hasn't " + "caught up ready on the target node %s:%d", + targetConnection->hostname, + targetConnection->port), + errhint( + "There might have occurred problems on the target " + "node. If not consider using higher values for " + "citus.logical_replication_error_timeout"))); + } + } + + /* sleep for 1 seconds (1000 miliseconds) and try again */ + WaitForMiliseconds(1000); + + MemoryContextReset(loopContext); + } + + MemoryContextSwitchTo(oldContext); +} + + +/* + * WaitForMiliseconds waits for given timeout and then checks for some + * interrupts. + */ +static void +WaitForMiliseconds(long timeout) +{ + int latchFlags = WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH; + + /* wait until timeout, or until somebody wakes us up */ + int rc = WaitLatch(MyLatch, latchFlags, timeout, PG_WAIT_EXTENSION); + + /* emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + { + proc_exit(1); + } + + if (rc & WL_LATCH_SET) + { + ResetLatch(MyLatch); + CHECK_FOR_INTERRUPTS(); + } + + #if PG_VERSION_NUM >= PG_VERSION_13 + if (ConfigReloadPending) + { + ConfigReloadPending = false; + ProcessConfigFile(PGC_SIGHUP); + } + #endif +} + + +/* + * GetSubscriptionPosition gets the current WAL log position of the subscription, that + * is the WAL log position on the source node up to which the subscription completed + * replication. 
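The catch-up check above boils down to comparing the source node's WAL position with `min(latest_end_lsn)` reported by `pg_stat_subscription` on the target. A minimal sketch of that comparison using two plain libpq connections follows; the connection strings and the `citus_shard_move_subscription_%` name pattern are assumptions, and the comparison is pushed to the target as a single boolean query rather than done client-side.

```c
/* Illustrative sketch: has the target subscription caught up with the source? */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int
main(void)
{
	/* assumed connection strings */
	PGconn *source = PQconnectdb("host=source-node port=5432 dbname=postgres");
	PGconn *target = PQconnectdb("host=target-node port=5432 dbname=postgres");
	if (PQstatus(source) != CONNECTION_OK || PQstatus(target) != CONNECTION_OK)
	{
		fprintf(stderr, "could not connect to source or target\n");
		return 1;
	}

	/* 1. read the current WAL position on the source */
	PGresult *sourceResult = PQexec(source, "SELECT pg_current_wal_lsn()");
	if (PQresultStatus(sourceResult) != PGRES_TUPLES_OK ||
		PQntuples(sourceResult) != 1)
	{
		fprintf(stderr, "%s", PQerrorMessage(source));
		return 1;
	}
	char sourceLsn[64];
	snprintf(sourceLsn, sizeof(sourceLsn), "%s", PQgetvalue(sourceResult, 0, 0));
	PQclear(sourceResult);

	/* 2. let the target compare its replayed position against that LSN */
	const char *paramValues[1] = { sourceLsn };
	PGresult *targetResult = PQexecParams(
		target,
		"SELECT min(latest_end_lsn) >= $1::pg_lsn "
		"FROM pg_stat_subscription "
		"WHERE subname LIKE 'citus_shard_move_subscription_%'",
		1, NULL, paramValues, NULL, NULL, 0);
	if (PQresultStatus(targetResult) != PGRES_TUPLES_OK ||
		PQntuples(targetResult) != 1)
	{
		fprintf(stderr, "%s", PQerrorMessage(target));
		return 1;
	}

	bool caughtUp = !PQgetisnull(targetResult, 0, 0) &&
					strcmp(PQgetvalue(targetResult, 0, 0), "t") == 0;
	printf("subscriptions caught up with %s: %s\n",
		   sourceLsn, caughtUp ? "yes" : "no");

	PQclear(targetResult);
	PQfinish(source);
	PQfinish(target);
	return 0;
}
```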
+ */ +static XLogRecPtr +GetSubscriptionPosition(MultiConnection *connection, Bitmapset *tableOwnerIds) +{ + char *subscriptionValueList = ShardMoveSubscriptionNamesValueList(tableOwnerIds); + return GetRemoteLSN(connection, psprintf( + "SELECT min(latest_end_lsn) FROM pg_stat_subscription " + "WHERE subname IN %s", subscriptionValueList)); +} diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index ca6a100ef..aba765ed4 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -37,6 +37,7 @@ #include "distributed/connection_management.h" #include "distributed/cte_inline.h" #include "distributed/distributed_deadlock_detection.h" +#include "distributed/errormessage.h" #include "distributed/insert_select_executor.h" #include "distributed/intermediate_result_pruning.h" #include "distributed/local_multi_copy.h" @@ -44,6 +45,7 @@ #include "distributed/local_distributed_join_planner.h" #include "distributed/locally_reserved_shared_connections.h" #include "distributed/maintenanced.h" +#include "distributed/shard_cleaner.h" #include "distributed/metadata_utility.h" #include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" @@ -52,6 +54,7 @@ #include "distributed/multi_executor.h" #include "distributed/multi_explain.h" #include "distributed/multi_join_order.h" +#include "distributed/multi_logical_replication.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/distributed_planner.h" #include "distributed/combine_query_planner.h" @@ -59,6 +62,7 @@ #include "distributed/multi_server_executor.h" #include "distributed/pg_dist_partition.h" #include "distributed/placement_connection.h" +#include "distributed/query_stats.h" #include "distributed/recursive_planning.h" #include "distributed/reference_table_utils.h" #include "distributed/relation_access_tracking.h" @@ -152,6 +156,12 @@ static const struct config_enum_entry propagate_set_commands_options[] = { }; +static const struct config_enum_entry stat_statements_track_options[] = { + { "none", STAT_STATEMENTS_TRACK_NONE, false }, + { "all", STAT_STATEMENTS_TRACK_ALL, false }, + { NULL, 0, false } +}; + static const struct config_enum_entry task_assignment_policy_options[] = { { "greedy", TASK_ASSIGNMENT_GREEDY, false }, { "first-replica", TASK_ASSIGNMENT_FIRST_REPLICA, false }, @@ -802,7 +812,7 @@ RegisterCitusConfigVariables(void) "workers"), NULL, &EnableAlterDatabaseOwner, - false, + true, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); @@ -855,6 +865,17 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL, NULL, NULL, NULL); + DefineCustomBoolVariable( + "citus.enable_create_role_propagation", + gettext_noop("Enables propagating CREATE ROLE " + "and DROP ROLE statements to workers"), + NULL, + &EnableCreateRolePropagation, + true, + PGC_USERSET, + GUC_STANDARD, + NULL, NULL, NULL); + DefineCustomBoolVariable( "citus.enable_create_type_propagation", gettext_noop("Enables propagating of CREATE TYPE statements to workers"), @@ -1302,6 +1323,18 @@ RegisterCitusConfigVariables(void) GUC_STANDARD, NULL, NULL, NULL); + DefineCustomIntVariable( + "citus.logical_replication_timeout", + gettext_noop("Sets the timeout to error out when logical replication is used"), + gettext_noop("Citus uses logical replication when it moves/replicates shards. 
" + "This setting determines when Citus gives up waiting for progress " + "during logical replication and errors out."), + &LogicalReplicationTimeout, + 2 * 60 * 60 * 1000, 0, 7 * 24 * 3600 * 1000, + PGC_SIGHUP, + GUC_NO_SHOW_ALL | GUC_UNIT_MS, + NULL, NULL, NULL); + DefineCustomIntVariable( "citus.max_adaptive_executor_pool_size", gettext_noop("Sets the maximum number of connections per worker node used by " @@ -1715,6 +1748,22 @@ RegisterCitusConfigVariables(void) ShowShardsForAppNamePrefixesAssignHook, NULL); + DefineCustomBoolVariable( + "citus.skip_jsonb_validation_in_copy", + gettext_noop("Skip validation of JSONB columns on the coordinator during COPY " + "into a distributed table"), + gettext_noop("Parsing large JSON objects may incur significant CPU overhead, " + "which can lower COPY throughput. If this GUC is set (the default), " + "JSON parsing is skipped on the coordinator, which means you cannot " + "see the line number in case of malformed JSON, but throughput will " + "be higher. This setting does not apply if the input format is " + "binary."), + &SkipJsonbValidationInCopy, + true, + PGC_USERSET, + 0, + NULL, NULL, NULL); + DefineCustomBoolVariable( "citus.sort_returning", gettext_noop("Sorts the RETURNING clause to get consistent test output"), @@ -1729,6 +1778,47 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL, NULL, NULL, NULL); + /* + * It takes about 140 bytes of shared memory to store one row, therefore + * this setting should be used responsibly. setting it to 10M will require + * 1.4GB of shared memory. + */ + DefineCustomIntVariable( + "citus.stat_statements_max", + gettext_noop("Determines maximum number of statements tracked by " + "citus_stat_statements."), + NULL, + &StatStatementsMax, + 50000, 1000, 10000000, + PGC_POSTMASTER, + GUC_NO_SHOW_ALL, + NULL, NULL, NULL); + + DefineCustomIntVariable( + "citus.stat_statements_purge_interval", + gettext_noop("Determines time interval in seconds for " + "citus_stat_statements to purge expired entries."), + NULL, + &StatStatementsPurgeInterval, + 10, -1, INT_MAX, + PGC_SIGHUP, + GUC_UNIT_MS | GUC_NO_SHOW_ALL, + NULL, NULL, NULL); + + DefineCustomEnumVariable( + "citus.stat_statements_track", + gettext_noop( + "Enables/Disables the stats collection for citus_stat_statements."), + gettext_noop("Enables the stats collection when set to 'all'. " + "Disables when set to 'none'. 
Disabling can be useful for " + "avoiding extra CPU cycles needed for the calculations."), + &StatStatementsTrack, + STAT_STATEMENTS_TRACK_NONE, + stat_statements_track_options, + PGC_SUSET, + GUC_STANDARD, + NULL, NULL, NULL); + DefineCustomBoolVariable( "citus.subquery_pushdown", gettext_noop("Usage of this GUC is highly discouraged, please read the long " @@ -2075,8 +2165,10 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source) #if defined(ENABLE_GSS) || defined(ENABLE_SSPI) "krbsrvname", #endif + "sslcert", "sslcompression", "sslcrl", + "sslkey", "sslmode", "sslrootcert", "tcp_user_timeout", diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/11.0-1.sql index 64b89ec0e..d329abf29 100644 --- a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/11.0-1.sql +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/11.0-1.sql @@ -48,7 +48,7 @@ RETURNS boolean AS $$ AND blocking_locks.pid != blocked_locks.pid JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%' - $two$) where result='citus_shard_move_subscription_' || pBlockedPid); + $two$) where result LIKE 'citus_shard_move_subscription_%_' || pBlockedPid); END; $$ LANGUAGE plpgsql; diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/9.0-1.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/9.0-1.sql index 0b91cc37c..8b1f452ff 100644 --- a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/9.0-1.sql +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/9.0-1.sql @@ -48,7 +48,7 @@ RETURNS boolean AS $$ AND blocking_locks.pid != blocked_locks.pid JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%' - $two$) where result='citus_shard_move_subscription_' || pBlockedPid); + $two$) where result LIKE 'citus_shard_move_subscription_%_' || pBlockedPid); END; $$ LANGUAGE plpgsql; diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql index 64b89ec0e..d329abf29 100644 --- a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql @@ -48,7 +48,7 @@ RETURNS boolean AS $$ AND blocking_locks.pid != blocked_locks.pid JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%' - $two$) where result='citus_shard_move_subscription_' || pBlockedPid); + $two$) where result LIKE 'citus_shard_move_subscription_%_' || pBlockedPid); END; $$ LANGUAGE plpgsql; diff --git a/src/backend/distributed/test/foreign_key_relationship_query.c b/src/backend/distributed/test/foreign_key_relationship_query.c index f6266d709..ab23e4faa 100644 --- a/src/backend/distributed/test/foreign_key_relationship_query.c +++ b/src/backend/distributed/test/foreign_key_relationship_query.c @@ -5,7 +5,7 @@ * This file 
contains UDFs for getting foreign constraint relationship between * distributed tables. * - * Copyright (c), Citus Data, Inc. + * Copyright (c) Citus Data, Inc. * *------------------------------------------------------------------------- */ diff --git a/src/backend/distributed/test/pg_send_cancellation.c b/src/backend/distributed/test/pg_send_cancellation.c new file mode 100644 index 000000000..576d915a6 --- /dev/null +++ b/src/backend/distributed/test/pg_send_cancellation.c @@ -0,0 +1,70 @@ +/*------------------------------------------------------------------------- + * + * pg_send_cancellation.c + * + * This file contains functions to test setting pg_send_cancellation. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "miscadmin.h" +#include "fmgr.h" +#include "port.h" + +#include "postmaster/postmaster.h" + + +#define PG_SEND_CANCELLATION_VERSION \ + "pg_send_cancellation (PostgreSQL) " PG_VERSION "\n" + + +/* exports for SQL callable functions */ +PG_FUNCTION_INFO_V1(get_cancellation_key); +PG_FUNCTION_INFO_V1(run_pg_send_cancellation); + + +/* + * get_cancellation_key returns the cancellation key of the current process + * as an integer. + */ +Datum +get_cancellation_key(PG_FUNCTION_ARGS) +{ + PG_RETURN_INT32(MyCancelKey); +} + + +/* + * run_pg_send_cancellation runs the pg_send_cancellation program with + * the specified arguments + */ +Datum +run_pg_send_cancellation(PG_FUNCTION_ARGS) +{ + int pid = PG_GETARG_INT32(0); + int cancelKey = PG_GETARG_INT32(1); + + char sendCancellationPath[MAXPGPATH]; + char command[1024]; + + /* Locate executable backend before we change working directory */ + if (find_other_exec(my_exec_path, "pg_send_cancellation", + PG_SEND_CANCELLATION_VERSION, + sendCancellationPath) < 0) + { + ereport(ERROR, (errmsg("could not locate pg_send_cancellation"))); + } + + pg_snprintf(command, sizeof(command), "%s %d %d %s %d", + sendCancellationPath, pid, cancelKey, "localhost", PostPortNumber); + + if (system(command) != 0) + { + ereport(ERROR, (errmsg("failed to run command: %s", command))); + } + + PG_RETURN_VOID(); +} diff --git a/src/backend/distributed/transaction/distributed_deadlock_detection.c b/src/backend/distributed/transaction/distributed_deadlock_detection.c index 82c274661..5219336a3 100644 --- a/src/backend/distributed/transaction/distributed_deadlock_detection.c +++ b/src/backend/distributed/transaction/distributed_deadlock_detection.c @@ -16,6 +16,7 @@ #include "access/hash.h" #include "distributed/backend_data.h" +#include "distributed/errormessage.h" #include "distributed/distributed_deadlock_detection.h" #include "distributed/errormessage.h" #include "distributed/hash_helpers.h" diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 3f55534e5..a0e8d0fd1 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -31,6 +31,7 @@ #include "distributed/locally_reserved_shared_connections.h" #include "distributed/maintenanced.h" #include "distributed/multi_executor.h" +#include "distributed/multi_logical_replication.h" #include "distributed/multi_explain.h" #include "distributed/repartition_join_execution.h" #include "distributed/transaction_management.h" @@ -315,6 +316,8 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) UnSetDistributedTransactionId(); + 
PlacementMovedUsingLogicalReplicationInTX = false; + /* empty the CommitContext to ensure we're not leaking memory */ MemoryContextSwitchTo(previousContext); MemoryContextReset(CommitContext); @@ -387,6 +390,8 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) */ SubPlanLevel = 0; UnSetDistributedTransactionId(); + + PlacementMovedUsingLogicalReplicationInTX = false; break; } diff --git a/src/backend/distributed/utils/enable_ssl.c b/src/backend/distributed/utils/enable_ssl.c index 3d10d97b8..0dca1d0fa 100644 --- a/src/backend/distributed/utils/enable_ssl.c +++ b/src/backend/distributed/utils/enable_ssl.c @@ -163,8 +163,6 @@ citus_check_defaults_for_sslmode(PG_FUNCTION_ARGS) configChanged = true; } - /* placeholder for extra changes to configuration before reloading */ - if (configChanged) { GloballyReloadConfig(); diff --git a/src/backend/distributed/utils/errormessage.c b/src/backend/distributed/utils/errormessage.c index 62520555d..72758f9ca 100644 --- a/src/backend/distributed/utils/errormessage.c +++ b/src/backend/distributed/utils/errormessage.c @@ -8,9 +8,12 @@ #include "postgres.h" #include "utils/memutils.h" +#include "common/sha2.h" #include "distributed/citus_nodes.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" +#include "utils/builtins.h" + /* * DeferredErrorInternal is a helper function for DeferredError(). diff --git a/src/backend/distributed/utils/function_utils.c b/src/backend/distributed/utils/function_utils.c index d410246ea..04750b23f 100644 --- a/src/backend/distributed/utils/function_utils.c +++ b/src/backend/distributed/utils/function_utils.c @@ -16,6 +16,7 @@ #include "utils/builtins.h" #include "utils/regproc.h" + /* * FunctionOid searches for a function that has the given name and the given * number of arguments, and returns the corresponding function's oid. The diff --git a/src/backend/distributed/utils/log_utils.c b/src/backend/distributed/utils/log_utils.c index 9ee75dcda..a74f210c2 100644 --- a/src/backend/distributed/utils/log_utils.c +++ b/src/backend/distributed/utils/log_utils.c @@ -9,8 +9,18 @@ #include "postgres.h" +#include "distributed/pg_version_constants.h" + #include "utils/guc.h" #include "distributed/log_utils.h" +#include "distributed/errormessage.h" +#include "common/sha2.h" + +#include "utils/builtins.h" + +#if PG_VERSION_NUM >= PG_VERSION_14 +#include "common/cryptohash.h" +#endif /* @@ -26,7 +36,8 @@ IsLoggableLevel(int logLevel) /* - * HashLogMessage is only supported in Citus Enterprise + * HashLogMessage is deprecated and doesn't do anything anymore. Its indirect + * usage will be removed later. 
*/ char * HashLogMessage(const char *logText) diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c index 36d4b0b6e..0f5d20cb6 100644 --- a/src/backend/distributed/utils/maintenanced.c +++ b/src/backend/distributed/utils/maintenanced.c @@ -37,8 +37,9 @@ #include "distributed/maintenanced.h" #include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" -#include "distributed/metadata_sync.h" #include "distributed/shard_cleaner.h" +#include "distributed/metadata_sync.h" +#include "distributed/query_stats.h" #include "distributed/statistics_collection.h" #include "distributed/transaction_recovery.h" #include "distributed/version_compat.h" @@ -288,6 +289,7 @@ CitusMaintenanceDaemonMain(Datum main_arg) ErrorContextCallback errorCallback; TimestampTz lastRecoveryTime = 0; TimestampTz lastShardCleanTime = 0; + TimestampTz lastStatStatementsPurgeTime = 0; TimestampTz nextMetadataSyncTime = 0; @@ -661,6 +663,34 @@ CitusMaintenanceDaemonMain(Datum main_arg) timeout = Min(timeout, DeferShardDeleteInterval); } + if (StatStatementsPurgeInterval > 0 && + StatStatementsTrack != STAT_STATEMENTS_TRACK_NONE && + TimestampDifferenceExceeds(lastStatStatementsPurgeTime, GetCurrentTimestamp(), + (StatStatementsPurgeInterval * 1000))) + { + StartTransactionCommand(); + + if (!LockCitusExtension()) + { + ereport(DEBUG1, (errmsg("could not lock the citus extension, " + "skipping stat statements purging"))); + } + else if (CheckCitusVersion(DEBUG1) && CitusHasBeenLoaded()) + { + /* + * Record last time we perform the purge to ensure we run once per + * StatStatementsPurgeInterval. + */ + lastStatStatementsPurgeTime = GetCurrentTimestamp(); + + CitusQueryStatsSynchronizeEntries(); + } + + CommitTransactionCommand(); + + /* make sure we don't wait too long, need to convert seconds to milliseconds */ + timeout = Min(timeout, (StatStatementsPurgeInterval * 1000)); + } /* * Wait until timeout, or until somebody wakes us up. 
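The maintenance-daemon hunk above runs the purge at most once per `citus.stat_statements_purge_interval` and then caps the next sleep so the daemon wakes up in time for the following purge (note the seconds-to-milliseconds conversion). Below is a compact stand-alone sketch of that bookkeeping, with invented function names, placeholder interval values, and a fixed iteration count so it terminates.

```c
/* Stand-alone sketch of "run at most once per interval, cap the sleep". */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* hypothetical stand-in for CitusQueryStatsSynchronizeEntries() */
static void
PurgeExpiredEntries(void)
{
	printf("purging expired citus_stat_statements entries\n");
}

int
main(void)
{
	const int purgeIntervalSeconds = 10; /* assumed interval value */
	time_t lastPurgeTime = 0;

	for (int iteration = 0; iteration < 3; iteration++)
	{
		/* default wake-up period of the daemon loop (placeholder value) */
		int64_t timeoutMs = 60 * 1000;

		time_t now = time(NULL);
		if (now - lastPurgeTime >= purgeIntervalSeconds)
		{
			/* record the purge time first, so we run once per interval */
			lastPurgeTime = now;
			PurgeExpiredEntries();
		}

		/* don't oversleep: convert the interval to ms and cap the timeout */
		int64_t purgeIntervalMs = (int64_t) purgeIntervalSeconds * 1000;
		if (purgeIntervalMs < timeoutMs)
		{
			timeoutMs = purgeIntervalMs;
		}

		sleep((unsigned int) (timeoutMs / 1000));
	}

	return 0;
}
```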
Also cast the timeout to diff --git a/src/backend/distributed/utils/reference_table_utils.c b/src/backend/distributed/utils/reference_table_utils.c index 5c6775c80..e271d0ceb 100644 --- a/src/backend/distributed/utils/reference_table_utils.c +++ b/src/backend/distributed/utils/reference_table_utils.c @@ -346,9 +346,9 @@ ReplicateReferenceTableShardToNode(ShardInterval *shardInterval, char *nodeName, ShardPlacement *sourceShardPlacement = ActiveShardPlacement(shardId, missingOk); char *srcNodeName = sourceShardPlacement->nodeName; uint32 srcNodePort = sourceShardPlacement->nodePort; - bool includeData = true; - List *ddlCommandList = - CopyShardCommandList(shardInterval, srcNodeName, srcNodePort, includeData); + bool includeDataCopy = true; /* TODO: consider using logical replication */ + List *ddlCommandList = CopyShardCommandList(shardInterval, srcNodeName, srcNodePort, + includeDataCopy); ereport(NOTICE, (errmsg("Replicating reference table \"%s\" to the node %s:%d", get_rel_name(shardInterval->relationId), nodeName, diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index 47ab25523..8cf78b1a5 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -37,6 +37,7 @@ #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/worker_protocol.h" +#include "distributed/worker_transaction.h" #include "distributed/version_compat.h" #include "distributed/local_executor.h" #include "distributed/worker_shard_visibility.h" diff --git a/src/bin/pg_send_cancellation/.gitignore b/src/bin/pg_send_cancellation/.gitignore new file mode 100644 index 000000000..8088a2e98 --- /dev/null +++ b/src/bin/pg_send_cancellation/.gitignore @@ -0,0 +1 @@ +pg_send_cancellation diff --git a/src/bin/pg_send_cancellation/Makefile b/src/bin/pg_send_cancellation/Makefile new file mode 100644 index 000000000..7cf76757f --- /dev/null +++ b/src/bin/pg_send_cancellation/Makefile @@ -0,0 +1,20 @@ +citus_top_builddir = ../../.. + +PROGRAM = pg_send_cancellation +PGFILEDESC = "pg_send_cancellation sends a custom cancellation message" +OBJS = $(citus_abs_srcdir)/src/bin/pg_send_cancellation/pg_send_cancellation.o +PG_CPPFLAGS = -I$(libpq_srcdir) +PG_LIBS_INTERNAL = $(libpq_pgport) +PG_LDFLAGS += $(LDFLAGS) + +include $(citus_top_builddir)/Makefile.global + +# We reuse all the Citus flags (incl. security flags), but we are building a program not a shared library +override CFLAGS := $(filter-out -shared,$(CFLAGS)) + +# Filter out unneeded dependencies +override LIBS := $(filter-out -lz -lreadline -ledit -ltermcap -lncurses -lcurses -lpam, $(LIBS)) + +clean: clean-pg_send_cancellation +clean-pg_send_cancellation: + rm -f $(PROGRAM) $(OBJS) diff --git a/src/bin/pg_send_cancellation/README.md b/src/bin/pg_send_cancellation/README.md new file mode 100644 index 000000000..c83316419 --- /dev/null +++ b/src/bin/pg_send_cancellation/README.md @@ -0,0 +1,47 @@ +# pg_send_cancellation + +pg_send_cancellation is a program for manually sending a cancellation +to a Postgres endpoint. It is effectively a command-line version of +PQcancel in libpq, but it can use any PID or cancellation key. + +We use pg_send_cancellation primarily to propagate cancellations between pgbouncers +behind a load balancer. Since the cancellation protocol involves +opening a new connection, the new connection may go to a different +node that does not recognize the cancellation key. 
To handle that +scenario, we modified pgbouncer to pass unrecognized cancellation +keys to a shell command. + +Users can configure the cancellation_command, which will be run with: +``` + +``` + +Note that pgbouncer does not use actual PIDs. Instead, it generates PID and cancellation key together a random 8-byte number. This makes the chance of collisions exceedingly small. + +By providing pg_send_cancellation as part of Citus, we can use a shell script that pgbouncer invokes to propagate the cancellation to all *other* worker nodes in the same cluster, for example: + +```bash +#!/bin/sh +remote_ip=$1 +remote_port=$2 +pid=$3 +cancel_key=$4 + +postgres_path=/usr/pgsql-14/bin +pgbouncer_port=6432 + +nodes_query="select nodename from pg_dist_node where groupid > 0 and groupid not in (select groupid from pg_dist_local_group) and nodecluster = current_setting('citus.cluster_name')" + +# Get hostnames of other worker nodes in the cluster, and send cancellation to their pgbouncers +$postgres_path/psql -c "$nodes_query" -tAX | xargs -n 1 sh -c "$postgres_path/pg_send_cancellation $pid $cancel_key \$0 $pgbouncer_port" +``` + +One thing we need to be careful about is that the cancellations do not get forwarded +back-and-forth. This is handled in pgbouncer by setting the last bit of all generated +cancellation keys (sent to clients) to 1, and setting the last bit of all forwarded bits to 0. +That way, when a pgbouncer receives a cancellation key with the last bit set to 0, +it knows it is from another pgbouncer and should not forward further, and should set +the last bit to 1 when comparing to stored cancellation keys. + +Another thing we need to be careful about is that the integers should be encoded +as big endian on the wire. diff --git a/src/bin/pg_send_cancellation/pg_send_cancellation.c b/src/bin/pg_send_cancellation/pg_send_cancellation.c new file mode 100644 index 000000000..0ab2be95a --- /dev/null +++ b/src/bin/pg_send_cancellation/pg_send_cancellation.c @@ -0,0 +1,261 @@ +/* + * pg_send_cancellation is a program for manually sending a cancellation + * to a Postgres endpoint. It is effectively a command-line version of + * PQcancel in libpq, but it can use any PID or cancellation key. + * + * Portions Copyright (c) Citus Data, Inc. + * + * For the internal_cancel function: + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * Permission to use, copy, modify, and distribute this software and its + * documentation for any purpose, without fee, and without a written agreement + * is hereby granted, provided that the above copyright notice and this + * paragraph and the following two paragraphs appear in all copies. + * + * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING + * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS + * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
+ * + */ +#include "postgres_fe.h" + +#include +#include +#include +#include +#include + +#include "common/ip.h" +#include "common/link-canary.h" +#include "common/scram-common.h" +#include "common/string.h" +#include "libpq-fe.h" +#include "libpq-int.h" +#include "mb/pg_wchar.h" +#include "port/pg_bswap.h" + + +#define ERROR_BUFFER_SIZE 256 + + +static int internal_cancel(SockAddr *raddr, int be_pid, int be_key, + char *errbuf, int errbufsize); + + +/* + * main entry point into the pg_send_cancellation program. + */ +int +main(int argc, char *argv[]) +{ + if (argc == 2 && strcmp(argv[1], "-V") == 0) + { + pg_fprintf(stdout, "pg_send_cancellation (PostgreSQL) " PG_VERSION "\n"); + return 0; + } + + if (argc < 4 || argc > 5) + { + char *program = argv[0]; + pg_fprintf(stderr, "%s requires 4 arguments\n\n", program); + pg_fprintf(stderr, "Usage: %s [port]\n", program); + return 1; + } + + char *pidString = argv[1]; + char *cancelKeyString = argv[2]; + char *host = argv[3]; + char *portString = "5432"; + + if (argc >= 5) + { + portString = argv[4]; + } + + /* parse the PID and cancellation key */ + int pid = strtol(pidString, NULL, 10); + int cancelAuthCode = strtol(cancelKeyString, NULL, 10); + + char errorBuffer[ERROR_BUFFER_SIZE] = { 0 }; + + struct addrinfo *ipAddressList; + struct addrinfo hint; + int ipAddressListFamily = AF_UNSPEC; + SockAddr socketAddress; + + memset(&hint, 0, sizeof(hint)); + hint.ai_socktype = SOCK_STREAM; + hint.ai_family = ipAddressListFamily; + + /* resolve the hostname to an IP */ + int ret = pg_getaddrinfo_all(host, portString, &hint, &ipAddressList); + if (ret || !ipAddressList) + { + pg_fprintf(stderr, "could not translate host name \"%s\" to address: %s\n", + host, gai_strerror(ret)); + return 1; + } + + if (ipAddressList->ai_addrlen > sizeof(socketAddress.addr)) + { + pg_fprintf(stderr, "invalid address length"); + return 1; + } + + /* + * Explanation of IGNORE-BANNED: + * This is a common pattern when using getaddrinfo. The system guarantees + * that ai_addrlen < sizeof(socketAddress.addr). Out of an abundance of + * caution. We also check it above. + */ + memcpy(&socketAddress.addr, ipAddressList->ai_addr, ipAddressList->ai_addrlen); /* IGNORE-BANNED */ + socketAddress.salen = ipAddressList->ai_addrlen; + + /* send the cancellation */ + bool cancelSucceeded = internal_cancel(&socketAddress, pid, cancelAuthCode, + errorBuffer, sizeof(errorBuffer)); + if (!cancelSucceeded) + { + pg_fprintf(stderr, "sending cancellation to %s:%s failed: %s", + host, portString, errorBuffer); + return 1; + } + + pg_freeaddrinfo_all(ipAddressListFamily, ipAddressList); + + return 0; +} + + +/* *INDENT-OFF* */ + +/* + * internal_cancel is copied from fe-connect.c + * + * The return value is true if the cancel request was successfully + * dispatched, false if not (in which case an error message is available). + * Note: successful dispatch is no guarantee that there will be any effect at + * the backend. The application must read the operation result as usual. + * + * CAUTION: we want this routine to be safely callable from a signal handler + * (for example, an application might want to call it in a SIGINT handler). + * This means we cannot use any C library routine that might be non-reentrant. + * malloc/free are often non-reentrant, and anything that might call them is + * just as dangerous. We avoid sprintf here for that reason. Building up + * error messages with strcpy/strcat is tedious but should be quite safe. 
+ * We also save/restore errno in case the signal handler support doesn't. + * + * internal_cancel() is an internal helper function to make code-sharing + * between the two versions of the cancel function possible. + */ +static int +internal_cancel(SockAddr *raddr, int be_pid, int be_key, + char *errbuf, int errbufsize) +{ + int save_errno = SOCK_ERRNO; + pgsocket tmpsock = PGINVALID_SOCKET; + char sebuf[PG_STRERROR_R_BUFLEN]; + int maxlen; + struct + { + uint32 packetlen; + CancelRequestPacket cp; + } crp; + + /* + * We need to open a temporary connection to the postmaster. Do this with + * only kernel calls. + */ + if ((tmpsock = socket(raddr->addr.ss_family, SOCK_STREAM, 0)) == PGINVALID_SOCKET) + { + strlcpy(errbuf, "PQcancel() -- socket() failed: ", errbufsize); + goto cancel_errReturn; + } +retry3: + if (connect(tmpsock, (struct sockaddr *) &raddr->addr, raddr->salen) < 0) + { + if (SOCK_ERRNO == EINTR) + /* Interrupted system call - we'll just try again */ + goto retry3; + strlcpy(errbuf, "PQcancel() -- connect() failed: ", errbufsize); + goto cancel_errReturn; + } + + /* + * We needn't set nonblocking I/O or NODELAY options here. + */ + + /* Create and send the cancel request packet. */ + + crp.packetlen = pg_hton32((uint32) sizeof(crp)); + crp.cp.cancelRequestCode = (MsgType) pg_hton32(CANCEL_REQUEST_CODE); + crp.cp.backendPID = pg_hton32(be_pid); + crp.cp.cancelAuthCode = pg_hton32(be_key); + +retry4: + if (send(tmpsock, (char *) &crp, sizeof(crp), 0) != (int) sizeof(crp)) + { + if (SOCK_ERRNO == EINTR) + /* Interrupted system call - we'll just try again */ + goto retry4; + strlcpy(errbuf, "PQcancel() -- send() failed: ", errbufsize); + goto cancel_errReturn; + } + + /* + * Wait for the postmaster to close the connection, which indicates that + * it's processed the request. Without this delay, we might issue another + * command only to find that our cancel zaps that command instead of the + * one we thought we were canceling. Note we don't actually expect this + * read to obtain any data, we are just waiting for EOF to be signaled. + */ +retry5: + if (recv(tmpsock, (char *) &crp, 1, 0) < 0) + { + if (SOCK_ERRNO == EINTR) + /* Interrupted system call - we'll just try again */ + goto retry5; + /* we ignore other error conditions */ + } + + /* All done */ + closesocket(tmpsock); + SOCK_ERRNO_SET(save_errno); + return true; + +cancel_errReturn: + + /* + * Make sure we don't overflow the error buffer. Leave space for the \n at + * the end, and for the terminating zero. + */ + maxlen = errbufsize - strlen(errbuf) - 2; + if (maxlen >= 0) + { + /* + * Explanation of IGNORE-BANNED: + * This is well-tested libpq code that we would like to preserve in its + * original form. The appropriate length calculation is done above. 
+ */ + strncat(errbuf, SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)), /* IGNORE-BANNED */ + maxlen); + strcat(errbuf, "\n"); /* IGNORE-BANNED */ + } + if (tmpsock != PGINVALID_SOCKET) + closesocket(tmpsock); + SOCK_ERRNO_SET(save_errno); + return false; +} + +/* *INDENT-ON* */ diff --git a/src/include/distributed/citus_ruleutils.h b/src/include/distributed/citus_ruleutils.h index f84307fc8..938a70578 100644 --- a/src/include/distributed/citus_ruleutils.h +++ b/src/include/distributed/citus_ruleutils.h @@ -43,8 +43,10 @@ extern void deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, extern void deparse_shard_reindex_statement(ReindexStmt *origStmt, Oid distrelid, int64 shardid, StringInfo buffer); extern char * pg_get_indexclusterdef_string(Oid indexRelationId); +extern List * pg_get_table_grants(Oid relationId); extern bool contain_nextval_expression_walker(Node *node, void *context); extern char * pg_get_replica_identity_command(Oid tableRelationId); +extern List * pg_get_row_level_security_commands(Oid relationId); extern const char * RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier); extern char * flatten_reloptions(Oid relid); diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index f69e0a6cc..1db02fd3c 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -19,6 +19,7 @@ #include "nodes/parsenodes.h" #include "tcop/dest.h" #include "tcop/utility.h" +#include "utils/acl.h" extern bool AddAllLocalTablesToMetadata; @@ -180,6 +181,7 @@ extern Oid get_constraint_typid(Oid conoid); /* extension.c - forward declarations */ extern bool IsDropCitusExtensionStmt(Node *parsetree); +extern List * GetDependentFDWsToExtension(Oid extensionId); extern bool IsCreateAlterExtensionUpdateCitusStmt(Node *parsetree); extern bool ShouldMarkRelationDistributed(Oid relationId); extern void ErrorIfUnstableCreateOrAlterExtensionStmt(Node *parsetree); @@ -244,7 +246,17 @@ extern Oid GetReferencedTableId(Oid foreignKeyId); extern Oid GetReferencingTableId(Oid foreignKeyId); extern bool RelationInvolvedInAnyNonInheritedForeignKeys(Oid relationId); + +/* foreign_data_wrapper.c - forward declarations */ +extern List * PreprocessGrantOnFDWStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern Acl * GetPrivilegesForFDW(Oid FDWOid); + + /* foreign_server.c - forward declarations */ +extern List * PreprocessGrantOnForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext); extern ObjectAddress CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok); @@ -284,11 +296,16 @@ extern List * PreprocessAlterFunctionDependsStmt(Node *stmt, processUtilityContext); extern ObjectAddress AlterFunctionDependsStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * PreprocessGrantOnFunctionStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PostprocessGrantOnFunctionStmt(Node *node, const char *queryString); /* grant.c - forward declarations */ extern List * PreprocessGrantStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern void deparsePrivileges(StringInfo privsString, GrantStmt *grantStmt); +extern void deparseGrantees(StringInfo granteesString, GrantStmt *grantStmt); 
/* index.c - forward declarations */ @@ -323,8 +340,8 @@ extern ObjectAddress CreateExtensionStmtObjectAddress(Node *stmt, bool missing_o /* policy.c - forward declarations */ extern List * CreatePolicyCommands(Oid relationId); extern void ErrorIfUnsupportedPolicy(Relation relation); -extern List * PreprocessCreatePolicyStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); +extern void ErrorIfUnsupportedPolicyExpr(Node *expr); +extern List * PostprocessCreatePolicyStmt(Node *node, const char *queryString); extern List * PreprocessAlterPolicyStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PreprocessDropPolicyStmt(Node *stmt, const char *queryString, @@ -357,7 +374,17 @@ extern ObjectAddress AlterRoleStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress AlterRoleSetStmtObjectAddress(Node *node, bool missing_ok); +extern List * PreprocessCreateRoleStmt(Node *stmt, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PreprocessDropRoleStmt(Node *stmt, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PreprocessGrantRoleStmt(Node *stmt, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PostprocessGrantRoleStmt(Node *stmt, const char *queryString); extern List * GenerateCreateOrAlterRoleCommand(Oid roleOid); +ObjectAddress CreateRoleStmtObjectAddress(Node *stmt, bool missing_ok); +extern void UnmarkRolesDistributed(List *roles); +extern List * FilterDistributedRoles(List *roles); /* schema.c - forward declarations */ extern List * PreprocessCreateSchemaStmt(Node *node, const char *queryString, @@ -386,6 +413,9 @@ extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PreprocessRenameSequenceStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PreprocessGrantOnSequenceStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PostprocessGrantOnSequenceStmt(Node *node, const char *queryString); extern ObjectAddress AlterSequenceStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok); diff --git a/src/include/distributed/commands/multi_copy.h b/src/include/distributed/commands/multi_copy.h index f7a50644d..1c5d3e176 100644 --- a/src/include/distributed/commands/multi_copy.h +++ b/src/include/distributed/commands/multi_copy.h @@ -149,6 +149,9 @@ typedef struct CitusCopyDestReceiver } CitusCopyDestReceiver; +/* GUCs */ +extern bool SkipJsonbValidationInCopy; + /* managed via GUC, the default is 4MB */ extern int CopySwitchOverThresholdBytes; diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 246d413d9..d0160a218 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -41,6 +41,7 @@ extern PropSetCmdBehavior PropagateSetCommands; extern bool EnableDDLPropagation; extern int CreateObjectPropagationMode; extern bool EnableCreateTypePropagation; +extern bool EnableCreateRolePropagation; extern bool EnableAlterRolePropagation; extern bool EnableAlterRoleSetPropagation; extern bool EnableAlterDatabaseOwner; diff --git 
a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index 7a992c19a..5c2e07fe8 100644 --- a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -29,6 +29,10 @@ /* used for libpq commands that get an error buffer. Postgres docs recommend 256. */ #define ERROR_BUFFER_SIZE 256 +/* values with special behavior for authinfo lookup */ +#define WILDCARD_NODE_ID 0 +#define LOCALHOST_NODE_ID -1 + /* application name used for internal connections in Citus */ #define CITUS_APPLICATION_NAME_PREFIX "citus_internal gpid=" @@ -250,6 +254,7 @@ extern struct MemoryContextData *ConnectionContext; extern void AfterXactConnectionHandling(bool isCommit); extern void InitializeConnectionManagement(void); +extern char * GetAuthinfo(char *hostname, int32 port, char *user); extern void InitConnParams(void); extern void ResetConnParams(void); extern void InvalidateConnParamsHashEntries(void); diff --git a/src/include/distributed/coordinator_protocol.h b/src/include/distributed/coordinator_protocol.h index bda318a25..9d687997c 100644 --- a/src/include/distributed/coordinator_protocol.h +++ b/src/include/distributed/coordinator_protocol.h @@ -221,6 +221,7 @@ extern List * GetPostLoadTableCreationCommands(Oid relationId, bool includeIndex extern List * GetPreLoadTableCreationCommands(Oid relationId, IncludeSequenceDefaults includeSequenceDefaults, char *accessMethod); +extern List * GetTableRowLevelSecurityCommands(Oid relationId); extern List * GetTableIndexAndConstraintCommands(Oid relationId, int indexFlags); extern List * GetTableIndexAndConstraintCommandsExcludingReplicaIdentity(Oid relationId, int indexFlags); diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 5a316d39a..f036d7ea8 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -67,12 +67,16 @@ extern void QualifyAlterDomainOwnerStmt(Node *node); extern void QualifyRenameDomainStmt(Node *node); extern void QualifyAlterDomainSchemaStmt(Node *node); +/* forward declarations for deparse_foreign_data_wrapper_stmts.c */ +extern char * DeparseGrantOnFDWStmt(Node *node); + /* forward declarations for deparse_foreign_server_stmts.c */ extern char * DeparseCreateForeignServerStmt(Node *node); extern char * DeparseAlterForeignServerStmt(Node *node); extern char * DeparseAlterForeignServerRenameStmt(Node *node); extern char * DeparseAlterForeignServerOwnerStmt(Node *node); extern char * DeparseDropForeignServerStmt(Node *node); +extern char * DeparseGrantOnForeignServerStmt(Node *node); /* forward declarations for deparse_table_stmts.c */ extern char * DeparseAlterTableSchemaStmt(Node *stmt); @@ -102,6 +106,9 @@ extern char * DeparseDropSchemaStmt(Node *node); extern char * DeparseGrantOnSchemaStmt(Node *stmt); extern char * DeparseAlterSchemaRenameStmt(Node *stmt); +extern void AppendGrantPrivileges(StringInfo buf, GrantStmt *stmt); +extern void AppendGrantGrantees(StringInfo buf, GrantStmt *stmt); + /* forward declarations for deparse_statistics_stmts.c */ extern char * DeparseCreateStatisticsStmt(Node *node); extern char * DeparseDropStatisticsStmt(List *nameList, bool ifExists); @@ -156,6 +163,8 @@ extern char * DeparseAlterViewSchemaStmt(Node *node); /* forward declarations for deparse_function_stmts.c */ +extern bool isFunction(ObjectType objectType); + extern char * DeparseDropFunctionStmt(Node *stmt); extern char * DeparseAlterFunctionStmt(Node *stmt); @@ -164,6 +173,8 @@ extern char 
* DeparseAlterFunctionSchemaStmt(Node *stmt); extern char * DeparseAlterFunctionOwnerStmt(Node *stmt); extern char * DeparseAlterFunctionDependsStmt(Node *stmt); +extern char * DeparseGrantOnFunctionStmt(Node *node); + extern void AppendVariableSet(StringInfo buf, VariableSetStmt *setStmt); extern void QualifyAlterFunctionStmt(Node *stmt); @@ -179,6 +190,9 @@ extern char * DeparseAlterRoleSetStmt(Node *stmt); extern List * MakeSetStatementArguments(char *configurationName, char *configurationValue); extern void QualifyAlterRoleSetStmt(Node *stmt); +extern char * DeparseCreateRoleStmt(Node *stmt); +extern char * DeparseDropRoleStmt(Node *stmt); +extern char * DeparseGrantRoleStmt(Node *stmt); /* forward declarations for deparse_extension_stmts.c */ extern DefElem * GetExtensionOption(List *extensionOptions, @@ -210,11 +224,13 @@ extern char * DeparseDropSequenceStmt(Node *node); extern char * DeparseRenameSequenceStmt(Node *node); extern char * DeparseAlterSequenceSchemaStmt(Node *node); extern char * DeparseAlterSequenceOwnerStmt(Node *node); +extern char * DeparseGrantOnSequenceStmt(Node *node); /* forward declarations for qualify_sequence_stmt.c */ extern void QualifyRenameSequenceStmt(Node *node); extern void QualifyDropSequenceStmt(Node *node); extern void QualifyAlterSequenceSchemaStmt(Node *node); extern void QualifyAlterSequenceOwnerStmt(Node *node); +extern void QualifyGrantOnSequenceStmt(Node *node); #endif /* CITUS_DEPARSER_H */ diff --git a/src/include/distributed/errormessage.h b/src/include/distributed/errormessage.h index 7d2d37fee..7e0b1c33d 100644 --- a/src/include/distributed/errormessage.h +++ b/src/include/distributed/errormessage.h @@ -10,7 +10,6 @@ #ifndef ERRORMESSAGE_H #define ERRORMESSAGE_H - #include "c.h" #include "distributed/citus_nodes.h" diff --git a/src/include/distributed/log_utils.h b/src/include/distributed/log_utils.h index 51bc7d846..2c84f4471 100644 --- a/src/include/distributed/log_utils.h +++ b/src/include/distributed/log_utils.h @@ -23,6 +23,7 @@ extern char * HashLogMessage(const char *text); (log_min_messages <= ereport_loglevel ? 
HashLogMessage(text) : text) #undef ereport + #define ereport(elevel, rest) \ do { \ int ereport_loglevel = elevel; \ diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h index 9511df4cf..2bcb50778 100644 --- a/src/include/distributed/metadata/distobject.h +++ b/src/include/distributed/metadata/distobject.h @@ -35,4 +35,6 @@ extern List * GetDistributedObjectAddressList(void); extern RoleSpec * GetRoleSpecObjectForUser(Oid roleOid); extern void UpdateDistributedObjectColocationId(uint32 oldColocationId, uint32 newColocationId); +extern List * DistributedFunctionList(void); +extern List * DistributedSequenceList(void); #endif /* CITUS_METADATA_DISTOBJECT_H */ diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h index f06924148..34da96753 100644 --- a/src/include/distributed/metadata_cache.h +++ b/src/include/distributed/metadata_cache.h @@ -254,6 +254,8 @@ extern Oid CitusReadIntermediateResultFuncId(void); Oid CitusReadIntermediateResultArrayFuncId(void); extern Oid CitusExtraDataContainerFuncId(void); extern Oid CitusAnyValueFunctionId(void); +extern Oid CitusTextSendAsJsonbFunctionId(void); +extern Oid TextOutFunctionId(void); extern Oid PgTableVisibleFuncId(void); extern Oid CitusTableVisibleFuncId(void); extern Oid RelationIsAKnownShardFuncId(void); @@ -273,4 +275,8 @@ extern char * CitusExtensionOwnerName(void); extern char * CurrentUserName(void); extern const char * CurrentDatabaseName(void); +/* connection-related functions */ +extern char * GetAuthinfoViaCatalog(const char *roleName, int64 nodeId); +extern char * GetPoolinfoViaCatalog(int64 nodeId); + #endif /* METADATA_CACHE_H */ diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index e4cdf8830..8f945bafd 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -38,6 +38,7 @@ extern char * LocalGroupIdUpdateCommand(int32 groupId); extern bool ShouldSyncUserCommandForObject(ObjectAddress objectAddress); extern bool ShouldSyncTableMetadata(Oid relationId); extern bool ShouldSyncTableMetadataViaCatalog(Oid relationId); +extern bool ShouldSyncSequenceMetadata(Oid relationId); extern List * NodeMetadataCreateCommands(void); extern List * DistributedObjectMetadataSyncCommandList(void); extern List * ColocationGroupCreateCommandList(void); @@ -53,12 +54,18 @@ extern char * DistributionDeleteCommand(const char *schemaName, extern char * TableOwnerResetCommand(Oid distributedRelationId); extern char * NodeListInsertCommand(List *workerNodeList); extern List * ShardListInsertCommand(List *shardIntervalList); +extern List * ShardDeleteCommandList(ShardInterval *shardInterval); extern char * NodeDeleteCommand(uint32 nodeId); extern char * NodeStateUpdateCommand(uint32 nodeId, bool isActive); extern char * ShouldHaveShardsUpdateCommand(uint32 nodeId, bool shouldHaveShards); extern char * ColocationIdUpdateCommand(Oid relationId, uint32 colocationId); extern char * CreateSchemaDDLCommand(Oid schemaId); extern List * GrantOnSchemaDDLCommands(Oid schemaId); +extern List * GrantOnFunctionDDLCommands(Oid functionOid); +extern List * GrantOnForeignServerDDLCommands(Oid serverId); +extern List * GenerateGrantOnForeignServerQueriesFromAclItem(Oid serverId, + AclItem *aclItem); +extern List * GenerateGrantOnFDWQueriesFromAclItem(Oid serverId, AclItem *aclItem); extern char * PlacementUpsertCommand(uint64 shardId, uint64 placementId, int shardState, uint64 
shardLength, int32 groupId); extern TableDDLCommand * TruncateTriggerCreateCommand(Oid relationId); diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index 0b4c344e0..4e48b5cc0 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -270,6 +270,7 @@ extern bool ShouldPropagateObject(const ObjectAddress *address); extern List * ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort); /* Remaining metadata utility functions */ +extern Oid TableOwnerOid(Oid relationId); extern char * TableOwner(Oid relationId); extern void EnsureTablePermissions(Oid relationId, AclMode mode); extern void EnsureTableOwner(Oid relationId); diff --git a/src/include/distributed/multi_logical_replication.h b/src/include/distributed/multi_logical_replication.h new file mode 100644 index 000000000..629179c54 --- /dev/null +++ b/src/include/distributed/multi_logical_replication.h @@ -0,0 +1,33 @@ +/*------------------------------------------------------------------------- + * + * multi_logical_replication.h + * + * Declarations for public functions and variables used in logical replication + * on the distributed tables while moving shards. + * + * Copyright (c) 2017, Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ +#ifndef MULTI_LOGICAL_REPLICATION_H_ +#define MULTI_LOGICAL_REPLICATION_H_ + + +#include "nodes/pg_list.h" + + +/* Config variables managed via guc.c */ +extern int LogicalReplicationTimeout; + +extern bool PlacementMovedUsingLogicalReplicationInTX; + + +extern void LogicallyReplicateShards(List *shardList, char *sourceNodeName, + int sourceNodePort, char *targetNodeName, + int targetNodePort); + +#define SHARD_MOVE_PUBLICATION_PREFIX "citus_shard_move_publication_" +#define SHARD_MOVE_SUBSCRIPTION_PREFIX "citus_shard_move_subscription_" +#define SHARD_MOVE_SUBSCRIPTION_ROLE_PREFIX "citus_shard_move_subscription_role_" + +#endif /* MULTI_LOGICAL_REPLICATION_H_ */ diff --git a/src/include/distributed/query_stats.h b/src/include/distributed/query_stats.h index 75153a991..3c73d03ab 100644 --- a/src/include/distributed/query_stats.h +++ b/src/include/distributed/query_stats.h @@ -15,5 +15,16 @@ extern void InitializeCitusQueryStats(void); extern void CitusQueryStatsExecutorsEntry(uint64 queryId, MultiExecutorType executorType, char *partitionKey); +extern void CitusQueryStatsSynchronizeEntries(void); +extern int StatStatementsPurgeInterval; +extern int StatStatementsMax; +extern int StatStatementsTrack; + + +typedef enum +{ + STAT_STATEMENTS_TRACK_NONE = 0, + STAT_STATEMENTS_TRACK_ALL = 1 +} StatStatementsTrackType; #endif /* QUERY_STATS_H */ diff --git a/src/include/distributed/resource_lock.h b/src/include/distributed/resource_lock.h index b87896b99..9af280b69 100644 --- a/src/include/distributed/resource_lock.h +++ b/src/include/distributed/resource_lock.h @@ -40,7 +40,8 @@ typedef enum AdvisoryLocktagClass ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION = 7, ADV_LOCKTAG_CLASS_CITUS_COLOCATED_SHARDS_METADATA = 8, ADV_LOCKTAG_CLASS_CITUS_OPERATIONS = 9, - ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP = 10 + ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP = 10, + ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12 } AdvisoryLocktagClass; /* CitusOperations has constants for citus operations */ @@ -110,6 +111,16 @@ typedef enum CitusOperations (uint32) 0, \ ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP) +/* reuse advisory lock, but with different, unused 
field 4 (12) + * Also it has the database hardcoded to MyDatabaseId, to ensure the locks + * are local to each database */ +#define SET_LOCKTAG_LOGICAL_REPLICATION(tag) \ + SET_LOCKTAG_ADVISORY(tag, \ + MyDatabaseId, \ + (uint32) 0, \ + (uint32) 0, \ + ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION) + /* * DistLockConfigs are used to configure the locking behaviour of AcquireDistributedLockOnRelations */ @@ -156,6 +167,7 @@ extern void UnlockColocationId(int colocationId, LOCKMODE lockMode); /* Lock multiple shards for safe modification */ extern void LockShardListMetadata(List *shardIntervalList, LOCKMODE lockMode); +extern void LockShardListMetadataOnWorkers(LOCKMODE lockmode, List *shardIntervalList); extern void LockShardsInPlacementListMetadata(List *shardPlacementList, LOCKMODE lockMode); diff --git a/src/include/distributed/shard_rebalancer.h b/src/include/distributed/shard_rebalancer.h index 11730492a..ed13248c3 100644 --- a/src/include/distributed/shard_rebalancer.h +++ b/src/include/distributed/shard_rebalancer.h @@ -4,7 +4,9 @@ * * Type and function declarations for the shard rebalancer tool. * - * Copyright (c), Citus Data, Inc. + * Copyright (c) 2016, Citus Data, Inc. + * + * $Id$ * *------------------------------------------------------------------------- */ diff --git a/src/include/distributed/string_utils.h b/src/include/distributed/string_utils.h index c43e63ce8..1c5b4ad1a 100644 --- a/src/include/distributed/string_utils.h +++ b/src/include/distributed/string_utils.h @@ -15,4 +15,7 @@ extern char * ConvertIntToString(int val); +#define StringStartsWith(str, prefix) \ + (strncmp(str, prefix, strlen(prefix)) == 0) + #endif /* CITUS_STRING_UTILS_H */ diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index ad7a8bbb0..0992c0387 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -67,6 +67,7 @@ #define PQ_LARGE_MESSAGE_LIMIT 0 #define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(b) #define pull_varnos_compat(a, b) pull_varnos(b) +#define ROLE_PG_READ_ALL_STATS DEFAULT_ROLE_READ_ALL_STATS #endif #if PG_VERSION_NUM >= PG_VERSION_13 diff --git a/src/test/regress/Makefile b/src/test/regress/Makefile index 6636a082a..21a0201c7 100644 --- a/src/test/regress/Makefile +++ b/src/test/regress/Makefile @@ -16,9 +16,9 @@ MAKEFILE_DIR := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) export PATH := $(MAKEFILE_DIR)/bin:$(PATH) export PG_REGRESS_DIFF_OPTS = -dU10 -w # Use lower isolation test timeout, the 5 minute default is waaay too long for -# us so we use 5 seconds instead. We should detect blockages very quickly and +# us so we use 20 seconds instead. We should detect blockages very quickly and # the queries we run are also very fast. -export PGISOLATIONTIMEOUT = 5 +export PGISOLATIONTIMEOUT = 20 ## ## Citus regression support @@ -41,9 +41,11 @@ output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $ # have make check actually run all tests, but keep check-full as an # intermediate, for muscle memory backward compatibility. 
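For context on the resource_lock.h hunk above: the new ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION class (advisory lock field 4 = 12) together with SET_LOCKTAG_LOGICAL_REPLICATION bakes MyDatabaseId into the tag, so shard moves that use logical replication only serialize against moves in the same database. Below is a minimal sketch of how a caller might take that lock; the function name AcquireLogicalReplicationLock and the choice of ExclusiveLock are illustrative assumptions and are not part of this diff.

/*
 * Illustrative sketch only (not part of this diff): serialize on the new
 * database-local advisory lock class before starting a logical-replication
 * based shard move.
 */
#include "postgres.h"

#include "miscadmin.h"
#include "storage/lock.h"

#include "distributed/resource_lock.h"

static void
AcquireLogicalReplicationLock(void)
{
	LOCKTAG tag;

	/*
	 * Fills the tag with MyDatabaseId and
	 * ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION, so concurrent moves in
	 * other databases on the same cluster are not blocked.
	 */
	SET_LOCKTAG_LOGICAL_REPLICATION(tag);

	/* waits for any concurrent logical-replication shard move to finish */
	(void) LockAcquire(&tag, ExclusiveLock, false, false);
}

With sessionLock set to false, PostgreSQL releases the lock automatically at transaction end, so no explicit unlock is needed on the happy path.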
-check: check-full +check: check-full check-enterprise-full # check-full triggers all tests that ought to be run routinely check-full: check-multi check-multi-mx check-multi-1 check-worker check-operations check-follower-cluster check-isolation check-failure +# check-enterprise-full triggers all enterprise specific tests +check-enterprise-full: check-enterprise check-enterprise-isolation check-enterprise-failure ISOLATION_DEPDIR=.deps/isolation @@ -136,6 +138,9 @@ check-multi: all $(pg_regress_multi_check) --load-extension=citus \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS) +check-enterprise: all + $(pg_regress_multi_check) --load-extension=citus \ + -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/enterprise_schedule $(EXTRA_TESTS) check-multi-1: all $(pg_regress_multi_check) --load-extension=citus \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_1_schedule $(EXTRA_TESTS) @@ -167,6 +172,24 @@ check-isolation: all $(isolation_test_files) $(pg_regress_multi_check) --load-extension=citus --isolationtester \ -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS) +check-enterprise-isolation: all $(isolation_test_files) + $(pg_regress_multi_check) --load-extension=citus --isolationtester \ + -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/enterprise_isolation_schedule $(EXTRA_TESTS) + +# we have separate targets for logical replication tests because they take very long to complete +# hence this increases parallelism a lot without sacrificing any coverage. +check-enterprise-isolation-logicalrep-1: all $(isolation_test_files) + $(pg_regress_multi_check) --load-extension=citus --isolationtester \ + -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/enterprise_isolation_logicalrep_1_schedule $(EXTRA_TESTS) + +check-enterprise-isolation-logicalrep-2: all $(isolation_test_files) + $(pg_regress_multi_check) --load-extension=citus --isolationtester \ + -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/enterprise_isolation_logicalrep_2_schedule $(EXTRA_TESTS) + +check-enterprise-isolation-logicalrep-3: all $(isolation_test_files) + $(pg_regress_multi_check) --load-extension=citus --isolationtester \ + -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/enterprise_isolation_logicalrep_3_schedule $(EXTRA_TESTS) + check-isolation-base: all $(isolation_test_files) $(pg_regress_multi_check) --load-extension=citus --isolationtester \ -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/base_isolation_schedule $(EXTRA_TESTS) @@ -207,6 +230,10 @@ check-failure-base: all $(pg_regress_multi_check) --load-extension=citus --mitmproxy \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/failure_base_schedule $(EXTRA_TESTS) +check-enterprise-failure: all + $(pg_regress_multi_check) --load-extension=citus --mitmproxy \ + -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/enterprise_failure_schedule $(EXTRA_TESTS) + check-pg-upgrade: $(pg_upgrade_check) --old-bindir=$(old-bindir) --new-bindir=$(new-bindir) --pgxsdir=$(pgxsdir) diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 2c15915a2..2fbb026ed 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -95,6 +95,7 @@ s/ *$//g # pg12 changes
s/Partitioned table "/Table "/g s/\) TABLESPACE pg_default$/\)/g +s/invalid input syntax for type bigint:/invalid input syntax for integer:/g s/invalid input syntax for type /invalid input syntax for /g s/_id_ref_id_fkey/_id_fkey/g s/_ref_id_id_fkey_/_ref_id_fkey_/g @@ -250,6 +251,8 @@ s/ERROR: parallel workers for vacuum must/ERROR: parallel vacuum degree must/g s/(CONTEXT: PL\/pgSQL function .* line )([0-9]+)/\1XX/g s/^(PL\/pgSQL function .* line) [0-9]+ (.*)/\1 XX \2/g +# normalize a test difference in multi_move_mx +s/ connection to server at "\w+" \(127\.0\.0\.1\), port [0-9]+ failed://g # can be removed after dropping PG13 support s/ERROR: parallel workers for vacuum must be between/ERROR: parallel vacuum degree must be between/g s/ERROR: fake_fetch_row_version not implemented/ERROR: fake_tuple_update not implemented/g @@ -265,6 +268,8 @@ s/TRIM\(BOTH FROM value\)/btrim\(value\)/g s/pg14\.idx.*/pg14\.xxxxx/g s/CREATE TABLESPACE test_tablespace LOCATION.*/CREATE TABLESPACE test_tablespace LOCATION XXXX/g +/DETAIL: Subqueries are not supported in policies on distributed tables/d +s/ERROR: unexpected non-SELECT command in SubLink/ERROR: cannot create policy/g # columnar log for var correlation s/(.*absolute correlation \()([0,1]\.[0-9]+)(\) of var attribute [0-9]+ is smaller than.*)/\1X\.YZ\3/g diff --git a/src/test/regress/enterprise_failure_schedule b/src/test/regress/enterprise_failure_schedule new file mode 100644 index 000000000..253134142 --- /dev/null +++ b/src/test/regress/enterprise_failure_schedule @@ -0,0 +1,12 @@ +# import this file (from psql you can use \i) to use mitmproxy manually +test: failure_test_helpers + +# this should only be run by pg_regress_multi, you don't need it +test: failure_setup +test: multi_test_helpers multi_test_helpers_superuser + +test: failure_online_move_shard_placement +test: failure_on_create_subscription +test: failure_offline_move_shard_placement +test: failure_tenant_isolation +test: check_mx diff --git a/src/test/regress/enterprise_isolation_logicalrep_1_schedule b/src/test/regress/enterprise_isolation_logicalrep_1_schedule new file mode 100644 index 000000000..7edae72d9 --- /dev/null +++ b/src/test/regress/enterprise_isolation_logicalrep_1_schedule @@ -0,0 +1,7 @@ +# tests that change node metadata should precede +# isolation_cluster_management such that tests +# that come later can be parallelized +test: isolation_cluster_management + +test: isolation_logical_replication_single_shard_commands +test: isolation_logical_replication_multi_shard_commands diff --git a/src/test/regress/enterprise_isolation_logicalrep_2_schedule b/src/test/regress/enterprise_isolation_logicalrep_2_schedule new file mode 100644 index 000000000..1863d4dc5 --- /dev/null +++ b/src/test/regress/enterprise_isolation_logicalrep_2_schedule @@ -0,0 +1,7 @@ +# tests that change node metadata should precede +# isolation_cluster_management such that tests +# that come later can be parallelized +test: isolation_cluster_management + +test: isolation_logical_replication_single_shard_commands_on_mx +test: isolation_logical_replication_multi_shard_commands_on_mx diff --git a/src/test/regress/enterprise_isolation_logicalrep_3_schedule b/src/test/regress/enterprise_isolation_logicalrep_3_schedule new file mode 100644 index 000000000..0292e4763 --- /dev/null +++ b/src/test/regress/enterprise_isolation_logicalrep_3_schedule @@ -0,0 +1,6 @@ +# tests that change node metadata should precede +# isolation_cluster_management such that tests +# that come later can be parallelized +test: 
isolation_cluster_management + +test: isolation_logical_replication_with_partitioning diff --git a/src/test/regress/enterprise_isolation_schedule b/src/test/regress/enterprise_isolation_schedule new file mode 100644 index 000000000..ef64eff92 --- /dev/null +++ b/src/test/regress/enterprise_isolation_schedule @@ -0,0 +1,13 @@ +# tests that change node metadata should precede +# isolation_cluster_management such that tests +# that come later can be parallelized +test: isolation_cluster_management + +test: isolation_move_placement_vs_move_placement +test: isolation_move_placement_vs_modification +test: isolation_move_placement_vs_modification_fk +test: isolation_tenant_isolation_with_fkey_to_reference +test: isolation_ref2ref_foreign_keys_enterprise +test: isolation_pg_send_cancellation +test: isolation_shard_move_vs_start_metadata_sync +test: isolation_tenant_isolation diff --git a/src/test/regress/enterprise_schedule b/src/test/regress/enterprise_schedule new file mode 100644 index 000000000..b8dcf53ad --- /dev/null +++ b/src/test/regress/enterprise_schedule @@ -0,0 +1,37 @@ +# should come before multi_cluster_management +# as it touches node metadata +test: single_node_enterprise + +test: multi_test_helpers multi_test_helpers_superuser +test: multi_cluster_management +test: multi_test_catalog_views +test: multi_data_types +test: multi_tenant_isolation + +# -------- +# Tests that require adding coordinator to pg_dist_node +# -------- +test: add_coordinator + +test: citus_local_tables_ent + +test: remove_coordinator +# -------- + +test: logical_replication +test: multi_create_table +test: multi_create_table_superuser +test: multi_create_role_dependency +test: multi_create_users +test: multi_multiuser_master_protocol +test: multi_multiuser_load_data +test: multi_multiuser_basic_queries +test: multi_multiuser_grant +test: multi_multiuser_copy +test: multi_multiuser_auth +test: multi_poolinfo_usage +test: multi_alter_table_row_level_security +test: multi_alter_table_row_level_security_escape +test: stat_statements +test: shard_move_constraints +test: check_mx diff --git a/src/test/regress/expected/.gitignore b/src/test/regress/expected/.gitignore index 8316587de..3e6806e13 100644 --- a/src/test/regress/expected/.gitignore +++ b/src/test/regress/expected/.gitignore @@ -5,6 +5,7 @@ /multi_agg_distinct.out /multi_agg_type_conversion.out /multi_alter_table_statements.out +/multi_alter_table_statements_0.out /multi_behavioral_analytics_create_table.out /multi_behavioral_analytics_create_table_superuser.out /multi_complex_count_distinct.out @@ -12,6 +13,8 @@ /multi_load_data.out /multi_load_data_superuser.out /multi_load_more_data.out +/multi_multiuser_load_data.out +/multi_multiuser_load_data_0.out /multi_mx_copy_data.out /multi_outer_join.out /multi_outer_join_reference.out diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index 2bf44bd1b..ba9d9e2f3 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -645,15 +645,6 @@ select array_collect_sort(val) from aggdata; -- Test multiuser scenario create user notsuper; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
-select run_command_on_workers($$create user notsuper$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - grant all on schema aggregate_support to notsuper; grant all on all tables in schema aggregate_support to notsuper; select 1 from run_command_on_workers($$ diff --git a/src/test/regress/expected/alter_database_owner.out b/src/test/regress/expected/alter_database_owner.out index 1fb118242..edf8627e3 100644 --- a/src/test/regress/expected/alter_database_owner.out +++ b/src/test/regress/expected/alter_database_owner.out @@ -1,27 +1,11 @@ CREATE SCHEMA alter_database_owner; SET search_path TO alter_database_owner, public; CREATE USER database_owner_1; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER database_owner_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -SELECT run_command_on_workers('CREATE USER database_owner_1'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - -SELECT run_command_on_workers('CREATE USER database_owner_2'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - -- make sure the propagation of ALTER DATABASE ... OWNER TO ... is on -SET citus.enable_alter_database_owner TO on; +-- in enterprise we default to on, to verify this we don't set it explicitly to on and run +-- the rest of the tests exactly as on community +-- SET citus.enable_alter_database_owner TO on; -- list the owners of the current database on all nodes SELECT run_command_on_workers($$ SELECT u.rolname @@ -149,6 +133,48 @@ $$); (localhost,57638,t,database_owner_1) (2 rows) +-- verify that a user not present on the newly added node will automatically be propagated +SELECT master_remove_node('localhost', :worker_2_port); + master_remove_node +--------------------------------------------------------------------- + +(1 row) + +CREATE USER database_owner_3; +ALTER DATABASE regression OWNER TO database_owner_3; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); + rolname +--------------------------------------------------------------------- + database_owner_3 +(1 row) + +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,database_owner_3) + (localhost,57638,t,database_owner_3) +(2 rows) + +-- reset back to original owner to not break subsequent tests and remove enterprise test user +ALTER DATABASE regression OWNER TO database_owner_1; +DROP USER database_owner_3; CREATE TABLE t (a int PRIMARY KEY); SELECT create_distributed_table('t', 'a'); create_distributed_table @@ -283,19 +309,5 @@ $$); DROP USER database_owner_1; DROP USER database_owner_2; -SELECT run_command_on_workers('DROP USER database_owner_1'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP ROLE") - (localhost,57638,t,"DROP ROLE") -(2 rows) - -SELECT run_command_on_workers('DROP USER database_owner_2'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP ROLE") - (localhost,57638,t,"DROP ROLE") -(2 rows) - SET client_min_messages TO warning; DROP SCHEMA alter_database_owner CASCADE; diff --git a/src/test/regress/expected/alter_distributed_table.out b/src/test/regress/expected/alter_distributed_table.out index 2bdc36b8c..3ca4d934a 100644 --- a/src/test/regress/expected/alter_distributed_table.out +++ b/src/test/regress/expected/alter_distributed_table.out @@ -629,6 +629,74 @@ NOTICE: renaming the new table to alter_distributed_table.par_table (1 row) +-- test changing shard count into a default colocation group with shard split +-- ensure there is no colocation group with 23 shards +SELECT count(*) FROM pg_dist_colocation WHERE shardcount = 23; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SET citus.shard_count TO 23; +CREATE TABLE shard_split_table (a int, b int); +SELECT create_distributed_table ('shard_split_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- show the difference in pg_dist_colocation and citus_tables shard counts +SELECT + ( + SELECT shardcount FROM pg_dist_colocation WHERE colocationid IN + ( + SELECT colocation_id FROM public.citus_tables WHERE table_name = 'shard_split_table'::regclass + ) + ) AS "pg_dist_colocation", + (SELECT shard_count FROM public.citus_tables WHERE table_name = 'shard_split_table'::regclass) AS "citus_tables"; + pg_dist_colocation | citus_tables +--------------------------------------------------------------------- + 23 | 25 +(1 row) + +SET citus.shard_count TO 4; +-- distribute another table and then change shard count to 23 +CREATE TABLE shard_split_table_2 (a int, b int); +SELECT create_distributed_table ('shard_split_table_2', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT alter_distributed_table ('shard_split_table_2', shard_count:=23, cascade_to_colocated:=false); +NOTICE: creating a new table for alter_distributed_table.shard_split_table_2 +NOTICE: moving the data of alter_distributed_table.shard_split_table_2 +NOTICE: dropping the old alter_distributed_table.shard_split_table_2 +NOTICE: renaming the new table to alter_distributed_table.shard_split_table_2 + alter_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT a.colocation_id = b.colocation_id FROM public.citus_tables a, public.citus_tables b + WHERE a.table_name = 'shard_split_table'::regclass AND b.table_name = 'shard_split_table_2'::regclass; + ?column? +--------------------------------------------------------------------- + f +(1 row) + +SELECT shard_count FROM public.citus_tables WHERE table_name = 'shard_split_table_2'::regclass; + shard_count +--------------------------------------------------------------------- + 23 +(1 row) + -- test messages -- test nothing to change SELECT alter_distributed_table('dist_table'); diff --git a/src/test/regress/expected/auto_undist_citus_local.out b/src/test/regress/expected/auto_undist_citus_local.out index 8051f8a8a..afb725f3e 100644 --- a/src/test/regress/expected/auto_undist_citus_local.out +++ b/src/test/regress/expected/auto_undist_citus_local.out @@ -1193,17 +1193,8 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre (4 rows) -- test DROP OWNED BY --- Citus does not support "ALTER TABLE OWNER TO" commands. Also, not to deal with tests output --- difference between community and enterprise, let's disable enable_ddl_propagation here. -SET citus.enable_ddl_propagation to OFF; CREATE USER another_user; -SELECT run_command_on_workers('CREATE USER another_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - +-- Citus does not support "ALTER TABLE OWNER TO" commands. 
ALTER TABLE reference_table_1 OWNER TO another_user; SELECT run_command_on_placements('reference_table_1', 'ALTER TABLE %s OWNER TO another_user'); run_command_on_placements @@ -1213,7 +1204,6 @@ SELECT run_command_on_placements('reference_table_1', 'ALTER TABLE %s OWNER TO a (localhost,57638,1810093,t,"ALTER TABLE") (3 rows) -SET citus.enable_ddl_propagation to ON; BEGIN; DROP OWNED BY another_user cascade; SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid IN ( 'citus_local_table_1'::regclass, 'citus_local_table_2'::regclass, 'citus_local_table_3'::regclass) ORDER BY logicalrelid; diff --git a/src/test/regress/expected/citus_local_tables.out b/src/test/regress/expected/citus_local_tables.out index 9dac373cd..1c95f578b 100644 --- a/src/test/regress/expected/citus_local_tables.out +++ b/src/test/regress/expected/citus_local_tables.out @@ -170,17 +170,19 @@ NOTICE: executing the command locally: SELECT value FROM citus_local_tables_tes (1 row) ROLLBACK; --- show that we do not support policies in citus community -- +-- show that we support policies in citus enterprise -- BEGIN; CREATE TABLE citus_local_table_3 (table_user text); ALTER TABLE citus_local_table_3 ENABLE ROW LEVEL SECURITY; CREATE ROLE table_users; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes CREATE POLICY table_policy ON citus_local_table_3 TO table_users USING (table_user = current_user); - -- this should error out SELECT citus_add_local_table_to_metadata('citus_local_table_3'); -ERROR: policies on distributed tables are only supported in Citus Enterprise + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + ROLLBACK; -- show that we properly handle sequences on citus local tables -- BEGIN; @@ -291,13 +293,13 @@ ERROR: relation "citus_local_table_1" is a local table SELECT get_shard_id_for_distribution_column('citus_local_table_1', 'not_checking_this_arg_for_non_dist_tables'); get_shard_id_for_distribution_column --------------------------------------------------------------------- - 1504013 + 1504014 (1 row) SELECT get_shard_id_for_distribution_column('citus_local_table_1'); get_shard_id_for_distribution_column --------------------------------------------------------------------- - 1504013 + 1504014 (1 row) -- master_copy_shard_placement is not supported @@ -309,7 +311,7 @@ BEGIN; SELECT undistribute_table('citus_local_table_1'); NOTICE: creating a new table for citus_local_tables_test_schema.citus_local_table_1 NOTICE: moving the data of citus_local_tables_test_schema.citus_local_table_1 -NOTICE: executing the command locally: SELECT a FROM citus_local_tables_test_schema.citus_local_table_1_1504013 citus_local_table_1 +NOTICE: executing the command locally: SELECT a FROM citus_local_tables_test_schema.citus_local_table_1_1504014 citus_local_table_1 NOTICE: dropping the old citus_local_tables_test_schema.citus_local_table_1 NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.citus_local_table_1_xxxxx CASCADE NOTICE: renaming the new table to citus_local_tables_test_schema.citus_local_table_1 @@ -352,7 +354,7 @@ SELECT citus_add_local_table_to_metadata('"CiTUS!LocalTables"."LocalTabLE.1!?!"' -- drop the table before creating it when the search path is set SET search_path to "CiTUS!LocalTables" ; DROP TABLE "LocalTabLE.1!?!"; -NOTICE: executing the command locally: DROP TABLE IF EXISTS "CiTUS!LocalTables"."LocalTabLE.1!?!_1504021" CASCADE +NOTICE: executing the command 
locally: DROP TABLE IF EXISTS "CiTUS!LocalTables"."LocalTabLE.1!?!_1504022" CASCADE -- have a custom type in the local table CREATE TYPE local_type AS (key int, value jsonb); -- create btree_gist for GiST index @@ -402,10 +404,10 @@ SELECT citus_add_local_table_to_metadata('"LocalTabLE.1!?!9012345678901234567890 -- create some objects after citus_add_local_table_to_metadata CREATE INDEX "my!Index2" ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id) WITH ( fillfactor = 90 ) WHERE id < 20; NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456" -NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504022" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) WITH (fillfactor = '90' ) WHERE (id < 20) +NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504023" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504023" USING btree (id ) WITH (fillfactor = '90' ) WHERE (id < 20) CREATE UNIQUE INDEX uniqueIndex2 ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id); NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456" -NOTICE: executing the command locally: CREATE UNIQUE INDEX uniqueindex2_1504022 ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) +NOTICE: executing the command locally: CREATE UNIQUE INDEX uniqueindex2_1504023 ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504023" USING btree (id ) --------------------------------------------------------------------- ---- utility command execution ---- --------------------------------------------------------------------- @@ -498,7 +500,7 @@ CREATE TABLE local_table_4 ( b int references local_table_4(a)); NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (xxxxx, 'citus_local_tables_test_schema', xxxxx, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_tables_test_schema.local_table_4 ADD CONSTRAINT local_table_4_a_fkey FOREIGN KEY (a) REFERENCES citus_local_tables_test_schema.citus_local_table_1(a)') ALTER TABLE citus_local_table_1 ADD COLUMN b int NOT NULL; -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504013, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD COLUMN b int NOT NULL;') +NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504014, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD COLUMN b int NOT NULL;') -- show that we added column with NOT NULL SELECT table_name, column_name, is_nullable FROM INFORMATION_SCHEMA.COLUMNS @@ -507,11 +509,11 @@ ORDER BY 1; table_name | column_name | is_nullable --------------------------------------------------------------------- citus_local_table_1 | b | NO - citus_local_table_1_1504013 | b | NO + citus_local_table_1_1504014 | b | NO (2 rows) ALTER TABLE citus_local_table_1 ADD CONSTRAINT unique_a_b UNIQUE (a, b); -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504013, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD CONSTRAINT unique_a_b UNIQUE (a, b);') +NOTICE: executing the command 
locally: SELECT worker_apply_shard_ddl_command (1504014, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD CONSTRAINT unique_a_b UNIQUE (a, b);') -- show that we defined unique constraints SELECT conrelid::regclass, conname, conkey FROM pg_constraint @@ -519,12 +521,12 @@ WHERE conrelid::regclass::text LIKE 'citus_local_table_1%' AND contype = 'u' ORDER BY 1; conrelid | conname | conkey --------------------------------------------------------------------- - citus_local_table_1_1504013 | unique_a_b_1504013 | {1,2} + citus_local_table_1_1504014 | unique_a_b_1504014 | {1,2} citus_local_table_1 | unique_a_b | {1,2} (2 rows) CREATE UNIQUE INDEX citus_local_table_1_idx ON citus_local_table_1(b); -NOTICE: executing the command locally: CREATE UNIQUE INDEX citus_local_table_1_idx_1504013 ON citus_local_tables_test_schema.citus_local_table_1_1504013 USING btree (b ) +NOTICE: executing the command locally: CREATE UNIQUE INDEX citus_local_table_1_idx_1504014 ON citus_local_tables_test_schema.citus_local_table_1_1504014 USING btree (b ) -- show that we successfully defined the unique index SELECT indexrelid::regclass, indrelid::regclass, indkey FROM pg_index @@ -533,7 +535,7 @@ ORDER BY 1; indexrelid | indrelid | indkey --------------------------------------------------------------------- unique_a_b | citus_local_table_1 | 1 2 - unique_a_b_1504013 | citus_local_table_1_1504013 | 1 2 + unique_a_b_1504014 | citus_local_table_1_1504014 | 1 2 (2 rows) -- test creating citus local table with an index from non-default schema @@ -541,7 +543,7 @@ CREATE SCHEMA "test_\'index_schema"; CREATE TABLE "test_\'index_schema".testindex (a int, b int); CREATE INDEX ind ON "test_\'index_schema".testindex (a); ALTER TABLE "test_\'index_schema".testindex ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a); -NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1504026, E'test_\\''index_schema', 1504023, 'citus_local_tables_test_schema', E'ALTER TABLE "test_\\''index_schema".testindex ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a);') +NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1504027, E'test_\\''index_schema', 1504024, 'citus_local_tables_test_schema', E'ALTER TABLE "test_\\''index_schema".testindex ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a);') SELECT COUNT(*)=2 FROM pg_indexes WHERE tablename LIKE 'testindex%' AND indexname LIKE 'ind%'; ?column? 
--------------------------------------------------------------------- @@ -566,7 +568,7 @@ DROP TABLE citus_local_table_1, citus_local_table_2, distributed_table, local_ta NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.local_table_4_xxxxx CASCADE NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.reference_table_xxxxx CASCADE NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.local_table_xxxxx CASCADE -NOTICE: drop cascades to constraint fkey_c_to_local_1504013 on table citus_local_tables_test_schema.citus_local_table_1_1504013 +NOTICE: drop cascades to constraint fkey_c_to_local_1504014 on table citus_local_tables_test_schema.citus_local_table_1_1504014 NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.citus_local_table_2_xxxxx CASCADE NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.citus_local_table_1_xxxxx CASCADE -- test some other udf's with citus local tables @@ -739,7 +741,7 @@ CREATE STATISTICS stx1 ON a, b FROM test_citus_local_table_with_stats; ALTER TABLE test_citus_local_table_with_stats ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a); NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (xxxxx, 'citus_local_tables_test_schema', xxxxx, 'citus_local_tables_test_schema', 'ALTER TABLE test_citus_local_table_with_stats ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a);') CREATE STATISTICS "CiTUS!LocalTables"."Bad\'StatName" ON a, b FROM test_citus_local_table_with_stats; -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504030, 'citus_local_tables_test_schema', E'CREATE STATISTICS "CiTUS!LocalTables"."Bad\\''StatName" ON a, b FROM citus_local_tables_test_schema.test_citus_local_table_with_stats') +NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504031, 'citus_local_tables_test_schema', E'CREATE STATISTICS "CiTUS!LocalTables"."Bad\\''StatName" ON a, b FROM citus_local_tables_test_schema.test_citus_local_table_with_stats') SELECT COUNT(*)=4 FROM pg_statistic_ext WHERE stxname LIKE 'stx1%' or stxname LIKE 'Bad\\''StatName%' ; ?column? 
--------------------------------------------------------------------- @@ -790,11 +792,11 @@ SELECT partition, from_value, to_value, access_method partition | from_value | to_value | access_method --------------------------------------------------------------------- partition1 | Thu Apr 12 17:00:00 2018 PDT | Fri Apr 13 17:00:00 2018 PDT | heap - partition1_1504031 | Thu Apr 12 17:00:00 2018 PDT | Fri Apr 13 17:00:00 2018 PDT | heap + partition1_1504032 | Thu Apr 12 17:00:00 2018 PDT | Fri Apr 13 17:00:00 2018 PDT | heap partition2 | Fri Apr 13 17:00:00 2018 PDT | Sat Apr 14 17:00:00 2018 PDT | heap - partition2_1504032 | Fri Apr 13 17:00:00 2018 PDT | Sat Apr 14 17:00:00 2018 PDT | heap + partition2_1504033 | Fri Apr 13 17:00:00 2018 PDT | Sat Apr 14 17:00:00 2018 PDT | heap partition3 | Sat Apr 14 17:00:00 2018 PDT | Sun Apr 15 17:00:00 2018 PDT | heap - partition3_1504034 | Sat Apr 14 17:00:00 2018 PDT | Sun Apr 15 17:00:00 2018 PDT | heap + partition3_1504035 | Sat Apr 14 17:00:00 2018 PDT | Sun Apr 15 17:00:00 2018 PDT | heap (6 rows) -- undistribute succesfully @@ -850,22 +852,22 @@ SELECT relname FROM pg_class --------------------------------------------------------------------- partitioned_distributed partitioned_distributed_1 - partitioned_distributed_1504038 - partitioned_distributed_1504040 - partitioned_distributed_1_1504042 - partitioned_distributed_1_1504044 + partitioned_distributed_1504039 + partitioned_distributed_1504041 + partitioned_distributed_1_1504043 + partitioned_distributed_1_1504045 partitioned_distributed_1_a_key - partitioned_distributed_1_a_key_1504042 - partitioned_distributed_1_a_key_1504044 + partitioned_distributed_1_a_key_1504043 + partitioned_distributed_1_a_key_1504045 partitioned_distributed_2 - partitioned_distributed_2_1504046 - partitioned_distributed_2_1504048 + partitioned_distributed_2_1504047 + partitioned_distributed_2_1504049 partitioned_distributed_2_a_key - partitioned_distributed_2_a_key_1504046 - partitioned_distributed_2_a_key_1504048 + partitioned_distributed_2_a_key_1504047 + partitioned_distributed_2_a_key_1504049 partitioned_distributed_a_key - partitioned_distributed_a_key_1504038 - partitioned_distributed_a_key_1504040 + partitioned_distributed_a_key_1504039 + partitioned_distributed_a_key_1504041 (18 rows) \c - - - :master_port diff --git a/src/test/regress/expected/citus_local_tables_ent.out b/src/test/regress/expected/citus_local_tables_ent.out new file mode 100644 index 000000000..3d67e44a9 --- /dev/null +++ b/src/test/regress/expected/citus_local_tables_ent.out @@ -0,0 +1,88 @@ +\set VERBOSITY terse +SET citus.next_shard_id TO 1511000; +SET citus.shard_replication_factor TO 1; +SET citus.enable_local_execution TO ON; +SET citus.log_local_commands TO ON; +CREATE SCHEMA citus_local_tables_ent; +SET search_path TO citus_local_tables_ent; +-- ensure that coordinator is added to pg_dist_node +SET client_min_messages to ERROR; +SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +CREATE TABLE citus_local_table (a int, b int); +SELECT citus_add_local_table_to_metadata('citus_local_table'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +-- isolate_tenant_to_new_shard is not supported +SELECT isolate_tenant_to_new_shard('citus_local_table', 100); +ERROR: cannot isolate tenant because tenant isolation is only support for hash distributed tables +-- master_copy_shard_placement is not supported +SELECT master_copy_shard_placement(shardid, 'localhost', :master_port, 'localhost', :worker_1_port, false) +FROM (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='citus_local_table'::regclass) as shardid; +ERROR: Table 'citus_local_table' is a local table. Replicating shard of a local table added to metadata currently is not supported +-- master_move_shard_placement is not supported +SELECT master_move_shard_placement(shardid, 'localhost', :master_port, 'localhost', :worker_1_port) +FROM (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='citus_local_table'::regclass) as shardid; +ERROR: table citus_local_tables_ent.citus_local_table is a local table, moving shard of a local table added to metadata is currently not supported +-- replicate_table_shards is not suported +SELECT replicate_table_shards('citus_local_table'::regclass, 2); +NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ... +ERROR: Table 'citus_local_table' is a local table. Replicating shard of a local table added to metadata currently is not supported +-- rebalance_table_shards is not supported +SELECT rebalance_table_shards('citus_local_table'); +ERROR: table citus_local_tables_ent.citus_local_table is a local table, moving shard of a local table added to metadata is currently not supported +-- get_rebalance_table_shards_plan is not supported +SELECT get_rebalance_table_shards_plan('citus_local_table'); +ERROR: table citus_local_tables_ent.citus_local_table is a local table, moving shard of a local table added to metadata is currently not supported +-- test a policy defined after creating a citus local table +-- create another user for policy test +CREATE USER user_can_select_a_1; +ALTER ROLE user_can_select_a_1 SET search_path TO citus_local_tables_ent; +GRANT USAGE ON SCHEMA citus_local_tables_ent TO user_can_select_a_1; +INSERT INTO citus_local_table VALUES (1,1); +NOTICE: executing the command locally: INSERT INTO citus_local_tables_ent.citus_local_table_1511000 (a, b) VALUES (1, 1) +INSERT INTO citus_local_table VALUES (2,2); +NOTICE: executing the command locally: INSERT INTO citus_local_tables_ent.citus_local_table_1511000 (a, b) VALUES (2, 2) +-- grant access +GRANT SELECT ON TABLE citus_local_table TO user_can_select_a_1; +NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511000, 'citus_local_tables_ent', 'GRANT select ON citus_local_table TO user_can_select_a_1') +-- enable row level security +ALTER TABLE citus_local_table ENABLE ROW LEVEL SECURITY; +NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511000, 'citus_local_tables_ent', 'ALTER TABLE citus_local_table ENABLE ROW LEVEL SECURITY;') +-- switch user, it should not be able to see any rows since row level security is enabled +SET ROLE user_can_select_a_1; +SELECT * FROM citus_local_table ORDER BY 1, 2; +NOTICE: executing the command locally: SELECT a, b FROM 
citus_local_tables_ent.citus_local_table_1511000 citus_local_table ORDER BY a, b + a | b +--------------------------------------------------------------------- +(0 rows) + +RESET ROLE; +-- create policy for user to read access for rows with a=1 +CREATE POLICY user_mod ON citus_local_table +FOR SELECT +TO user_can_select_a_1 +USING (current_user = 'user_can_select_a_1' and a=1); +NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511000, 'citus_local_tables_ent', 'CREATE POLICY user_mod ON citus_local_tables_ent.citus_local_table FOR SELECT TO user_can_select_a_1 USING (((CURRENT_USER = ''user_can_select_a_1''::name) AND (a = 1)))') +-- switch user, it should be able to see rows with a=1 +SET ROLE user_can_select_a_1; +SELECT * FROM citus_local_table ORDER BY 1, 2; +NOTICE: executing the command locally: SELECT a, b FROM citus_local_tables_ent.citus_local_table_1511000 citus_local_table ORDER BY a, b + a | b +--------------------------------------------------------------------- + 1 | 1 +(1 row) + +-- reset role +RESET ROLE; +-- cleanup at exit +DROP SCHEMA citus_local_tables_ent CASCADE; +NOTICE: drop cascades to 2 other objects diff --git a/src/test/regress/expected/columnar_alter.out b/src/test/regress/expected/columnar_alter.out index 39f6580e2..2fbe16a92 100644 --- a/src/test/regress/expected/columnar_alter.out +++ b/src/test/regress/expected/columnar_alter.out @@ -433,11 +433,7 @@ ERROR: unsupported access method for the index on columnar table circles -- Row level security CREATE TABLE public.row_level_security_col (id int, pgUser CHARACTER VARYING) USING columnar; CREATE USER user1; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER user2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. INSERT INTO public.row_level_security_col VALUES (1, 'user1'), (2, 'user2'); GRANT SELECT, UPDATE, INSERT, DELETE ON public.row_level_security_col TO user1; GRANT SELECT, UPDATE, INSERT, DELETE ON public.row_level_security_col TO user2; diff --git a/src/test/regress/expected/columnar_permissions.out b/src/test/regress/expected/columnar_permissions.out index eb058c951..6833285ea 100644 --- a/src/test/regress/expected/columnar_permissions.out +++ b/src/test/regress/expected/columnar_permissions.out @@ -1,7 +1,5 @@ select current_user \gset create user columnar_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
\c - columnar_user create table columnar_permissions(i int) using columnar; insert into columnar_permissions values(1); diff --git a/src/test/regress/expected/columnar_truncate.out b/src/test/regress/expected/columnar_truncate.out index fe5169535..47707eb37 100644 --- a/src/test/regress/expected/columnar_truncate.out +++ b/src/test/regress/expected/columnar_truncate.out @@ -265,16 +265,7 @@ set columnar.compression = 'pglz'; INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100); set columnar.compression to default; -- create a user that can not truncate -SELECT run_command_on_workers($$CREATE USER truncate_user;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - CREATE USER truncate_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. GRANT USAGE ON SCHEMA truncate_schema TO truncate_user; GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user; REVOKE TRUNCATE ON TABLE truncate_schema.truncate_tbl FROM truncate_user; diff --git a/src/test/regress/expected/create_role_propagation.out b/src/test/regress/expected/create_role_propagation.out new file mode 100644 index 000000000..e1a8a8ba7 --- /dev/null +++ b/src/test/regress/expected/create_role_propagation.out @@ -0,0 +1,687 @@ +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +--------------------------------------------------------------------- +(0 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- +(0 rows) + +\c - - - :worker_1_port +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +--------------------------------------------------------------------- +(0 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- +(0 rows) + +\c - - - :master_port +CREATE ROLE create_role; +CREATE ROLE create_role_2; +CREATE USER create_user; +CREATE USER create_user_2; +CREATE GROUP create_group; +CREATE GROUP create_group_2; +SELECT master_remove_node('localhost', :worker_2_port); + master_remove_node +--------------------------------------------------------------------- + +(1 row) + +CREATE ROLE create_role_with_everything SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 105 PASSWORD 'strong_password123^' VALID UNTIL '2045-05-05 
00:00:00.00+00' IN ROLE create_role, create_group ROLE create_user, create_group_2 ADMIN create_role_2, create_user_2; +CREATE ROLE create_role_with_nothing NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 3 PASSWORD 'weakpassword' VALID UNTIL '2015-05-05 00:00:00.00+00'; +-- show that creating role from worker node is only allowed when create role +-- propagation is off +\c - - - :worker_1_port +CREATE ROLE role_on_worker; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. +BEGIN; +SET citus.enable_create_role_propagation TO off; +CREATE ROLE role_on_worker; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. +ROLLBACK; +\c - - - :master_port +-- edge case role names +CREATE ROLE "create_role'edge"; +CREATE ROLE "create_role""edge"; +-- test grant role +GRANT create_group TO create_role; +GRANT create_group TO create_role_2 WITH ADMIN OPTION; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | pass_not_empty | rolvaliduntil +--------------------------------------------------------------------- + create_group | f | t | f | f | f | f | f | -1 | | + create_group_2 | f | t | f | f | f | f | f | -1 | | + create_role | f | t | f | f | f | f | f | -1 | | + create_role"edge | f | t | f | f | f | f | f | -1 | | + create_role'edge | f | t | f | f | f | f | f | -1 | | + create_role_2 | f | t | f | f | f | f | f | -1 | | + create_role_with_everything | t | t | t | t | t | t | t | 105 | t | Thu May 04 17:00:00 2045 PDT + create_role_with_nothing | f | f | f | f | f | f | f | 3 | t | Mon May 04 17:00:00 2015 PDT + create_user | f | t | f | f | t | f | f | -1 | | + create_user_2 | f | t | f | f | t | f | f | -1 | | +(10 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + create_group | create_role | postgres | f + create_group | create_role_2 | postgres | t + create_group | create_role_with_everything | postgres | f + create_role | create_role_with_everything | postgres | f + create_role_with_everything | create_group_2 | postgres | f + create_role_with_everything | create_role_2 | postgres | t + create_role_with_everything | create_user | postgres | f + create_role_with_everything | create_user_2 | postgres | t +(8 rows) + +\c - - - :worker_1_port +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | pass_not_empty | rolvaliduntil +--------------------------------------------------------------------- + create_group | f | t | f | f | f | f | f | -1 | | + create_group_2 | f | t | f | f | f | f | f | -1 | | + create_role | f | t | f | f | f | f | 
f | -1 | | + create_role"edge | f | t | f | f | f | f | f | -1 | | + create_role'edge | f | t | f | f | f | f | f | -1 | | + create_role_2 | f | t | f | f | f | f | f | -1 | | + create_role_with_everything | t | t | t | t | t | t | t | 105 | t | Thu May 04 17:00:00 2045 PDT + create_role_with_nothing | f | f | f | f | f | f | f | 3 | t | Mon May 04 17:00:00 2015 PDT + create_user | f | t | f | f | t | f | f | -1 | | + create_user_2 | f | t | f | f | t | f | f | -1 | | +(10 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + create_group | create_role | postgres | f + create_group | create_role_2 | postgres | t + create_group | create_role_with_everything | postgres | f + create_role | create_role_with_everything | postgres | f + create_role_with_everything | create_group_2 | postgres | f + create_role_with_everything | create_role_2 | postgres | t + create_role_with_everything | create_user | postgres | f + create_role_with_everything | create_user_2 | postgres | t +(8 rows) + +\c - - - :master_port +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_2_port +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | pass_not_empty | rolvaliduntil +--------------------------------------------------------------------- + create_group | f | t | f | f | f | f | f | -1 | | infinity + create_group_2 | f | t | f | f | f | f | f | -1 | | infinity + create_role | f | t | f | f | f | f | f | -1 | | infinity + create_role"edge | f | t | f | f | f | f | f | -1 | | infinity + create_role'edge | f | t | f | f | f | f | f | -1 | | infinity + create_role_2 | f | t | f | f | f | f | f | -1 | | infinity + create_role_with_everything | t | t | t | t | t | t | t | 105 | t | Thu May 04 17:00:00 2045 PDT + create_role_with_nothing | f | f | f | f | f | f | f | 3 | t | Mon May 04 17:00:00 2015 PDT + create_user | f | t | f | f | t | f | f | -1 | | infinity + create_user_2 | f | t | f | f | t | f | f | -1 | | infinity +(10 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + create_group | create_role | postgres | f + create_group | create_role_2 | postgres | t + create_group | create_role_with_everything | postgres | f + create_role | create_role_with_everything | postgres | f + create_role_with_everything | create_group_2 | postgres | f + create_role_with_everything | create_role_2 | postgres | t + create_role_with_everything | create_user | postgres | f + create_role_with_everything | create_user_2 | postgres | t +(8 rows) + +\c - - - :master_port +DROP ROLE create_role_with_everything; +REVOKE create_group FROM create_role; +REVOKE ADMIN OPTION FOR create_group FROM create_role_2; +\c - - - 
:master_port +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | pass_not_empty | rolvaliduntil +--------------------------------------------------------------------- + create_group | f | t | f | f | f | f | f | -1 | | + create_group_2 | f | t | f | f | f | f | f | -1 | | + create_role | f | t | f | f | f | f | f | -1 | | + create_role"edge | f | t | f | f | f | f | f | -1 | | + create_role'edge | f | t | f | f | f | f | f | -1 | | + create_role_2 | f | t | f | f | f | f | f | -1 | | + create_role_with_nothing | f | f | f | f | f | f | f | 3 | t | Mon May 04 17:00:00 2015 PDT + create_user | f | t | f | f | t | f | f | -1 | | + create_user_2 | f | t | f | f | t | f | f | -1 | | +(9 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + create_group | create_role_2 | postgres | f +(1 row) + +\c - - - :worker_1_port +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | pass_not_empty | rolvaliduntil +--------------------------------------------------------------------- + create_group | f | t | f | f | f | f | f | -1 | | + create_group_2 | f | t | f | f | f | f | f | -1 | | + create_role | f | t | f | f | f | f | f | -1 | | + create_role"edge | f | t | f | f | f | f | f | -1 | | + create_role'edge | f | t | f | f | f | f | f | -1 | | + create_role_2 | f | t | f | f | f | f | f | -1 | | + create_role_with_nothing | f | f | f | f | f | f | f | 3 | t | Mon May 04 17:00:00 2015 PDT + create_user | f | t | f | f | t | f | f | -1 | | + create_user_2 | f | t | f | f | t | f | f | -1 | | +(9 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + create_group | create_role_2 | postgres | f +(1 row) + +\c - - - :master_port +-- test grants with distributed and non-distributed roles +SELECT master_remove_node('localhost', :worker_2_port); + master_remove_node +--------------------------------------------------------------------- + +(1 row) + +CREATE ROLE dist_role_1 SUPERUSER; +CREATE ROLE dist_role_2; +CREATE ROLE dist_role_3; +CREATE ROLE dist_role_4; +SET citus.enable_create_role_propagation TO OFF; +CREATE ROLE non_dist_role_1 SUPERUSER; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. +CREATE ROLE non_dist_role_2; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
+CREATE ROLE non_dist_role_3; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. +CREATE ROLE non_dist_role_4; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. +SET citus.enable_create_role_propagation TO ON; +SET ROLE dist_role_1; +GRANT non_dist_role_1 TO non_dist_role_2; +SET citus.enable_create_role_propagation TO OFF; +SET ROLE non_dist_role_1; +GRANT dist_role_1 TO dist_role_2; +RESET ROLE; +SET citus.enable_create_role_propagation TO ON; +GRANT dist_role_3 TO non_dist_role_3; +GRANT non_dist_role_4 TO dist_role_4; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + dist_role_1 | dist_role_2 | non_dist_role_1 | f + dist_role_3 | non_dist_role_3 | postgres | f + non_dist_role_1 | non_dist_role_2 | dist_role_1 | f + non_dist_role_4 | dist_role_4 | postgres | f +(4 rows) + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1; + objid +--------------------------------------------------------------------- + dist_role_1 + dist_role_2 + dist_role_3 + dist_role_4 + non_dist_role_4 +(5 rows) + +\c - - - :worker_1_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + non_dist_role_4 | dist_role_4 | postgres | f +(1 row) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + dist_role_1 + dist_role_2 + dist_role_3 + dist_role_4 + non_dist_role_4 +(5 rows) + +\c - - - :worker_2_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + dist_role_1 | dist_role_2 | postgres | f + non_dist_role_4 | dist_role_4 | postgres | f +(2 rows) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + dist_role_1 + dist_role_2 + dist_role_3 + dist_role_4 + non_dist_role_4 +(5 rows) + +\c - - - :master_port +DROP ROLE dist_role_3, non_dist_role_3, dist_role_4, non_dist_role_4; +-- test grant with multiple mixed roles +CREATE ROLE dist_mixed_1; +CREATE ROLE dist_mixed_2; +CREATE ROLE dist_mixed_3; +CREATE ROLE dist_mixed_4; +SET citus.enable_create_role_propagation TO OFF; +CREATE ROLE nondist_mixed_1; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
+CREATE ROLE nondist_mixed_2; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- +(0 rows) + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1; + objid +--------------------------------------------------------------------- + dist_mixed_1 + dist_mixed_2 + dist_mixed_3 + dist_mixed_4 +(4 rows) + +\c - - - :worker_1_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- +(0 rows) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_mixed%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + dist_mixed_1 + dist_mixed_2 + dist_mixed_3 + dist_mixed_4 +(4 rows) + +\c - - - :master_port +SELECT master_remove_node('localhost', :worker_2_port); + master_remove_node +--------------------------------------------------------------------- + +(1 row) + +GRANT dist_mixed_1, dist_mixed_2, nondist_mixed_1 TO dist_mixed_3, dist_mixed_4, nondist_mixed_2; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + dist_mixed_1 | dist_mixed_3 | postgres | f + dist_mixed_1 | dist_mixed_4 | postgres | f + dist_mixed_1 | nondist_mixed_2 | postgres | f + dist_mixed_2 | dist_mixed_3 | postgres | f + dist_mixed_2 | dist_mixed_4 | postgres | f + dist_mixed_2 | nondist_mixed_2 | postgres | f + nondist_mixed_1 | dist_mixed_3 | postgres | f + nondist_mixed_1 | dist_mixed_4 | postgres | f + nondist_mixed_1 | nondist_mixed_2 | postgres | f +(9 rows) + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1; + objid +--------------------------------------------------------------------- + dist_mixed_1 + dist_mixed_2 + dist_mixed_3 + dist_mixed_4 + nondist_mixed_1 +(5 rows) + +\c - - - :worker_1_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + dist_mixed_1 | dist_mixed_3 | postgres | f + dist_mixed_1 | dist_mixed_4 | postgres | f + dist_mixed_2 | dist_mixed_3 | postgres | f + dist_mixed_2 | dist_mixed_4 | postgres | f + nondist_mixed_1 | dist_mixed_3 | postgres | f + nondist_mixed_1 | dist_mixed_4 | postgres | f +(6 rows) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_mixed%' ORDER BY 1; + rolname 
+--------------------------------------------------------------------- + dist_mixed_1 + dist_mixed_2 + dist_mixed_3 + dist_mixed_4 + nondist_mixed_1 +(5 rows) + +\c - - - :worker_2_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + dist_mixed_1 | dist_mixed_3 | postgres | f + dist_mixed_1 | dist_mixed_4 | postgres | f + dist_mixed_2 | dist_mixed_3 | postgres | f + dist_mixed_2 | dist_mixed_4 | postgres | f + nondist_mixed_1 | dist_mixed_3 | postgres | f + nondist_mixed_1 | dist_mixed_4 | postgres | f +(6 rows) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_mixed%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + dist_mixed_1 + dist_mixed_2 + dist_mixed_3 + dist_mixed_4 + nondist_mixed_1 +(5 rows) + +\c - - - :master_port +DROP ROLE dist_mixed_1, dist_mixed_2, dist_mixed_3, dist_mixed_4, nondist_mixed_1, nondist_mixed_2; +-- test drop multiple roles with non-distributed roles +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist%' ORDER BY 1; + objid +--------------------------------------------------------------------- + dist_role_1 + dist_role_2 +(2 rows) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + dist_role_1 + dist_role_2 + non_dist_role_1 + non_dist_role_2 +(4 rows) + +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + dist_role_1 + dist_role_2 +(2 rows) + +\c - - - :master_port +DROP ROLE dist_role_1, non_dist_role_1, dist_role_2, non_dist_role_2; +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist%' ORDER BY 1; + objid +--------------------------------------------------------------------- +(0 rows) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist%' ORDER BY 1; + rolname +--------------------------------------------------------------------- +(0 rows) + +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist%' ORDER BY 1; + rolname +--------------------------------------------------------------------- +(0 rows) + +\c - - - :master_port +-- test alter part of create or alter role +SELECT master_remove_node('localhost', :worker_2_port); + master_remove_node +--------------------------------------------------------------------- + +(1 row) + +DROP ROLE create_role, create_role_2; +\c - - - :worker_2_port +SELECT rolname, rolcanlogin FROM pg_authid WHERE rolname = 'create_role' OR rolname = 'create_role_2' ORDER BY rolname; + rolname | rolcanlogin +--------------------------------------------------------------------- + create_role | f + create_role_2 | f +(2 rows) + +\c - - - :master_port +CREATE ROLE create_role LOGIN; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +CREATE ROLE create_role_2 LOGIN; +\c - - - :worker_2_port +SELECT rolname, rolcanlogin FROM pg_authid WHERE rolname = 'create_role' OR rolname = 'create_role_2' ORDER BY rolname; + rolname | rolcanlogin +--------------------------------------------------------------------- + create_role | t + create_role_2 | t +(2 rows) + +\c - - - :master_port +-- test cascading grants +SET citus.enable_create_role_propagation TO OFF; +CREATE ROLE nondist_cascade_1; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. +CREATE ROLE nondist_cascade_2; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. +CREATE ROLE nondist_cascade_3; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. +SET citus.enable_create_role_propagation TO ON; +CREATE ROLE dist_cascade; +GRANT nondist_cascade_1 TO nondist_cascade_2; +GRANT nondist_cascade_2 TO nondist_cascade_3; +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%cascade%' ORDER BY 1; + objid +--------------------------------------------------------------------- + dist_cascade +(1 row) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + nondist_cascade_1 | nondist_cascade_2 | postgres | f + nondist_cascade_2 | nondist_cascade_3 | postgres | f +(2 rows) + +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%cascade%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + dist_cascade +(1 row) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- +(0 rows) + +\c - - - :master_port +SELECT master_remove_node('localhost', :worker_2_port); + master_remove_node +--------------------------------------------------------------------- + +(1 row) + +GRANT nondist_cascade_3 TO dist_cascade; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%cascade%' ORDER BY 1; + objid +--------------------------------------------------------------------- + nondist_cascade_1 + nondist_cascade_2 + nondist_cascade_3 + dist_cascade +(4 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + nondist_cascade_1 | nondist_cascade_2 | postgres | f + nondist_cascade_2 | nondist_cascade_3 | postgres | f + nondist_cascade_3 | dist_cascade | postgres | f +(3 rows) + +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%cascade%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + dist_cascade + nondist_cascade_1 + nondist_cascade_2 + nondist_cascade_3 +(4 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + nondist_cascade_1 | nondist_cascade_2 | postgres | f + nondist_cascade_2 | nondist_cascade_3 | postgres | f + nondist_cascade_3 | dist_cascade | postgres | f +(3 rows) + +\c - - - :worker_2_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%cascade%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + dist_cascade + nondist_cascade_1 + nondist_cascade_2 + nondist_cascade_3 +(4 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- + nondist_cascade_1 | nondist_cascade_2 | postgres | f + nondist_cascade_2 | nondist_cascade_3 | postgres | f + nondist_cascade_3 | dist_cascade | postgres | f +(3 rows) + +\c - - - :master_port +DROP ROLE create_role, create_role_2, create_group, create_group_2, create_user, create_user_2, create_role_with_nothing, "create_role'edge", "create_role""edge"; +-- test grant non-existing roles +CREATE ROLE existing_role_1; +CREATE ROLE existing_role_2; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%existing%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- +(0 rows) + +GRANT existing_role_1, nonexisting_role_1 TO existing_role_2, nonexisting_role_2; +ERROR: role "nonexisting_role_2" does not exist +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%existing%' ORDER BY 1, 2; + role | member | grantor | admin_option +--------------------------------------------------------------------- +(0 rows) + +-- test drop non-existing roles +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%existing%' ORDER BY 1; + objid 
+--------------------------------------------------------------------- + existing_role_1 + existing_role_2 +(2 rows) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + existing_role_1 + existing_role_2 +(2 rows) + +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + existing_role_1 + existing_role_2 +(2 rows) + +\c - - - :master_port +DROP ROLE existing_role_1, existing_role_2, nonexisting_role_1, nonexisting_role_2; +ERROR: role "nonexisting_role_1" does not exist +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%existing%' ORDER BY 1; + objid +--------------------------------------------------------------------- + existing_role_1 + existing_role_2 +(2 rows) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + existing_role_1 + existing_role_2 +(2 rows) + +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; + rolname +--------------------------------------------------------------------- + existing_role_1 + existing_role_2 +(2 rows) + +\c - - - :master_port +DROP ROLE IF EXISTS existing_role_1, existing_role_2, nonexisting_role_1, nonexisting_role_2; +NOTICE: role "nonexisting_role_1" does not exist, skipping +NOTICE: role "nonexisting_role_2" does not exist, skipping +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%existing%' ORDER BY 1; + objid +--------------------------------------------------------------------- +(0 rows) + +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; + rolname +--------------------------------------------------------------------- +(0 rows) + +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; + rolname +--------------------------------------------------------------------- +(0 rows) + +\c - - - :master_port diff --git a/src/test/regress/expected/disable_object_propagation.out b/src/test/regress/expected/disable_object_propagation.out index 6ebe15089..f58e946a3 100644 --- a/src/test/regress/expected/disable_object_propagation.out +++ b/src/test/regress/expected/disable_object_propagation.out @@ -1,8 +1,6 @@ SET citus.next_shard_id TO 20030000; SET citus.enable_metadata_sync TO false; -- all tests here verify old behaviour without distributing types,functions,etc automatically CREATE USER typeowner_for_disabled_object_propagation_guc; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
CREATE SCHEMA disabled_object_propagation; CREATE SCHEMA disabled_object_propagation2; SET search_path TO disabled_object_propagation; diff --git a/src/test/regress/expected/distributed_collations.out b/src/test/regress/expected/distributed_collations.out index 21b3f7f8e..0c03c8be7 100644 --- a/src/test/regress/expected/distributed_collations.out +++ b/src/test/regress/expected/distributed_collations.out @@ -1,14 +1,5 @@ SET citus.next_shard_id TO 20050000; CREATE USER collationuser; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -SELECT run_command_on_workers($$CREATE USER collationuser;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - CREATE SCHEMA collation_tests AUTHORIZATION collationuser; CREATE SCHEMA collation_tests2 AUTHORIZATION collationuser; SET search_path to collation_tests; @@ -156,13 +147,6 @@ SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA collation_tests CASCADE; DROP SCHEMA collation_tests2 CASCADE; DROP USER collationuser; -SELECT run_command_on_workers($$DROP USER collationuser;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP ROLE") - (localhost,57638,t,"DROP ROLE") -(2 rows) - \c - - - :worker_1_port -- test creating a collation on a worker CREATE COLLATION another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); diff --git a/src/test/regress/expected/distributed_types.out b/src/test/regress/expected/distributed_types.out index 524b7016a..3ce06b9bb 100644 --- a/src/test/regress/expected/distributed_types.out +++ b/src/test/regress/expected/distributed_types.out @@ -1,14 +1,5 @@ SET citus.next_shard_id TO 20010000; CREATE USER typeuser; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -SELECT run_command_on_workers($$CREATE USER typeuser;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - CREATE SCHEMA type_tests AUTHORIZATION typeuser; CREATE SCHEMA type_tests2 AUTHORIZATION typeuser; -- to test creation in a specific schema and moving to schema SET search_path TO type_tests; @@ -592,10 +583,3 @@ SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA type_tests CASCADE; DROP SCHEMA type_tests2 CASCADE; DROP USER typeuser; -SELECT run_command_on_workers($$DROP USER typeuser;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP ROLE") - (localhost,57638,t,"DROP ROLE") -(2 rows) - diff --git a/src/test/regress/expected/failure_offline_move_shard_placement.out b/src/test/regress/expected/failure_offline_move_shard_placement.out new file mode 100644 index 000000000..48aed1169 --- /dev/null +++ b/src/test/regress/expected/failure_offline_move_shard_placement.out @@ -0,0 +1,223 @@ +-- +-- failure_offline_move_shard_placement +-- +-- The tests cover moving shard placements without using logical replication. 
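For orientation, a hedged note on the two new failure suites: the offline tests below pass the 'block_writes' transfer mode to master_move_shard_placement, while the online suite later in this diff calls the same function without a mode argument and moves the shard through logical replication. A minimal sketch of the two call forms, with shard IDs and ports borrowed from the tests purely for illustration:

-- offline move: block writes on the source placement while the shard data is copied
SELECT master_move_shard_placement(201, 'localhost', :worker_1_port,
                                   'localhost', :worker_2_port, 'block_writes');
-- online move: default transfer mode, streaming changes via logical replication
SELECT master_move_shard_placement(101, 'localhost', :worker_1_port,
                                   'localhost', :worker_2_port);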
+CREATE SCHEMA IF NOT EXISTS move_shard_offline; +SET SEARCH_PATH = move_shard_offline; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 200; +SET citus.shard_replication_factor TO 1; +SELECT pg_backend_pid() as pid \gset +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE t(id int PRIMARY KEY, int_data int); +SELECT create_distributed_table('t', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW shards_in_workers AS + SELECT shardid, + (CASE WHEN nodeport = :worker_1_port THEN 'worker1' ELSE 'worker2' END) AS worker + FROM pg_dist_placement NATURAL JOIN pg_dist_node + WHERE shardstate != 4 + ORDER BY 1,2 ASC; +CREATE VIEW indices_on_shard_201 AS + SELECT * FROM run_command_on_workers( $cmd$ + SELECT CASE WHEN COUNT(*) > 0 THEN TRUE ELSE FALSE END + FROM pg_index WHERE indrelid = 'move_shard_offline.t_201'::regclass + $cmd$); +CREATE VIEW find_index_for_shard_201_in_workers AS + SELECT CASE nodeport WHEN :worker_1_port THEN 'worker1' ELSE 'worker2' END + FROM indices_on_shard_201 WHERE result = 't'; +-- Insert some data +INSERT INTO t SELECT x, x+1 FROM generate_series(1,100) AS f(x); +-- Initial shard placements +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 200 | worker2 + 201 | worker1 + 202 | worker2 + 203 | worker1 +(4 rows) + +SELECT * FROM find_index_for_shard_201_in_workers; + case +--------------------------------------------------------------------- + worker1 +(1 row) + +-- failure on sanity checks +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard_offline.t CASCADE").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on sanity checks +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard_offline.t CASCADE").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); +ERROR: canceling statement due to user request +-- failure on move_shard table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard_offline.t").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+CONTEXT: while executing command on localhost:xxxxx +-- cancellation on move_shard table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard_offline.t").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); +ERROR: canceling statement due to user request +-- failure on blocking append_table_to_shard operation on target node +SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on blocking append_table_to_shard operation on target node +SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); +ERROR: canceling statement due to user request +-- failure on adding constraints on target node +SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+CONTEXT: while executing command on localhost:xxxxx +-- cancellation on adding constraints on target node +SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); +ERROR: canceling statement due to user request +-- failure on CopyData operation on source node +SELECT citus.mitmproxy('conn.onCopyData().kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(200, 'localhost', :worker_2_proxy_port, 'localhost', :worker_1_port, 'block_writes'); +ERROR: could not copy table "t_200" from "localhost:xxxxx" +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on CopyData operation on source node +SELECT citus.mitmproxy('conn.onCopyData().cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(200, 'localhost', :worker_2_proxy_port, 'localhost', :worker_1_port, 'block_writes'); +ERROR: canceling statement due to user request +CALL citus_cleanup_orphaned_shards(); +-- Verify that the shard is not moved and the number of rows are still 100k +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 200 | worker2 + 201 | worker1 + 202 | worker2 + 203 | worker1 +(4 rows) + +SELECT count(*) FROM t; + count +--------------------------------------------------------------------- + 100 +(1 row) + +SELECT * FROM find_index_for_shard_201_in_workers; + case +--------------------------------------------------------------------- + worker1 +(1 row) + +-- Verify that shard can be moved after a temporary failure +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 1 orphaned shards +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 200 | worker2 + 201 | worker2 + 202 | worker2 + 203 | worker1 +(4 rows) + +SELECT count(*) FROM t; + count +--------------------------------------------------------------------- + 100 +(1 row) + +SELECT * FROM find_index_for_shard_201_in_workers; + case +--------------------------------------------------------------------- + worker2 +(1 row) + +DROP SCHEMA move_shard_offline CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table t +drop cascades to view shards_in_workers +drop cascades to view indices_on_shard_201 +drop cascades to view find_index_for_shard_201_in_workers diff --git a/src/test/regress/expected/failure_on_create_subscription.out b/src/test/regress/expected/failure_on_create_subscription.out new file mode 100644 index 000000000..5ffd2a6b3 --- /dev/null +++ b/src/test/regress/expected/failure_on_create_subscription.out @@ -0,0 +1,103 @@ +-- +-- failure_on_create_subscription +-- +-- Since the result of these tests depends on the success of background +-- process that creating the 
replication slot on the publisher. These +-- tests are separated. +CREATE SCHEMA IF NOT EXISTS move_shard; +SET SEARCH_PATH = move_shard; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 100; +SET citus.shard_replication_factor TO 1; +SELECT pg_backend_pid() as pid \gset +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE t(id int PRIMARY KEY, int_data int, data text); +SELECT create_distributed_table('t', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW shards_in_workers AS +SELECT shardid, + (CASE WHEN nodeport = :worker_1_port THEN 'worker1' ELSE 'worker2' END) AS worker +FROM pg_dist_placement NATURAL JOIN pg_dist_node +WHERE shardstate != 4 +ORDER BY 1,2 ASC; +-- Insert some data +INSERT INTO t SELECT x, x+1, MD5(random()::text) FROM generate_series(1,100000) AS f(x); +-- Initial shard placements +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 100 | worker2 + 101 | worker1 + 102 | worker2 + 103 | worker1 +(4 rows) + +-- failure on creating the subscription +SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- Verify that the shard is not moved and the number of rows are still 100k +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 100 | worker2 + 101 | worker1 + 102 | worker2 + 103 | worker1 +(4 rows) + +SELECT count(*) FROM t; + count +--------------------------------------------------------------------- + 100000 +(1 row) + +-- Verify that shard can be moved after a temporary failure +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 100 | worker2 + 101 | worker2 + 102 | worker2 + 103 | worker1 +(4 rows) + +SELECT count(*) FROM t; + count +--------------------------------------------------------------------- + 100000 +(1 row) + +DROP SCHEMA move_shard CASCADE ; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table t +drop cascades to view shards_in_workers diff --git a/src/test/regress/expected/failure_on_create_subscription_0.out b/src/test/regress/expected/failure_on_create_subscription_0.out new file mode 100644 index 000000000..15b894f27 --- /dev/null +++ b/src/test/regress/expected/failure_on_create_subscription_0.out @@ -0,0 +1,105 @@ +-- +-- failure_on_create_subscription +-- +-- Since the result of these tests depends on the success of background +-- process that creating the replication slot on the publisher. These +-- tests are separated. 
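The alternative expected file below (the "_0" output) exists because the outcome depends on whether the background process on the publisher managed to create the replication slot before the connection was killed; when it did not, the extra WARNING about failing to drop the slot appears. As a hedged illustration only, one way to inspect that state on the publisher node, using the slot name shown in the warning, might be:

-- check whether the shard move's replication slot exists on the publisher
SELECT slot_name, slot_type, active
FROM pg_replication_slots
WHERE slot_name = 'citus_shard_move_subscription';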
+CREATE SCHEMA IF NOT EXISTS move_shard; +SET SEARCH_PATH = move_shard; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 100; +SET citus.shard_replication_factor TO 1; +SELECT pg_backend_pid() as pid \gset +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE t(id int PRIMARY KEY, int_data int, data text); +SELECT create_distributed_table('t', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW shards_in_workers AS +SELECT shardid, + (CASE WHEN nodeport = :worker_1_port THEN 'worker1' ELSE 'worker2' END) AS worker +FROM pg_dist_placement NATURAL JOIN pg_dist_node +ORDER BY 1,2 ASC; +-- Insert some data +INSERT INTO t SELECT x, x+1, MD5(random()::text) FROM generate_series(1,100000) AS f(x); +-- Initial shard placements +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 100 | worker2 + 101 | worker1 + 102 | worker2 + 103 | worker1 +(4 rows) + +-- failure on creating the subscription +SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +WARNING: could not drop the replication slot "citus_shard_move_subscription" on publisher +DETAIL: The error was: ERROR: replication slot "citus_shard_move_subscription" does not exist +CONTEXT: while executing command on localhost:xxxxx +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+CONTEXT: while executing command on localhost:xxxxx +-- Verify that the shard is not moved and the number of rows are still 100k +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 100 | worker2 + 101 | worker1 + 102 | worker2 + 103 | worker1 +(4 rows) + +SELECT count(*) FROM t; + count +--------------------------------------------------------------------- + 100000 +(1 row) + +-- Verify that shard can be moved after a temporary failure +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 100 | worker2 + 101 | worker2 + 102 | worker2 + 103 | worker1 +(4 rows) + +SELECT count(*) FROM t; + count +--------------------------------------------------------------------- + 100000 +(1 row) + +DROP SCHEMA move_shard CASCADE ; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table t +drop cascades to view shards_in_workers diff --git a/src/test/regress/expected/failure_online_move_shard_placement.out b/src/test/regress/expected/failure_online_move_shard_placement.out new file mode 100644 index 000000000..5fa344897 --- /dev/null +++ b/src/test/regress/expected/failure_online_move_shard_placement.out @@ -0,0 +1,357 @@ +-- +-- failure_online_move_shard_placement +-- +-- The tests cover moving shard placements using logical replication. +CREATE SCHEMA IF NOT EXISTS move_shard; +SET SEARCH_PATH = move_shard; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 100; +SET citus.shard_replication_factor TO 1; +SET citus.max_adaptive_executor_pool_size TO 1; +SELECT pg_backend_pid() as pid \gset +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE t(id int PRIMARY KEY, int_data int, data text); +CREATE INDEX index_failure ON t(id); +SELECT create_distributed_table('t', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW shards_in_workers AS +SELECT shardid, + (CASE WHEN nodeport = :worker_1_port THEN 'worker1' ELSE 'worker2' END) AS worker +FROM pg_dist_placement NATURAL JOIN pg_dist_node +WHERE shardstate != 4 +ORDER BY 1,2 ASC; +-- Insert some data +INSERT INTO t SELECT x, x+1, MD5(random()::text) FROM generate_series(1,100000) AS f(x); +-- Initial shard placements +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 100 | worker2 + 101 | worker1 + 102 | worker2 + 103 | worker1 +(4 rows) + +-- failure on sanity checks +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard.t CASCADE").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+CONTEXT: while executing command on localhost:xxxxx +-- cancellation on sanity checks +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard.t CASCADE").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: canceling statement due to user request +-- failure on move_shard table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard.t").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on move_shard table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard.t").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: canceling statement due to user request +-- failure on polling subscription state +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT count\(\*\) FROM pg_subscription_rel").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on polling subscription state +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT count\(\*\) FROM pg_subscription_rel").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: canceling statement due to user request +-- failure on getting subscriber state +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT sum").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+CONTEXT: while executing command on localhost:xxxxx +-- cancellation on getting subscriber state +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT sum").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: canceling statement due to user request +-- failure on polling last write-ahead log location reported to origin WAL sender +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT min\(latest_end_lsn").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on polling last write-ahead log location reported to origin WAL sender +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT min\(latest_end_lsn").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: canceling statement due to user request +-- failure on dropping subscription +SELECT citus.mitmproxy('conn.onQuery(query="^DROP SUBSCRIPTION").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection not open +CONTEXT: while executing command on localhost:xxxxx +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection not open +CONTEXT: while executing command on localhost:xxxxx +ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection not open +-- cancellation on dropping subscription +SELECT citus.mitmproxy('conn.onQuery(query="^DROP SUBSCRIPTION").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: canceling statement due to user request +-- failure on creating the primary key +SELECT citus.mitmproxy('conn.onQuery(query="t_pkey").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on creating the primary key +SELECT citus.mitmproxy('conn.onQuery(query="t_pkey").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +ERROR: canceling statement due to user request +-- failure on create index +SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection not open +CONTEXT: while executing command on localhost:xxxxx +ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +-- lets create few more indexes and fail with both +-- parallel mode and sequential mode +CREATE INDEX index_failure_2 ON t(id); +CREATE INDEX index_failure_3 ON t(id); +CREATE INDEX index_failure_4 ON t(id); +CREATE INDEX index_failure_5 ON t(id); +-- failure on the third create index +ALTER SYSTEM SET citus.max_adaptive_executor_pool_size TO 1; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection not open +CONTEXT: while executing command on localhost:xxxxx +ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +-- failure on parallel create index +ALTER SYSTEM RESET citus.max_adaptive_executor_pool_size; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection not open +CONTEXT: while executing command on localhost:xxxxx +ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection not open +-- Verify that the shard is not moved and the number of rows are still 100k +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 100 | worker2 + 101 | worker1 + 102 | worker2 + 103 | worker1 +(4 rows) + +SELECT count(*) FROM t; + count +--------------------------------------------------------------------- + 100000 +(1 row) + +-- Verify that shard can be moved after a temporary failure +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM shards_in_workers; + shardid | worker +--------------------------------------------------------------------- + 100 | worker2 + 101 | worker2 + 102 | worker2 + 103 | worker1 +(4 rows) + +SELECT count(*) FROM t; + count +--------------------------------------------------------------------- + 100000 +(1 row) + +DROP SCHEMA move_shard CASCADE ; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table t +drop cascades to view shards_in_workers diff --git a/src/test/regress/expected/failure_tenant_isolation.out b/src/test/regress/expected/failure_tenant_isolation.out new file mode 100644 index 000000000..c07165e9a --- /dev/null +++ b/src/test/regress/expected/failure_tenant_isolation.out @@ -0,0 +1,418 @@ +-- +-- failure_tenant_isolation +-- +-- due to different libpq versions +-- some warning messages differ +-- between local and CI +SET client_min_messages TO ERROR; +CREATE SCHEMA IF NOT EXISTS tenant_isolation; +SET SEARCH_PATH = 
tenant_isolation; +SET citus.shard_count TO 2; +SET citus.next_shard_id TO 300; +SET citus.shard_replication_factor TO 1; +SELECT pg_backend_pid() as pid \gset +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE table_1 (id int PRIMARY KEY); +CREATE TABLE table_2 (ref_id int REFERENCES table_1(id) UNIQUE, data int); +SELECT create_distributed_table('table_1', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('table_2', 'ref_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW shard_sizes AS + SELECT shardid, result AS row_count + FROM run_command_on_placements('table_1', 'SELECT count(*) FROM %s'); +INSERT INTO table_1 +SELECT x +FROM generate_series(1, 100) AS f (x); +INSERT INTO table_2 +SELECT x, x +FROM generate_series(1, 100) AS f (x); +-- initial shard sizes +SELECT * FROM shard_sizes ORDER BY 1; + shardid | row_count +--------------------------------------------------------------------- + 300 | 49 + 301 | 51 +(2 rows) + +-- failure on colocated table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on colocated table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").after(1).cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on colocated table population +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_2").after(2).kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on colocated table population +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_2").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on colocated table constraints +SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(1).kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+CONTEXT: while executing command on localhost:xxxxx +-- cancellation on colocated table constraints +SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").after(1).cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on table population +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_1").after(2).kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on table population +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_1").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on table constraints +SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(1).kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on table constraints +SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on dropping old colocated shard +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+CONTEXT: while executing command on localhost:xxxxx +-- cancellation on dropping old colocated shard +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on dropping old shard +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- cancellation on dropping old shard +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on foreign key creation +SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +CONTEXT: while executing command on localhost:xxxxx +-- failure on foreign key creation +SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").after(2).cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on shard split transaction +SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: failure on connection marked as essential: localhost:xxxxx +-- failure on shard split transaction +SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on shard split transaction commit +SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: failure on connection marked as essential: localhost:xxxxx +-- failure on shard split transaction commit +SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on transaction for dropping old tables +SELECT 
citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: failure on connection marked as essential: localhost:xxxxx +-- failure on transaction for dropping old tables +SELECT citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on transaction for foreign key creation +SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: failure on connection marked as essential: localhost:xxxxx +-- failure on transaction for foreign key creation +SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on transaction commit for foreign key creation +SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: failure on connection marked as essential: localhost:xxxxx +-- failure on transaction commit for foreign key creation +SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on transaction prepare for dropping old tables +SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +-- due to libpq version differences, the output might change +-- hence use code block to catch the error +\set VERBOSITY terse +DO LANGUAGE plpgsql +$$ +BEGIN + SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + EXCEPTION WHEN OTHERS THEN + RAISE 'Command failed to execute'; +END; +$$; +ERROR: Command failed to execute +\set VERBOSITY default +-- failure on transaction prepare for dropping old tables +SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || :pid || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- failure on transaction commit for dropping old tables +SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: failure on connection marked as essential: localhost:xxxxx +-- failure on transaction commit for dropping old tables +SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").cancel(' || :pid || ')'); + mitmproxy 
+--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); +ERROR: canceling statement due to user request +-- verify that the tenant is not isolated +SELECT * FROM shard_sizes ORDER BY 1; + shardid | row_count +--------------------------------------------------------------------- + 300 | 49 + 301 | 51 +(2 rows) + +-- Verify that tenant can be isolated after unsuccessful attempts +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +-- shard sizes after successful tenant isolation +CREATE TABLE old_shards AS SELECT shardid FROM pg_dist_shard; +WITH new_shard AS ( + SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE') AS shardid +) +SELECT row_count +FROM shard_sizes +JOIN new_shard ON shard_sizes.shardid = new_shard.shardid; + row_count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT row_count +FROM shard_sizes +WHERE shard_sizes.shardid NOT IN (SELECT * FROM old_shards) +ORDER BY 1; + row_count +--------------------------------------------------------------------- + 1 + 20 + 28 +(3 rows) + +\set VERBOSITY terse +DROP SCHEMA tenant_isolation CASCADE; +\set VERBOSITY default diff --git a/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out b/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out index 04314061f..0140cc814 100644 --- a/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out +++ b/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out @@ -49,7 +49,7 @@ SELECT create_distributed_table('referencing_table2', 'id'); INSERT INTO referenced_table SELECT i,i FROM generate_series (0, 100) i; INSERT INTO referencing_table SELECT i,i FROM generate_series (0, 100) i; INSERT INTO referencing_table2 SELECT i,i FROM generate_series (0, 100) i; -SELECT master_move_shard_placement(15000009, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(15000009, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); master_move_shard_placement --------------------------------------------------------------------- @@ -181,7 +181,7 @@ SELECT * FROM get_foreign_key_to_reference_table_commands('referenceing_dist_tab (5 rows) -- and show that rebalancer works fine -SELECT master_move_shard_placement(15000018, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(15000018, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); master_move_shard_placement --------------------------------------------------------------------- diff --git a/src/test/regress/expected/grant_on_foreign_server_propagation.out b/src/test/regress/expected/grant_on_foreign_server_propagation.out new file mode 100644 index 000000000..90a1e2468 --- /dev/null +++ b/src/test/regress/expected/grant_on_foreign_server_propagation.out @@ -0,0 +1,189 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve +\gset +\if :server_version_above_twelve +\else +\q +\endif +-- +-- GRANT_ON_FOREIGN_SERVER_PROPAGATION +-- We can't execute this file for PG12, as 'password_required' option for USER MAPPING +-- is introduced in PG13. 
+-- +CREATE SCHEMA "grant on server"; +SET search_path TO "grant on server"; +-- remove one of the worker nodes to test adding a new node later +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +select 1 from citus_add_node('localhost',:master_port,groupId=>0); +NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SET citus.use_citus_managed_tables TO ON; +-- create target table and insert some data +CREATE TABLE foreign_table_test (id integer NOT NULL, data text); +INSERT INTO foreign_table_test VALUES (1, 'text_test'); +CREATE EXTENSION postgres_fdw; +CREATE ROLE ownerrole WITH LOGIN; +GRANT ALL ON FOREIGN DATA WRAPPER postgres_fdw TO ownerrole WITH GRANT OPTION; +SET ROLE ownerrole; +-- verify we can create server using the privilege on FDW granted to non-superuser role +CREATE SERVER "Foreign Server" + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'localhost', port :'master_port', dbname 'regression'); +RESET ROLE; +CREATE USER MAPPING FOR CURRENT_USER + SERVER "Foreign Server" + OPTIONS (user 'postgres'); +-- foreign table owned by superuser +CREATE FOREIGN TABLE foreign_table_owned_by_superuser ( + id integer NOT NULL, + data text +) + SERVER "Foreign Server" + OPTIONS (schema_name 'grant on server', table_name 'foreign_table_test'); +-- create a non-superuser role +CREATE ROLE role_test_servers; +ALTER ROLE role_test_servers WITH LOGIN; +CREATE ROLE role_test_servers_2 WITH LOGIN; +SET ROLE ownerrole; +-- verify that non-superuser role can GRANT on other non-superuser roles, on FDWs +-- if WITH GRANT OPTION is provided +GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO role_test_servers_2; +RESET ROLE; +-- grant privileges +GRANT ALL ON SCHEMA "grant on server" TO role_test_servers WITH GRANT OPTION; +GRANT ALL ON foreign_table_owned_by_superuser TO role_test_servers WITH GRANT OPTION; +GRANT ALL ON foreign_table_test TO role_test_servers WITH GRANT OPTION; +GRANT ALL ON foreign server "Foreign Server" TO role_test_servers, postgres WITH GRANT OPTION; +SET ROLE role_test_servers; +GRANT ALL ON SCHEMA "grant on server" TO role_test_servers_2 WITH GRANT OPTION; +GRANT ALL ON foreign_table_owned_by_superuser TO role_test_servers_2 WITH GRANT OPTION; +GRANT ALL ON foreign_table_test TO role_test_servers_2 WITH GRANT OPTION; +GRANT ALL ON foreign server "Foreign Server" TO role_test_servers_2, postgres WITH GRANT OPTION; +RESET ROLE; +-- add user mapping for the role +CREATE USER MAPPING FOR role_test_servers + SERVER "Foreign Server" + OPTIONS (user 'role_test_servers', password_required 'false'); +CREATE USER MAPPING FOR role_test_servers_2 + SERVER "Foreign Server" + OPTIONS (user 'role_test_servers_2', password_required 'false'); +SET ROLE role_test_servers_2; +-- foreign table owned by non-superuser +CREATE FOREIGN TABLE foreign_table_owned_by_regular_user ( + id integer NOT NULL, + data text +) + SERVER "Foreign Server" + OPTIONS (schema_name 'grant on server', table_name 'foreign_table_test'); +RESET ROLE; +-- now add the node and verify that all propagated correctly +select 1 from citus_add_node('localhost', :'worker_2_port'); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_2_port +SET search_path TO "grant on server"; +SET ROLE role_test_servers_2; +SELECT * from foreign_table_owned_by_superuser; + id | data +--------------------------------------------------------------------- + 1 | text_test +(1 row) + +SELECT * from foreign_table_owned_by_regular_user; + id | data +--------------------------------------------------------------------- + 1 | text_test +(1 row) + +SET ROLE postgres; +SELECT * from foreign_table_owned_by_superuser; + id | data +--------------------------------------------------------------------- + 1 | text_test +(1 row) + +SELECT * from foreign_table_owned_by_regular_user; + id | data +--------------------------------------------------------------------- + 1 | text_test +(1 row) + +\c - - - :master_port +-- verify that the non-superuser privileges has been propagated to the existing worker, +-- and also granted on the newly added worker as well +SELECT run_command_on_workers($$ + SELECT fdwacl FROM pg_foreign_data_wrapper WHERE fdwname = 'postgres_fdw';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"{postgres=U/postgres,ownerrole=U*/postgres,role_test_servers_2=U/ownerrole}") + (localhost,57638,t,"{postgres=U/postgres,ownerrole=U*/postgres,role_test_servers_2=U/ownerrole}") +(2 rows) + +SELECT run_command_on_workers($$ + SELECT rolname FROM pg_roles WHERE oid IN (SELECT srvowner FROM pg_foreign_server WHERE srvname = 'Foreign Server');$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,ownerrole) + (localhost,57638,t,ownerrole) +(2 rows) + +REVOKE GRANT OPTION FOR ALL ON FOREIGN SERVER "Foreign Server" FROM role_test_servers CASCADE; +REVOKE GRANT OPTION FOR ALL ON FOREIGN DATA WRAPPER postgres_fdw FROM ownerrole CASCADE; +SELECT run_command_on_workers($$ + SELECT fdwacl FROM pg_foreign_data_wrapper WHERE fdwname = 'postgres_fdw';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"{postgres=U/postgres,ownerrole=U/postgres}") + (localhost,57638,t,"{postgres=U/postgres,ownerrole=U/postgres}") +(2 rows) + +SELECT run_command_on_workers($$ + SELECT srvacl FROM pg_foreign_server WHERE srvname = 'Foreign Server';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"{ownerrole=U/ownerrole,role_test_servers=U/ownerrole,postgres=U*/ownerrole}") + (localhost,57638,t,"{ownerrole=U/ownerrole,role_test_servers=U/ownerrole,postgres=U*/ownerrole}") +(2 rows) + +REVOKE ALL ON FOREIGN DATA WRAPPER postgres_fdw FROM role_test_servers_2, ownerrole CASCADE; +SELECT run_command_on_workers($$ + SELECT fdwacl FROM pg_foreign_data_wrapper WHERE fdwname = 'postgres_fdw';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,{postgres=U/postgres}) + (localhost,57638,t,{postgres=U/postgres}) +(2 rows) + +REVOKE ALL ON FOREIGN SERVER "Foreign Server" FROM role_test_servers, postgres CASCADE; +SELECT run_command_on_workers($$ + SELECT srvacl FROM pg_foreign_server WHERE srvname = 'Foreign Server';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,{ownerrole=U/ownerrole}) + (localhost,57638,t,{ownerrole=U/ownerrole}) +(2 rows) + +REVOKE ALL ON SCHEMA 
"grant on server" FROM role_test_servers CASCADE; +-- cleanup +SET client_min_messages TO ERROR; +DROP SERVER "Foreign Server" CASCADE; +DROP SCHEMA "grant on server" CASCADE; +DROP ROLE role_test_servers, role_test_servers_2, ownerrole; +SELECT 1 FROM citus_remove_node('localhost', :master_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + diff --git a/src/test/regress/expected/grant_on_foreign_server_propagation_0.out b/src/test/regress/expected/grant_on_foreign_server_propagation_0.out new file mode 100644 index 000000000..e25fbb82d --- /dev/null +++ b/src/test/regress/expected/grant_on_foreign_server_propagation_0.out @@ -0,0 +1,6 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve +\gset +\if :server_version_above_twelve +\else +\q diff --git a/src/test/regress/expected/grant_on_function_propagation.out b/src/test/regress/expected/grant_on_function_propagation.out new file mode 100644 index 000000000..914c8f8b7 --- /dev/null +++ b/src/test/regress/expected/grant_on_function_propagation.out @@ -0,0 +1,543 @@ +-- +-- GRANT_ON_FUNCTION_PROPAGATION +-- +CREATE SCHEMA grant_on_function; +SET search_path TO grant_on_function, public; +-- remove one of the worker nodes to test adding a new node later +SET citus.shard_replication_factor TO 1; +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- create some simple functions +CREATE OR REPLACE FUNCTION function_notice(text) +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE '%', $1; +END; +$$; +CREATE OR REPLACE FUNCTION function_notice() +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Constant Notice'; +END; +$$; +CREATE OR REPLACE FUNCTION function_hello() +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Hello World'; +END; +$$; +SET citus.enable_metadata_sync TO OFF; +CREATE OR REPLACE FUNCTION not_distributed_function() +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'not_distributed_function'; +END; +$$; +RESET citus.enable_metadata_sync; +-- create some users and grant them permission on grant_on_function schema +CREATE USER function_user_1; +CREATE USER function_user_2; +CREATE USER function_user_3; +GRANT ALL ON SCHEMA grant_on_function TO function_user_1, function_user_2, function_user_3; +-- do some varying grants +GRANT EXECUTE ON FUNCTION function_notice() TO function_user_1; +GRANT EXECUTE ON FUNCTION function_notice() TO function_user_2 WITH GRANT OPTION; +SET ROLE function_user_2; +GRANT EXECUTE ON FUNCTION function_notice() TO function_user_3; +RESET ROLE; +SELECT create_distributed_function('function_notice()'); +NOTICE: procedure grant_on_function.function_notice is already distributed +DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- re-distributing the same function with GRANTs should be fine +SELECT create_distributed_function('function_notice()'); +NOTICE: procedure grant_on_function.function_notice is already distributed +DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- check grants propagated correctly after create_distributed_function +SELECT 
proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_1=X/postgres,function_user_2=X*/postgres,function_user_3=X/function_user_2} + function_notice | 1 | +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_1=X/postgres,function_user_2=X*/postgres,function_user_3=X/function_user_2} + function_notice | 1 | +(2 rows) + +\c - - - :master_port +-- do some varying revokes +REVOKE EXECUTE ON FUNCTION grant_on_function.function_notice() FROM function_user_1, function_user_3; +REVOKE GRANT OPTION FOR EXECUTE ON FUNCTION grant_on_function.function_notice() FROM function_user_2 CASCADE; +-- check revokes propagated correctly for the distributed function function_notice() +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X/postgres} + function_notice | 1 | +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X/postgres} + function_notice | 1 | +(2 rows) + +\c - - - :master_port +REVOKE EXECUTE ON FUNCTION grant_on_function.function_notice() FROM function_user_2; +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres} + function_notice | 1 | +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres} + function_notice | 1 | +(2 rows) + +\c - - - :master_port +-- distribute another function +SET search_path TO grant_on_function, public; +SELECT create_distributed_function('function_notice(text)'); +NOTICE: procedure grant_on_function.function_notice is already distributed +DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- GRANT .. ON ALL FUNCTIONS IN SCHEMA .. 
with multiple roles +GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA grant_on_function TO function_user_1, function_user_3; +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_1=X/postgres,function_user_3=X/postgres} + function_notice | 1 | {=X/postgres,postgres=X/postgres,function_user_1=X/postgres,function_user_3=X/postgres} +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_1=X/postgres,function_user_3=X/postgres} + function_notice | 1 | {=X/postgres,postgres=X/postgres,function_user_1=X/postgres,function_user_3=X/postgres} +(2 rows) + +\c - - - :master_port +-- REVOKE .. ON ALL FUNCTIONS IN SCHEMA .. with multiple roles +REVOKE EXECUTE ON ALL FUNCTIONS IN SCHEMA grant_on_function FROM function_user_1, function_user_3; +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres} + function_notice | 1 | {=X/postgres,postgres=X/postgres} +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_notice | 0 | {=X/postgres,postgres=X/postgres} + function_notice | 1 | {=X/postgres,postgres=X/postgres} +(2 rows) + +\c - - - :master_port +-- distribute another function +SET search_path TO grant_on_function, public; +SELECT create_distributed_function('function_hello()'); +NOTICE: procedure grant_on_function.function_hello is already distributed +DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- GRANT with multiple functions and multiple roles +-- function_hello needs no arguments since no other function has that name +GRANT EXECUTE ON FUNCTION grant_on_function.function_hello, grant_on_function.function_notice(), grant_on_function.function_notice(text), grant_on_function.not_distributed_function() TO function_user_2 WITH GRANT OPTION; +SET ROLE function_user_2; +GRANT EXECUTE ON FUNCTION grant_on_function.function_hello, grant_on_function.function_notice(), grant_on_function.function_notice(text), grant_on_function.not_distributed_function() TO function_user_1, function_user_3; +RESET ROLE; +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('function_notice', 'function_hello', 'not_distributed_function') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_hello | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} + function_notice | 1 | 
{=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} + not_distributed_function | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} +(4 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('function_notice', 'function_hello', 'not_distributed_function') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_hello | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} + function_notice | 1 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} +(3 rows) + +\c - - - :master_port +-- add the previously removed node +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- check if the grants are propagated correctly +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('function_notice', 'function_hello') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_hello | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} + function_notice | 1 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} +(3 rows) + +\c - - - :worker_2_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('function_notice', 'function_hello') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + function_hello | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} + function_notice | 0 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} + function_notice | 1 | {=X/postgres,postgres=X/postgres,function_user_2=X*/postgres,function_user_1=X/function_user_2,function_user_3=X/function_user_2} +(3 rows) + +\c - - - :master_port +-- check that it works correctly with a user that is not distributed +CREATE OR REPLACE FUNCTION not_propagated_function_user_test() +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'not_propagated_function_user_test'; +END; +$$; +SELECT create_distributed_function('not_propagated_function_user_test()'); +NOTICE: procedure public.not_propagated_function_user_test is already distributed +DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SET citus.enable_ddl_propagation TO off; +CREATE USER not_propagated_function_user_4; +SET citus.enable_ddl_propagation TO on; +GRANT EXECUTE ON FUNCTION not_propagated_function_user_test TO 
function_user_1, not_propagated_function_user_4; +-- check if the grants are propagated correctly +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('not_propagated_function_user_test') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + not_propagated_function_user_test | 0 | {=X/postgres,postgres=X/postgres,function_user_1=X/postgres,not_propagated_function_user_4=X/postgres} +(1 row) + +\c - - - :worker_2_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('not_propagated_function_user_test') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + not_propagated_function_user_test | 0 | {=X/postgres,postgres=X/postgres,function_user_1=X/postgres,not_propagated_function_user_4=X/postgres} +(1 row) + +\c - - - :master_port +SET search_path TO grant_on_function, public; +-- the following should fail is in plain PG +GRANT EXECUTE ON FUNCTION function_notice(), non_existent_function TO function_user_1; +ERROR: could not find a function named "non_existent_function" +GRANT EXECUTE ON FUNCTION function_notice() TO function_user_1, non_existent_user; +ERROR: role "non_existent_user" does not exist +GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA grant_on_function, non_existent_schema TO function_user_1; +ERROR: schema "non_existent_schema" does not exist +DROP FUNCTION function_notice(), function_notice(text), function_hello, not_distributed_function, not_propagated_function_user_test; +-- add similar tests for procedures +-- remove one of the worker nodes to test adding a new node later +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- create some simple procedures +CREATE OR REPLACE PROCEDURE procedure_notice(text) +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE '%', $1; +END; +$proc$; +CREATE OR REPLACE PROCEDURE procedure_notice() +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE 'Constant Notice'; +END; +$proc$; +CREATE OR REPLACE PROCEDURE procedure_hello() +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE 'Hello World'; +END; +$proc$; +SET citus.enable_metadata_sync TO OFF; +CREATE OR REPLACE PROCEDURE not_distributed_procedure() +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE 'not_distributed_procedure'; +END; +$proc$; +RESET citus.enable_metadata_sync; +-- create some users and grant them permission on grant_on_function schema +CREATE USER procedure_user_1; +CREATE USER procedure_user_2; +CREATE USER procedure_user_3; +GRANT ALL ON SCHEMA grant_on_function TO procedure_user_1, procedure_user_2, procedure_user_3; +-- do some varying grants +GRANT EXECUTE ON PROCEDURE procedure_notice() TO procedure_user_1; +GRANT EXECUTE ON PROCEDURE procedure_notice() TO procedure_user_2 WITH GRANT OPTION; +SET ROLE procedure_user_2; +GRANT EXECUTE ON PROCEDURE procedure_notice() TO procedure_user_3; +RESET ROLE; +SELECT create_distributed_function('procedure_notice()'); +NOTICE: procedure grant_on_function.procedure_notice is already distributed +DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- check grants propagated correctly after create_distributed_function +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | 
proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_1=X/postgres,procedure_user_2=X*/postgres,procedure_user_3=X/procedure_user_2} + procedure_notice | 1 | +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_1=X/postgres,procedure_user_2=X*/postgres,procedure_user_3=X/procedure_user_2} + procedure_notice | 1 | +(2 rows) + +\c - - - :master_port +-- do some varying revokes +REVOKE EXECUTE ON PROCEDURE grant_on_function.procedure_notice() FROM procedure_user_1, procedure_user_3; +REVOKE GRANT OPTION FOR EXECUTE ON PROCEDURE grant_on_function.procedure_notice() FROM procedure_user_2 CASCADE; +-- check revokes propagated correctly for the distributed procedure procedure_notice() +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X/postgres} + procedure_notice | 1 | +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X/postgres} + procedure_notice | 1 | +(2 rows) + +\c - - - :master_port +REVOKE EXECUTE ON PROCEDURE grant_on_function.procedure_notice() FROM procedure_user_2; +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres} + procedure_notice | 1 | +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres} + procedure_notice | 1 | +(2 rows) + +\c - - - :master_port +-- distribute another procedure +SET search_path TO grant_on_function, public; +SELECT create_distributed_function('procedure_notice(text)'); +NOTICE: procedure grant_on_function.procedure_notice is already distributed +DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- GRANT .. ON ALL PROCEDURES IN SCHEMA .. 
with multiple roles +GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA grant_on_function TO procedure_user_1, procedure_user_3; +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_1=X/postgres,procedure_user_3=X/postgres} + procedure_notice | 1 | {=X/postgres,postgres=X/postgres,procedure_user_1=X/postgres,procedure_user_3=X/postgres} +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_1=X/postgres,procedure_user_3=X/postgres} + procedure_notice | 1 | {=X/postgres,postgres=X/postgres,procedure_user_1=X/postgres,procedure_user_3=X/postgres} +(2 rows) + +\c - - - :master_port +-- REVOKE .. ON ALL PROCEDURES IN SCHEMA .. with multiple roles +REVOKE EXECUTE ON ALL PROCEDURES IN SCHEMA grant_on_function FROM procedure_user_1, procedure_user_3; +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres} + procedure_notice | 1 | {=X/postgres,postgres=X/postgres} +(2 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_notice | 0 | {=X/postgres,postgres=X/postgres} + procedure_notice | 1 | {=X/postgres,postgres=X/postgres} +(2 rows) + +\c - - - :master_port +-- distribute another procedure +SET search_path TO grant_on_function, public; +SELECT create_distributed_function('procedure_hello()'); +NOTICE: procedure grant_on_function.procedure_hello is already distributed +DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- GRANT with multiple procedures and multiple roles +-- procedure_hello needs no arguments since no other procedure has that name +GRANT EXECUTE ON PROCEDURE grant_on_function.procedure_hello, grant_on_function.procedure_notice(), grant_on_function.procedure_notice(text), grant_on_function.not_distributed_procedure() TO procedure_user_2 WITH GRANT OPTION; +SET ROLE procedure_user_2; +GRANT EXECUTE ON PROCEDURE grant_on_function.procedure_hello, grant_on_function.procedure_notice(), grant_on_function.procedure_notice(text), grant_on_function.not_distributed_procedure() TO procedure_user_1, procedure_user_3; +RESET ROLE; +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('procedure_notice', 'procedure_hello', 'not_distributed_procedure') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + not_distributed_procedure | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} + procedure_hello | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} + 
procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} + procedure_notice | 1 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} +(4 rows) + +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('procedure_notice', 'procedure_hello', 'not_distributed_procedure') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_hello | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} + procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} + procedure_notice | 1 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} +(3 rows) + +\c - - - :master_port +-- add the previously removed node +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- check if the grants are propagated correctly +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('procedure_notice', 'procedure_hello') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_hello | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} + procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} + procedure_notice | 1 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} +(3 rows) + +\c - - - :worker_2_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('procedure_notice', 'procedure_hello') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + procedure_hello | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} + procedure_notice | 0 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} + procedure_notice | 1 | {=X/postgres,postgres=X/postgres,procedure_user_2=X*/postgres,procedure_user_1=X/procedure_user_2,procedure_user_3=X/procedure_user_2} +(3 rows) + +\c - - - :master_port +-- check that it works correctly with a user that is not distributed +CREATE OR REPLACE PROCEDURE not_propagated_procedure_user_test() +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE 'not_propagated_procedure_user_test'; +END; +$proc$; +SELECT create_distributed_function('not_propagated_procedure_user_test()'); +NOTICE: procedure public.not_propagated_procedure_user_test is already distributed +DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SET citus.enable_ddl_propagation TO off; +CREATE USER not_propagated_procedure_user_4; +SET 
citus.enable_ddl_propagation TO on; +GRANT EXECUTE ON PROCEDURE not_propagated_procedure_user_test TO procedure_user_1, not_propagated_procedure_user_4; +-- check if the grants are propagated correctly +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('not_propagated_procedure_user_test') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + not_propagated_procedure_user_test | 0 | {=X/postgres,postgres=X/postgres,procedure_user_1=X/postgres,not_propagated_procedure_user_4=X/postgres} +(1 row) + +\c - - - :worker_2_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('not_propagated_procedure_user_test') ORDER BY 1, 2; + proname | pronargs | proacl +--------------------------------------------------------------------- + not_propagated_procedure_user_test | 0 | {=X/postgres,postgres=X/postgres,procedure_user_1=X/postgres,not_propagated_procedure_user_4=X/postgres} +(1 row) + +\c - - - :master_port +SET search_path TO grant_on_function, public; +-- the following should fail is in plain PG +GRANT EXECUTE ON PROCEDURE procedure_notice(), non_existent_procedure TO procedure_user_1; +ERROR: could not find a procedure named "non_existent_procedure" +GRANT EXECUTE ON PROCEDURE procedure_notice() TO procedure_user_1, non_existent_user; +ERROR: role "non_existent_user" does not exist +GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA grant_on_function, non_existent_schema TO procedure_user_1; +ERROR: schema "non_existent_schema" does not exist +DROP PROCEDURE procedure_notice(), procedure_notice(text), procedure_hello, not_distributed_procedure, not_propagated_procedure_user_test; +SELECT run_command_on_coordinator_and_workers('DROP SCHEMA grant_on_function CASCADE'); + run_command_on_coordinator_and_workers +--------------------------------------------------------------------- + +(1 row) + +DROP USER function_user_1, function_user_2, function_user_3, not_propagated_function_user_4; +DROP USER procedure_user_1, procedure_user_2, procedure_user_3, not_propagated_procedure_user_4; diff --git a/src/test/regress/expected/grant_on_schema_propagation.out b/src/test/regress/expected/grant_on_schema_propagation.out index 97c02f074..795420a9d 100644 --- a/src/test/regress/expected/grant_on_schema_propagation.out +++ b/src/test/regress/expected/grant_on_schema_propagation.out @@ -10,36 +10,9 @@ SET citus.enable_ddl_propagation TO off; CREATE SCHEMA non_dist_schema; SET citus.enable_ddl_propagation TO on; -- create roles on all nodes -SELECT run_command_on_coordinator_and_workers('CREATE USER role_1'); -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CONTEXT: SQL statement "CREATE USER role_1" -PL/pgSQL function run_command_on_coordinator_and_workers(text) line XX at EXECUTE - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - -SELECT run_command_on_coordinator_and_workers('CREATE USER role_2'); -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
-CONTEXT: SQL statement "CREATE USER role_2" -PL/pgSQL function run_command_on_coordinator_and_workers(text) line XX at EXECUTE - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - -SELECT run_command_on_coordinator_and_workers('CREATE USER role_3'); -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CONTEXT: SQL statement "CREATE USER role_3" -PL/pgSQL function run_command_on_coordinator_and_workers(text) line XX at EXECUTE - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +CREATE USER role_1; +CREATE USER role_2; +CREATE USER role_3; -- do some varying grants GRANT USAGE, CREATE ON SCHEMA dist_schema TO role_1 WITH GRANT OPTION; GRANT USAGE ON SCHEMA dist_schema TO role_2; @@ -186,7 +159,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'anoth nspname | nspacl --------------------------------------------------------------------- another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres} - dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*/postgres} + dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*C/postgres} (2 rows) \c - - - :master_port @@ -412,9 +385,4 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspna \c - - - :master_port DROP TABLE public_schema_table; -SELECT run_command_on_coordinator_and_workers('DROP ROLE role_1, role_2, role_3'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +DROP ROLE role_1, role_2, role_3; diff --git a/src/test/regress/expected/grant_on_sequence_propagation.out b/src/test/regress/expected/grant_on_sequence_propagation.out new file mode 100644 index 000000000..3e7893445 --- /dev/null +++ b/src/test/regress/expected/grant_on_sequence_propagation.out @@ -0,0 +1,336 @@ +-- +-- GRANT_ON_SEQUENCE_PROPAGATION +-- +SET citus.shard_replication_factor TO 1; +CREATE SCHEMA grant_on_sequence; +SET search_path TO grant_on_sequence, public; +-- create some simple sequences +CREATE SEQUENCE dist_seq_0; +CREATE SEQUENCE dist_seq_1; +CREATE SEQUENCE non_dist_seq_0; +-- create some users and grant them permission on grant_on_sequence schema +CREATE USER seq_user_0; +CREATE USER seq_user_1; +CREATE USER seq_user_2; +GRANT ALL ON SCHEMA grant_on_sequence TO seq_user_0, seq_user_1, seq_user_2; +-- do some varying grants +GRANT SELECT ON SEQUENCE dist_seq_0 TO seq_user_0; +GRANT USAGE ON SEQUENCE dist_seq_0 TO seq_user_1 WITH GRANT OPTION; +SET ROLE seq_user_1; +GRANT USAGE ON SEQUENCE dist_seq_0 TO seq_user_2; +RESET ROLE; +-- distribute a sequence +-- reminder: a sequence is distributed when used in a distributed table AND cluster has metadata workers +CREATE TABLE seq_test_0 (a int, b bigint DEFAULT nextval('dist_seq_0')); +SELECT create_distributed_table('seq_test_0', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- check grants propagated correctly after sequence is distributed +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; + relname | relacl 
+--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_0=r/postgres,seq_user_1=U*/postgres,seq_user_2=U/seq_user_1} +(1 row) + +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_0=r/postgres,seq_user_1=U*/postgres,seq_user_2=U/seq_user_1} +(1 row) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- do some varying revokes +REVOKE SELECT ON SEQUENCE dist_seq_0 FROM seq_user_0, seq_user_2; +REVOKE GRANT OPTION FOR USAGE ON SEQUENCE dist_seq_0 FROM seq_user_1 CASCADE; +-- check revokes propagated correctly for the distributed sequence dist_seq_0 +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_1=U/postgres} +(1 row) + +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_1=U/postgres} +(1 row) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +REVOKE USAGE ON SEQUENCE dist_seq_0 FROM seq_user_1; +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres} +(1 row) + +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres} +(1 row) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- distribute another sequence +CREATE TABLE seq_test_1 (a int, b bigint DEFAULT nextval('dist_seq_1')); +SELECT create_distributed_table('seq_test_1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- GRANT .. ON ALL SEQUENCES IN SCHEMA .. 
with multiple roles +GRANT ALL ON ALL SEQUENCES IN SCHEMA grant_on_sequence TO seq_user_0, seq_user_2; +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1') ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_0=rwU/postgres,seq_user_2=rwU/postgres} + dist_seq_1 | {postgres=rwU/postgres,seq_user_0=rwU/postgres,seq_user_2=rwU/postgres} +(2 rows) + +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1') ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_0=rwU/postgres,seq_user_2=rwU/postgres} + dist_seq_1 | {postgres=rwU/postgres,seq_user_0=rwU/postgres,seq_user_2=rwU/postgres} +(2 rows) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- REVOKE .. ON ALL SEQUENCES IN SCHEMA .. with multiple roles +REVOKE ALL ON ALL SEQUENCES IN SCHEMA grant_on_sequence FROM seq_user_0, seq_user_2; +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1') ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres} + dist_seq_1 | {postgres=rwU/postgres} +(2 rows) + +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1') ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres} + dist_seq_1 | {postgres=rwU/postgres} +(2 rows) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- GRANT with multiple sequences and multiple roles +GRANT UPDATE ON SEQUENCE dist_seq_0, dist_seq_1, non_dist_seq_0 TO seq_user_1 WITH GRANT OPTION; +SET ROLE seq_user_1; +GRANT UPDATE ON SEQUENCE dist_seq_0, dist_seq_1, non_dist_seq_0 TO seq_user_0, seq_user_2; +RESET ROLE; +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1', 'non_dist_seq_0') ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} + dist_seq_1 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} + non_dist_seq_0 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} +(3 rows) + +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1', 'non_dist_seq_0') ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} + dist_seq_1 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} +(2 rows) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = 
grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- sync metadata to another node +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- check if the grants are propagated correctly +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1', 'non_dist_seq_0') ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} + dist_seq_1 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} + non_dist_seq_0 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} +(3 rows) + +\c - - - :worker_2_port +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1', 'non_dist_seq_0') ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_0 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} + dist_seq_1 | {postgres=rwU/postgres,seq_user_1=w*/postgres,seq_user_0=w/seq_user_1,seq_user_2=w/seq_user_1} +(2 rows) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- check that it works correctly with a user that is not distributed +CREATE SEQUENCE dist_seq_2; +ALTER TABLE seq_test_1 ALTER COLUMN b SET DEFAULT nextval('dist_seq_2'); +SET citus.enable_ddl_propagation TO off; +CREATE USER not_propagated_sequence_user_4; +SET citus.enable_ddl_propagation TO on; +-- when running below command, not_propagated_sequence_user_4 should be propagated +-- to the worker nodes as part of dist_seq_2's dependencies +GRANT USAGE ON sequence dist_seq_2 TO seq_user_0, not_propagated_sequence_user_4; +-- check if the grants are propagated correctly +-- check that we can see the not_propagated_sequence_user_4 +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_2' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_2 | {postgres=rwU/postgres,seq_user_0=U/postgres,not_propagated_sequence_user_4=U/postgres} +(1 row) + +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_2' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_2 | {postgres=rwU/postgres,seq_user_0=U/postgres,not_propagated_sequence_user_4=U/postgres} +(1 row) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- the following should fail as in plain PG +GRANT USAGE ON sequence dist_seq_0, non_existent_sequence TO seq_user_0; +ERROR: relation "non_existent_sequence" does not exist +GRANT UPDATE ON sequence dist_seq_0 TO seq_user_0, non_existent_user; +ERROR: role 
"non_existent_user" does not exist +GRANT SELECT ON ALL SEQUENCES IN SCHEMA grant_on_sequence, non_existent_schema TO seq_user_0; +ERROR: schema "non_existent_schema" does not exist +-- check that GRANT ON TABLE that redirects to sequences works properly +CREATE SEQUENCE dist_seq_3; +ALTER TABLE seq_test_1 ALTER COLUMN b SET DEFAULT nextval('dist_seq_3'); +GRANT UPDATE ON TABLE seq_test_1, dist_seq_3 TO seq_user_0; +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_3' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_3 | {postgres=rwU/postgres,seq_user_0=w/postgres} +(1 row) + +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_3' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_3 | {postgres=rwU/postgres,seq_user_0=w/postgres} +(1 row) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +REVOKE ALL ON TABLE seq_test_1, dist_seq_3 FROM seq_user_0; +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_3' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_3 | {postgres=rwU/postgres} +(1 row) + +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_3' ORDER BY 1; + relname | relacl +--------------------------------------------------------------------- + dist_seq_3 | {postgres=rwU/postgres} +(1 row) + +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +DROP SCHEMA grant_on_sequence CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to sequence dist_seq_0 +drop cascades to sequence dist_seq_1 +drop cascades to sequence non_dist_seq_0 +drop cascades to table seq_test_0 +drop cascades to table seq_test_1 +drop cascades to sequence dist_seq_2 +drop cascades to sequence dist_seq_3 +SET search_path TO public; diff --git a/src/test/regress/expected/ignoring_orphaned_shards.out b/src/test/regress/expected/ignoring_orphaned_shards.out index dff3f0691..2c12a5ae4 100644 --- a/src/test/regress/expected/ignoring_orphaned_shards.out +++ b/src/test/regress/expected/ignoring_orphaned_shards.out @@ -147,7 +147,7 @@ SELECT logicalrelid FROM pg_dist_partition WHERE colocationid = 92448300 ORDER B rep1 (1 row) -SELECT citus_move_shard_placement(92448300, 'localhost', :worker_1_port, 'localhost', :master_port); +SELECT citus_move_shard_placement(92448300, 'localhost', :worker_1_port, 'localhost', :master_port, 'block_writes'); citus_move_shard_placement --------------------------------------------------------------------- @@ -263,7 +263,7 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing ROLLBACK DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -- Cause the orphaned shard to be local -SELECT 1 FROM 
citus_drain_node('localhost', :master_port); +SELECT 1 FROM citus_drain_node('localhost', :master_port, 'block_writes'); NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ... ?column? --------------------------------------------------------------------- diff --git a/src/test/regress/expected/intermediate_results.out b/src/test/regress/expected/intermediate_results.out index 01f079f8f..b8b028ef1 100644 --- a/src/test/regress/expected/intermediate_results.out +++ b/src/test/regress/expected/intermediate_results.out @@ -571,13 +571,6 @@ WARNING: Query could not find the intermediate result file "squares_2", it was -- test refreshing mat views SET client_min_messages TO ERROR; -SELECT run_command_on_workers($$CREATE USER some_other_user;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - CREATE USER some_other_user; SELECT run_command_on_workers($$GRANT ALL ON DATABASE regression TO some_other_user;$$); run_command_on_workers diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out index 87f4e6f33..d2f526e03 100644 --- a/src/test/regress/expected/isolation_get_all_active_transactions.out +++ b/src/test/regress/expected/isolation_get_all_active_transactions.out @@ -1,27 +1,9 @@ Parsed test spec with 3 sessions starting permutation: s1-grant s1-begin-insert s2-begin-insert s3-as-admin s3-as-user-1 s3-as-readonly s3-as-monitor s1-commit s2-commit -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"GRANT ROLE") -(localhost,57638,t,"GRANT ROLE") -(2 rows) - step s1-grant: GRANT ALL ON test_table TO test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1'); GRANT ALL ON test_table TO test_user_2; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - -bool_and ---------------------------------------------------------------------- -t -(1 row) - -bool_and ---------------------------------------------------------------------- -t -(1 row) step s1-begin-insert: BEGIN; @@ -49,7 +31,7 @@ count (1 row) step s3-as-user-1: - -- Eventhough we change the user via SET ROLE, the backends' (e.g., s1/2-begin-insert) + -- Even though we change the user via SET ROLE, the backends' (e.g., s1/2-begin-insert) -- userId (e.g., PG_PROC->userId) does not change, and hence none of the -- transactions show up because here we are using test_user_1. This is a -- limitation of isolation tester, we should be able to re-connect with @@ -69,7 +51,7 @@ count (1 row) step s3-as-readonly: - -- Eventhough we change the user via SET ROLE, the backends' (e.g., s1/2-begin-insert) + -- Even though we change the user via SET ROLE, the backends' (e.g., s1/2-begin-insert) -- userId (e.g., PG_PROC->userId) does not change, and hence none of the -- transactions show up because here we are using test_readonly. 
This is a -- limitation of isolation tester, we should be able to re-connect with @@ -111,9 +93,3 @@ step s1-commit: step s2-commit: COMMIT; -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it") -(localhost,57638,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it") -(2 rows) - diff --git a/src/test/regress/expected/isolation_logical_replication_0.out b/src/test/regress/expected/isolation_logical_replication_0.out new file mode 100644 index 000000000..434260965 --- /dev/null +++ b/src/test/regress/expected/isolation_logical_replication_0.out @@ -0,0 +1,223 @@ +Parsed test spec with 3 sessions + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-insert s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock + + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement + + +step s2-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock + +t +step s1-end: + COMMIT; + +step s2-insert: <... completed> + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-upsert s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock + + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement + + +step s2-upsert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + + INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock + +t +step s1-end: + COMMIT; + +step s2-upsert: <... completed> + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-update s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock + + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement + + +step s2-update: + UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock + +t +step s1-end: + COMMIT; + +step s2-update: <... completed> + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-delete s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock + + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement + + +step s2-delete: + DELETE FROM logical_replicate_placement WHERE x = 15; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock + +t +step s1-end: + COMMIT; + +step s2-delete: <... 
completed> + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-copy s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock + + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement + + +step s2-copy: + COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock + +t +step s1-end: + COMMIT; + +step s2-copy: <... completed> + +starting permutation: s1-begin s1-move-placement s2-truncate s1-end +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement + + +step s2-truncate: + TRUNCATE logical_replicate_placement; + +step s1-end: + COMMIT; + +step s2-truncate: <... completed> + +starting permutation: s1-begin s1-move-placement s2-alter-table s1-end +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement + + +step s2-alter-table: + ALTER TABLE logical_replicate_placement ADD COLUMN z INT; + +step s1-end: + COMMIT; + +step s2-alter-table: <... completed> + +starting permutation: s1-begin s2-truncate s1-move-placement s1-end +step s1-begin: + BEGIN; + +step s2-truncate: + TRUNCATE logical_replicate_placement; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement + + +step s1-end: + COMMIT; + + +starting permutation: s1-begin s2-alter-table s1-move-placement s1-end +step s1-begin: + BEGIN; + +step s2-alter-table: + ALTER TABLE logical_replicate_placement ADD COLUMN z INT; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement + + +step s1-end: + COMMIT; + diff --git a/src/test/regress/expected/isolation_logical_replication_multi_shard_commands.out b/src/test/regress/expected/isolation_logical_replication_multi_shard_commands.out new file mode 100644 index 000000000..194862301 --- /dev/null +++ b/src/test/regress/expected/isolation_logical_replication_multi_shard_commands.out @@ -0,0 +1,769 @@ +Parsed test spec with 3 sessions + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-insert s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 15 +172|172 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-upsert s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-upsert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 16 +172|173 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-update s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-update: + UPDATE logical_replicate_placement SET y = y + 1; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 16 +172|173 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-delete s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-delete: + DELETE FROM logical_replicate_placement; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-select s3-release-advisory-lock s1-end s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-select: + SELECT * FROM logical_replicate_placement ORDER BY y; + + x| y +--------------------------------------------------------------------- + 15| 15 +172|172 +(2 rows) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-copy s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-copy: + COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 1| 1 + 2| 2 + 3| 3 + 4| 4 + 5| 5 +15|30 +(6 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s1-begin s1-move-placement s2-truncate s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-truncate: + TRUNCATE logical_replicate_placement; + +step s1-end: + COMMIT; + +step s2-truncate: <... 
completed> +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-alter-table s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-alter-table: + ALTER TABLE logical_replicate_placement ADD COLUMN z INT; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s2-alter-table: <... completed> +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y|z +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-begin s2-begin s2-insert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-end: + COMMIT; + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 15 +172|172 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-begin s2-begin s2-upsert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-upsert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-end: + COMMIT; + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 16 +172|173 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s1-begin s2-begin s2-update s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-update: + UPDATE logical_replicate_placement SET y = y + 1; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-end: + COMMIT; + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 16 +172|173 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s1-begin s2-begin s2-delete s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-delete: + DELETE FROM logical_replicate_placement; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-end: + COMMIT; + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s1-begin s2-begin s2-select s1-move-placement s2-end s1-end s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-select: + SELECT * FROM logical_replicate_placement ORDER BY y; + + x| y +--------------------------------------------------------------------- + 15| 15 +172|172 +(2 rows) + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-end: + COMMIT; + +step s1-end: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-begin s2-begin s2-copy s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-copy: + COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-end: + COMMIT; + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 1| 1 + 2| 2 + 3| 3 + 4| 4 + 5| 5 +15|30 +(6 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s1-begin s2-begin s2-truncate s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-truncate: + TRUNCATE logical_replicate_placement; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-end: + COMMIT; + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-begin s2-begin s2-alter-table s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-alter-table: + ALTER TABLE logical_replicate_placement ADD COLUMN z INT; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-end: + COMMIT; + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y|z +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + diff --git a/src/test/regress/expected/isolation_logical_replication_multi_shard_commands_on_mx.out b/src/test/regress/expected/isolation_logical_replication_multi_shard_commands_on_mx.out new file mode 100644 index 000000000..a5eb30520 --- /dev/null +++ b/src/test/regress/expected/isolation_logical_replication_multi_shard_commands_on_mx.out @@ -0,0 +1,692 @@ +Parsed test spec with 3 sessions + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-insert s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-insert: + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172)'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 15 +172|172 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-update s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-update: + SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 16 +172|173 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-delete s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-delete: + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-select s2-commit-worker s3-release-advisory-lock s1-commit s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-select: + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-begin s2-start-session-level-connection s2-begin-on-worker s2-insert s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-begin: + BEGIN; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-insert: + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172)'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 15 +172|172 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s1-begin: + BEGIN; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-update: + SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- + 15| 16 +172|173 +(2 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s1-begin: + BEGIN; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-delete: + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + +step s1-begin: + BEGIN; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-select: + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out b/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out new file mode 100644 index 000000000..2e8125eb9 --- /dev/null +++ b/src/test/regress/expected/isolation_logical_replication_single_shard_commands.out @@ -0,0 +1,609 
@@ +Parsed test spec with 3 sessions + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-insert s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|15 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-upsert s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-upsert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|16 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-update s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-update: + UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|16 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-delete s3-release-advisory-lock s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-delete: + DELETE FROM logical_replicate_placement WHERE x = 15; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-select s3-release-advisory-lock s1-end s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-select: + SELECT * FROM logical_replicate_placement ORDER BY y; + + x| y +--------------------------------------------------------------------- +15|15 +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-select-for-update s3-release-advisory-lock s1-end s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-select-for-update: + SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE; + + x| y +--------------------------------------------------------------------- +15|15 +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-begin s2-begin s2-insert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-end: + COMMIT; + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|15 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-begin s2-begin s2-upsert s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-upsert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-end: + COMMIT; + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|16 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s1-begin s2-begin s2-update s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-update: + UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-end: + COMMIT; + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|16 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s1-begin s2-begin s2-delete s1-move-placement s2-end s1-end s1-select s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-delete: + DELETE FROM logical_replicate_placement WHERE x = 15; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-end: + COMMIT; + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s1-begin s2-begin s2-select s1-move-placement s2-end s1-end s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-select: + SELECT * FROM logical_replicate_placement ORDER BY y; + + x| y +--------------------------------------------------------------------- +15|15 +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-end: + COMMIT; + +step s1-end: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-insert s1-begin s2-begin s2-select-for-update s1-move-placement s2-end s1-end s1-get-shard-distribution +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-select-for-update: + SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE; + + x| y +--------------------------------------------------------------------- +15|15 +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-end: + COMMIT; + +step 
s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + + +starting permutation: s1-begin s2-begin s1-move-placement s2-move-placement s1-end s2-end +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-move-placement: + SELECT master_move_shard_placement( + get_shard_id_for_distribution_column('logical_replicate_placement', 4), + 'localhost', 57637, 'localhost', 57638); + +step s1-end: + COMMIT; + +step s2-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-end: + COMMIT; + diff --git a/src/test/regress/expected/isolation_logical_replication_single_shard_commands_on_mx.out b/src/test/regress/expected/isolation_logical_replication_single_shard_commands_on_mx.out new file mode 100644 index 000000000..43ee52eeb --- /dev/null +++ b/src/test/regress/expected/isolation_logical_replication_single_shard_commands_on_mx.out @@ -0,0 +1,850 @@ +Parsed test spec with 3 sessions + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-insert s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-insert: + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15)'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|15 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-update s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-update: + SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|16 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-delete s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-delete: + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement WHERE x = 15'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-select s2-commit-worker s3-release-advisory-lock s1-commit s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-select: + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s2-commit-worker s3-release-advisory-lock s1-commit s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-select-for-update: + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-begin s2-start-session-level-connection s2-begin-on-worker s2-insert s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-begin: + BEGIN; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-insert: + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15)'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|15 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s1-begin: + BEGIN; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-update: + SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + + x| y +--------------------------------------------------------------------- +15|16 +(1 row) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s1-begin: + BEGIN; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-delete: + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement WHERE x = 15'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-select: + SELECT * FROM logical_replicate_placement order by y; + +x|y +--------------------------------------------------------------------- +(0 rows) + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s1-begin: + BEGIN; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-select: + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection +step s1-insert: + INSERT INTO logical_replicate_placement VALUES (15, 15); + +step s1-begin: + BEGIN; + +step s2-start-session-level-connection: + SELECT 
start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-select-for-update: + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); + +step s2-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: + COMMIT; + +step s1-get-shard-distribution: + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; + +nodeport +--------------------------------------------------------------------- + 57638 +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_logical_replication_with_partitioning.out b/src/test/regress/expected/isolation_logical_replication_with_partitioning.out new file mode 100644 index 000000000..216167fc5 --- /dev/null +++ b/src/test/regress/expected/isolation_logical_replication_with_partitioning.out @@ -0,0 +1,519 @@ +Parsed test spec with 3 sessions + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-insert-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-partitioned: + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-insert-partitioned: + INSERT INTO logical_replicate_partitioned VALUES (5, 15); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-partitioned: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-upsert-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-partitioned: + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-upsert-partitioned: + INSERT INTO logical_replicate_partitioned VALUES (5, 15); + INSERT INTO logical_replicate_partitioned VALUES (5, 15) ON CONFLICT (x, y) DO UPDATE SET y = logical_replicate_partitioned.y + 1; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-partitioned: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-update-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-partitioned: + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-update-partitioned: + UPDATE logical_replicate_partitioned SET y = y + 1 WHERE x = 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-partitioned: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-delete-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-partitioned: + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-delete-partitioned: + DELETE FROM logical_replicate_partitioned WHERE x = 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-partitioned: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-copy-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-partitioned: + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-copy-partitioned: + COPY logical_replicate_partitioned FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-partitioned: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-single-partition s2-insert-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-single-partition: + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-insert-partitioned: + INSERT INTO logical_replicate_partitioned VALUES (5, 15); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-single-partition: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-single-partition s2-upsert-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-single-partition: + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-upsert-partitioned: + INSERT INTO logical_replicate_partitioned VALUES (5, 15); + INSERT INTO logical_replicate_partitioned VALUES (5, 15) ON CONFLICT (x, y) DO UPDATE SET y = logical_replicate_partitioned.y + 1; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-single-partition: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-single-partition s2-update-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-single-partition: + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-update-partitioned: + UPDATE logical_replicate_partitioned SET y = y + 1 WHERE x = 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-single-partition: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-single-partition s2-delete-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-single-partition: + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-delete-partitioned: + DELETE FROM logical_replicate_partitioned WHERE x = 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-single-partition: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-single-partition s2-copy-partitioned s3-release-advisory-lock s1-end +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + +step s1-move-placement-single-partition: + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); + +step s2-copy-partitioned: + COPY logical_replicate_partitioned FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s1-move-placement-single-partition: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s1-begin s1-move-placement-partitioned s2-truncate-partitioned s1-end +step s1-begin: + BEGIN; + +step s1-move-placement-partitioned: + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-truncate-partitioned: + TRUNCATE logical_replicate_partitioned; + +step s1-end: + COMMIT; + +step s2-truncate-partitioned: <... completed> + +starting permutation: s1-begin s1-move-placement-partitioned s2-alter-table-partitioned s1-end +step s1-begin: + BEGIN; + +step s1-move-placement-partitioned: + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-alter-table-partitioned: + ALTER TABLE logical_replicate_partitioned ADD COLUMN z INT; + +step s1-end: + COMMIT; + +step s2-alter-table-partitioned: <... completed> + +starting permutation: s1-begin s2-truncate-partitioned s1-move-placement-partitioned s1-end +step s1-begin: + BEGIN; + +step s2-truncate-partitioned: + TRUNCATE logical_replicate_partitioned; + +step s1-move-placement-partitioned: + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s1-begin s2-alter-table-partitioned s1-move-placement-partitioned s1-end +step s1-begin: + BEGIN; + +step s2-alter-table-partitioned: + ALTER TABLE logical_replicate_partitioned ADD COLUMN z INT; + +step s1-move-placement-partitioned: + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s1-begin s1-move-placement-single-partition s2-truncate-partitioned s1-end +step s1-begin: + BEGIN; + +step s1-move-placement-single-partition: + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-truncate-partitioned: + TRUNCATE logical_replicate_partitioned; + +step s1-end: + COMMIT; + +step s2-truncate-partitioned: <... completed> + +starting permutation: s1-begin s1-move-placement-single-partition s2-alter-table-partitioned s1-end +step s1-begin: + BEGIN; + +step s1-move-placement-single-partition: + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-alter-table-partitioned: + ALTER TABLE logical_replicate_partitioned ADD COLUMN z INT; + +step s1-end: + COMMIT; + +step s2-alter-table-partitioned: <... 
completed> + +starting permutation: s1-begin s2-truncate-partitioned s1-move-placement-single-partition s1-end +step s1-begin: + BEGIN; + +step s2-truncate-partitioned: + TRUNCATE logical_replicate_partitioned; + +step s1-move-placement-single-partition: + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + + +starting permutation: s1-begin s2-alter-table-partitioned s1-move-placement-single-partition s1-end +step s1-begin: + BEGIN; + +step s2-alter-table-partitioned: + ALTER TABLE logical_replicate_partitioned ADD COLUMN z INT; + +step s1-move-placement-single-partition: + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-end: + COMMIT; + diff --git a/src/test/regress/expected/isolation_logical_replication_with_partitioning_0.out b/src/test/regress/expected/isolation_logical_replication_with_partitioning_0.out new file mode 100644 index 000000000..7c39e576a --- /dev/null +++ b/src/test/regress/expected/isolation_logical_replication_with_partitioning_0.out @@ -0,0 +1,4 @@ +Parsed test spec with 3 sessions + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-insert-partitioned s3-release-advisory-lock s1-end +setup failed: ERROR: primary key constraints are not supported on partitioned tables diff --git a/src/test/regress/expected/isolation_logical_replication_with_partitioning_1.out b/src/test/regress/expected/isolation_logical_replication_with_partitioning_1.out new file mode 100644 index 000000000..87b003946 --- /dev/null +++ b/src/test/regress/expected/isolation_logical_replication_with_partitioning_1.out @@ -0,0 +1,4 @@ +Parsed test spec with 3 sessions + +starting permutation: s3-acquire-advisory-lock s1-begin s1-move-placement-partitioned s2-insert-partitioned s3-release-advisory-lock s1-end +setup failed: ERROR: syntax error at or near "PARTITION" diff --git a/src/test/regress/expected/isolation_move_placement_vs_modification.out b/src/test/regress/expected/isolation_move_placement_vs_modification.out new file mode 100644 index 000000000..850334649 --- /dev/null +++ b/src/test/regress/expected/isolation_move_placement_vs_modification.out @@ -0,0 +1,738 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-move-placement s1-update s2-commit s1-commit s2-print-content s2-print-placements +step s1-load-cache: + TRUNCATE test_move_placement; + +step s1-insert: + INSERT INTO test_move_placement VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-update: + UPDATE 
test_move_placement SET y = 5 WHERE x = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... completed> +step s1-commit: + COMMIT; + +step s2-print-content: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5') + WHERE + shardid IN (SELECT * FROM selected_shard) + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 5 +(1 row) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + + +starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-move-placement s1-delete s2-commit s1-commit s2-print-content s2-print-placements +step s1-load-cache: + TRUNCATE test_move_placement; + +step s1-insert: + INSERT INTO test_move_placement VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-delete: + DELETE FROM test_move_placement WHERE x = 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-content: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5') + WHERE + shardid IN (SELECT * FROM selected_shard) + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | +(1 row) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-move-placement s1-insert s2-commit s1-commit s2-print-content s2-print-placements +step s1-load-cache: + TRUNCATE test_move_placement; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + INSERT INTO test_move_placement VALUES (5, 10); + +step s2-commit: + COMMIT; + +step s1-insert: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-content: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5') + WHERE + shardid IN (SELECT * FROM selected_shard) + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 10 +(1 row) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-move-placement s1-copy s2-commit s1-commit s2-print-content s2-print-placements +step s1-load-cache: + TRUNCATE test_move_placement; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-copy: + COPY test_move_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-content: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5') + WHERE + shardid IN (SELECT * FROM selected_shard) + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 5 +(1 row) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-move-placement s1-ddl s2-commit s1-commit s2-print-index-count s2-print-placements +step s1-load-cache: + TRUNCATE test_move_placement; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-ddl: + CREATE INDEX test_move_placement_index ON test_move_placement(x); + +step s2-commit: + COMMIT; + +step s1-ddl: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-index-count: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 1 + 57637|t | 1 + 57637|t | 1 + 57637|t | 1 + 57637|t | 1 + 57638|t | 1 + 57638|t | 1 + 57638|t | 1 +(8 rows) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + + +starting permutation: s1-insert s1-begin s1-select s2-begin s2-move-placement s1-update s2-commit s1-commit s2-print-content s2-print-placements +step s1-insert: + INSERT INTO test_move_placement VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-update: + UPDATE test_move_placement SET y = 5 WHERE x = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-content: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5') + WHERE + shardid IN (SELECT * FROM selected_shard) + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 5 +(1 row) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + + +starting permutation: s1-insert s1-begin s1-select s2-begin s2-move-placement s1-delete s2-commit s1-commit s2-print-content s2-print-placements +step s1-insert: + INSERT INTO test_move_placement VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-delete: + DELETE FROM test_move_placement WHERE x = 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +step s1-commit: + COMMIT; + +step s2-print-content: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5') + WHERE + shardid IN (SELECT * FROM selected_shard) + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | +(1 row) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + + +starting permutation: s1-begin s1-select s2-begin s2-move-placement s1-insert s2-commit s1-commit s2-print-content s2-print-placements +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + INSERT INTO test_move_placement VALUES (5, 10); + +step s2-commit: + COMMIT; + +step s1-insert: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-content: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5') + WHERE + shardid IN (SELECT * FROM selected_shard) + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 10 +(1 row) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + + +starting permutation: s1-begin s1-select s2-begin s2-move-placement s1-copy s2-commit s1-commit s2-print-content s2-print-placements +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-copy: + COPY test_move_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... completed> +step s1-commit: + COMMIT; + +step s2-print-content: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5') + WHERE + shardid IN (SELECT * FROM selected_shard) + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 5 +(1 row) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + + +starting permutation: s1-begin s1-select s2-begin s2-move-placement s1-ddl s2-commit s1-commit s2-print-index-count s2-print-placements +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM test_move_placement WHERE x = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-ddl: + CREATE INDEX test_move_placement_index ON test_move_placement(x); + +step s2-commit: + COMMIT; + +step s1-ddl: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-index-count: + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 1 + 57637|t | 1 + 57637|t | 1 + 57637|t | 1 + 57637|t | 1 + 57638|t | 1 + 57638|t | 1 + 57638|t | 1 +(8 rows) + +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57637| 5 +localhost| 57638| 3 +(2 rows) + diff --git a/src/test/regress/expected/isolation_move_placement_vs_modification_fk.out b/src/test/regress/expected/isolation_move_placement_vs_modification_fk.out new file mode 100644 index 000000000..32dfc769f --- /dev/null +++ b/src/test/regress/expected/isolation_move_placement_vs_modification_fk.out @@ -0,0 +1,555 @@ +Parsed test spec with 3 sessions + +starting permutation: s2-add-fkey s1-insert-referenced s1-insert-referencing s1-begin s2-begin s2-move-placement-blocking s1-delete s2-commit s1-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-move-placement-blocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-delete: + DELETE FROM referenced_table WHERE id < 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1600002|t | 4 + 57637|1600003|t | 2 +(2 rows) + +id|value +--------------------------------------------------------------------- + 5| 5 + 6| 6 + 7| 7 + 8| 8 + 9| 9 +10| 10 +(6 rows) + + +starting permutation: s2-add-fkey s1-insert-referenced s1-insert-referencing s1-begin s2-begin s2-move-placement-blocking s1-update s2-commit s1-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-move-placement-blocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-update: + UPDATE referenced_table SET value = 5 WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1600006|t | 7 + 57637|1600007|t | 3 +(2 rows) + +id|value +--------------------------------------------------------------------- + 1| 1 + 2| 2 + 3| 3 + 4| 4 + 5| 5 + 6| 6 + 7| 7 + 8| 8 + 9| 9 +10| 10 +(10 rows) + + +starting permutation: s2-add-fkey s1-insert-referenced s1-insert-referencing s1-begin s2-begin s2-move-placement-blocking s1-ddl s2-commit s1-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-move-placement-blocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-ddl: + CREATE INDEX referenced_table_index ON referenced_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1600010|t | 7 + 57637|1600011|t | 3 +(2 rows) + +id|value +--------------------------------------------------------------------- + 1| 1 + 2| 2 + 3| 3 + 4| 4 + 5| 5 + 6| 6 + 7| 7 + 8| 8 + 9| 9 +10| 10 +(10 rows) + + +starting permutation: s2-add-fkey s1-insert-referenced s1-begin s2-begin s2-move-placement-blocking s1-insert-referencing s2-commit s1-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-move-placement-blocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-commit: + COMMIT; + +step s1-insert-referencing: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1600014|t | 7 + 57637|1600015|t | 3 +(2 rows) + +id|value +--------------------------------------------------------------------- + 1| 1 + 2| 2 + 3| 3 + 4| 4 + 5| 5 + 6| 6 + 7| 7 + 8| 8 + 9| 9 +10| 10 +(10 rows) + + +starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s1-insert-referencing s2-begin s2-move-placement-nonblocking s1-delete s3-release-advisory-lock s2-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-begin: + BEGIN; + +step s2-move-placement-nonblocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637); + +step s1-delete: + DELETE FROM referenced_table WHERE id < 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s2-move-placement-nonblocking: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1600018|t | 4 + 57637|1600019|t | 2 +(2 rows) + +id|value +--------------------------------------------------------------------- + 5| 5 + 6| 6 + 7| 7 + 8| 8 + 9| 9 +10| 10 +(6 rows) + + +starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s1-insert-referencing s2-begin s2-move-placement-nonblocking s1-update s3-release-advisory-lock s2-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-begin: + BEGIN; + +step s2-move-placement-nonblocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637); + +step s1-update: + UPDATE referenced_table SET value = 5 WHERE id = 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s2-move-placement-nonblocking: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1600022|t | 7 + 57637|1600023|t | 3 +(2 rows) + +id|value +--------------------------------------------------------------------- + 1| 1 + 2| 2 + 3| 3 + 4| 4 + 5| 5 + 6| 6 + 7| 7 + 8| 8 + 9| 9 +10| 10 +(10 rows) + + +starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s1-insert-referencing s2-begin s2-move-placement-nonblocking s1-ddl s3-release-advisory-lock s2-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-begin: + BEGIN; + +step s2-move-placement-nonblocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637); + +step s1-ddl: + CREATE INDEX referenced_table_index ON referenced_table(id); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s2-move-placement-nonblocking: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1600026|t | 7 + 57637|1600027|t | 3 +(2 rows) + +id|value +--------------------------------------------------------------------- + 1| 1 + 2| 2 + 3| 3 + 4| 4 + 5| 5 + 6| 6 + 7| 7 + 8| 8 + 9| 9 +10| 10 +(10 rows) + + +starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s2-begin s2-move-placement-nonblocking s1-insert-referencing s3-release-advisory-lock s2-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-begin: + BEGIN; + +step s2-move-placement-nonblocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s2-move-placement-nonblocking: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1600030|t | 7 + 57637|1600031|t | 3 +(2 rows) + +id|value +--------------------------------------------------------------------- + 1| 1 + 2| 2 + 3| 3 + 4| 4 + 5| 5 + 6| 6 + 7| 7 + 8| 8 + 9| 9 +10| 10 +(10 rows) + diff --git a/src/test/regress/expected/isolation_move_placement_vs_modification_fk_0.out b/src/test/regress/expected/isolation_move_placement_vs_modification_fk_0.out new file mode 100644 index 000000000..47b9c3bb3 --- /dev/null +++ b/src/test/regress/expected/isolation_move_placement_vs_modification_fk_0.out @@ -0,0 +1,499 @@ +Parsed test spec with 3 sessions + +starting permutation: s2-add-fkey s1-insert-referenced s1-insert-referencing s1-begin s2-begin s2-move-placement-blocking s1-delete s2-commit s1-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-move-placement-blocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes'); + +master_move_shard_placement + + +step s1-delete: + DELETE FROM referenced_table WHERE id < 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport shardid success result + +57637 102159 t 4 +57637 102160 t 2 +id value + +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 + +starting permutation: s2-add-fkey s1-insert-referenced s1-insert-referencing s1-begin s2-begin s2-move-placement-blocking s1-update s2-commit s1-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-move-placement-blocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes'); + +master_move_shard_placement + + +step s1-update: + UPDATE referenced_table SET value = 5 WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport shardid success result + +57637 102162 t 7 +57637 102163 t 3 +id value + +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 + +starting permutation: s2-add-fkey s1-insert-referenced s1-insert-referencing s1-begin s2-begin s2-move-placement-blocking s1-ddl s2-commit s1-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-move-placement-blocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes'); + +master_move_shard_placement + + +step s1-ddl: + CREATE INDEX referenced_table_index ON referenced_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport shardid success result + +57637 102165 t 7 +57637 102166 t 3 +id value + +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 + +starting permutation: s2-add-fkey s1-insert-referenced s1-begin s2-begin s2-move-placement-blocking s1-insert-referencing s2-commit s1-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-move-placement-blocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes'); + +master_move_shard_placement + + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-commit: + COMMIT; + +step s1-insert-referencing: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport shardid success result + +57637 102168 t 7 +57637 102169 t 3 +id value + +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 + +starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s1-insert-referencing s2-begin s2-move-placement-nonblocking s1-delete s3-release-advisory-lock s2-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock + + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-begin: + BEGIN; + +step s2-move-placement-nonblocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement + + +step s1-delete: + DELETE FROM referenced_table WHERE id < 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock + +t +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport shardid success result + +57637 102171 t 4 +57637 102172 t 2 +id value + +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 + +starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s1-insert-referencing s2-begin s2-move-placement-nonblocking s1-update s3-release-advisory-lock s2-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock + + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-begin: + BEGIN; + +step s2-move-placement-nonblocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement + + +step s1-update: + UPDATE referenced_table SET value = 5 WHERE id = 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock + +t +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport shardid success result + +57637 102174 t 7 +57637 102175 t 3 +id value + +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 + +starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s1-insert-referencing s2-begin s2-move-placement-nonblocking s1-ddl s3-release-advisory-lock s2-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock + + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-begin: + BEGIN; + +step s2-move-placement-nonblocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement + + +step s1-ddl: + CREATE INDEX referenced_table_index ON referenced_table(id); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock + +t +step s2-commit: + COMMIT; + +step s1-ddl: <... completed> +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport shardid success result + +57637 102177 t 7 +57637 102178 t 3 +id value + +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 + +starting permutation: s2-add-fkey s3-acquire-advisory-lock s1-insert-referenced s2-begin s2-move-placement-nonblocking s1-insert-referencing s3-release-advisory-lock s2-commit s2-print-cluster +step s2-add-fkey: + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock + + +step s1-insert-referenced: + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s2-begin: + BEGIN; + +step s2-move-placement-nonblocking: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637); + +master_move_shard_placement + + +step s1-insert-referencing: + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock + +t +step s2-commit: + COMMIT; + +step s1-insert-referencing: <... 
completed> +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT * FROM referencing_table ORDER BY 1; + +nodeport shardid success result + +57637 102180 t 7 +57637 102181 t 3 +id value + +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 diff --git a/src/test/regress/expected/isolation_move_placement_vs_move_placement.out b/src/test/regress/expected/isolation_move_placement_vs_move_placement.out new file mode 100644 index 000000000..43ca63fcc --- /dev/null +++ b/src/test/regress/expected/isolation_move_placement_vs_move_placement.out @@ -0,0 +1,80 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-load-cache s2-begin s2-move-placement s1-move-placement s2-commit s2-print-placements +step s1-load-cache: + COPY test_move_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +step s2-commit: + COMMIT; + +step s1-move-placement: <... completed> +ERROR: source placement must be in active state +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57638| 2 +(1 row) + + +starting permutation: s2-begin s2-move-placement s1-move-placement s2-commit s2-print-placements +step s2-begin: + BEGIN; + +step s2-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s1-move-placement: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +step s2-commit: + COMMIT; + +step s1-move-placement: <... 
completed> +ERROR: source placement must be in active state +step s2-print-placements: + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; + +nodename |nodeport|count +--------------------------------------------------------------------- +localhost| 57638| 2 +(1 row) + diff --git a/src/test/regress/expected/isolation_multiuser_locking.out b/src/test/regress/expected/isolation_multiuser_locking.out index 30f474b84..943d579a4 100644 --- a/src/test/regress/expected/isolation_multiuser_locking.out +++ b/src/test/regress/expected/isolation_multiuser_locking.out @@ -1,6 +1,17 @@ Parsed test spec with 2 sessions -starting permutation: s1-begin s2-begin s2-reindex s1-insert s2-commit s1-commit +starting permutation: s1-no-connection-cache s2-no-connection-cache s1-begin s2-begin s2-reindex s1-insert s2-commit s1-commit +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s2-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + step s1-begin: BEGIN; SET ROLE test_user_1; @@ -23,23 +34,21 @@ step s1-commit: COMMIT; -starting permutation: s1-grant s1-begin s2-begin s2-reindex s1-insert s2-insert s2-commit s1-commit -step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; - SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); - -bool_and +starting permutation: s1-no-connection-cache s2-no-connection-cache s1-grant s1-begin s2-begin s2-reindex s1-insert s2-insert s2-commit s1-commit +create_distributed_table --------------------------------------------------------------------- -t + (1 row) -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,SET) -(localhost,57638,t,SET) -(2 rows) +step s1-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s2-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s1-grant: + SET ROLE test_user_1; + GRANT ALL ON test_table TO test_user_2; step s1-begin: BEGIN; @@ -67,23 +76,21 @@ step s1-commit: COMMIT; -starting permutation: s1-grant s1-begin s2-begin s1-reindex s2-insert s1-insert s1-commit s2-commit -step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; - SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); - -bool_and +starting permutation: s1-no-connection-cache s2-no-connection-cache s1-grant s1-begin s2-begin s1-reindex s2-insert s1-insert s1-commit s2-commit +create_distributed_table --------------------------------------------------------------------- -t + (1 row) -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,SET) -(localhost,57638,t,SET) -(2 rows) +step s1-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s2-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s1-grant: + SET ROLE test_user_1; + GRANT ALL ON 
test_table TO test_user_2; step s1-begin: BEGIN; @@ -110,7 +117,18 @@ step s2-commit: COMMIT; -starting permutation: s1-begin s2-begin s2-index s1-insert s2-commit s1-commit s2-drop-index +starting permutation: s1-no-connection-cache s2-no-connection-cache s1-begin s2-begin s2-index s1-insert s2-commit s1-commit s2-drop-index +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s2-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + step s1-begin: BEGIN; SET ROLE test_user_1; @@ -136,23 +154,21 @@ step s2-drop-index: DROP INDEX IF EXISTS test_index; -starting permutation: s1-grant s1-begin s2-begin s2-insert s1-index s2-insert s2-commit s1-commit s1-drop-index -step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; - SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); - -bool_and +starting permutation: s1-no-connection-cache s2-no-connection-cache s1-grant s1-begin s2-begin s2-insert s1-index s2-insert s2-commit s1-commit s1-drop-index +create_distributed_table --------------------------------------------------------------------- -t + (1 row) -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,SET) -(localhost,57638,t,SET) -(2 rows) +step s1-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s2-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s1-grant: + SET ROLE test_user_1; + GRANT ALL ON test_table TO test_user_2; step s1-begin: BEGIN; @@ -182,23 +198,21 @@ step s1-drop-index: DROP INDEX IF EXISTS test_index; -starting permutation: s1-grant s1-begin s2-begin s1-index s2-index s1-insert s1-commit s2-commit s1-drop-index s2-drop-index -step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; - SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); - -bool_and +starting permutation: s1-no-connection-cache s2-no-connection-cache s1-grant s1-begin s2-begin s1-index s2-index s1-insert s1-commit s2-commit s1-drop-index s2-drop-index +create_distributed_table --------------------------------------------------------------------- -t + (1 row) -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,SET) -(localhost,57638,t,SET) -(2 rows) +step s1-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s2-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s1-grant: + SET ROLE test_user_1; + GRANT ALL ON test_table TO test_user_2; step s1-begin: BEGIN; @@ -231,7 +245,18 @@ step s2-drop-index: DROP INDEX IF EXISTS test_index; -starting permutation: s1-begin s2-begin s2-truncate s1-insert s2-commit s1-commit +starting permutation: s1-no-connection-cache s2-no-connection-cache s1-begin s2-begin s2-truncate s1-insert s2-commit s1-commit +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step 
s2-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + step s1-begin: BEGIN; SET ROLE test_user_1; @@ -254,23 +279,21 @@ step s1-commit: COMMIT; -starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-insert s1-insert s1-commit s2-commit -step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; - SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); - -bool_and +starting permutation: s1-no-connection-cache s2-no-connection-cache s1-grant s1-begin s2-begin s1-truncate s2-insert s1-insert s1-commit s2-commit +create_distributed_table --------------------------------------------------------------------- -t + (1 row) -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,SET) -(localhost,57638,t,SET) -(2 rows) +step s1-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s2-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s1-grant: + SET ROLE test_user_1; + GRANT ALL ON test_table TO test_user_2; step s1-begin: BEGIN; @@ -297,23 +320,21 @@ step s2-commit: COMMIT; -starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit -step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; - SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); - -bool_and +starting permutation: s1-no-connection-cache s2-no-connection-cache s1-grant s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit +create_distributed_table --------------------------------------------------------------------- -t + (1 row) -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,SET) -(localhost,57638,t,SET) -(2 rows) +step s1-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s2-no-connection-cache: + SET citus.max_cached_conns_per_worker to 0; + +step s1-grant: + SET ROLE test_user_1; + GRANT ALL ON test_table TO test_user_2; step s1-begin: BEGIN; diff --git a/src/test/regress/expected/isolation_pg_send_cancellation.out b/src/test/regress/expected/isolation_pg_send_cancellation.out new file mode 100644 index 000000000..4b1475352 --- /dev/null +++ b/src/test/regress/expected/isolation_pg_send_cancellation.out @@ -0,0 +1,42 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-register s2-lock s1-lock s2-wrong-cancel-1 s2-wrong-cancel-2 s2-cancel +step s1-register: + INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key()); + +step s2-lock: + BEGIN; + LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE; + +step s1-lock: + BEGIN; + LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE; + END; + +step s2-wrong-cancel-1: + SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table; + +run_pg_send_cancellation +--------------------------------------------------------------------- + +(1 row) + +step s2-wrong-cancel-2: + SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table; + +run_pg_send_cancellation +--------------------------------------------------------------------- + +(1 row) + +step s2-cancel: + SELECT run_pg_send_cancellation(pid, 
cancel_key) FROM cancel_table; + END; + +run_pg_send_cancellation +--------------------------------------------------------------------- + +(1 row) + +step s1-lock: <... completed> +ERROR: canceling statement due to user request diff --git a/src/test/regress/expected/isolation_ref2ref_foreign_keys_enterprise.out b/src/test/regress/expected/isolation_ref2ref_foreign_keys_enterprise.out new file mode 100644 index 000000000..120cef697 --- /dev/null +++ b/src/test/regress/expected/isolation_ref2ref_foreign_keys_enterprise.out @@ -0,0 +1,442 @@ +Parsed test spec with 3 sessions + +starting permutation: s1-insert-table-1 s3-acquire-advisory-lock s2-begin s2-move-shards s1-delete-table-1 s3-release-advisory-lock s2-commit s1-select-table-1 s1-select-dist-table +step s1-insert-table-1: + INSERT INTO ref_table_2 VALUES (5); + INSERT INTO dist_table VALUES (5, 5); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-shards: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_dist_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +step s1-delete-table-1: + DELETE FROM ref_table_1 WHERE value = 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s2-move-shards: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s1-select-table-1: + SELECT * FROM ref_table_1 ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 7| 7 + 9| 9 +11| 11 +(5 rows) + +step s1-select-dist-table: + SELECT * FROM dist_table ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 7| 7 + 9| 9 +11| 11 +(5 rows) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert-table-1 s3-acquire-advisory-lock s2-begin s2-move-shards s1-update-table-1 s3-release-advisory-lock s2-commit s1-select-table-1 s1-select-dist-table +step s1-insert-table-1: + INSERT INTO ref_table_2 VALUES (5); + INSERT INTO dist_table VALUES (5, 5); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-shards: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_dist_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +step s1-update-table-1: + UPDATE ref_table_1 SET id = id + 1 WHERE value = 5; + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s2-move-shards: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s1-select-table-1: + SELECT * FROM ref_table_1 ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 6| 5 + 7| 7 + 9| 9 +11| 11 +(6 rows) + +step s1-select-dist-table: + SELECT * FROM dist_table ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 5| 6 + 7| 7 + 9| 9 +11| 11 +(6 rows) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s3-acquire-advisory-lock s2-begin s2-move-shards s1-insert-table-1 s3-release-advisory-lock s2-commit s1-select-table-1 s1-select-dist-table +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-shards: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_dist_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +step s1-insert-table-1: + INSERT INTO ref_table_2 VALUES (5); + INSERT INTO dist_table VALUES (5, 5); + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s2-move-shards: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s1-select-table-1: + SELECT * FROM ref_table_1 ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 5| 5 + 7| 7 + 9| 9 +11| 11 +(6 rows) + +step s1-select-dist-table: + SELECT * FROM dist_table ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 5| 5 + 7| 7 + 9| 9 +11| 11 +(6 rows) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert-table-1 s3-acquire-advisory-lock s2-begin s2-move-shards s1-select-table-1 s3-release-advisory-lock s2-commit +step s1-insert-table-1: + INSERT INTO ref_table_2 VALUES (5); + INSERT INTO dist_table VALUES (5, 5); + +step s3-acquire-advisory-lock: + SELECT pg_advisory_lock(44000, 55152); + +pg_advisory_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-begin: + BEGIN; + +step s2-move-shards: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_dist_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +step s1-select-table-1: + SELECT * FROM ref_table_1 ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 5| 5 + 7| 7 + 9| 9 +11| 11 +(6 rows) + +step s3-release-advisory-lock: + SELECT pg_advisory_unlock(44000, 55152); + +pg_advisory_unlock +--------------------------------------------------------------------- +t +(1 row) + +step s2-move-shards: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert-table-1 s2-begin s1-begin s1-delete-table-1 s2-move-shards s1-commit s2-commit s1-select-table-1 s1-select-dist-table +step s1-insert-table-1: + INSERT INTO ref_table_2 VALUES (5); + INSERT INTO dist_table VALUES (5, 5); + +step s2-begin: + BEGIN; + +step s1-begin: + BEGIN; + +step s1-delete-table-1: + DELETE FROM ref_table_1 WHERE value = 5; + +step s2-move-shards: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_dist_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +step s1-commit: + COMMIT; + +step s2-move-shards: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s1-select-table-1: + SELECT * FROM ref_table_1 ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 7| 7 + 9| 9 +11| 11 +(5 rows) + +step s1-select-dist-table: + SELECT * FROM dist_table ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 7| 7 + 9| 9 +11| 11 +(5 rows) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-insert-table-1 s2-begin s1-begin s1-update-table-1 s2-move-shards s1-commit s2-commit s1-select-table-1 s1-select-dist-table +step s1-insert-table-1: + INSERT INTO ref_table_2 VALUES (5); + INSERT INTO dist_table VALUES (5, 5); + +step s2-begin: + BEGIN; + +step s1-begin: + BEGIN; + +step s1-update-table-1: + UPDATE ref_table_1 SET id = id + 1 WHERE value = 5; + +step s2-move-shards: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_dist_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +step s1-commit: + COMMIT; + +step s2-move-shards: <... completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s1-select-table-1: + SELECT * FROM ref_table_1 ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 6| 5 + 7| 7 + 9| 9 +11| 11 +(6 rows) + +step s1-select-dist-table: + SELECT * FROM dist_table ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 5| 6 + 7| 7 + 9| 9 +11| 11 +(6 rows) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s2-begin s1-begin s1-insert-table-1 s2-move-shards s1-commit s2-commit s1-select-table-1 s1-select-dist-table +step s2-begin: + BEGIN; + +step s1-begin: + BEGIN; + +step s1-insert-table-1: + INSERT INTO ref_table_2 VALUES (5); + INSERT INTO dist_table VALUES (5, 5); + +step s2-move-shards: + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_dist_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +step s1-commit: + COMMIT; + +step s2-move-shards: <... 
completed> +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: + COMMIT; + +step s1-select-table-1: + SELECT * FROM ref_table_1 ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 5| 5 + 7| 7 + 9| 9 +11| 11 +(6 rows) + +step s1-select-dist-table: + SELECT * FROM dist_table ORDER BY id, value; + +id|value +--------------------------------------------------------------------- + 1| 1 + 3| 3 + 5| 5 + 7| 7 + 9| 9 +11| 11 +(6 rows) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_shard_move_vs_start_metadata_sync.out b/src/test/regress/expected/isolation_shard_move_vs_start_metadata_sync.out new file mode 100644 index 000000000..61925557c --- /dev/null +++ b/src/test/regress/expected/isolation_shard_move_vs_start_metadata_sync.out @@ -0,0 +1,107 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-begin s1-move-shard-force-logical s2-start-metadata-sync s1-commit s1-count-shards-by-worker +step s1-begin: + BEGIN; + +step s1-move-shard-force-logical: + SELECT * FROM master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, 'force_logical'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-start-metadata-sync: + SELECT * FROM start_metadata_sync_to_node('localhost', 57637); + +step s1-commit: + COMMIT; + +step s2-start-metadata-sync: <... completed> +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-count-shards-by-worker: + SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=1 AND shardstate != 4; + SELECT * FROM run_command_on_workers('SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=1') ORDER BY 1, 2, 3, 4; + SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=2 AND shardstate != 4; + SELECT * FROM run_command_on_workers('SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=2') ORDER BY 1, 2, 3, 4; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +nodename |nodeport|success|result +--------------------------------------------------------------------- +localhost| 57637|t | 1 +localhost| 57638|t | 1 +(2 rows) + +count +--------------------------------------------------------------------- + 3 +(1 row) + +nodename |nodeport|success|result +--------------------------------------------------------------------- +localhost| 57637|t | 3 +localhost| 57638|t | 3 +(2 rows) + + +starting permutation: s1-begin s1-move-shard-block-writes s2-start-metadata-sync s1-commit s1-count-shards-by-worker +step s1-begin: + BEGIN; + +step s1-move-shard-block-writes: + SELECT * FROM master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, 'block_writes'); + +master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step s2-start-metadata-sync: + SELECT * FROM start_metadata_sync_to_node('localhost', 57637); + +step s1-commit: + COMMIT; + +step s2-start-metadata-sync: <... 
completed> +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-count-shards-by-worker: + SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=1 AND shardstate != 4; + SELECT * FROM run_command_on_workers('SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=1') ORDER BY 1, 2, 3, 4; + SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=2 AND shardstate != 4; + SELECT * FROM run_command_on_workers('SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=2') ORDER BY 1, 2, 3, 4; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +nodename |nodeport|success|result +--------------------------------------------------------------------- +localhost| 57637|t | 1 +localhost| 57638|t | 1 +(2 rows) + +count +--------------------------------------------------------------------- + 3 +(1 row) + +nodename |nodeport|success|result +--------------------------------------------------------------------- +localhost| 57637|t | 3 +localhost| 57638|t | 3 +(2 rows) + diff --git a/src/test/regress/expected/isolation_tenant_isolation.out b/src/test/regress/expected/isolation_tenant_isolation.out new file mode 100644 index 000000000..abaf61990 --- /dev/null +++ b/src/test/regress/expected/isolation_tenant_isolation.out @@ -0,0 +1,849 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-isolate-tenant s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + TRUNCATE isolation_table; + +step s1-insert: + INSERT INTO isolation_table VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500004 +(1 row) + +step s1-update: + UPDATE isolation_table SET value = 5 WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500003|t | 0 + 57637|1500004|t | 1 + 57637|1500005|t | 0 + 57638|1500002|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- + 5| 10 +(1 row) + + +starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-isolate-tenant s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + TRUNCATE isolation_table; + +step s1-insert: + INSERT INTO isolation_table VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500010 +(1 row) + +step s1-delete: + DELETE FROM isolation_table WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500009|t | 0 + 57637|1500010|t | 1 + 57637|1500011|t | 0 + 57638|1500008|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- + 5| 10 +(1 row) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-isolate-tenant s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + TRUNCATE isolation_table; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500016 +(1 row) + +step s1-insert: + INSERT INTO isolation_table VALUES (5, 10); + +step s2-commit: + COMMIT; + +step s1-insert: <... 
completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500015|t | 0 + 57637|1500016|t | 0 + 57637|1500017|t | 0 + 57638|1500014|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-isolate-tenant s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + TRUNCATE isolation_table; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500022 +(1 row) + +step s1-copy: + COPY isolation_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500021|t | 0 + 57637|1500022|t | 0 + 57637|1500023|t | 0 + 57638|1500020|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-isolate-tenant s1-ddl s2-commit s1-commit s2-print-cluster s2-print-index-count +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + TRUNCATE isolation_table; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500028 +(1 row) + +step s1-ddl: + CREATE INDEX test_table_index ON isolation_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500027|t | 0 + 57637|1500028|t | 0 + 57637|1500029|t | 0 + 57638|1500026|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + +step s2-print-index-count: + SELECT + nodeport, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 1 + 57637|t | 1 + 57637|t | 1 + 57638|t | 1 +(4 rows) + + +starting permutation: s1-insert s1-begin s1-select s2-begin s2-isolate-tenant s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + INSERT INTO isolation_table VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500034 +(1 row) + +step s1-update: + UPDATE isolation_table SET value = 5 WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500033|t | 0 + 57637|1500034|t | 1 + 57637|1500035|t | 0 + 57638|1500032|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- + 5| 10 +(1 row) + + +starting permutation: s1-insert s1-begin s1-select s2-begin s2-isolate-tenant s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + INSERT INTO isolation_table VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500040 +(1 row) + +step s1-delete: + DELETE FROM isolation_table WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500039|t | 0 + 57637|1500040|t | 1 + 57637|1500041|t | 0 + 57638|1500038|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- + 5| 10 +(1 row) + + +starting permutation: s1-begin s1-select s2-begin s2-isolate-tenant s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500046 +(1 row) + +step s1-insert: + INSERT INTO isolation_table VALUES (5, 10); + +step s2-commit: + COMMIT; + +step s1-insert: <... 
completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500045|t | 0 + 57637|1500046|t | 0 + 57637|1500047|t | 0 + 57638|1500044|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-begin s1-select s2-begin s2-isolate-tenant s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500052 +(1 row) + +step s1-copy: + COPY isolation_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500051|t | 0 + 57637|1500052|t | 0 + 57637|1500053|t | 0 + 57638|1500050|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-begin s1-select s2-begin s2-isolate-tenant s1-ddl s2-commit s1-commit s2-print-cluster s2-print-index-count +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM isolation_table WHERE id = 5; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500058 +(1 row) + +step s1-ddl: + CREATE INDEX test_table_index ON isolation_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500057|t | 0 + 57637|1500058|t | 0 + 57637|1500059|t | 0 + 57638|1500056|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + +step s2-print-index-count: + SELECT + nodeport, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 1 + 57637|t | 1 + 57637|t | 1 + 57638|t | 1 +(4 rows) + + +starting permutation: s1-load-cache s1-insert s1-begin s1-isolate-tenant s2-isolate-tenant s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + TRUNCATE isolation_table; + +step s1-insert: + INSERT INTO isolation_table VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 2); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500064 +(1 row) + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +step s1-commit: + COMMIT; + +step s2-isolate-tenant: <... completed> +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500067 +(1 row) + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500066|t | 0 + 57637|1500067|t | 1 + 57637|1500068|t | 0 + 57638|1500063|t | 0 + 57638|1500064|t | 0 + 57638|1500065|t | 0 +(6 rows) + +id|value +--------------------------------------------------------------------- + 5| 10 +(1 row) + + +starting permutation: s1-insert s1-begin s1-isolate-tenant s2-isolate-tenant s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + INSERT INTO isolation_table VALUES (5, 10); + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 2); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500073 +(1 row) + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +step s1-commit: + COMMIT; + +step s2-isolate-tenant: <... 
completed> +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1500076 +(1 row) + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500075|t | 0 + 57637|1500076|t | 1 + 57637|1500077|t | 0 + 57638|1500072|t | 0 + 57638|1500073|t | 0 + 57638|1500074|t | 0 +(6 rows) + +id|value +--------------------------------------------------------------------- + 5| 10 +(1 row) + diff --git a/src/test/regress/expected/isolation_tenant_isolation_with_fkey_to_reference.out b/src/test/regress/expected/isolation_tenant_isolation_with_fkey_to_reference.out new file mode 100644 index 000000000..14ff5ca51 --- /dev/null +++ b/src/test/regress/expected/isolation_tenant_isolation_with_fkey_to_reference.out @@ -0,0 +1,286 @@ +Parsed test spec with 2 sessions + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1300005 +(1 row) + +step s1-delete: + DELETE FROM reference_table WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1300004|t | 0 + 57637|1300005|t | 0 + 57637|1300006|t | 0 + 57638|1300003|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1300012 +(1 row) + +step s1-update: + UPDATE reference_table SET value = 5 WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1300011|t | 0 + 57637|1300012|t | 0 + 57637|1300013|t | 0 + 57638|1300010|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1300019 +(1 row) + +step s1-insert: + INSERT INTO reference_table VALUES (5, 10); + +step s2-commit: + COMMIT; + +step s1-insert: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1300018|t | 0 + 57637|1300019|t | 0 + 57637|1300020|t | 0 + 57638|1300017|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1300026 +(1 row) + +step s1-copy: + COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1300025|t | 0 + 57637|1300026|t | 0 + 57637|1300027|t | 0 + 57638|1300024|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-ddl s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1300033 +(1 row) + +step s1-ddl: + CREATE INDEX reference_table_index ON reference_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1300032|t | 0 + 57637|1300033|t | 0 + 57637|1300034|t | 0 + 57638|1300031|t | 0 +(4 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + diff --git a/src/test/regress/expected/isolation_tenant_isolation_with_fkey_to_reference_0.out b/src/test/regress/expected/isolation_tenant_isolation_with_fkey_to_reference_0.out new file mode 100644 index 000000000..6a3b55e24 --- /dev/null +++ b/src/test/regress/expected/isolation_tenant_isolation_with_fkey_to_reference_0.out @@ -0,0 +1,251 @@ +Parsed test spec with 2 sessions + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table + + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard + +102258 +step s1-delete: + DELETE FROM reference_table WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport shardid success result + +57637 102257 t 0 +57637 102258 t 0 +57637 102259 t 0 +57638 102256 t 0 +id value + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table + + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard + +102264 +step s1-update: + UPDATE reference_table SET value = 5 WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport shardid success result + +57637 102263 t 0 +57637 102264 t 0 +57637 102265 t 0 +57638 102262 t 0 +id value + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table + + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard + +102270 +step s1-insert: + INSERT INTO reference_table VALUES (5, 10); + +step s2-commit: + COMMIT; + +step s1-insert: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport shardid success result + +57637 102269 t 0 +57637 102270 t 0 +57637 102271 t 0 +57638 102268 t 0 +id value + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table + + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard + +102276 +step s1-copy: + COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport shardid success result + +57637 102275 t 0 +57637 102276 t 0 +57637 102277 t 0 +57638 102274 t 0 +id value + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-isolate-tenant s1-ddl s2-commit s1-commit s2-print-cluster +create_distributed_table + + +step s2-add-fkey: + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-isolate-tenant: + SELECT isolate_tenant_to_new_shard('isolation_table', 5); + +isolate_tenant_to_new_shard + +102282 +step s1-ddl: + CREATE INDEX reference_table_index ON reference_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; + +nodeport shardid success result + +57637 102281 t 0 +57637 102282 t 0 +57637 102283 t 0 +57638 102280 t 0 +id value + diff --git a/src/test/regress/expected/logical_replication.out b/src/test/regress/expected/logical_replication.out new file mode 100644 index 000000000..6440c172b --- /dev/null +++ b/src/test/regress/expected/logical_replication.out @@ -0,0 +1,140 @@ +SET citus.next_shard_id TO 6830000; +CREATE SCHEMA logical_replication; +SET search_path TO logical_replication; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +CREATE TABLE dist ( + id bigserial PRIMARY KEY +); +SELECT oid AS postgres_oid FROM pg_roles where rolname = 'postgres' \gset +SELECT create_distributed_table('dist', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT 1 from citus_add_node('localhost', :master_port, groupId := 0); +NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_1_port +SET search_path TO logical_replication; +CREATE PUBLICATION citus_shard_move_publication_:postgres_oid FOR TABLE dist_6830000; +\c - - - :master_port +SET search_path TO logical_replication; +\set connection_string '\'user=postgres host=localhost port=' :worker_1_port '\'' +CREATE SUBSCRIPTION citus_shard_move_subscription_:postgres_oid CONNECTION :connection_string PUBLICATION citus_shard_move_publication_:postgres_oid; +NOTICE: created replication slot "citus_shard_move_subscription_10" on publisher +SELECT count(*) from pg_subscription; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) from pg_publication; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) from pg_replication_slots; + count +--------------------------------------------------------------------- + 0 +(1 row) + +\c - - - :worker_1_port +SELECT count(*) from pg_subscription; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) from pg_publication; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) from pg_replication_slots; + count +--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :master_port +select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +SELECT citus_remove_node('localhost', :master_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) from pg_subscription; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) from pg_publication; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) from pg_replication_slots; + count +--------------------------------------------------------------------- + 0 +(1 row) + +\c - - - :worker_1_port +SELECT count(*) from pg_subscription; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) from pg_publication; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) from pg_replication_slots; + count +--------------------------------------------------------------------- + 0 +(1 row) + +\c - - - :worker_2_port +SELECT count(*) from pg_subscription; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) from pg_publication; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) from pg_replication_slots; + count +--------------------------------------------------------------------- + 0 +(1 row) + +\c - - - :master_port +SET search_path TO public; +DROP SCHEMA logical_replication CASCADE; +NOTICE: drop cascades to table logical_replication.dist diff --git a/src/test/regress/expected/master_copy_shard_placement.out b/src/test/regress/expected/master_copy_shard_placement.out index 35ca01498..6d7fd4a69 100644 --- a/src/test/regress/expected/master_copy_shard_placement.out +++ 
b/src/test/regress/expected/master_copy_shard_placement.out @@ -112,12 +112,14 @@ SET citus.shard_replication_factor TO 1; -- metadata sync will succeed even if we have rep > 1 tables INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles_single_shard'::regclass::oid, 0); +SET client_min_messages TO warning; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) +RESET client_min_messages; CREATE TABLE mx_table(a int); SELECT create_distributed_table('mx_table', 'a'); create_distributed_table diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 2fd4707b2..a620758c0 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -5,13 +5,6 @@ SET citus.next_placement_id TO 1500000; -- supress notice messages to make sure that the tests -- do not diverge with enterprise SET client_min_messages TO WARNING; -SELECT run_command_on_workers($$CREATE ROLE metadata_sync_helper_role WITH LOGIN;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - CREATE ROLE metadata_sync_helper_role WITH LOGIN; GRANT ALL ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; RESET client_min_messages; diff --git a/src/test/regress/expected/multi_alter_table_row_level_security.out b/src/test/regress/expected/multi_alter_table_row_level_security.out new file mode 100644 index 000000000..962b037fc --- /dev/null +++ b/src/test/regress/expected/multi_alter_table_row_level_security.out @@ -0,0 +1,633 @@ +-- +-- MULTI_ALTER_TABLE_ROW_LEVEL_SECURITY +-- +-- Test checks whether row level security can be enabled via +-- ALTER TABLE ... ENABLE | DISABLE ROW LEVEL SECURITY +SET citus.next_shard_id TO 1800000; +SET citus.shard_replication_factor TO 1; +-- Setup user accounts and schema for tests. +CREATE USER rls_table_owner; +CREATE USER rls_tenant_1; +CREATE USER rls_tenant_2; +SET client_min_messages TO WARNING; +CREATE SCHEMA alter_table_rls; +SET search_path TO alter_table_rls; +ALTER ROLE rls_table_owner SET search_path TO alter_table_rls; +ALTER ROLE rls_tenant_1 SET search_path TO alter_table_rls; +ALTER ROLE rls_tenant_2 SET search_path TO alter_table_rls; +GRANT USAGE ON SCHEMA alter_table_rls TO rls_table_owner; +GRANT USAGE ON SCHEMA alter_table_rls TO rls_tenant_1; +GRANT USAGE ON SCHEMA alter_table_rls TO rls_tenant_2; +-- +-- The first phase tests enabling Row Level Security only after the table has been +-- turned into a distributed table. +-- +-- This demonstrates that enabling Row Level Security on a distributed table correctly +-- enables Row Level Security on all shards that were in the system. +-- +CREATE TABLE events ( + tenant_id int, + id int, + type text +); +SELECT create_distributed_table('events','tenant_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- running ALTER TABLE ... OWNER TO ... only after distribution, otherwise ownership +-- information is lost. 
+ALTER TABLE events OWNER TO rls_table_owner; +INSERT INTO events VALUES (1,1,'push'); +INSERT INTO events VALUES (2,2,'push'); +-- grant access for tenants to table and shards +GRANT SELECT ON TABLE events TO rls_tenant_1; +GRANT SELECT ON TABLE events TO rls_tenant_2; +-- Base line test to verify all rows are visible +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Enable row level security +ALTER TABLE events ENABLE ROW LEVEL SECURITY; +-- Switch user to owner, all rows should be visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Switch user that has been granted rights, should not be able to see any rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- +(0 rows) + +RESET ROLE; +-- Create policy for tenants to read access their own rows +CREATE POLICY user_mod ON events FOR SELECT TO rls_tenant_1, rls_tenant_2 USING (current_user = 'rls_tenant_' || tenant_id::text); +-- Switch user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push +(1 row) + +RESET ROLE; +-- Switch other user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 2 | 2 | push +(1 row) + +RESET ROLE; +-- We will test both renaming and deletion of Policies by renaming the user_mod policy +-- and drop the renamed version. 
If tenants cannot see rows afterwards, both the RENAME and +-- the DROP have worked correctly +ALTER POLICY user_mod ON events RENAME TO user_mod_renamed; +DROP POLICY user_mod_renamed ON events; +-- Switch to tenant user, should not see any rows after the above DDLs +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- +(0 rows) + +RESET ROLE; +-- Test Force Row Level Security, should also apply RLS to table owner +ALTER TABLE events FORCE ROW LEVEL SECURITY; +-- Verify all rows are still visible for admin +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +-- Switch user to owner, no rows should be visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- +(0 rows) + +RESET ROLE; +-- Allow admins to read all rows +CREATE POLICY owner_mod ON events TO rls_table_owner USING (true) WITH CHECK (true); +-- Verify all rows are visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +-- Verify the owner can insert a record +INSERT INTO events VALUES (3,3,'push'); +-- See that it is in the table +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push + 3 | 3 | push +(3 rows) + +-- Verify the owner can delete a record +DELETE FROM events WHERE tenant_id = 3; +-- Verify the record is gone +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Alter the policy and verify no rows are visible for admin +ALTER POLICY owner_mod ON events USING (false); +-- Verify no rows are visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- +(0 rows) + +RESET ROLE; +-- Test No Force Row Level Security, owner will not go through RLS anymore +ALTER TABLE events NO FORCE ROW LEVEL SECURITY; +-- Verify all rows are visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Disable row level security +ALTER TABLE events DISABLE ROW LEVEL SECURITY; +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Clean up test +DROP TABLE events; +-- +-- The second phase tests enabling Row Level Security before the table is turned into +-- a distributed table. +-- +-- This demonstrates that tables having Row Level Security enabled before they are +-- distributed correctly have Row Level Security enabled on the shards after +-- distributing.
+-- +CREATE TABLE events ( + tenant_id int, + id int, + type text +); +INSERT INTO events VALUES (1,1,'push'); +INSERT INTO events VALUES (2,2,'push'); +-- grant access for tenants to table +GRANT SELECT ON TABLE events TO rls_tenant_1; +GRANT SELECT ON TABLE events TO rls_tenant_2; +-- Base line test to verify all rows are visible +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Enable row level security +ALTER TABLE events ENABLE ROW LEVEL SECURITY; +ALTER TABLE events FORCE ROW LEVEL SECURITY; +-- create all types of policies before distribution +CREATE POLICY owner_read ON events FOR SELECT TO rls_table_owner USING (true); +CREATE POLICY owner_insert ON events FOR INSERT TO rls_table_owner WITH CHECK (false); +CREATE POLICY owner_delete ON events FOR DELETE TO rls_table_owner USING (false); +CREATE POLICY owner_update ON events FOR UPDATE TO rls_table_owner WITH CHECK (false); +-- Distribute table +SELECT create_distributed_table('events','tenant_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- running ALTER TABLE ... OWNER TO ... only after distribution, otherwise ownership +-- information is lost. +ALTER TABLE events OWNER TO rls_table_owner; +SET ROLE rls_table_owner; +-- Verify owner can see all rows +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +-- Verify owner cannot insert anything +INSERT INTO events VALUES (3,3,'push'); +ERROR: new row violates row-level security policy for table "events_1800005" +CONTEXT: while executing command on localhost:xxxxx +-- Verify owner cannot delete anything +DELETE FROM events WHERE tenant_id = 1; +-- Verify owner cannot update anything +UPDATE events SET id = 10 WHERE tenant_id = 2; +-- Double check the table content +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Switch user that has been granted rights, should not be able to see any rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- +(0 rows) + +RESET ROLE; +-- Create policy for tenants to read access their own rows +CREATE POLICY user_mod ON events TO PUBLIC USING (current_user = 'rls_tenant_' || tenant_id::text) WITH CHECK (false); +-- Switch user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push +(1 row) + +RESET ROLE; +-- Switch other user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 2 | 2 | push +(1 row) + +RESET ROLE; +ALTER POLICY user_mod ON events TO rls_tenant_1; +-- Switch user that has been allowed, should
be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push +(1 row) + +RESET ROLE; +-- Switch other user that has been disallowed +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- +(0 rows) + +RESET ROLE; +ALTER POLICY user_mod ON events TO rls_tenant_1, rls_tenant_2; +-- Switch user that stayed allowed, should be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push +(1 row) + +RESET ROLE; +-- Switch other user that got allowed +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 2 | 2 | push +(1 row) + +RESET ROLE; +-- Set RLS to NO FORCE +ALTER TABLE events NO FORCE ROW LEVEL SECURITY; +-- Switch to owner to verify all rows are visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Disable row level security +ALTER TABLE events DISABLE ROW LEVEL SECURITY; +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Clean up test +DROP TABLE events; +-- +-- The third phase creates policies before distributing the tables. Only after +-- distribution will RLS be enabled. +-- +-- This test demonstrates that all RLS policy/FORCE settings are configured the same way even +-- if RLS is not enabled at the time of distribution. +-- +CREATE TABLE events ( + tenant_id int, + id int, + type text +); +INSERT INTO events VALUES + (1,1,'push'), + (2,2,'push'); +-- grant access for tenants to table +GRANT ALL ON TABLE events TO rls_tenant_1; +GRANT ALL ON TABLE events TO rls_tenant_2; +-- Base line test to verify all rows are visible +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Configure FORCE and some policies before distribution +ALTER TABLE events FORCE ROW LEVEL SECURITY; +CREATE POLICY user_mod ON events TO rls_tenant_1, rls_tenant_2 + USING (current_user = 'rls_tenant_' || tenant_id::text) + WITH CHECK (current_user = 'rls_tenant_' || tenant_id::text AND id = 2); +-- Distribute table +SELECT create_distributed_table('events','tenant_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ALTER TABLE events ENABLE ROW LEVEL SECURITY; +-- running ALTER TABLE ... OWNER TO ... only after distribution, otherwise ownership +-- information is lost.
+ALTER TABLE events OWNER TO rls_table_owner; +-- Verify owner cannot see any rows due to FORCE RLS +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- +(0 rows) + +RESET ROLE; +-- owner_mod will only be created after distribution because it can test if FORCE has been +-- propagated during distribution +CREATE POLICY owner_mod ON events TO rls_table_owner USING (true); +-- Verify owner can now see rows +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | push +(2 rows) + +RESET ROLE; +-- Switch user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push +(1 row) + +RESET ROLE; +-- Switch other user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 2 | 2 | push +(1 row) + +RESET ROLE; +-- We will test 2 different updates to the database. +-- tenant 1 should not be able to update its record because its id is not set to 2 as +-- required by the WITH CHECK clause; tenant 2 should be able to update its record. +SET ROLE rls_tenant_1; +UPDATE events SET type = 'pull'; +ERROR: new row violates row-level security policy for table "events_1800008" +CONTEXT: while executing command on localhost:xxxxx +RESET ROLE; +SET ROLE rls_tenant_2; +UPDATE events SET type = 'pull'; +RESET ROLE; +-- only items for tenant 2 should read pull in the result.
+SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | push + 2 | 2 | pull +(2 rows) + +-- allow all users to update their own values +ALTER POLICY user_mod ON events WITH CHECK (true); +SET ROLE rls_tenant_1; +UPDATE events SET type = 'pull tenant 1'; +RESET ROLE; +SET ROLE rls_tenant_2; +UPDATE events SET type = 'pull tenant 2'; +RESET ROLE; +-- records should read their own tenant pull items +SELECT * FROM events ORDER BY 1; + tenant_id | id | type +--------------------------------------------------------------------- + 1 | 1 | pull tenant 1 + 2 | 2 | pull tenant 2 +(2 rows) + +-- Clean up test +DROP TABLE events; +-- +-- The fourth phase tests some disallowed policies for distributed tables, +-- loosely based on the example at https://www.postgresql.org/docs/9.5/static/ddl-rowsecurity.html +-- +CREATE TABLE groups ( + group_id int PRIMARY KEY, + group_name text NOT NULL +); +INSERT INTO groups VALUES + (1, 'low'), + (2, 'medium'), + (5, 'high'); +-- definition of users' privilege levels +CREATE TABLE users ( + user_name text PRIMARY KEY, + group_id int NOT NULL +); +INSERT INTO users VALUES + ('alice', 5), + ('bob', 2), + ('mallory', 2); +-- table holding the information to be protected +CREATE TABLE information ( + info text, + group_id int NOT NULL +); +INSERT INTO information VALUES + ('barely secret', 1), + ('slightly secret', 2), + ('very secret', 5); +ALTER TABLE information ENABLE ROW LEVEL SECURITY; +-- this policy is disallowed because it has a subquery in it +CREATE POLICY fp_s ON information FOR SELECT + USING (group_id <= (SELECT group_id FROM users WHERE user_name = current_user)); +-- this attempt at distribution fails because the table has a disallowed expression +SELECT create_distributed_table('information', 'group_id'); +ERROR: cannot create policy +-- DROP the expression so we can distribute the table +DROP POLICY fp_s ON information; +SELECT create_distributed_table('information', 'group_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Try to create the expression on a distributed table; this should also fail +CREATE POLICY fp_s ON information FOR SELECT + USING (group_id <= (SELECT group_id FROM users WHERE user_name = current_user)); +ERROR: cannot create policy +-- Clean up test +DROP TABLE information, groups, users; +SET citus.next_shard_id TO 1810000; +CREATE TABLE test(x int, y int); +SELECT create_distributed_table('test','x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +GRANT SELECT ON TABLE test TO rls_tenant_2; +ALTER TABLE test ENABLE ROW LEVEL SECURITY; +CREATE POLICY id_2_only +ON test +FOR SELECT TO rls_tenant_2 +USING (x = 2); +INSERT INTO test SELECT i,i FROM generate_series(0,100)i; +SELECT count(*) FROM test ; + count +--------------------------------------------------------------------- + 101 +(1 row) + +SET ROLE rls_tenant_2; +SELECT count(*) FROM test ; + count +--------------------------------------------------------------------- + 1 +(1 row) + +RESET ROLE; +SELECT master_move_shard_placement(get_shard_id_for_distribution_column('test', 2), + 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode:='block_writes'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +SET ROLE rls_tenant_2; +SELECT count(*) FROM test ; + count
+--------------------------------------------------------------------- + 1 +(1 row) + +RESET ROLE; +-- Show that having nondistributed table via policy is checked +BEGIN; + CREATE TABLE table_1_check_policy ( + tenant_id int, + id int + ); + CREATE TABLE table_2_check_policy ( + tenant_id int, + id int + ); + ALTER TABLE table_1_check_policy ENABLE ROW LEVEL SECURITY; + ALTER TABLE table_1_check_policy FORCE ROW LEVEL SECURITY; + CREATE OR REPLACE FUNCTION func_in_transaction(param_1 int, param_2 table_2_check_policy) + RETURNS boolean + LANGUAGE plpgsql AS + $$ + BEGIN + return param_1 > 5; + END; + $$; +WARNING: "function func_in_transaction(integer,table_2_check_policy)" has dependency to "table table_2_check_policy" that is not in Citus' metadata +DETAIL: "function func_in_transaction(integer,table_2_check_policy)" will be created only locally +HINT: Distribute "table table_2_check_policy" first to distribute "function func_in_transaction(integer,table_2_check_policy)" + CREATE POLICY owner_read ON table_1_check_policy FOR SELECT USING (func_in_transaction(id, NULL::table_2_check_policy)); + CREATE POLICY owner_insert ON table_1_check_policy FOR INSERT WITH CHECK (func_in_transaction(id, NULL::table_2_check_policy)); + -- It should error out + SELECT create_distributed_table('table_1_check_policy', 'tenant_id'); +ERROR: "table table_1_check_policy" has dependency to "table table_2_check_policy" that is not in Citus' metadata +HINT: Distribute "table table_2_check_policy" first to distribute "table table_1_check_policy" +ROLLBACK; +-- Clean up test suite +DROP SCHEMA alter_table_rls CASCADE; +DROP USER rls_table_owner; +DROP USER rls_tenant_1; +DROP USER rls_tenant_2; diff --git a/src/test/regress/expected/multi_alter_table_row_level_security_escape.out b/src/test/regress/expected/multi_alter_table_row_level_security_escape.out new file mode 100644 index 000000000..5738d2209 --- /dev/null +++ b/src/test/regress/expected/multi_alter_table_row_level_security_escape.out @@ -0,0 +1,21 @@ +-- +-- MULTI_ALTER_TABLE_ROW_LEVEL_SECURITY_ESCAPE +-- +-- Test set that checks all row level security commands for +-- accepting identifiers that require escaping +SET citus.next_shard_id TO 1900000; +CREATE SCHEMA alter_table_rls_quote; +SET search_path TO alter_table_rls_quote; +CREATE TABLE "t1""" (id int, name text); +CREATE POLICY "policy1""" ON "t1""" USING (true); +SELECT create_distributed_table('t1"', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ALTER POLICY "policy1""" ON "t1""" RENAME TO "policy2"""; +ALTER POLICY "policy2""" ON "t1""" USING (false); +DROP POLICY "policy2""" ON "t1"""; +DROP SCHEMA alter_table_rls_quote CASCADE; +NOTICE: drop cascades to table "t1""" diff --git a/src/test/regress/expected/multi_colocated_shard_rebalance.out b/src/test/regress/expected/multi_colocated_shard_rebalance.out index 94097218b..7484dc702 100644 --- a/src/test/regress/expected/multi_colocated_shard_rebalance.out +++ b/src/test/regress/expected/multi_colocated_shard_rebalance.out @@ -48,7 +48,7 @@ SELECT master_create_empty_shard('table6_append'); -- Mark tables as non-mx tables, in order to be able to test master_copy_shard_placement UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN - ('table1_group1'::regclass, 'table2_group1'::regclass, 'table5_groupX'::regclass); + ('table1_group1'::regclass, 'table2_group1'::regclass, 'table5_groupX'::regclass); -- test copy -- test copying colocated shards -- status before shard 
copy @@ -76,8 +76,13 @@ ORDER BY s.shardid, sp.nodeport; 13000011 | table2_group1 | 57638 (12 rows) --- copy colocated shards +-- try to copy colocated shards without a replica identity SELECT master_copy_shard_placement(13000000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false); +ERROR: cannot use logical replication to transfer shards of the relation table2_group1 since it doesn't have a REPLICA IDENTITY or PRIMARY KEY +DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY. +HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'. +-- copy colocated shards +SELECT master_copy_shard_placement(13000000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false, 'force_logical'); master_copy_shard_placement --------------------------------------------------------------------- @@ -127,7 +132,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table2_ \c - - - :master_port -- copy colocated shards again to see error message SELECT master_copy_shard_placement(13000000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false, 'force_logical'); -ERROR: the force_logical transfer mode is currently unsupported +ERROR: shard xxxxx already exists in the target node -- test copying NOT colocated shard -- status before shard copy SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport @@ -151,7 +156,7 @@ ORDER BY s.shardid, sp.nodeport; (8 rows) -- copy NOT colocated shard -SELECT master_copy_shard_placement(13000012, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false); +SELECT master_copy_shard_placement(13000012, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false, 'force_logical'); master_copy_shard_placement --------------------------------------------------------------------- @@ -197,7 +202,11 @@ ORDER BY s.shardid, sp.nodeport; -- copy shard in append distributed table SELECT master_copy_shard_placement(13000020, 'localhost', :worker_2_port, 'localhost', :worker_1_port, false, 'force_logical'); -ERROR: the force_logical transfer mode is currently unsupported + master_copy_shard_placement +--------------------------------------------------------------------- + +(1 row) + -- status after shard copy SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport FROM @@ -209,9 +218,10 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport --------------------------------------------------------------------- + 13000020 | table6_append | 57637 13000020 | table6_append | 57638 13000021 | table6_append | 57637 -(2 rows) +(3 rows) -- test move -- test moving colocated shards @@ -242,11 +252,8 @@ ORDER BY s.shardid, sp.nodeport; 13000011 | table2_group1 | 57638 (14 rows) --- try force_logical -SELECT master_move_shard_placement(13000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); -ERROR: the force_logical transfer mode is currently unsupported -- move colocated shards -SELECT master_move_shard_placement(13000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT master_move_shard_placement(13000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); master_move_shard_placement --------------------------------------------------------------------- @@ -320,7 +327,7 @@ ORDER BY s.shardid, sp.nodeport; (9 rows) -- move NOT colocated shard -SELECT master_move_shard_placement(13000013, 
'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT master_move_shard_placement(13000013, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); master_move_shard_placement --------------------------------------------------------------------- @@ -362,12 +369,13 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport --------------------------------------------------------------------- + 13000020 | table6_append | 57637 13000020 | table6_append | 57638 13000021 | table6_append | 57637 -(2 rows) +(3 rows) -- move shard in append distributed table -SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); master_move_shard_placement --------------------------------------------------------------------- @@ -385,12 +393,13 @@ WHERE ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport --------------------------------------------------------------------- + 13000020 | table6_append | 57637 13000020 | table6_append | 57638 13000021 | table6_append | 57638 -(2 rows) +(3 rows) -- try to move shard from wrong node -SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); ERROR: source placement must be in active state -- test shard move with foreign constraints DROP TABLE IF EXISTS table1_group1, table2_group1; @@ -413,7 +422,7 @@ SELECT create_distributed_table('table2_group1', 'table1_id', 'hash'); -- Mark the tables as non-mx tables UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN - ('table1_group1'::regclass, 'table2_group1'::regclass); + ('table1_group1'::regclass, 'table2_group1'::regclass); -- status before shard rebalance SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport FROM @@ -533,12 +542,14 @@ SELECT * FROM run_command_on_placements('serial_move_test', 'SELECT DISTINCT key localhost | 57637 | 13000034 | t | 15 (1 row) -SELECT master_move_shard_placement(13000034, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(13000034, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); master_move_shard_placement --------------------------------------------------------------------- (1 row) +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 5 orphaned shards -- confirm the successfull move SELECT * FROM run_command_on_placements('serial_move_test', 'SELECT DISTINCT key FROM %s WHERE key = 15') WHERE result = '15' AND shardid = 13000034; nodename | nodeport | shardid | success | result @@ -559,12 +570,227 @@ INSERT INTO serial_move_test (key) VALUES (15) RETURNING *; 15 | 4 (1 row) +-- lets do some failure testing +CREATE TABLE logical_failure_test (key int); +SET citus.shard_replication_factor TO 1; +SET citus.shard_count TO 4; +SELECT create_distributed_table('logical_failure_test', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- ensure that the shard is created for this user +\c - - - :worker_2_port +\dt logical_failure_test_13000038 + List of relations + Schema | Name | Type | Owner +--------------------------------------------------------------------- + public | logical_failure_test_13000038 | table | postgres +(1 row) + +DROP 
TABLE logical_failure_test_13000038; +-- should fail since the command wouldn't be able to connect to the worker_1 +\c - - - :master_port +SELECT master_move_shard_placement(13000038, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); +WARNING: relation "public.logical_failure_test_13000038" does not exist +CONTEXT: while executing command on localhost:xxxxx +ERROR: cannot get the size because of a connection error +CALL citus_cleanup_orphaned_shards(); +DROP TABLE logical_failure_test; +-- lets test the logical replication modes +CREATE TABLE test_with_pkey (key int PRIMARY KEY, value int NOT NULL); +SET citus.shard_replication_factor TO 1; +SET citus.shard_count TO 4; +SELECT create_distributed_table('test_with_pkey', 'key', colocate_with => 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- should succeed since there is a replica identity defined +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 1 orphaned shards +-- should succeed since we still have a replica identity +ALTER TABLE test_with_pkey REPLICA IDENTITY FULL; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'auto'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 1 orphaned shards +-- make sure we have the replica identity after the move +SELECT result FROM run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + result +--------------------------------------------------------------------- + f +(1 row) + +-- this time should fail since we don't have replica identity any more +ALTER TABLE test_with_pkey REPLICA IDENTITY NOTHING; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'auto'); +ERROR: cannot use logical replication to transfer shards of the relation test_with_pkey since it doesn't have a REPLICA IDENTITY or PRIMARY KEY +DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY. +HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'. 
+CALL citus_cleanup_orphaned_shards(); +-- make sure we have the replica identity after the move +SELECT result FROM run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + result +--------------------------------------------------------------------- + n +(1 row) + +-- should succeed since we still have a replica identity +ALTER TABLE test_with_pkey REPLICA IDENTITY USING INDEX test_with_pkey_pkey; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 1 orphaned shards +-- make sure we have the replica identity after the move +SELECT result FROM run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + result +--------------------------------------------------------------------- + i +(1 row) + +-- one final test with shard_transfer_mode auto +CREATE UNIQUE INDEX req_rep_idx ON test_with_pkey(key, value); +ALTER TABLE test_with_pkey REPLICA IDENTITY USING INDEX req_rep_idx; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'auto'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 1 orphaned shards +-- make sure we have the replica identity after the move +SELECT result FROM run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + result +--------------------------------------------------------------------- + i +(1 row) + +ALTER TABLE test_with_pkey REPLICA IDENTITY NOTHING; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 1 orphaned shards +-- make sure we have the replica identity after the move +SELECT result FROM run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + result +--------------------------------------------------------------------- + n +(1 row) + +-- should succeed but not use logical replication +ALTER TABLE test_with_pkey REPLICA IDENTITY NOTHING; +SET client_min_messages TO DEBUG1; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes'); +DEBUG: table "test_with_pkey_13000042" does not exist, skipping +DETAIL: from localhost:xxxxx + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +LOG: dropping shard placement xxxxx of shard xxxxx on localhost:xxxxx after it was moved away +NOTICE: cleaned up 1 orphaned shards +SET client_min_messages TO DEFAULT; +-- we don't support multiple shard moves in a single transaction +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical') +FROM + pg_dist_shard_placement where nodeport = :worker_1_port AND + shardid IN (SELECT shardid FROM pg_dist_shard WHERE 
logicalrelid = 'test_with_pkey'::regclass); +ERROR: moving multiple shard placements via logical replication in the same transaction is currently not supported +HINT: If you wish to move multiple shard placements in a single transaction set the shard_transfer_mode to 'block_writes'. +CALL citus_cleanup_orphaned_shards(); +-- similar test with explicit transaction block +BEGIN; + SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + + SELECT master_move_shard_placement(13000044, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); +ERROR: moving multiple shard placements via logical replication in the same transaction is currently not supported +HINT: If you wish to move multiple shard placements in a single transaction set the shard_transfer_mode to 'block_writes'. +COMMIT; + CALL citus_cleanup_orphaned_shards(); +-- we do support the same with block writes +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes') +FROM + pg_dist_shard_placement where nodeport = :worker_1_port AND + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_with_pkey'::regclass); + master_move_shard_placement +--------------------------------------------------------------------- + + +(2 rows) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 2 orphaned shards +-- we should be able to move shard placements after COMMIT/ABORT +BEGIN; + SELECT master_move_shard_placement(13000043, 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode:='force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +COMMIT; +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 1 orphaned shards +SELECT master_move_shard_placement(13000045, 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode:='force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 1 orphaned shards +BEGIN; + SELECT master_move_shard_placement(13000043, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +ABORT; +SELECT master_move_shard_placement(13000045, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 1 orphaned shards -- we should be able to move shard placements of partitioend tables CREATE SCHEMA move_partitions; CREATE TABLE move_partitions.events ( - id serial, - t timestamptz default now(), - payload text + id serial, + t timestamptz default now(), + payload text ) PARTITION BY RANGE(t); SET citus.shard_count TO 6; @@ -589,12 +815,23 @@ SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localh FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port AND shardstate 
!= 4 +ORDER BY shardid LIMIT 1; +ERROR: cannot use logical replication to transfer shards of the relation events_1 since it doesn't have a REPLICA IDENTITY or PRIMARY KEY +DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY. +HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'. +CALL citus_cleanup_orphaned_shards(); +-- force logical replication +SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical') +FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port ORDER BY shardid LIMIT 1; master_move_shard_placement --------------------------------------------------------------------- (1 row) +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 2 orphaned shards SELECT count(*) FROM move_partitions.events; count --------------------------------------------------------------------- @@ -613,6 +850,8 @@ ORDER BY shardid LIMIT 1; (1 row) +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 2 orphaned shards SELECT count(*) FROM move_partitions.events; count --------------------------------------------------------------------- @@ -629,6 +868,8 @@ ORDER BY shardid LIMIT 1; (1 row) +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 2 orphaned shards SELECT count(*) FROM move_partitions.events; count --------------------------------------------------------------------- @@ -645,3 +886,6 @@ WHERE logicalrelid::text LIKE 'move_partitions.events%' AND nodeport = :worker_1 (1 row) DROP TABLE move_partitions.events; +-- set back to the defaults and drop the table +SET client_min_messages TO DEFAULT; +DROP TABLE test_with_pkey; diff --git a/src/test/regress/expected/multi_colocated_shard_rebalance_0.out b/src/test/regress/expected/multi_colocated_shard_rebalance_0.out new file mode 100644 index 000000000..95871bc31 --- /dev/null +++ b/src/test/regress/expected/multi_colocated_shard_rebalance_0.out @@ -0,0 +1,788 @@ +-- +-- MULTI_COLOCATED_SHARD_REBALANCE +-- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13000000; +SET citus.shard_count TO 6; +SET citus.shard_replication_factor TO 1; +-- create distributed tables +CREATE TABLE table1_group1 ( id int PRIMARY KEY); +SELECT create_distributed_table('table1_group1', 'id', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE table2_group1 ( id int ); +SELECT create_distributed_table('table2_group1', 'id', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SET citus.shard_count TO 8; +CREATE TABLE table5_groupX ( id int ); +SELECT create_distributed_table('table5_groupX', 'id', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE table6_append ( id int ); +SELECT master_create_distributed_table('table6_append', 'id', 'append'); + master_create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT master_create_empty_shard('table6_append'); + master_create_empty_shard +--------------------------------------------------------------------- + 13000020 +(1 row) + +SELECT master_create_empty_shard('table6_append'); + master_create_empty_shard 
+--------------------------------------------------------------------- + 13000021 +(1 row) + +-- Mark tables as non-mx tables, in order to be able to test master_copy_shard_placement +UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN + ('table1_group1'::regclass, 'table2_group1'::regclass, 'table5_groupX'::regclass); +-- test copy +-- test copying colocated shards +-- status before shard copy +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000000 | table1_group1 | 57637 + 13000001 | table1_group1 | 57638 + 13000002 | table1_group1 | 57637 + 13000003 | table1_group1 | 57638 + 13000004 | table1_group1 | 57637 + 13000005 | table1_group1 | 57638 + 13000006 | table2_group1 | 57637 + 13000007 | table2_group1 | 57638 + 13000008 | table2_group1 | 57637 + 13000009 | table2_group1 | 57638 + 13000010 | table2_group1 | 57637 + 13000011 | table2_group1 | 57638 +(12 rows) + +-- try to copy colocated shards without a replica identity +SELECT master_copy_shard_placement(13000000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false); + master_copy_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- copy colocated shards +SELECT master_copy_shard_placement(13000000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false, 'force_logical'); +ERROR: shard xxxxx already exist in target placement +-- status after shard copy +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000000 | table1_group1 | 57637 + 13000000 | table1_group1 | 57638 + 13000001 | table1_group1 | 57638 + 13000002 | table1_group1 | 57637 + 13000003 | table1_group1 | 57638 + 13000004 | table1_group1 | 57637 + 13000005 | table1_group1 | 57638 + 13000006 | table2_group1 | 57637 + 13000006 | table2_group1 | 57638 + 13000007 | table2_group1 | 57638 + 13000008 | table2_group1 | 57637 + 13000009 | table2_group1 | 57638 + 13000010 | table2_group1 | 57637 + 13000011 | table2_group1 | 57638 +(14 rows) + +-- also connect worker to verify we successfully copied given shard (and other colocated shards) +\c - - - :worker_2_port +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table1_group1_13000000'::regclass; + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | not null +(1 row) + +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table2_group1_13000006'::regclass; + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | +(1 row) + +\c - - - :master_port +-- copy colocated shards again to see error message +SELECT master_copy_shard_placement(13000000, 'localhost', :worker_1_port, 'localhost', 
:worker_2_port, false, 'force_logical'); +ERROR: shard xxxxx already exist in target placement +-- test copying NOT colocated shard +-- status before shard copy +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + p.logicalrelid = 'table5_groupX'::regclass +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000012 | table5_groupx | 57637 + 13000013 | table5_groupx | 57638 + 13000014 | table5_groupx | 57637 + 13000015 | table5_groupx | 57638 + 13000016 | table5_groupx | 57637 + 13000017 | table5_groupx | 57638 + 13000018 | table5_groupx | 57637 + 13000019 | table5_groupx | 57638 +(8 rows) + +-- copy NOT colocated shard +SELECT master_copy_shard_placement(13000012, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false, 'force_logical'); + master_copy_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- status after shard copy +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + p.logicalrelid = 'table5_groupX'::regclass +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000012 | table5_groupx | 57637 + 13000012 | table5_groupx | 57638 + 13000013 | table5_groupx | 57638 + 13000014 | table5_groupx | 57637 + 13000015 | table5_groupx | 57638 + 13000016 | table5_groupx | 57637 + 13000017 | table5_groupx | 57638 + 13000018 | table5_groupx | 57637 + 13000019 | table5_groupx | 57638 +(9 rows) + +-- test copying shard in append distributed table +-- status before shard copy +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + p.logicalrelid = 'table6_append'::regclass +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000020 | table6_append | 57638 + 13000021 | table6_append | 57637 +(2 rows) + +-- copy shard in append distributed table +SELECT master_copy_shard_placement(13000020, 'localhost', :worker_2_port, 'localhost', :worker_1_port, false, 'force_logical'); + master_copy_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- status after shard copy +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + p.logicalrelid = 'table6_append'::regclass +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000020 | table6_append | 57637 + 13000020 | table6_append | 57638 + 13000021 | table6_append | 57637 +(3 rows) + +-- test move +-- test moving colocated shards +-- status before shard move +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + colocationid = (SELECT colocationid 
FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000000 | table1_group1 | 57637 + 13000000 | table1_group1 | 57638 + 13000001 | table1_group1 | 57638 + 13000002 | table1_group1 | 57637 + 13000003 | table1_group1 | 57638 + 13000004 | table1_group1 | 57637 + 13000005 | table1_group1 | 57638 + 13000006 | table2_group1 | 57637 + 13000006 | table2_group1 | 57638 + 13000007 | table2_group1 | 57638 + 13000008 | table2_group1 | 57637 + 13000009 | table2_group1 | 57638 + 13000010 | table2_group1 | 57637 + 13000011 | table2_group1 | 57638 +(14 rows) + +-- move colocated shards +SELECT master_move_shard_placement(13000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- status after shard move +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000000 | table1_group1 | 57637 + 13000000 | table1_group1 | 57638 + 13000001 | table1_group1 | 57637 + 13000002 | table1_group1 | 57637 + 13000003 | table1_group1 | 57638 + 13000004 | table1_group1 | 57637 + 13000005 | table1_group1 | 57638 + 13000006 | table2_group1 | 57637 + 13000006 | table2_group1 | 57638 + 13000007 | table2_group1 | 57637 + 13000008 | table2_group1 | 57637 + 13000009 | table2_group1 | 57638 + 13000010 | table2_group1 | 57637 + 13000011 | table2_group1 | 57638 +(14 rows) + +-- also connect worker to verify we successfully moved given shard (and other colocated shards) +\c - - - :worker_1_port +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table1_group1_13000001'::regclass; + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | not null +(1 row) + +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table2_group1_13000007'::regclass; + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | +(1 row) + +\c - - - :master_port +-- test moving NOT colocated shard +-- status before shard move +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + p.logicalrelid = 'table5_groupX'::regclass +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000012 | table5_groupx | 57637 + 13000012 | table5_groupx | 57638 + 13000013 | table5_groupx | 57638 + 13000014 | table5_groupx | 57637 + 13000015 | table5_groupx | 57638 + 13000016 | table5_groupx | 57637 + 13000017 | table5_groupx | 57638 + 13000018 | table5_groupx | 57637 + 13000019 | table5_groupx | 57638 +(9 rows) + +-- move NOT colocated shard +SELECT master_move_shard_placement(13000013, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); + master_move_shard_placement 
+--------------------------------------------------------------------- + +(1 row) + +-- status after shard move +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + p.logicalrelid = 'table5_groupX'::regclass +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000012 | table5_groupx | 57637 + 13000012 | table5_groupx | 57638 + 13000013 | table5_groupx | 57637 + 13000014 | table5_groupx | 57637 + 13000015 | table5_groupx | 57638 + 13000016 | table5_groupx | 57637 + 13000017 | table5_groupx | 57638 + 13000018 | table5_groupx | 57637 + 13000019 | table5_groupx | 57638 +(9 rows) + +-- test moving shard in append distributed table +-- status before shard move +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + p.logicalrelid = 'table6_append'::regclass +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000020 | table6_append | 57637 + 13000020 | table6_append | 57638 + 13000021 | table6_append | 57637 +(3 rows) + +-- move shard in append distributed table +SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- status after shard move +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + p.logicalrelid = 'table6_append'::regclass +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000020 | table6_append | 57637 + 13000020 | table6_append | 57638 + 13000021 | table6_append | 57638 +(3 rows) + +-- try to move shard from wrong node +SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); +ERROR: could not find placement matching "localhost:xxxxx" +HINT: Confirm the placement still exists and try again. 
+-- test shard move with foreign constraints +DROP TABLE IF EXISTS table1_group1, table2_group1; +SET citus.shard_count TO 6; +SET citus.shard_replication_factor TO 1; +-- create distributed tables +CREATE TABLE table1_group1 ( id int PRIMARY KEY); +SELECT create_distributed_table('table1_group1', 'id', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE table2_group1 ( id int, table1_id int, FOREIGN KEY(table1_id) REFERENCES table1_group1(id)); +SELECT create_distributed_table('table2_group1', 'table1_id', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Mark the tables as non-mx tables +UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN + ('table1_group1'::regclass, 'table2_group1'::regclass); +-- status before shard rebalance +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000022 | table1_group1 | 57637 + 13000023 | table1_group1 | 57638 + 13000024 | table1_group1 | 57637 + 13000025 | table1_group1 | 57638 + 13000026 | table1_group1 | 57637 + 13000027 | table1_group1 | 57638 + 13000028 | table2_group1 | 57637 + 13000029 | table2_group1 | 57638 + 13000030 | table2_group1 | 57637 + 13000031 | table2_group1 | 57638 + 13000032 | table2_group1 | 57637 + 13000033 | table2_group1 | 57638 +(12 rows) + +SELECT master_move_shard_placement(13000022, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'block_writes'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- status after shard rebalance +SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport +FROM + pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp +WHERE + p.logicalrelid = s.logicalrelid AND + s.shardid = sp.shardid AND + colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) +ORDER BY s.shardid, sp.nodeport; + shardid | logicalrelid | nodeport +--------------------------------------------------------------------- + 13000022 | table1_group1 | 57638 + 13000023 | table1_group1 | 57638 + 13000024 | table1_group1 | 57637 + 13000025 | table1_group1 | 57638 + 13000026 | table1_group1 | 57637 + 13000027 | table1_group1 | 57638 + 13000028 | table2_group1 | 57638 + 13000029 | table2_group1 | 57638 + 13000030 | table2_group1 | 57637 + 13000031 | table2_group1 | 57638 + 13000032 | table2_group1 | 57637 + 13000033 | table2_group1 | 57638 +(12 rows) + +-- also connect worker to verify we successfully moved given shard (and other colocated shards) +\c - - - :worker_2_port +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table1_group1_13000022'::regclass; + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | not null +(1 row) + +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table2_group1_13000028'::regclass; + Column | Type | Modifiers +--------------------------------------------------------------------- + id | integer | + table1_id 
| integer | +(2 rows) + +-- make sure that we've created the foreign keys +SELECT "Constraint", "Definition" FROM table_fkeys; + Constraint | Definition +--------------------------------------------------------------------- + pg_dist_poolinfo_nodeid_fkey | FOREIGN KEY (nodeid) REFERENCES pg_dist_node(nodeid) + table2_group1_table1_id_fkey_13000028 | FOREIGN KEY (table1_id) REFERENCES table1_group1_13000022(id) + table2_group1_table1_id_fkey_13000029 | FOREIGN KEY (table1_id) REFERENCES table1_group1_13000023(id) + table2_group1_table1_id_fkey_13000031 | FOREIGN KEY (table1_id) REFERENCES table1_group1_13000025(id) + table2_group1_table1_id_fkey_13000033 | FOREIGN KEY (table1_id) REFERENCES table1_group1_13000027(id) + test_constraint_1230019 | FOREIGN KEY (l_orderkey) REFERENCES tenant_isolation.orders_streaming_1230016(o_orderkey) + test_constraint_1230020 | FOREIGN KEY (l_orderkey) REFERENCES tenant_isolation.orders_streaming_1230017(o_orderkey) + test_constraint_1230021 | FOREIGN KEY (l_orderkey) REFERENCES tenant_isolation.orders_streaming_1230018(o_orderkey) + test_constraint_1230025 | FOREIGN KEY (l_orderkey) REFERENCES tenant_isolation.orders_streaming_1230022(o_orderkey) + test_constraint_1230026 | FOREIGN KEY (l_orderkey) REFERENCES tenant_isolation.orders_streaming_1230023(o_orderkey) + test_constraint_1230027 | FOREIGN KEY (l_orderkey) REFERENCES tenant_isolation.orders_streaming_1230024(o_orderkey) +(11 rows) + +\c - - - :master_port +-- test shard copy with foreign constraints +-- we expect it to error out because we do not support foreign constraints with replication factor > 1 +SELECT master_copy_shard_placement(13000022, 'localhost', :worker_2_port, 'localhost', :worker_1_port, false); +ERROR: cannot replicate shards with foreign keys +-- let's also test that master_move_shard_placement doesn't break serials +CREATE TABLE serial_move_test (key int, other_val serial); +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('serial_move_test', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- key 15 goes to shard xxxxx +INSERT INTO serial_move_test (key) VALUES (15) RETURNING *; + key | other_val +--------------------------------------------------------------------- + 15 | 1 +(1 row) + +INSERT INTO serial_move_test (key) VALUES (15) RETURNING *; + key | other_val +--------------------------------------------------------------------- + 15 | 2 +(1 row) + +-- confirm the shard id +SELECT * FROM run_command_on_placements('serial_move_test', 'SELECT DISTINCT key FROM %s WHERE key = 15') WHERE result = '15' AND shardid = 13000034; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 13000034 | t | 15 +(1 row) + +SELECT master_move_shard_placement(13000034, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- confirm the successful move +SELECT * FROM run_command_on_placements('serial_move_test', 'SELECT DISTINCT key FROM %s WHERE key = 15') WHERE result = '15' AND shardid = 13000034; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57638 | 13000034 | t | 15 +(1 row) + +-- finally show that serials work fine afterwards +INSERT INTO serial_move_test (key) VALUES (15) RETURNING *;
+ key | other_val +--------------------------------------------------------------------- + 15 | 3 +(1 row) + +INSERT INTO serial_move_test (key) VALUES (15) RETURNING *; + key | other_val +--------------------------------------------------------------------- + 15 | 4 +(1 row) + +-- lets do some failure testing +CREATE TABLE logical_failure_test (key int); +SET citus.shard_replication_factor TO 1; +SET citus.shard_count TO 4; +SELECT create_distributed_table('logical_failure_test', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- ensure that the shard is created for this user +\c - - - :worker_2_port +\dt logical_failure_test_13000038 + List of relations + Schema | Name | Type | Owner +--------------------------------------------------------------------- + public | logical_failure_test_13000038 | table | postgres +(1 row) + +DROP TABLE logical_failure_test_13000038; +-- should fail since the command wouldn't be able to connect to the worker_1 +\c - - - :master_port +SELECT master_move_shard_placement(13000038, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); +ERROR: could not copy table "logical_failure_test_13000038" from "localhost:xxxxx" +CONTEXT: while executing command on localhost:xxxxx +DROP TABLE logical_failure_test; +-- lets test the logical replication modes +CREATE TABLE test_with_pkey (key int PRIMARY KEY, value int NOT NULL); +SET citus.shard_replication_factor TO 1; +SET citus.shard_count TO 4; +SELECT create_distributed_table('test_with_pkey', 'key', colocate_with => 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- should succeed since there is a replica identity defined +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- should succeed since we still have a replica identity +ALTER TABLE test_with_pkey REPLICA IDENTITY FULL; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'auto'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- this time should fail since we don't have replica identity any more +ALTER TABLE test_with_pkey REPLICA IDENTITY NOTHING; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'auto'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- should succeed since we still have a replica identity +ALTER TABLE test_with_pkey REPLICA IDENTITY USING INDEX test_with_pkey_pkey; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +ERROR: could not find placement matching "localhost:xxxxx" +HINT: Confirm the placement still exists and try again. 
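(Editor's aside, not part of the patch: the block above drives master_move_shard_placement through the different replica identity settings of test_with_pkey. As an illustration only, the current setting that those "should succeed / should fail" comments refer to can be read from the standard pg_class column relreplident:

SELECT relname,
       CASE relreplident
            WHEN 'd' THEN 'default (primary key, if any)'
            WHEN 'i' THEN 'index'
            WHEN 'f' THEN 'full'
            WHEN 'n' THEN 'nothing'
       END AS replica_identity
FROM pg_class
WHERE relname = 'test_with_pkey';

Whether a usable replica identity exists is the property the 'auto' transfer mode depends on in these tests.)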
+-- one final test with shard_transfer_mode auto +CREATE UNIQUE INDEX req_rep_idx ON test_with_pkey(key, value); +ALTER TABLE test_with_pkey REPLICA IDENTITY USING INDEX req_rep_idx; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'auto'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +ALTER TABLE test_with_pkey REPLICA IDENTITY NOTHING; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- should succeed but not use logical replication +ALTER TABLE test_with_pkey REPLICA IDENTITY NOTHING; +SET client_min_messages TO DEBUG1; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes'); +DEBUG: table "test_with_pkey_13000042" does not exist, skipping +DETAIL: NOTICE from localhost:xxxxx + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +SET client_min_messages TO DEFAULT; +-- we don't support multiple shard moves in a single transaction +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical') +FROM + pg_dist_shard_placement where nodeport = :worker_1_port AND + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_with_pkey'::regclass); + master_move_shard_placement +--------------------------------------------------------------------- + + +(2 rows) + +-- similar test with explicit transaction block +BEGIN; + + SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); +ERROR: could not find placement matching "localhost:xxxxx" +HINT: Confirm the placement still exists and try again. 
+ SELECT master_move_shard_placement(13000044, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); +ERROR: current transaction is aborted, commands ignored until end of transaction block +COMMIT; +-- we do support the same with block writes +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes') +FROM + pg_dist_shard_placement where nodeport = :worker_1_port AND + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_with_pkey'::regclass); + master_move_shard_placement +--------------------------------------------------------------------- +(0 rows) + +-- we should be able to move shard placements after COMMIT/ABORT +BEGIN; + + SELECT master_move_shard_placement(13000043, 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode:='force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +COMMIT; +SELECT master_move_shard_placement(13000045, 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode:='force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +BEGIN; + + SELECT master_move_shard_placement(13000043, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +ABORT; +SELECT master_move_shard_placement(13000045, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- we should be able to move shard placements of partitioned tables +CREATE SCHEMA move_partitions; +CREATE TABLE move_partitions.events ( + id serial, + t timestamptz default now(), + payload text +) +PARTITION BY RANGE(t); +ERROR: syntax error at or near "PARTITION" +SET citus.shard_count TO 6; +SELECT create_distributed_table('move_partitions.events', 'id', colocate_with := 'none'); +ERROR: relation "move_partitions.events" does not exist +CREATE TABLE move_partitions.events_1 PARTITION OF move_partitions.events +FOR VALUES FROM ('2015-01-01') TO ('2016-01-01'); +ERROR: syntax error at or near "PARTITION" +INSERT INTO move_partitions.events (t, payload) +SELECT '2015-01-01'::date + (interval '1 day' * s), s FROM generate_series(1, 100) s; +ERROR: relation "move_partitions.events" does not exist +SELECT count(*) FROM move_partitions.events; +ERROR: relation "move_partitions.events" does not exist +-- try to move automatically +SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port) +FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port +ORDER BY shardid LIMIT 1; +ERROR: relation "move_partitions.events" does not exist +-- force logical replication +SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical') +FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port +ORDER BY shardid LIMIT 1; +ERROR: relation "move_partitions.events" does not exist +SELECT count(*) FROM move_partitions.events;
+ERROR: relation "move_partitions.events" does not exist +-- add a primary key to the partition +ALTER TABLE move_partitions.events_1 ADD CONSTRAINT e_1_pk PRIMARY KEY (id); +ERROR: relation "move_partitions.events_1" does not exist +-- should be able to move automatically now +SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port) +FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port +ORDER BY shardid LIMIT 1; +ERROR: relation "move_partitions.events" does not exist +SELECT count(*) FROM move_partitions.events; +ERROR: relation "move_partitions.events" does not exist +-- should also be able to move with block writes +SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes') +FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port +ORDER BY shardid LIMIT 1; +ERROR: relation "move_partitions.events" does not exist +SELECT count(*) FROM move_partitions.events; +ERROR: relation "move_partitions.events" does not exist +-- should have moved all shards to node 1 (2*6 = 12) +SELECT count(*) +FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid::text LIKE 'move_partitions.events%' AND nodeport = :worker_1_port; + count +--------------------------------------------------------------------- + 0 +(1 row) + +DROP TABLE move_partitions.events; +ERROR: table "events" does not exist +-- set back to the defaults and drop the table +SET client_min_messages TO DEFAULT; +DROP TABLE test_with_pkey; diff --git a/src/test/regress/expected/multi_create_role_dependency.out b/src/test/regress/expected/multi_create_role_dependency.out new file mode 100644 index 000000000..df1792914 --- /dev/null +++ b/src/test/regress/expected/multi_create_role_dependency.out @@ -0,0 +1,47 @@ +-- Show that dependent user/role objects can be created safely +CREATE USER non_super_user_test_user; +CREATE SCHEMA role_dependency_schema; +CREATE TYPE role_dependency_schema.superusers_type AS (a int, b int); +GRANT CREATE ON SCHEMA role_dependency_schema to non_super_user_test_user; +GRANT USAGE ON SCHEMA role_dependency_schema to non_super_user_test_user; +GRANT USAGE ON TYPE role_dependency_schema.superusers_type TO non_super_user_test_user; +SET ROLE non_super_user_test_user; +CREATE TABLE role_dependency_schema.non_super_user_table(a int, b role_dependency_schema.superusers_type); +SELECT create_distributed_table('role_dependency_schema.non_super_user_table','a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Show that table and superuser's type is marked as distributed +RESET ROLE; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where pg_identify_object_as_address(classid, objid, objsubid)::text like '%non_super_user_table%'; + pg_identify_object_as_address +--------------------------------------------------------------------- + (table,"{role_dependency_schema,non_super_user_table}",{}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where pg_identify_object_as_address(classid, objid, objsubid)::text like '%non_super_user_table%';$$) ORDER BY 1,2; + nodename | nodeport | success | result 
+--------------------------------------------------------------------- + localhost | 57637 | t | (table,"{role_dependency_schema,non_super_user_table}",{}) + localhost | 57638 | t | (table,"{role_dependency_schema,non_super_user_table}",{}) +(2 rows) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where pg_identify_object_as_address(classid, objid, objsubid)::text like '%superusers_type%'; + pg_identify_object_as_address +--------------------------------------------------------------------- + (type,{role_dependency_schema.superusers_type},{}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where pg_identify_object_as_address(classid, objid, objsubid)::text like '%superusers_type%';$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (type,{role_dependency_schema.superusers_type},{}) + localhost | 57638 | t | (type,{role_dependency_schema.superusers_type},{}) +(2 rows) + +DROP SCHEMA role_dependency_schema CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to type role_dependency_schema.superusers_type +drop cascades to table role_dependency_schema.non_super_user_table diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index 4ec8a27da..ee83adff0 100644 --- a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -381,6 +381,86 @@ SELECT create_distributed_table('shard_count_table_2', 'a', shard_count:=12, col (1 row) DROP TABLE shard_count_table, shard_count_table_2; +-- test shard splitting doesn't break shard_count parameter +-- when shard count is given table needs to have exactly that +-- many shards, regardless of shard splitting on other tables +-- ensure there is no colocation group with 9 shards +SELECT count(*) FROM pg_dist_colocation WHERE shardcount = 9; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SET citus.shard_count TO 9; +CREATE TABLE shard_split_table (a int, b int); +SELECT create_distributed_table ('shard_split_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- show the difference in pg_dist_colocation and citus_tables shard counts +SELECT + ( + SELECT shardcount FROM pg_dist_colocation WHERE colocationid IN + ( + SELECT colocation_id FROM citus_tables WHERE table_name = 'shard_split_table'::regclass + ) + ) AS "pg_dist_colocation", + (SELECT shard_count FROM citus_tables WHERE table_name = 'shard_split_table'::regclass) AS "citus_tables"; + pg_dist_colocation | citus_tables +--------------------------------------------------------------------- + 9 | 11 +(1 row) + +CREATE TABLE shard_split_table_2 (a int, b int); +SELECT create_distributed_table ('shard_split_table_2', 'a', shard_count:=9); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT a.colocation_id = b.colocation_id FROM citus_tables a, citus_tables b + WHERE a.table_name = 'shard_split_table'::regclass AND b.table_name = 'shard_split_table_2'::regclass; + ?column? 
+--------------------------------------------------------------------- + f +(1 row) + +SELECT shard_count FROM citus_tables WHERE table_name = 'shard_split_table_2'::regclass; + shard_count +--------------------------------------------------------------------- + 9 +(1 row) + +-- also check we don't break regular behaviour +CREATE TABLE shard_split_table_3 (a int, b int); +SELECT create_distributed_table ('shard_split_table_3', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT a.colocation_id = b.colocation_id FROM citus_tables a, citus_tables b + WHERE a.table_name = 'shard_split_table'::regclass AND b.table_name = 'shard_split_table_3'::regclass; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT shard_count FROM citus_tables WHERE table_name = 'shard_split_table_3'::regclass; + shard_count +--------------------------------------------------------------------- + 11 +(1 row) + +DROP TABLE shard_split_table, shard_split_table_2, shard_split_table_3; -- test a shard count with an empty default colocation group -- ensure there is no colocation group with 13 shards SELECT count(*) FROM pg_dist_colocation WHERE shardcount = 13; diff --git a/src/test/regress/expected/multi_create_users.out b/src/test/regress/expected/multi_create_users.out new file mode 100644 index 000000000..7154db7d0 --- /dev/null +++ b/src/test/regress/expected/multi_create_users.out @@ -0,0 +1,12 @@ +-- +-- MULTI_CREATE_USERS +-- +-- Create users on all nodes, they're currently automatically +-- replicated. +-- +CREATE USER full_access; +CREATE USER read_access; +CREATE USER no_access; +-- allow access to various users +GRANT ALL ON TABLE lineitem, orders, lineitem, customer, nation, part, supplier TO full_access; +GRANT SELECT ON TABLE lineitem, orders, lineitem, customer, nation, part, supplier TO read_access; diff --git a/src/test/regress/expected/multi_deparse_function.out b/src/test/regress/expected/multi_deparse_function.out index 6ac5dcd17..c53707909 100644 --- a/src/test/regress/expected/multi_deparse_function.out +++ b/src/test/regress/expected/multi_deparse_function.out @@ -370,16 +370,9 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE -- Rename the function back to the original name in the coordinator ALTER FUNCTION summation RENAME TO add; +SET citus.enable_ddl_propagation TO on; CREATE ROLE function_role; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
-SELECT run_command_on_workers('CREATE ROLE function_role'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - +SET citus.enable_ddl_propagation TO off; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION add OWNER TO function_role $cmd$); diff --git a/src/test/regress/expected/multi_deparse_procedure.out b/src/test/regress/expected/multi_deparse_procedure.out index 62adca50b..e004feff7 100644 --- a/src/test/regress/expected/multi_deparse_procedure.out +++ b/src/test/regress/expected/multi_deparse_procedure.out @@ -304,16 +304,9 @@ $cmd$); (2 rows) ALTER PROCEDURE summation RENAME TO raise_info; +SET citus.enable_ddl_propagation TO on; CREATE ROLE procedure_role; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -SELECT run_command_on_workers($$CREATE ROLE procedure_role;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - +SET citus.enable_ddl_propagation TO off; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info OWNER TO procedure_role $cmd$); @@ -403,10 +396,3 @@ $cmd$); SET client_min_messages TO WARNING; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; DROP ROLE procedure_role; -SELECT run_command_on_workers($$DROP ROLE procedure_role;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP ROLE") - (localhost,57638,t,"DROP ROLE") -(2 rows) - diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index be0b77c53..7ed37a7af 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1176,8 +1176,6 @@ ALTER EXTENSION citus UPDATE; \c - - - :master_port -- test https://github.com/citusdata/citus/issues/3409 CREATE USER testuser2 SUPERUSER; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SET ROLE testuser2; DROP EXTENSION Citus; -- Loop until we see there's no maintenance daemon running diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index c0fbb40c0..3eaecfec5 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -141,15 +141,88 @@ CREATE INDEX CONCURRENTLY lineitem_concurrently_index ON public.lineitem (l_orde -- Verify that no-name local CREATE INDEX CONCURRENTLY works CREATE TABLE local_table (id integer, name text); CREATE INDEX CONCURRENTLY ON local_table(id); --- Verify that we warn out on CLUSTER command for distributed tables and no parameter -CLUSTER index_test_hash USING index_test_hash_index_a; -WARNING: not propagating CLUSTER command to worker nodes -CLUSTER; -WARNING: not propagating CLUSTER command to worker nodes -- Vefify we don't warn out on CLUSTER command for local tables CREATE INDEX CONCURRENTLY local_table_index ON local_table(id); CLUSTER local_table USING local_table_index; DROP TABLE local_table; +-- Verify that we can run CLUSTER command +CLUSTER index_test_hash USING index_test_hash_index_a; +-- Verify that we ERROR on CLUSTER VERBOSE +CLUSTER VERBOSE index_test_hash USING index_test_hash_index_a; +ERROR: cannot run CLUSTER command +DETAIL: VERBOSE option is currently unsupported for distributed tables. +-- Verify that we WARN on CLUSTER ALL +CLUSTER; +WARNING: not propagating CLUSTER command to worker nodes +HINT: Provide a specific table in order to CLUSTER distributed tables. +-- Verify that all indexes got created on the master node and one of the workers +SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; + schemaname | tablename | indexname | tablespace | indexdef +--------------------------------------------------------------------- + multi_index_statements | index_test_hash | index_test_hash_a_b_idx | | CREATE INDEX index_test_hash_a_b_idx ON multi_index_statements.index_test_hash USING btree (a) INCLUDE (b) WHERE (value_plus_one(c) > 10) + multi_index_statements | index_test_hash | index_test_hash_a_idx | | CREATE INDEX index_test_hash_a_idx ON multi_index_statements.index_test_hash USING btree (a) WHERE (value_plus_one(c) > 10) + multi_index_statements | index_test_hash | index_test_hash_a_idx1 | | CREATE INDEX index_test_hash_a_idx1 ON multi_index_statements.index_test_hash USING btree (a) WHERE predicate_stable() + multi_index_statements | index_test_hash | index_test_hash_abs_idx | | CREATE INDEX index_test_hash_abs_idx ON multi_index_statements.index_test_hash USING btree (abs(a)) WHERE (value_plus_one(c) > 10) + multi_index_statements | index_test_hash | index_test_hash_c_expr_a_idx | | CREATE INDEX index_test_hash_c_expr_a_idx ON multi_index_statements.index_test_hash USING btree (c, ((c + 0))) INCLUDE (a) + multi_index_statements | index_test_hash | index_test_hash_expr_idx | | CREATE INDEX index_test_hash_expr_idx ON multi_index_statements.index_test_hash USING btree (((value_plus_one(b) + value_plus_one(c)))) WHERE (value_plus_one(c) > 10) + multi_index_statements | index_test_hash | index_test_hash_expr_idx1 | | CREATE INDEX index_test_hash_expr_idx1 ON multi_index_statements.index_test_hash USING btree (((a_text || b_text))) + multi_index_statements | index_test_hash | index_test_hash_expr_idx2 | | CREATE INDEX index_test_hash_expr_idx2 ON multi_index_statements.index_test_hash USING btree (((a_text || b_text))) WHERE 
(value_plus_one(c) > 10) + multi_index_statements | index_test_hash | index_test_hash_expr_idx3 | | CREATE INDEX index_test_hash_expr_idx3 ON multi_index_statements.index_test_hash USING btree (((a_text || b_text))) WHERE ((a_text || b_text) = 'ttt'::text) + multi_index_statements | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON multi_index_statements.index_test_hash USING btree (a) + multi_index_statements | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON multi_index_statements.index_test_hash USING btree (a, b) + multi_index_statements | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON multi_index_statements.index_test_hash USING btree (a) INCLUDE (b, c) + multi_index_statements | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON multi_index_statements.index_test_hash USING btree (a, b) WHERE (c IS NOT NULL) + multi_index_statements | index_test_hash | index_test_hash_value_plus_one_c_b_idx | | CREATE INDEX index_test_hash_value_plus_one_c_b_idx ON multi_index_statements.index_test_hash USING btree (value_plus_one(a)) INCLUDE (c, b) WHERE (value_plus_one(c) > 10) + multi_index_statements | index_test_hash | index_test_hash_value_plus_one_idx | | CREATE INDEX index_test_hash_value_plus_one_idx ON multi_index_statements.index_test_hash USING btree (value_plus_one(b)) + multi_index_statements | index_test_hash | index_test_hash_value_plus_one_idx1 | | CREATE INDEX index_test_hash_value_plus_one_idx1 ON multi_index_statements.index_test_hash USING btree (value_plus_one(a)) WHERE (c > 10) + multi_index_statements | index_test_hash | index_test_hash_value_plus_one_idx2 | | CREATE INDEX index_test_hash_value_plus_one_idx2 ON multi_index_statements.index_test_hash USING btree (value_plus_one(b)) + multi_index_statements | index_test_hash | index_test_hash_value_plus_one_idx3 | | CREATE INDEX index_test_hash_value_plus_one_idx3 ON multi_index_statements.index_test_hash USING btree (multi_index_statements_2.value_plus_one(b)) + multi_index_statements | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON multi_index_statements.index_test_range USING btree (a) + multi_index_statements | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON multi_index_statements.index_test_range USING btree (a, b) + multi_index_statements | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON multi_index_statements.index_test_range USING btree (a, b) WHERE (c IS NOT NULL) + public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON public.lineitem USING btree (record_ne(lineitem.*, NULL::record)) + public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON public.lineitem USING btree (l_orderkey) + public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey) + public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON public.lineitem USING btree (l_orderkey) + public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON public.lineitem USING btree (l_orderkey) + public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON public.lineitem 
USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date) + public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC) + public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber) + public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) +(30 rows) + +\c - - - :worker_1_port +SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_%' ORDER BY relname LIMIT 1); + count +--------------------------------------------------------------------- + 9 +(1 row) + +SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash_%'; + count +--------------------------------------------------------------------- + 144 +(1 row) + +SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range_%'; + count +--------------------------------------------------------------------- + 6 +(1 row) + +SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append_%'; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- Verify that we actually run the CLUSTER COMMAND +SELECT sum(indisclustered::integer) FROM pg_index WHERE indrelid::regclass::text SIMILAR TO '%\d'; + sum +--------------------------------------------------------------------- + 8 +(1 row) + \c - - - :master_port SET search_path TO multi_index_statements, public; -- Verify that we error out on unsupported statement types diff --git a/src/test/regress/expected/multi_metadata_access.out b/src/test/regress/expected/multi_metadata_access.out index ed11ff12b..1586910f0 100644 --- a/src/test/regress/expected/multi_metadata_access.out +++ b/src/test/regress/expected/multi_metadata_access.out @@ -3,8 +3,6 @@ -- SET citus.next_shard_id TO 1360000; CREATE USER no_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SET ROLE no_access; -- list relations in the citus extension without sufficient privileges SELECT pg_class.oid::regclass diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 8fbd78f65..207daef16 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -61,6 +61,7 @@ ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword'; SELECT unnest(activate_node_snapshot()) order by 1; unnest --------------------------------------------------------------------- + ALTER DATABASE regression OWNER TO postgres; CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres DELETE FROM pg_catalog.pg_dist_colocation DELETE FROM pg_catalog.pg_dist_object @@ -77,7 +78,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') + SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition SET ROLE postgres SET ROLE postgres @@ -90,7 +91,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_local_group SET groupid = 1 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (1, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; -(29 rows) +(30 rows) -- this function is dropped in Citus10, added here for tests SET citus.enable_metadata_sync TO OFF; @@ -125,6 +126,7 @@ UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::re SELECT unnest(activate_node_snapshot()) order by 1; unnest --------------------------------------------------------------------- + ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE public.mx_test_table ADD 
CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -150,7 +152,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') + SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('public.mx_test_table') SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition SET ROLE postgres @@ -166,13 +168,14 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', 
'2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(41 rows) +(42 rows) -- Show that CREATE INDEX commands are included in the activate node snapshot CREATE INDEX mx_index ON mx_test_table(col_2); SELECT unnest(activate_node_snapshot()) order by 1; unnest --------------------------------------------------------------------- + ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -199,7 +202,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') + SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('public.mx_test_table') SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition SET ROLE postgres @@ -215,7 +218,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 
't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(42 rows) +(43 rows) -- Show that schema changes are included in the activate node snapshot CREATE SCHEMA mx_testing_schema; @@ -223,6 +226,7 @@ ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; SELECT unnest(activate_node_snapshot()) order by 1; unnest --------------------------------------------------------------------- + ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -250,7 +254,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') + SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition SET ROLE postgres @@ -266,7 +270,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], 
ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(43 rows) +(44 rows) -- Show that append distributed tables are not included in the activate node snapshot CREATE TABLE non_mx_test_table (col_1 int, col_2 text); @@ -280,6 +284,7 @@ UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table' SELECT unnest(activate_node_snapshot()) order by 1; unnest --------------------------------------------------------------------- + ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -307,7 +312,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') + SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB 
CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition SET ROLE postgres @@ -323,13 +328,14 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(43 rows) +(44 rows) -- Show that range distributed tables are not included in the activate node snapshot UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(activate_node_snapshot()) order by 1; unnest --------------------------------------------------------------------- + ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) @@ -357,7 +363,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') SELECT 
worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') + SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition SET ROLE postgres @@ -373,7 +379,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT 
citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(43 rows) +(44 rows) -- Test start_metadata_sync_to_node and citus_activate_node UDFs -- Ensure that hasmetadata=false for all nodes @@ -1392,16 +1398,6 @@ SELECT master_remove_node('localhost', :worker_2_port); -- the master user needs superuser permissions to change the replication model CREATE USER mx_user WITH SUPERUSER; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -\c - - - :worker_1_port -CREATE USER mx_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -\c - - - :worker_2_port -CREATE USER mx_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. \c - mx_user - :master_port -- Create an mx table as a different user CREATE TABLE mx_table (a int, b BIGSERIAL); @@ -1482,10 +1478,6 @@ NOTICE: dropping metadata on the node (localhost,57638) (1 row) -DROP USER mx_user; -\c - - - :worker_1_port -DROP USER mx_user; -\c - - - :worker_2_port DROP USER mx_user; -- Check that create_reference_table creates the metadata on workers \c - - - :master_port @@ -1853,6 +1845,7 @@ ALTER TABLE test_table ALTER COLUMN id2 SET DEFAULT nextval('mx_test_sequence_1' SELECT unnest(activate_node_snapshot()) order by 1; unnest --------------------------------------------------------------------- + ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.mx_test_sequence_0 OWNER TO postgres ALTER SEQUENCE public.mx_test_sequence_1 OWNER TO postgres @@ -1908,7 +1901,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_0 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_1 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') + SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('mx_test_schema_1.mx_table_1') SELECT worker_create_truncate_trigger('mx_test_schema_2.mx_table_2') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') @@ -1939,7 +1932,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH shard_data(relationname, 
shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(86 rows) +(87 rows) -- shouldn't work since test_table is MX ALTER TABLE test_table ADD COLUMN id3 bigserial; diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out index 47b113378..35158f75b 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -1134,13 +1134,6 @@ SELECT count(*) FROM pg_dist_transaction; -- in which we'll make the remote host unavailable -- first create the new user on all nodes CREATE USER test_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -\c - - - :worker_1_port -CREATE USER test_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -\c - - - :worker_2_port -CREATE USER test_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -- now connect back to the master with the new user \c - test_user - :master_port SET citus.next_shard_id TO 1200015; @@ -1311,13 +1304,6 @@ SELECT * FROM run_command_on_workers('ALTER USER test_user_new RENAME TO test_us DROP TABLE reference_modifying_xacts, hash_modifying_xacts, hash_modifying_xacts_second, reference_failure_test, numbers_hash_failure_test; -SELECT * FROM run_command_on_workers('DROP USER test_user'); - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | DROP ROLE - localhost | 57638 | t | DROP ROLE -(2 rows) - DROP USER test_user; -- set up foreign keys to test transactions with co-located and reference tables BEGIN; diff --git a/src/test/regress/expected/multi_move_mx.out b/src/test/regress/expected/multi_move_mx.out index fec42b594..9b07a9f5f 100644 --- a/src/test/regress/expected/multi_move_mx.out +++ b/src/test/regress/expected/multi_move_mx.out @@ -111,10 +111,10 @@ WHERE ORDER BY shardid LIMIT 1; -ERROR: the force_logical transfer mode is currently unsupported +ERROR: Table 'mx_table_1' is streaming replicated. 
Shards of streaming replicated tables cannot be copied -- Move a shard from worker 1 to worker 2 SELECT - master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port) + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE @@ -128,6 +128,49 @@ LIMIT 1; (1 row) +-- Test changing citus.node_conninfo on the target node affects the +-- CREATE SUBSCRIPTION command for shard move +\c - - - :worker_2_port +ALTER SYSTEM SET citus.node_conninfo TO 'sslrootcert=/non/existing/certificate.crt sslmode=verify-full'; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +\c - - - :master_port +BEGIN; +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_1_port +ORDER BY + shardid +LIMIT 1; +ERROR: source placement must be in active state +ROLLBACK; +\c - - - :worker_2_port +-- before reseting citus.node_conninfo, check that CREATE SUBSCRIPTION +-- with citus_use_authinfo takes into account node_conninfo even when +-- one of host, port, or user parameters are not specified. +-- +-- We need to specify host and port to not get an hba error, so we test +-- only with ommitting user. +CREATE SUBSCRIPTION subs_01 CONNECTION 'host=''localhost'' port=57637' +PUBLICATION pub_01 WITH (citus_use_authinfo=true); +ERROR: could not connect to the publisher: root certificate file "/non/existing/certificate.crt" does not exist +Either provide the file or change sslmode to disable server certificate verification. +ALTER SYSTEM RESET citus.node_conninfo; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +\c - - - :master_port -- Check that the shard and its colocated shard is moved, but not the other shards SELECT logicalrelid, shardid, nodename, nodeport diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index f2ccf29e7..005036f2e 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -28,24 +28,14 @@ SELECT create_distributed_table('singleshard', 'id'); (1 row) -- turn off propagation to avoid Enterprise processing the following section -SET citus.enable_ddl_propagation TO off; CREATE USER full_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER usage_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER read_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER no_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE ROLE some_role; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
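The removed NOTICE and HINT lines above, together with the dropped per-worker CREATE USER statements, reflect that role commands are now propagated from the coordinator to the workers instead of being created on every node by hand. A rough sketch of how that can be spot-checked from the coordinator (the role name verify_role is illustrative only and is not part of the test suite):

-- create the role once, on the coordinator only
CREATE ROLE verify_role LOGIN;
-- with propagation on, each worker is expected to report the role as present
SELECT run_command_on_workers($$ SELECT count(*) FROM pg_roles WHERE rolname = 'verify_role' $$);
-- DROP ROLE is propagated the same way
DROP ROLE verify_role;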
GRANT some_role TO full_access; GRANT some_role TO read_access; +SET citus.enable_ddl_propagation TO off; GRANT ALL ON TABLE test TO full_access; GRANT SELECT ON TABLE test TO read_access; CREATE SCHEMA full_access_user_schema; @@ -54,45 +44,11 @@ GRANT ALL ON SCHEMA full_access_user_schema TO full_access; GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access; SET citus.enable_ddl_propagation TO DEFAULT; \c - - - :worker_1_port -CREATE USER full_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CREATE USER usage_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CREATE USER read_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CREATE USER no_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CREATE ROLE some_role; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -GRANT some_role TO full_access; -GRANT some_role TO read_access; GRANT ALL ON TABLE test_1420000 TO full_access; GRANT SELECT ON TABLE test_1420000 TO read_access; GRANT ALL ON TABLE test_1420002 TO full_access; GRANT SELECT ON TABLE test_1420002 TO read_access; \c - - - :worker_2_port -CREATE USER full_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CREATE USER usage_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CREATE USER read_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CREATE USER no_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CREATE ROLE some_role; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
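The hunks around here still grant privileges on the individual shards (test_1420000 through test_1420003) by hand on each worker while citus.enable_ddl_propagation is off. One way to see what actually ended up on the shard placements, without connecting to every worker, is run_command_on_placements combined with has_table_privilege, the same pattern the multi_multiuser_master_protocol tests use further down. A rough sketch against the distributed table test used in this file:

-- check, from the coordinator, whether full_access can read every placement of 'test'
SELECT * FROM run_command_on_placements('test',
    $$ SELECT has_table_privilege('full_access', '%s', 'select') $$)
ORDER BY nodename, nodeport, shardid;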
-GRANT some_role TO full_access; -GRANT some_role TO read_access; GRANT ALL ON TABLE test_1420001 TO full_access; GRANT SELECT ON TABLE test_1420001 TO read_access; GRANT ALL ON TABLE test_1420003 TO full_access; @@ -774,6 +730,27 @@ DROP TABLE test, test_coloc, colocation_table; +SELECT run_command_on_workers($$DROP OWNED BY full_access$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"DROP OWNED") + (localhost,57638,t,"DROP OWNED") +(2 rows) + +SELECT run_command_on_workers($$DROP OWNED BY some_role$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"DROP OWNED") + (localhost,57638,t,"DROP OWNED") +(2 rows) + +SELECT run_command_on_workers($$DROP OWNED BY read_access$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"DROP OWNED") + (localhost,57638,t,"DROP OWNED") +(2 rows) + DROP USER full_access; DROP USER read_access; DROP USER no_access; diff --git a/src/test/regress/expected/multi_multiuser_auth.out b/src/test/regress/expected/multi_multiuser_auth.out new file mode 100644 index 000000000..dcbc49fee --- /dev/null +++ b/src/test/regress/expected/multi_multiuser_auth.out @@ -0,0 +1,183 @@ +-- +-- MULTI_MULTIUSER_AUTH +-- +-- Test authentication logic +-- +-- We'll test different passwords for every user/host combo +\set alice_master_pw mug-loth-oaf-mailman-languor +\set alice_worker_1_pw ratline-belabor-fatigue-chive-esdras +\set alice_worker_2_pw boogie-chick-asterisk-nevus-sonny +\set alice_fallback_pw :alice_worker_2_pw +\set bob_master_pw nimbly-milepost-sandbag-cucumber-marksman +\set bob_worker_1_pw triplex-royalty-warranty-stand-cheek +\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile +\set bob_fallback_pw :bob_worker_1_pw +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; + worker_1_id +--------------------------------------------------------------------- + 16 +(1 row) + +\gset +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; + worker_2_id +--------------------------------------------------------------------- + 18 +(1 row) + +\gset +-- alice is a superuser so she can update own password +CREATE USER alice PASSWORD :'alice_master_pw' SUPERUSER; +CREATE USER bob PASSWORD :'bob_master_pw'; +-- note we enter a wrong password for Alice to test cache invalidation +INSERT INTO pg_dist_authinfo (nodeid, rolename, authinfo) VALUES +(-1, 'alice', 'password=' || :'alice_master_pw'), +(:worker_1_id, 'alice', 'password=' || 'wrong_password'), +(0, 'alice', 'password=' || :'alice_fallback_pw'), +(-1, 'bob', 'password=' || :'bob_master_pw'), +(0, 'bob', 'password=' || :'bob_fallback_pw'), +(:worker_2_id, 'bob', 'password=' || :'bob_worker_2_pw'); +\c - - - :worker_1_port +set citus.enable_ddl_propagation to off; +ALTER ROLE alice PASSWORD :'alice_worker_1_pw' SUPERUSER; +ALTER ROLE bob PASSWORD :'bob_worker_1_pw'; +reset citus.enable_ddl_propagation; +-- note the wrong password for loopbacks here; task-tracker will fail +INSERT INTO pg_dist_authinfo (nodeid, rolename, authinfo) VALUES +(0, 'alice', 'password=dummy'), +(-1, 'alice', 'password=' || 'wrong_password'), +(-1, 'bob', 'password=' || :'bob_worker_1_pw'), +(0, 'bob', 'password=' || :'bob_worker_2_pw') +; +\c - - - :worker_2_port +set citus.enable_ddl_propagation to off; +ALTER ROLE alice PASSWORD 
:'alice_worker_2_pw' SUPERUSER; +ALTER ROLE bob PASSWORD :'bob_worker_2_pw'; +reset citus.enable_ddl_propagation; +INSERT INTO pg_dist_authinfo (nodeid, rolename, authinfo) VALUES +(0, 'alice', 'password=dummy'), +(-1, 'alice', 'password=' || 'wrong_password'), +(-1, 'bob', 'password=' || :'bob_worker_2_pw'), +(0, 'bob', 'password=' || :'bob_worker_1_pw') +; +\c - - - :master_port +-- build format strings to specify PW +SELECT format('user=%s host=localhost port=%s password=%s dbname=regression', + 'alice', :master_port, :'alice_master_pw') AS alice_conninfo; + alice_conninfo +--------------------------------------------------------------------- + user=alice host=localhost port=xxxxx password=mug-loth-oaf-mailman-languor dbname=regression +(1 row) + +\gset +SELECT format('user=%s host=localhost port=%s password=%s dbname=regression', + 'bob', :master_port, :'bob_master_pw') AS bob_conninfo; + bob_conninfo +--------------------------------------------------------------------- + user=bob host=localhost port=xxxxx password=nimbly-milepost-sandbag-cucumber-marksman dbname=regression +(1 row) + +\gset +GRANT ALL ON TABLE lineitem, orders, lineitem, customer, nation, part, supplier TO alice, bob; +\c :alice_conninfo +-- router query (should break because of bad password) +INSERT INTO customer VALUES (12345, 'name', NULL, 5, 'phone', 123.45, 'segment', 'comment'); +ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: password authentication failed for user "alice" +-- fix alice's worker1 password ... +UPDATE pg_dist_authinfo +SET authinfo = ('password=' || :'alice_worker_1_pw') +WHERE nodeid = :worker_1_id AND rolename = 'alice'; +-- and try again because cache should clear, should +-- just get invalid constraint this time, no bad pw +INSERT INTO customer VALUES (12345, 'name', NULL, 5, 'phone', 123.45, 'segment', 'comment'); +ERROR: null value in column "c_address" violates not-null constraint +DETAIL: Failing row contains (12345, name, null, 5, phone , 123.45, segment , comment). 
+CONTEXT: while executing command on localhost:xxxxx +SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders + WHERE l_orderkey = o_orderkey AND l_orderkey > 9030 + GROUP BY o_orderstatus + HAVING sum(l_linenumber) > 1000 + ORDER BY o_orderstatus; + o_orderstatus | sum | avg +--------------------------------------------------------------------- + F | 8559 | 3.0126715945089757 + O | 8904 | 3.0040485829959514 +(2 rows) + +-- fix worker passwords, which should invalidate task tracker caches +\c - postgres - :worker_1_port +UPDATE pg_dist_authinfo +SET authinfo = ('password=' || :'alice_worker_1_pw') +WHERE nodeid = -1 AND rolename = 'alice'; +\c - postgres - :worker_2_port +UPDATE pg_dist_authinfo +SET authinfo = ('password=' || :'alice_worker_2_pw') +WHERE nodeid = -1 AND rolename = 'alice'; +\c :alice_conninfo +SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders + WHERE l_orderkey = o_orderkey AND l_orderkey > 9030 + GROUP BY o_orderstatus + HAVING sum(l_linenumber) > 1000 + ORDER BY o_orderstatus; + o_orderstatus | sum | avg +--------------------------------------------------------------------- + F | 8559 | 3.0126715945089757 + O | 8904 | 3.0040485829959514 +(2 rows) + +-- adaptive query +RESET citus.task_executor_type; +SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders + WHERE l_orderkey = o_orderkey AND l_orderkey > 9030 + GROUP BY o_orderstatus + HAVING sum(l_linenumber) > 1000 + ORDER BY o_orderstatus; + o_orderstatus | sum | avg +--------------------------------------------------------------------- + F | 8559 | 3.0126715945089757 + O | 8904 | 3.0040485829959514 +(2 rows) + +-- create and distribute table +CREATE TABLE wonderland (id integer, name text); +SELECT create_distributed_table('wonderland', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- copy +COPY wonderland FROM STDIN WITH (FORMAT 'csv'); +SELECT COUNT(*) FROM wonderland; + count +--------------------------------------------------------------------- + 3 +(1 row) + +DROP TABLE wonderland; +\c :bob_conninfo +-- bob can't change authinfo: not a superuser +DELETE FROM pg_dist_authinfo WHERE rolename = 'bob'; +ERROR: permission denied for table pg_dist_authinfo +CREATE TABLE bob_lineitem (LIKE lineitem); +SELECT create_distributed_table('bob_lineitem', 'l_orderkey', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO bob_lineitem SELECT * FROM lineitem; +SET citus.enable_repartition_joins TO ON; +SELECT count(*) > 1 from bob_lineitem b , lineitem l where b.l_orderkey = l.l_orderkey LIMIT 10; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT COUNT(*) FROM bob_lineitem; + count +--------------------------------------------------------------------- + 12000 +(1 row) + +DROP TABLE bob_lineitem; diff --git a/src/test/regress/expected/multi_multiuser_basic_queries.out b/src/test/regress/expected/multi_multiuser_basic_queries.out new file mode 100644 index 000000000..93c87cacf --- /dev/null +++ b/src/test/regress/expected/multi_multiuser_basic_queries.out @@ -0,0 +1,177 @@ +-- +-- MULTI_MULTIUSER_BASIC_QUERIES +-- +SET ROLE full_access; +-- Execute simple sum, average, and count queries on data recently uploaded to +-- our partitioned table. 
+SELECT count(*) FROM lineitem; + count +--------------------------------------------------------------------- + 12000 +(1 row) + +SELECT sum(l_extendedprice) FROM lineitem; + sum +--------------------------------------------------------------------- + 457702024.50 +(1 row) + +SELECT avg(l_extendedprice) FROM lineitem; + avg +--------------------------------------------------------------------- + 38141.835375000000 +(1 row) + +RESET ROLE; +-- and again, to check a read-only user can query +SET ROLE read_access; +SET citus.task_executor_type TO 'task-tracker'; +SELECT count(*) FROM lineitem; + count +--------------------------------------------------------------------- + 12000 +(1 row) + +RESET citus.task_executor_type; +SELECT count(*) FROM lineitem; + count +--------------------------------------------------------------------- + 12000 +(1 row) + +-- and yet again, to prove we're failing when a user doesn't have permissions +SET ROLE no_access; +SET citus.task_executor_type TO 'task-tracker'; +SELECT count(*) FROM lineitem; +ERROR: permission denied for table lineitem +RESET citus.task_executor_type; +SELECT count(*) FROM lineitem; +ERROR: permission denied for table lineitem +RESET ROLE; +-- verify that broadcast joins work +SET citus.large_table_shard_count TO 2; +SET ROLE read_access; +SELECT + l_partkey, o_orderkey, count(*) +FROM + lineitem, part, orders, customer +WHERE + l_orderkey = o_orderkey AND + l_partkey = p_partkey AND + c_custkey = o_custkey AND + (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND + p_size > 8 AND o_totalprice > 10 AND + c_acctbal < 5000.0 +GROUP BY + l_partkey, o_orderkey +ORDER BY + l_partkey, o_orderkey +LIMIT 30; + l_partkey | o_orderkey | count +--------------------------------------------------------------------- + 222 | 9413 | 1 + 278 | 1287 | 1 + 309 | 2374 | 1 + 321 | 5984 | 1 + 337 | 10403 | 1 + 364 | 9347 | 1 + 416 | 640 | 1 + 426 | 10855 | 1 + 484 | 3843 | 1 + 510 | 13569 | 1 + 532 | 3175 | 1 + 641 | 134 | 1 + 669 | 10944 | 1 + 716 | 2885 | 1 + 738 | 4355 | 1 + 802 | 2534 | 1 + 824 | 9287 | 1 + 864 | 3175 | 1 + 960 | 10980 | 1 + 963 | 4580 | 1 +(20 rows) + +RESET ROLE; +SET ROLE no_access; +SELECT + l_partkey, o_orderkey, count(*) +FROM + lineitem, part, orders, customer +WHERE + l_orderkey = o_orderkey AND + l_partkey = p_partkey AND + c_custkey = o_custkey AND + (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND + p_size > 8 AND o_totalprice > 10 AND + c_acctbal < 5000.0 +GROUP BY + l_partkey, o_orderkey +ORDER BY + l_partkey, o_orderkey +LIMIT 30; +ERROR: permission denied for table lineitem +RESET ROLE; +-- verify that re-partition queries work +SET citus.large_table_shard_count TO 1; +SET citus.task_executor_type TO 'task-tracker'; +SET ROLE read_access; +SELECT + l_partkey, o_orderkey, count(*) +FROM + lineitem, part, orders, customer +WHERE + l_orderkey = o_orderkey AND + l_partkey = p_partkey AND + c_custkey = o_custkey AND + (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND + p_size > 8 AND o_totalprice > 10 AND + c_acctbal < 5000.0 +GROUP BY + l_partkey, o_orderkey +ORDER BY + l_partkey, o_orderkey +LIMIT 30; + l_partkey | o_orderkey | count +--------------------------------------------------------------------- + 222 | 9413 | 1 + 278 | 1287 | 1 + 309 | 2374 | 1 + 321 | 5984 | 1 + 337 | 10403 | 1 + 364 | 9347 | 1 + 416 | 640 | 1 + 426 | 10855 | 1 + 484 | 3843 | 1 + 510 | 13569 | 1 + 532 | 3175 | 1 + 641 | 134 | 1 + 669 | 10944 | 1 + 716 | 2885 | 1 + 738 | 4355 | 1 + 802 | 2534 | 1 + 824 | 9287 | 1 + 864 | 3175 | 1 + 960 | 10980 | 1 + 
963 | 4580 | 1 +(20 rows) + +RESET ROLE; +SET ROLE no_access; +SELECT + l_partkey, o_orderkey, count(*) +FROM + lineitem, part, orders, customer +WHERE + l_orderkey = o_orderkey AND + l_partkey = p_partkey AND + c_custkey = o_custkey AND + (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND + p_size > 8 AND o_totalprice > 10 AND + c_acctbal < 5000.0 +GROUP BY + l_partkey, o_orderkey +ORDER BY + l_partkey, o_orderkey +LIMIT 30; +ERROR: permission denied for table lineitem +RESET ROLE; diff --git a/src/test/regress/expected/multi_multiuser_copy.out b/src/test/regress/expected/multi_multiuser_copy.out new file mode 100644 index 000000000..ef27beb70 --- /dev/null +++ b/src/test/regress/expected/multi_multiuser_copy.out @@ -0,0 +1,69 @@ +-- +-- MULTI_MULTIUSER_COPY +-- +-- Create a new hash-partitioned table into which to COPY +CREATE TABLE customer_copy_hash ( + c_custkey integer, + c_name varchar(25) not null, + c_address varchar(40), + c_nationkey integer, + c_phone char(15), + c_acctbal decimal(15,2), + c_mktsegment char(10), + c_comment varchar(117), + primary key (c_custkey)); +SELECT create_distributed_table('customer_copy_hash', 'c_custkey', 'hash'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +GRANT ALL ON TABLE customer_copy_hash TO full_access; +GRANT SELECT ON TABLE customer_copy_hash TO read_access; +-- COPY FROM as superuser +COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; +-- COPY FROM as user with ALL access +SET ROLE full_access; +COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; +; +RESET ROLE; +-- COPY FROM as user with SELECT access, should fail +SET ROLE read_access; +COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; +ERROR: permission denied for table customer_copy_hash +3 customer3 +\. +invalid command \. +; +ERROR: syntax error at or near "3" +RESET ROLE; +-- COPY FROM as user with no access, should fail +SET ROLE no_access; +COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; +ERROR: permission denied for table customer_copy_hash +4 customer4 +\. +invalid command \. 
+; +ERROR: syntax error at or near "4" +RESET ROLE; +-- COPY TO as superuser +COPY (SELECT * FROM customer_copy_hash ORDER BY 1) TO STDOUT; +1 customer1 \N \N \N \N \N \N +2 customer2 \N \N \N \N \N \N +SET ROLE full_access; +COPY (SELECT * FROM customer_copy_hash ORDER BY 1) TO STDOUT; +1 customer1 \N \N \N \N \N \N +2 customer2 \N \N \N \N \N \N +RESET ROLE; +-- COPY FROM as user with SELECT access, should work +SET ROLE read_access; +COPY (SELECT * FROM customer_copy_hash ORDER BY 1) TO STDOUT; +1 customer1 \N \N \N \N \N \N +2 customer2 \N \N \N \N \N \N +RESET ROLE; +-- COPY FROM as user with no access, should fail +SET ROLE no_access; +COPY (SELECT * FROM customer_copy_hash ORDER BY 1) TO STDOUT; +ERROR: permission denied for table customer_copy_hash +RESET ROLE; diff --git a/src/test/regress/expected/multi_multiuser_grant.out b/src/test/regress/expected/multi_multiuser_grant.out new file mode 100644 index 000000000..9edb78c9d --- /dev/null +++ b/src/test/regress/expected/multi_multiuser_grant.out @@ -0,0 +1,66 @@ +-- +-- MULTI_MULTIUSER_GRANT +-- +-- check that after-the-fact REVOKE/GRANTs are taken into account +-- +-- check that permissions are checked on the master and workers +REVOKE ALL ON TABLE customer FROM full_access; +SET ROLE full_access; +SELECT count(*) FROM customer; -- should fail +ERROR: permission denied for table customer +SELECT has_table_privilege('customer', 'SELECT'); + has_table_privilege +--------------------------------------------------------------------- + f +(1 row) + +RESET ROLE; +\c - - - :worker_1_port +SET ROLE full_access; +SELECT oid, relname, relacl FROM pg_class WHERE relkind = 'r' AND relname LIKE 'customer_%' AND has_table_privilege(oid, 'SELECT'); + oid | relname | relacl +--------------------------------------------------------------------- +(0 rows) + +SELECT count(*) FROM pg_class WHERE relkind = 'r' AND relname LIKE 'customer_%' AND NOT has_table_privilege(oid, 'SELECT'); + count +--------------------------------------------------------------------- + 2 +(1 row) + +RESET ROLE; +-- check that GRANT command obeys citus.enable_ddl_propagation setting +\c - - - :master_port +SET citus.enable_ddl_propagation TO 'off'; +GRANT ALL ON TABLE customer TO full_access; +SET ROLE full_access; +SELECT has_table_privilege('customer', 'SELECT'); -- should be true + has_table_privilege +--------------------------------------------------------------------- + t +(1 row) + +\c - - - :worker_1_port +SET ROLE full_access; +SELECT oid, relname, relacl FROM pg_class WHERE relkind = 'r' AND relname LIKE 'customer_%' AND has_table_privilege(oid, 'SELECT'); + oid | relname | relacl +--------------------------------------------------------------------- +(0 rows) + +SELECT count(*) FROM pg_class WHERE relkind = 'r' AND relname LIKE 'customer_%' AND NOT has_table_privilege(oid, 'SELECT'); + count +--------------------------------------------------------------------- + 2 +(1 row) + +\c - - - :master_port +SET citus.enable_ddl_propagation TO 'on'; +GRANT ALL ON TABLE customer TO full_access; +SET ROLE full_access; +SELECT count(*) FROM customer; -- should work again + count +--------------------------------------------------------------------- + 1000 +(1 row) + +RESET ROLE; diff --git a/src/test/regress/expected/multi_multiuser_master_protocol.out b/src/test/regress/expected/multi_multiuser_master_protocol.out new file mode 100644 index 000000000..34389db3a --- /dev/null +++ b/src/test/regress/expected/multi_multiuser_master_protocol.out @@ -0,0 +1,533 @@ +-- +-- 
MULTI_MULTIUSER_MASTER_PROTOCOL +-- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 109079; +-- Tests that check the metadata returned by the master node. At the +-- same time ensure that any user, not just a superuser, can call +-- these. Note that, for now at least, any user can call these. That's +-- OK-ish, since the schema is visible from the catalogs anyway, and +-- exhausting shardids doesn't seem like a super viable attack path. +SET ROLE no_access; +SELECT * FROM master_get_table_ddl_events('lineitem') order by 1; + master_get_table_ddl_events +--------------------------------------------------------------------- + ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber) + ALTER TABLE public.lineitem OWNER TO postgres + CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) + CREATE TABLE public.lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL) + GRANT DELETE ON public.lineitem TO full_access + GRANT DELETE ON public.lineitem TO postgres + GRANT INSERT ON public.lineitem TO full_access + GRANT INSERT ON public.lineitem TO postgres + GRANT REFERENCES ON public.lineitem TO full_access + GRANT REFERENCES ON public.lineitem TO postgres + GRANT SELECT ON public.lineitem TO full_access + GRANT SELECT ON public.lineitem TO postgres + GRANT SELECT ON public.lineitem TO read_access + GRANT TRIGGER ON public.lineitem TO full_access + GRANT TRIGGER ON public.lineitem TO postgres + GRANT TRUNCATE ON public.lineitem TO full_access + GRANT TRUNCATE ON public.lineitem TO postgres + GRANT UPDATE ON public.lineitem TO full_access + GRANT UPDATE ON public.lineitem TO postgres + REVOKE ALL ON public.lineitem FROM PUBLIC +(20 rows) + +SELECT * FROM master_get_new_shardid(); + master_get_new_shardid +--------------------------------------------------------------------- + 109079 +(1 row) + +SELECT * FROM master_get_active_worker_nodes(); + node_name | node_port +--------------------------------------------------------------------- + localhost | 57638 + localhost | 57637 +(2 rows) + +RESET ROLE; +-- ensure GRANT/REVOKE's do something sane for creating shards of +CREATE TABLE checkperm(key int); +SELECT master_create_distributed_table('checkperm', 'key', 'append'); + master_create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM master_get_table_ddl_events('checkperm'); + master_get_table_ddl_events +--------------------------------------------------------------------- + CREATE TABLE public.checkperm (key integer) + ALTER TABLE public.checkperm OWNER TO postgres +(2 rows) + +REVOKE ALL ON checkperm FROM PUBLIC; +SELECT * FROM master_get_table_ddl_events('checkperm'); + master_get_table_ddl_events +--------------------------------------------------------------------- + CREATE TABLE public.checkperm (key integer) + REVOKE ALL ON public.checkperm FROM PUBLIC + GRANT INSERT ON public.checkperm TO postgres + GRANT SELECT ON public.checkperm TO postgres + GRANT UPDATE ON public.checkperm TO postgres 
+ GRANT DELETE ON public.checkperm TO postgres + GRANT TRUNCATE ON public.checkperm TO postgres + GRANT REFERENCES ON public.checkperm TO postgres + GRANT TRIGGER ON public.checkperm TO postgres + ALTER TABLE public.checkperm OWNER TO postgres +(10 rows) + +GRANT SELECT ON checkperm TO read_access; +GRANT ALL ON checkperm TO full_access; +SELECT * FROM master_get_table_ddl_events('checkperm'); + master_get_table_ddl_events +--------------------------------------------------------------------- + CREATE TABLE public.checkperm (key integer) + REVOKE ALL ON public.checkperm FROM PUBLIC + GRANT INSERT ON public.checkperm TO postgres + GRANT SELECT ON public.checkperm TO postgres + GRANT UPDATE ON public.checkperm TO postgres + GRANT DELETE ON public.checkperm TO postgres + GRANT TRUNCATE ON public.checkperm TO postgres + GRANT REFERENCES ON public.checkperm TO postgres + GRANT TRIGGER ON public.checkperm TO postgres + GRANT SELECT ON public.checkperm TO read_access + GRANT INSERT ON public.checkperm TO full_access + GRANT SELECT ON public.checkperm TO full_access + GRANT UPDATE ON public.checkperm TO full_access + GRANT DELETE ON public.checkperm TO full_access + GRANT TRUNCATE ON public.checkperm TO full_access + GRANT REFERENCES ON public.checkperm TO full_access + GRANT TRIGGER ON public.checkperm TO full_access + ALTER TABLE public.checkperm OWNER TO postgres +(18 rows) + +REVOKE ALL ON checkperm FROM read_access; +GRANT SELECT ON checkperm TO PUBLIC; +SELECT * FROM master_get_table_ddl_events('checkperm'); + master_get_table_ddl_events +--------------------------------------------------------------------- + CREATE TABLE public.checkperm (key integer) + REVOKE ALL ON public.checkperm FROM PUBLIC + GRANT INSERT ON public.checkperm TO postgres + GRANT SELECT ON public.checkperm TO postgres + GRANT UPDATE ON public.checkperm TO postgres + GRANT DELETE ON public.checkperm TO postgres + GRANT TRUNCATE ON public.checkperm TO postgres + GRANT REFERENCES ON public.checkperm TO postgres + GRANT TRIGGER ON public.checkperm TO postgres + GRANT INSERT ON public.checkperm TO full_access + GRANT SELECT ON public.checkperm TO full_access + GRANT UPDATE ON public.checkperm TO full_access + GRANT DELETE ON public.checkperm TO full_access + GRANT TRUNCATE ON public.checkperm TO full_access + GRANT REFERENCES ON public.checkperm TO full_access + GRANT TRIGGER ON public.checkperm TO full_access + GRANT SELECT ON public.checkperm TO PUBLIC + ALTER TABLE public.checkperm OWNER TO postgres +(18 rows) + +GRANT ALL ON checkperm TO full_access WITH GRANT OPTION; +SELECT * FROM master_get_table_ddl_events('checkperm'); + master_get_table_ddl_events +--------------------------------------------------------------------- + CREATE TABLE public.checkperm (key integer) + REVOKE ALL ON public.checkperm FROM PUBLIC + GRANT INSERT ON public.checkperm TO postgres + GRANT SELECT ON public.checkperm TO postgres + GRANT UPDATE ON public.checkperm TO postgres + GRANT DELETE ON public.checkperm TO postgres + GRANT TRUNCATE ON public.checkperm TO postgres + GRANT REFERENCES ON public.checkperm TO postgres + GRANT TRIGGER ON public.checkperm TO postgres + GRANT INSERT ON public.checkperm TO full_access WITH GRANT OPTION + GRANT SELECT ON public.checkperm TO full_access WITH GRANT OPTION + GRANT UPDATE ON public.checkperm TO full_access WITH GRANT OPTION + GRANT DELETE ON public.checkperm TO full_access WITH GRANT OPTION + GRANT TRUNCATE ON public.checkperm TO full_access WITH GRANT OPTION + GRANT REFERENCES ON public.checkperm TO 
full_access WITH GRANT OPTION + GRANT TRIGGER ON public.checkperm TO full_access WITH GRANT OPTION + GRANT SELECT ON public.checkperm TO PUBLIC + ALTER TABLE public.checkperm OWNER TO postgres +(18 rows) + +-- create table as superuser/postgres +CREATE TABLE trivial_postgres (id int); +SELECT master_create_distributed_table('trivial_postgres', 'id', 'append'); + master_create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +GRANT ALL ON trivial_postgres TO full_access; +SET ROLE full_access; +CREATE TABLE trivial_full_access (id int); +SELECT master_create_distributed_table('trivial_full_access', 'id', 'append'); + master_create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +RESET ROLE; +SELECT relname, rolname, relacl FROM pg_class JOIN pg_roles ON (pg_roles.oid = pg_class.relowner) WHERE relname LIKE 'trivial%' ORDER BY relname; + relname | rolname | relacl +--------------------------------------------------------------------- + trivial_full_access | full_access | + trivial_postgres | postgres | {postgres=arwdDxt/postgres,full_access=arwdDxt/postgres} +(2 rows) + +SET citus.shard_replication_factor = 2; -- on all workers... +-- create shards as each user, verify ownership +SELECT master_create_empty_shard('trivial_postgres'); + master_create_empty_shard +--------------------------------------------------------------------- + 109080 +(1 row) + +SELECT master_create_empty_shard('trivial_full_access'); + master_create_empty_shard +--------------------------------------------------------------------- + 109081 +(1 row) + +SET ROLE full_access; +SELECT master_create_empty_shard('trivial_postgres'); + master_create_empty_shard +--------------------------------------------------------------------- + 109082 +(1 row) + +SELECT master_create_empty_shard('trivial_full_access'); + master_create_empty_shard +--------------------------------------------------------------------- + 109083 +(1 row) + +RESET ROLE; +SET ROLE full_access; +SELECT master_create_empty_shard('trivial_postgres'); + master_create_empty_shard +--------------------------------------------------------------------- + 109084 +(1 row) + +SELECT master_create_empty_shard('trivial_full_access'); + master_create_empty_shard +--------------------------------------------------------------------- + 109085 +(1 row) + +RESET ROLE; +\c - - - :worker_1_port +SELECT relname, rolname, relacl FROM pg_class JOIN pg_roles ON (pg_roles.oid = pg_class.relowner) WHERE relname LIKE 'trivial%' ORDER BY relname; + relname | rolname | relacl +--------------------------------------------------------------------- + trivial_full_access_109081 | full_access | + trivial_full_access_109083 | full_access | + trivial_full_access_109085 | full_access | + trivial_postgres_109080 | postgres | {postgres=arwdDxt/postgres,full_access=arwdDxt/postgres} + trivial_postgres_109082 | postgres | {postgres=arwdDxt/postgres,full_access=arwdDxt/postgres} + trivial_postgres_109084 | postgres | {postgres=arwdDxt/postgres,full_access=arwdDxt/postgres} +(6 rows) + +\c - - - :worker_2_port +SELECT relname, rolname, relacl FROM pg_class JOIN pg_roles ON (pg_roles.oid = pg_class.relowner) WHERE relname LIKE 'trivial%' ORDER BY relname; + relname | rolname | relacl +--------------------------------------------------------------------- + trivial_full_access_109081 | full_access | + trivial_full_access_109083 | full_access | + trivial_full_access_109085 | full_access | + 
trivial_postgres_109080 | postgres | {postgres=arwdDxt/postgres,full_access=arwdDxt/postgres} + trivial_postgres_109082 | postgres | {postgres=arwdDxt/postgres,full_access=arwdDxt/postgres} + trivial_postgres_109084 | postgres | {postgres=arwdDxt/postgres,full_access=arwdDxt/postgres} +(6 rows) + +\c - - - :master_port +-- ensure COPY into append tables works +CREATE TABLE stage_postgres(id) AS SELECT 2; +GRANT ALL ON stage_postgres TO full_access; +SET ROLE full_access; +CREATE TABLE stage_full_access(id) AS SELECT 1; +RESET ROLE; +SELECT master_create_empty_shard('trivial_postgres') AS shardid \gset +COPY trivial_postgres FROM STDIN WITH (append_to_shard :shardid); +SELECT master_create_empty_shard('trivial_full_access') AS shardid \gset +COPY trivial_full_access FROM STDIN WITH (append_to_shard :shardid); +SET ROLE full_access; +SELECT master_create_empty_shard('trivial_postgres') AS shardid \gset +COPY trivial_postgres FROM STDIN WITH (append_to_shard :shardid); +SELECT master_create_empty_shard('trivial_full_access') AS shardid \gset +COPY trivial_full_access FROM STDIN WITH (append_to_shard :shardid); +RESET ROLE; +SELECT * FROM trivial_postgres ORDER BY id; + id +--------------------------------------------------------------------- + 1 + 1 + 2 + 2 +(4 rows) + +SELECT * FROM trivial_full_access ORDER BY id; + id +--------------------------------------------------------------------- + 1 + 1 + 2 + 2 +(4 rows) + +SET ROLE full_access; +SELECT * FROM trivial_postgres ORDER BY id; + id +--------------------------------------------------------------------- + 1 + 1 + 2 + 2 +(4 rows) + +SELECT * FROM trivial_full_access ORDER BY id; + id +--------------------------------------------------------------------- + 1 + 1 + 2 + 2 +(4 rows) + +RESET ROLE; +-- verify column level grants are not supported +GRANT UPDATE (id) ON trivial_postgres TO read_access; +ERROR: grant/revoke on column list is currently unsupported +DROP TABLE trivial_full_access; +DROP TABLE trivial_postgres; +DROP TABLE stage_full_access; +DROP TABLE stage_postgres; +-- test GRANT/REVOKE on all tables in schema +CREATE SCHEMA multiuser_schema; +CREATE TABLE multiuser_schema.hash_table(a int, b int); +CREATE TABLE multiuser_schema.reference_table(a int, b int); +SELECT create_distributed_table('multiuser_schema.hash_table', 'a', colocate_with => 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- usage right must be granted to user +GRANT USAGE ON SCHEMA multiuser_schema TO read_access; +-- verify test user (read_access) does not have select privilege on both tables +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109090 | t | f + localhost | 57637 | 109091 | t | f + localhost | 57637 | 109092 | t | f + localhost | 57637 | 109093 | t | f + localhost | 57638 | 109090 | t | f + localhost | 57638 | 109091 | t | f + localhost | 57638 | 109092 | t | f + localhost | 57638 | 109093 | t | f +(8 rows) + +-- grant select +GRANT SELECT ON ALL TABLES IN SCHEMA multiuser_schema TO read_access; +-- verify select is granted +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename 
| nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109090 | t | t + localhost | 57637 | 109091 | t | t + localhost | 57637 | 109092 | t | t + localhost | 57637 | 109093 | t | t + localhost | 57638 | 109090 | t | t + localhost | 57638 | 109091 | t | t + localhost | 57638 | 109092 | t | t + localhost | 57638 | 109093 | t | t +(8 rows) + +-- distribute the second table +SELECT create_reference_table('multiuser_schema.reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +-- verify select is also granted +SELECT * FROM run_command_on_placements('multiuser_schema.reference_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109094 | t | t + localhost | 57638 | 109094 | t | t +(2 rows) + +-- create another table in the schema, verify select is not granted +CREATE TABLE multiuser_schema.another_table(a int, b int); +SELECT create_distributed_table('multiuser_schema.another_table', 'a', colocate_with => 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM run_command_on_placements('multiuser_schema.another_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109095 | t | f + localhost | 57637 | 109096 | t | f + localhost | 57637 | 109097 | t | f + localhost | 57637 | 109098 | t | f + localhost | 57638 | 109095 | t | f + localhost | 57638 | 109096 | t | f + localhost | 57638 | 109097 | t | f + localhost | 57638 | 109098 | t | f +(8 rows) + +-- grant select again, verify it is granted +GRANT SELECT ON ALL TABLES IN SCHEMA multiuser_schema TO read_access; +SELECT * FROM run_command_on_placements('multiuser_schema.another_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109095 | t | t + localhost | 57637 | 109096 | t | t + localhost | 57637 | 109097 | t | t + localhost | 57637 | 109098 | t | t + localhost | 57638 | 109095 | t | t + localhost | 57638 | 109096 | t | t + localhost | 57638 | 109097 | t | t + localhost | 57638 | 109098 | t | t +(8 rows) + +-- verify isolate tenant carries grants +SELECT isolate_tenant_to_new_shard('multiuser_schema.hash_table', 5); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 109100 +(1 row) + +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109091 | t | t + localhost | 57637 | 109092 | t | t + localhost | 57637 | 109093 | t | t + localhost | 57637 | 109099 | t | t + localhost | 57637 | 109100 | t | t + localhost | 57637 | 109101 | t | t + localhost | 57638 | 109091 | t | t + localhost | 57638 | 109092 | t | t + 
localhost | 57638 | 109093 | t | t + localhost | 57638 | 109099 | t | t + localhost | 57638 | 109100 | t | t + localhost | 57638 | 109101 | t | t +(12 rows) + +-- revoke select +REVOKE SELECT ON ALL TABLES IN SCHEMA multiuser_schema FROM read_access; +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109091 | t | f + localhost | 57637 | 109092 | t | f + localhost | 57637 | 109093 | t | f + localhost | 57637 | 109099 | t | f + localhost | 57637 | 109100 | t | f + localhost | 57637 | 109101 | t | f + localhost | 57638 | 109091 | t | f + localhost | 57638 | 109092 | t | f + localhost | 57638 | 109093 | t | f + localhost | 57638 | 109099 | t | f + localhost | 57638 | 109100 | t | f + localhost | 57638 | 109101 | t | f +(12 rows) + +-- test multi-schema grants +CREATE SCHEMA multiuser_second_schema; +CREATE TABLE multiuser_second_schema.hash_table(a int, b int); +SELECT create_distributed_table('multiuser_second_schema.hash_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +GRANT ALL ON ALL TABLES IN SCHEMA multiuser_schema, multiuser_second_schema TO read_access; +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109091 | t | t + localhost | 57637 | 109092 | t | t + localhost | 57637 | 109093 | t | t + localhost | 57637 | 109099 | t | t + localhost | 57637 | 109100 | t | t + localhost | 57637 | 109101 | t | t + localhost | 57638 | 109091 | t | t + localhost | 57638 | 109092 | t | t + localhost | 57638 | 109093 | t | t + localhost | 57638 | 109099 | t | t + localhost | 57638 | 109100 | t | t + localhost | 57638 | 109101 | t | t +(12 rows) + +SELECT * FROM run_command_on_placements('multiuser_second_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109102 | t | t + localhost | 57637 | 109103 | t | t + localhost | 57637 | 109104 | t | t + localhost | 57637 | 109105 | t | t + localhost | 57638 | 109102 | t | t + localhost | 57638 | 109103 | t | t + localhost | 57638 | 109104 | t | t + localhost | 57638 | 109105 | t | t +(8 rows) + +-- revoke from multiple schemas, verify result +REVOKE SELECT ON ALL TABLES IN SCHEMA multiuser_schema, multiuser_second_schema FROM read_access; +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109091 | t | f + localhost | 57637 | 109092 | t | f + localhost | 57637 | 109093 | t | f + localhost | 57637 | 109099 | t | f + localhost | 57637 | 109100 | t | f + localhost | 57637 | 109101 | t | f + localhost | 57638 | 109091 | t | f + localhost | 57638 | 109092 | t | f + localhost | 57638 | 
109093 | t | f + localhost | 57638 | 109099 | t | f + localhost | 57638 | 109100 | t | f + localhost | 57638 | 109101 | t | f +(12 rows) + +SELECT * FROM run_command_on_placements('multiuser_second_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + nodename | nodeport | shardid | success | result +--------------------------------------------------------------------- + localhost | 57637 | 109102 | t | f + localhost | 57637 | 109103 | t | f + localhost | 57637 | 109104 | t | f + localhost | 57637 | 109105 | t | f + localhost | 57638 | 109102 | t | f + localhost | 57638 | 109103 | t | f + localhost | 57638 | 109104 | t | f + localhost | 57638 | 109105 | t | f +(8 rows) + +DROP SCHEMA multiuser_schema CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table multiuser_schema.hash_table +drop cascades to table multiuser_schema.reference_table +drop cascades to table multiuser_schema.another_table +DROP SCHEMA multiuser_second_schema CASCADE; +NOTICE: drop cascades to table multiuser_second_schema.hash_table diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index ff6debaf9..e810b715e 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -2,13 +2,6 @@ CREATE SCHEMA mx_add_coordinator; SET search_path TO mx_add_coordinator,public; SET client_min_messages TO WARNING; CREATE USER reprefuser WITH LOGIN; -SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - SET citus.enable_alter_role_propagation TO ON; -- alter role for other than the extension owner works in enterprise, output differs accordingly ALTER ROLE reprefuser WITH CREATEDB; @@ -76,27 +69,34 @@ SELECT create_reference_table('ref'); (1 row) --- alter role from mx worker isn't propagated \c - - - :worker_1_port +-- alter role from mx worker isn't allowed when alter role propagation is on SET citus.enable_alter_role_propagation TO ON; ALTER ROLE reprefuser WITH CREATEROLE; -select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. 
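The error above is the worker-side counterpart of the propagation setting: while citus.enable_alter_role_propagation is on, a worker refuses to run ALTER ROLE and points at the coordinator. A rough sketch of the coordinator-side path, mirroring the ALTER ROLE reprefuser statement at the top of this file (run from the coordinator, not from the worker session shown here):

-- on the coordinator: the change is propagated to the workers
SET citus.enable_alter_role_propagation TO ON;
ALTER ROLE reprefuser WITH CREATEDB;
-- each worker is then expected to report rolcreatedb = t
SELECT run_command_on_workers($$ SELECT rolcreatedb FROM pg_roles WHERE rolname = 'reprefuser' $$);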
+-- to alter role locally disable alter role propagation first +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE reprefuser WITH CREATEROLE; +SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; rolcreatedb | rolcreaterole --------------------------------------------------------------------- - f | t + t | t (1 row) +RESET citus.enable_alter_role_propagation; \c - - - :worker_2_port -select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; +-- show that altering role locally on worker doesn't propagated to other worker +SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; rolcreatedb | rolcreaterole --------------------------------------------------------------------- - f | f + t | f (1 row) \c - - - :master_port SET search_path TO mx_add_coordinator,public; -SET client_min_messages TO WARNING; -select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; +-- show that altering role locally on worker doesn't propagated to coordinator +SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; rolcreatedb | rolcreaterole --------------------------------------------------------------------- t | f diff --git a/src/test/regress/expected/multi_mx_metadata.out b/src/test/regress/expected/multi_mx_metadata.out index a35b8edb1..28274b0cc 100644 --- a/src/test/regress/expected/multi_mx_metadata.out +++ b/src/test/regress/expected/multi_mx_metadata.out @@ -300,15 +300,6 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; \c - - - :master_port CREATE USER no_access_mx; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -SELECT run_command_on_workers($$CREATE USER no_access_mx;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - SET ROLE no_access_mx; SELECT raise_failed_aclcheck($$ DROP TABLE distributed_mx_table; diff --git a/src/test/regress/expected/multi_poolinfo_usage.out b/src/test/regress/expected/multi_poolinfo_usage.out new file mode 100644 index 000000000..05ebdc7cd --- /dev/null +++ b/src/test/regress/expected/multi_poolinfo_usage.out @@ -0,0 +1,71 @@ +-- +-- MULTI_POOLINFO_USAGE +-- +-- Test pooler info logic +-- +-- Test of ability to override host/port for a node +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 20000000; +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; + worker_1_id +--------------------------------------------------------------------- + 16 +(1 row) + +\gset +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; + worker_2_id +--------------------------------------------------------------------- + 18 +(1 row) + +\gset +CREATE TABLE lotsa_connections (id integer, name text); +SELECT create_distributed_table('lotsa_connections', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO lotsa_connections VALUES (1, 'user'), (2, 'user'), (3, 'user'), (4, 'user'); +SELECT COUNT(*) FROM lotsa_connections; + count +--------------------------------------------------------------------- + 4 +(1 row) + +-- put outright bad values +\set VERBOSITY terse +INSERT INTO pg_dist_poolinfo VALUES (:worker_1_id, 
'host=failhost'), + (:worker_2_id, 'port=9999'); +\c +-- supress OS specific error message +DO $$ +BEGIN + BEGIN + SELECT COUNT(*) FROM lotsa_connections; + EXCEPTION WHEN OTHERS THEN + IF SQLERRM LIKE 'connection to the remote node%%' THEN + RAISE 'failed to execute select'; + END IF; + END; +END; +$$; +ERROR: failed to execute select +-- "re-route" worker one to node two and vice-versa +DELETE FROM pg_dist_poolinfo; +INSERT INTO pg_dist_poolinfo VALUES (:worker_1_id, 'port=' || :worker_2_port), + (:worker_2_id, 'port=' || :worker_1_port); +\c +-- this fails because the shards of one worker won't exist on the other and shards +-- are still looked up using the node name, not the effective connection host +INSERT INTO lotsa_connections VALUES (1, 'user'), (2, 'user'), (3, 'user'), (4, 'user'); +ERROR: relation "public.lotsa_connections_20000000" does not exist +-- tweak poolinfo to use 127.0.0.1 instead of localhost; should work! +DELETE FROM pg_dist_poolinfo; +INSERT INTO pg_dist_poolinfo VALUES (:worker_1_id, 'host=127.0.0.1 port=' || :worker_1_port), + (:worker_2_id, 'host=127.0.0.1 port=' || :worker_2_port); +\c +DELETE FROM lotsa_connections; +DROP TABLE lotsa_connections; +DELETE FROM pg_dist_poolinfo; diff --git a/src/test/regress/expected/multi_poolinfo_usage_0.out b/src/test/regress/expected/multi_poolinfo_usage_0.out new file mode 100644 index 000000000..2ccf077e4 --- /dev/null +++ b/src/test/regress/expected/multi_poolinfo_usage_0.out @@ -0,0 +1,60 @@ +-- +-- MULTI_POOLINFO_USAGE +-- +-- Test pooler info logic +-- +-- Test of ability to override host/port for a node +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 20000000; +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; + worker_1_id +--------------------------------------------------------------------- + 16 +(1 row) + +\gset +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; + worker_2_id +--------------------------------------------------------------------- + 18 +(1 row) + +\gset +CREATE TABLE lotsa_connections (id integer, name text); +SELECT create_distributed_table('lotsa_connections', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO lotsa_connections VALUES (1, 'user'), (2, 'user'), (3, 'user'), (4, 'user'); +SELECT COUNT(*) FROM lotsa_connections; + count +--------------------------------------------------------------------- + 4 +(1 row) + +-- put outright bad values +\set VERBOSITY terse +INSERT INTO pg_dist_poolinfo VALUES (:worker_1_id, 'host=failhost'), + (:worker_2_id, 'port=9999'); +\c +SELECT COUNT(*) FROM lotsa_connections; +ERROR: epoll_ctl() failed: No such file or directory +-- "re-route" worker one to node two and vice-versa +DELETE FROM pg_dist_poolinfo; +INSERT INTO pg_dist_poolinfo VALUES (:worker_1_id, 'port=' || :worker_2_port), + (:worker_2_id, 'port=' || :worker_1_port); +\c +-- this fails because the shards of one worker won't exist on the other and shards +-- are still looked up using the node name, not the effective connection host +INSERT INTO lotsa_connections VALUES (1, 'user'), (2, 'user'), (3, 'user'), (4, 'user'); +ERROR: relation "public.lotsa_connections_20000000" does not exist +-- tweak poolinfo to use 127.0.0.1 instead of localhost; should work! 
+DELETE FROM pg_dist_poolinfo; +INSERT INTO pg_dist_poolinfo VALUES (:worker_1_id, 'host=127.0.0.1 port=' || :worker_1_port), + (:worker_2_id, 'host=127.0.0.1 port=' || :worker_2_port); +\c +DELETE FROM lotsa_connections; +DROP TABLE lotsa_connections; +DELETE FROM pg_dist_poolinfo; diff --git a/src/test/regress/expected/multi_real_time_transaction.out b/src/test/regress/expected/multi_real_time_transaction.out index cedaa205e..633d00dab 100644 --- a/src/test/regress/expected/multi_real_time_transaction.out +++ b/src/test/regress/expected/multi_real_time_transaction.out @@ -401,29 +401,8 @@ ROLLBACK; -- test propagation of SET LOCAL -- gonna need a non-superuser as we'll use RLS to test GUC propagation CREATE USER rls_user; -SELECT run_command_on_workers('CREATE USER rls_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user; GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user; -SELECT run_command_on_workers('GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -- create trigger on one worker to reject access if GUC not \c - - - :worker_1_port SET search_path = 'multi_real_time_transaction'; diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index cc503f766..72209d0c1 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -2451,13 +2451,10 @@ SELECT master_create_worker_shards('failure_test', 2); SET citus.enable_ddl_propagation TO off; CREATE USER router_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; \c - - - :worker_1_port +SET citus.enable_ddl_propagation TO off; CREATE USER router_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; \c - router_user - :master_port -- we will fail to connect to worker 2, since the user does not exist diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index b5255d482..cc52183a7 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -1142,16 +1142,7 @@ SELECT * FROM "citus-teen's scnd schm."."be$t''t*ble"; DROP SCHEMA "cItuS.T E E N'sSchema", "citus-teen's scnd schm." 
CASCADE; NOTICE: drop cascades to table "citus-teen's scnd schm."."be$t''t*ble" -- test schema propagation with user other than current user -SELECT run_command_on_coordinator_and_workers('CREATE USER "test-user"'); -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CONTEXT: SQL statement "CREATE USER "test-user"" -PL/pgSQL function run_command_on_coordinator_and_workers(text) line XX at EXECUTE - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +CREATE USER "test-user"; SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE postgres to "test-user"'); run_command_on_coordinator_and_workers --------------------------------------------------------------------- @@ -1187,12 +1178,7 @@ SELECT run_command_on_workers('DROP OWNED BY "test-user" CASCADE'); (localhost,57638,t,"DROP OWNED") (2 rows) -SELECT run_command_on_coordinator_and_workers('DROP USER "test-user"'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +DROP USER "test-user"; DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text); -- test run_command_on_* UDFs with schema CREATE SCHEMA run_test_schema; diff --git a/src/test/regress/expected/multi_sequence_default.out b/src/test/regress/expected/multi_sequence_default.out index 3303c5d00..c22e8109b 100644 --- a/src/test/regress/expected/multi_sequence_default.out +++ b/src/test/regress/expected/multi_sequence_default.out @@ -523,11 +523,7 @@ SELECT create_distributed_table('seq_test_9', 'x'); -- we can change the owner role of a sequence CREATE ROLE seq_role_0; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE ROLE seq_role_1; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
ALTER SEQUENCE seq_10 OWNER TO seq_role_0; SELECT sequencename, sequenceowner FROM pg_sequences WHERE sequencename = 'seq_10' ORDER BY 1, 2; sequencename | sequenceowner @@ -535,20 +531,6 @@ SELECT sequencename, sequenceowner FROM pg_sequences WHERE sequencename = 'seq_1 seq_10 | seq_role_0 (1 row) -SELECT run_command_on_workers('CREATE ROLE seq_role_0'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - -SELECT run_command_on_workers('CREATE ROLE seq_role_1'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - ALTER TABLE seq_test_9 ALTER COLUMN y SET DEFAULT nextval('seq_10'); ALTER SEQUENCE seq_10 OWNER TO seq_role_1; SELECT sequencename, sequenceowner FROM pg_sequences WHERE sequencename = 'seq_10' ORDER BY 1, 2; @@ -600,13 +582,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); DROP SEQUENCE seq_10 CASCADE; NOTICE: drop cascades to default value for column y of table seq_test_9 DROP ROLE seq_role_0, seq_role_1; -SELECT run_command_on_workers('DROP ROLE IF EXISTS seq_role_0, seq_role_1'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP ROLE") - (localhost,57638,t,"DROP ROLE") -(2 rows) - -- Check some cases when default is defined by -- DEFAULT nextval('seq_name'::text) (not by DEFAULT nextval('seq_name')) SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); diff --git a/src/test/regress/expected/multi_tenant_isolation.out b/src/test/regress/expected/multi_tenant_isolation.out new file mode 100644 index 000000000..baf2869bc --- /dev/null +++ b/src/test/regress/expected/multi_tenant_isolation.out @@ -0,0 +1,1265 @@ +-- +-- MULTI_TENANT_ISOLATION +-- +-- Tests tenant isolation feature +-- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000; +SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id +\gset +ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000; +CREATE SCHEMA "Tenant Isolation"; +SET search_path to "Tenant Isolation"; +CREATE ROLE mx_isolation_role_ent WITH LOGIN; +GRANT ALL ON SCHEMA "Tenant Isolation", public TO mx_isolation_role_ent; +-- connect with this new role +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; +SET citus.shard_replication_factor TO 1; +SET citus.shard_count to 2; +CREATE TABLE lineitem_streaming ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +SELECT create_distributed_table('lineitem_streaming', 'l_orderkey'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE orders_streaming ( + o_orderkey bigint not null primary key, + o_custkey integer not null, + o_orderstatus char(1) not null, + o_totalprice decimal(15,2) not null, + o_orderdate date not null, + 
o_orderpriority char(15) not null, + o_clerk char(15) not null, + o_shippriority integer not null, + o_comment varchar(79) not null); +SELECT create_distributed_table('orders_streaming', 'o_orderkey'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|' +\COPY orders_streaming FROM STDIN WITH DELIMITER '|' +ALTER TABLE lineitem_streaming ADD CONSTRAINT test_constraint + FOREIGN KEY(l_orderkey) REFERENCES orders_streaming(o_orderkey); +-- test failing foreign constraints +\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|' +ERROR: insert or update on table "lineitem_streaming_1230001" violates foreign key constraint "test_constraint_1230001" +DETAIL: Key (l_orderkey)=(128) is not present in table "orders_streaming_1230003". +-- tests for cluster health +SELECT count(*) FROM lineitem_streaming; + count +--------------------------------------------------------------------- + 22 +(1 row) + +SELECT count(*) FROM orders_streaming; + count +--------------------------------------------------------------------- + 7 +(1 row) + +SELECT + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate +FROM + orders_streaming, + lineitem_streaming +WHERE + l_orderkey = o_orderkey +GROUP BY + l_orderkey, + o_orderdate +ORDER BY + revenue DESC, + o_orderdate; + l_orderkey | revenue | o_orderdate +--------------------------------------------------------------------- + 100 | 181042.2683 | 02-28-1998 + 102 | 159639.9677 | 05-09-1997 + 101 | 124074.5328 | 03-17-1996 + 103 | 119741.5469 | 06-20-1996 + 99 | 109604.3256 | 03-13-1994 + -1995148554 | 16890.6816 | 05-08-1995 + -1686493264 | 1988.7134 | 09-05-1997 +(7 rows) + +-- Checks to see if metadata and data are isolated properly. If there are problems in +-- metadata and/or data on workers, these queries should return different results below +-- after tenant isolation operations are applied. 
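Before the isolation calls further below, the shard that currently holds a given tenant can be looked up directly; a sketch assuming the get_shard_id_for_distribution_column() UDF that ships with Citus (table and key taken from this test, query not part of the diffed test file):

-- shard currently holding tenant 100, plus its hash range
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE shardid = get_shard_id_for_distribution_column('lineitem_streaming', 100);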
+SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 99; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 100; + count +--------------------------------------------------------------------- + 5 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101; + count +--------------------------------------------------------------------- + 3 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 102; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 103; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 99; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 100; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 101; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 102; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM pg_dist_shard + WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass + ORDER BY shardminvalue::BIGINT, logicalrelid; + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- + lineitem_streaming | 1230000 | t | -2147483648 | -1 + orders_streaming | 1230002 | t | -2147483648 | -1 + lineitem_streaming | 1230001 | t | 0 | 2147483647 + orders_streaming | 1230003 | t | 0 | 2147483647 +(4 rows) + +-- check without cascade option +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100); +ERROR: cannot isolate tenant because "lineitem_streaming" has colocated tables +HINT: Use CASCADE option to isolate tenants for the colocated tables too. 
Example usage: isolate_tenant_to_new_shard('lineitem_streaming', '100', 'CASCADE') +-- check with an input not castable to bigint +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 'abc', 'CASCADE'); +ERROR: invalid input syntax for integer: "abc" +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230005 +(1 row) + +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 101, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230011 +(1 row) + +-- add an explain check to see if we hit the new isolated shard +EXPLAIN (COSTS false) SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus Adaptive) + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Aggregate + -> Seq Scan on lineitem_streaming_1230011 lineitem_streaming + Filter: (l_orderkey = 101) +(8 rows) + +-- create an MX node +\c - postgres - :master_port +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; +-- test a failing transaction block +BEGIN; +SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230020 +(1 row) + +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 102, 'CASCADE'); +ERROR: table "lineitem_streaming" has already been isolated for the given value +COMMIT; +-- test a rollback transaction block +BEGIN; +SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230026 +(1 row) + +SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230032 +(1 row) + +ROLLBACK; +-- test a succesfull transaction block +BEGIN; +SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230038 +(1 row) + +SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230044 +(1 row) + +COMMIT; +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE'); +ERROR: table "lineitem_streaming" has already been isolated for the given value +SELECT isolate_tenant_to_new_shard('orders_streaming', 101, 'CASCADE'); +ERROR: table "orders_streaming" has already been isolated for the given value +-- test corner cases: hash(-1995148554) = -2147483648 and hash(-1686493264) = 2147483647 +SELECT isolate_tenant_to_new_shard('lineitem_streaming', -1995148554, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230046 +(1 row) + +SELECT isolate_tenant_to_new_shard('orders_streaming', -1686493264, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230053 +(1 row) + 
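After these calls, each isolated tenant should sit in a shard whose hash range covers exactly one value; a sketch of how that could be spot-checked against the same catalog the test queries below (illustrative only, not part of the diffed test file):

-- isolated tenants occupy single-value shards: shardminvalue = shardmaxvalue
SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid IN ('lineitem_streaming'::regclass, 'orders_streaming'::regclass)
  AND shardminvalue = shardmaxvalue
ORDER BY shardminvalue::bigint, logicalrelid;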
+SELECT count(*) FROM orders_streaming WHERE o_orderkey = -1995148554; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = -1686493264; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- tests for cluster health +SELECT count(*) FROM lineitem_streaming; + count +--------------------------------------------------------------------- + 22 +(1 row) + +SELECT count(*) FROM orders_streaming; + count +--------------------------------------------------------------------- + 7 +(1 row) + +SELECT + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate +FROM + orders_streaming, + lineitem_streaming +WHERE + l_orderkey = o_orderkey +GROUP BY + l_orderkey, + o_orderdate +ORDER BY + revenue DESC, + o_orderdate; + l_orderkey | revenue | o_orderdate +--------------------------------------------------------------------- + 100 | 181042.2683 | 02-28-1998 + 102 | 159639.9677 | 05-09-1997 + 101 | 124074.5328 | 03-17-1996 + 103 | 119741.5469 | 06-20-1996 + 99 | 109604.3256 | 03-13-1994 + -1995148554 | 16890.6816 | 05-08-1995 + -1686493264 | 1988.7134 | 09-05-1997 +(7 rows) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 99; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 100; + count +--------------------------------------------------------------------- + 5 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101; + count +--------------------------------------------------------------------- + 3 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 102; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 103; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 99; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 100; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 101; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 102; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM pg_dist_shard + WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass + ORDER BY shardminvalue::BIGINT, logicalrelid; + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- + lineitem_streaming | 1230046 | t | -2147483648 | -2147483648 + orders_streaming | 1230048 | t | -2147483648 | -2147483648 + lineitem_streaming | 1230047 | t | -2147483647 | -136164586 + orders_streaming | 1230049 | t | -2147483647 | -136164586 + lineitem_streaming | 1230041 | t | -136164585 | -136164585 + orders_streaming | 1230044 | t | -136164585 | -136164585 + lineitem_streaming | 1230042 | t | -136164584 
| -85071815 + orders_streaming | 1230045 | t | -136164584 | -85071815 + lineitem_streaming | 1230011 | t | -85071814 | -85071814 + orders_streaming | 1230014 | t | -85071814 | -85071814 + lineitem_streaming | 1230012 | t | -85071813 | -1 + orders_streaming | 1230015 | t | -85071813 | -1 + lineitem_streaming | 1230004 | t | 0 | 108199380 + orders_streaming | 1230007 | t | 0 | 108199380 + lineitem_streaming | 1230005 | t | 108199381 | 108199381 + orders_streaming | 1230008 | t | 108199381 | 108199381 + lineitem_streaming | 1230034 | t | 108199382 | 412880111 + orders_streaming | 1230037 | t | 108199382 | 412880111 + lineitem_streaming | 1230035 | t | 412880112 | 412880112 + orders_streaming | 1230038 | t | 412880112 | 412880112 + lineitem_streaming | 1230050 | t | 412880113 | 2147483646 + orders_streaming | 1230052 | t | 412880113 | 2147483646 + lineitem_streaming | 1230051 | t | 2147483647 | 2147483647 + orders_streaming | 1230053 | t | 2147483647 | 2147483647 +(24 rows) + +SELECT * FROM pg_dist_shard_placement WHERE shardid >= 1230000 ORDER BY nodeport, shardid; + shardid | shardstate | shardlength | nodename | nodeport | placementid +--------------------------------------------------------------------- + 1230011 | 1 | 0 | localhost | 57637 | 100011 + 1230012 | 1 | 0 | localhost | 57637 | 100012 + 1230014 | 1 | 0 | localhost | 57637 | 100014 + 1230015 | 1 | 0 | localhost | 57637 | 100015 + 1230041 | 1 | 0 | localhost | 57637 | 100041 + 1230042 | 1 | 0 | localhost | 57637 | 100042 + 1230044 | 1 | 0 | localhost | 57637 | 100044 + 1230045 | 1 | 0 | localhost | 57637 | 100045 + 1230046 | 1 | 0 | localhost | 57637 | 100046 + 1230047 | 1 | 0 | localhost | 57637 | 100047 + 1230048 | 1 | 0 | localhost | 57637 | 100048 + 1230049 | 1 | 0 | localhost | 57637 | 100049 + 1230004 | 1 | 0 | localhost | 57638 | 100004 + 1230005 | 1 | 0 | localhost | 57638 | 100005 + 1230007 | 1 | 0 | localhost | 57638 | 100007 + 1230008 | 1 | 0 | localhost | 57638 | 100008 + 1230034 | 1 | 0 | localhost | 57638 | 100034 + 1230035 | 1 | 0 | localhost | 57638 | 100035 + 1230037 | 1 | 0 | localhost | 57638 | 100037 + 1230038 | 1 | 0 | localhost | 57638 | 100038 + 1230050 | 1 | 0 | localhost | 57638 | 100050 + 1230051 | 1 | 0 | localhost | 57638 | 100051 + 1230052 | 1 | 0 | localhost | 57638 | 100052 + 1230053 | 1 | 0 | localhost | 57638 | 100053 +(24 rows) + +-- test failing foreign constraints after multiple tenant isolation +\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|' +ERROR: insert or update on table "lineitem_streaming_1230050" violates foreign key constraint "test_constraint_1230050" +DETAIL: Key (l_orderkey)=(128) is not present in table "orders_streaming_1230052". 
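The shard pair named in this error (lineitem_streaming_1230050 referencing orders_streaming_1230052) reflects colocation: the two tables' shards are split together during isolation and keep identical hash ranges, so the shard-level foreign keys stay valid. A sketch of a query that would show the pairing (illustrative only, not part of the diffed test file):

-- colocated shards of the two tables share identical hash ranges
SELECT l.shardid AS lineitem_shard, o.shardid AS orders_shard,
       l.shardminvalue, l.shardmaxvalue
FROM pg_dist_shard l
JOIN pg_dist_shard o
  ON l.shardminvalue = o.shardminvalue AND l.shardmaxvalue = o.shardmaxvalue
WHERE l.logicalrelid = 'lineitem_streaming'::regclass
  AND o.logicalrelid = 'orders_streaming'::regclass
ORDER BY l.shardminvalue::bigint;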
+-- connect to the worker node with metadata +\c - mx_isolation_role_ent - :worker_1_port +SET search_path to "Tenant Isolation"; +-- check mx tables +SELECT count(*) FROM lineitem_streaming; + count +--------------------------------------------------------------------- + 22 +(1 row) + +SELECT count(*) FROM orders_streaming; + count +--------------------------------------------------------------------- + 7 +(1 row) + +SELECT + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate +FROM + orders_streaming, + lineitem_streaming +WHERE + l_orderkey = o_orderkey +GROUP BY + l_orderkey, + o_orderdate +ORDER BY + revenue DESC, + o_orderdate; + l_orderkey | revenue | o_orderdate +--------------------------------------------------------------------- + 100 | 181042.2683 | 02-28-1998 + 102 | 159639.9677 | 05-09-1997 + 101 | 124074.5328 | 03-17-1996 + 103 | 119741.5469 | 06-20-1996 + 99 | 109604.3256 | 03-13-1994 + -1995148554 | 16890.6816 | 05-08-1995 + -1686493264 | 1988.7134 | 09-05-1997 +(7 rows) + +-- check shards +SET citus.override_table_visibility TO false; +\d + List of relations + Schema | Name | Type | Owner +--------------------------------------------------------------------- + Tenant Isolation | lineitem_streaming | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230011 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230012 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230028 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230029 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230030 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230041 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230042 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230046 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230047 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230014 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230015 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230031 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230032 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230033 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230044 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230045 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230048 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230049 | table | mx_isolation_role_ent +(20 rows) + +\c - postgres - :worker_1_port +SET search_path to "Tenant Isolation"; +SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='orders_streaming_1230045'::regclass; + Column | Type | Modifiers +--------------------------------------------------------------------- + o_orderkey | bigint | not null + o_custkey | integer | not null + o_orderstatus | character(1) | not null + o_totalprice | numeric(15,2) | not null + o_orderdate | date | not null + o_orderpriority | character(15) | not null + o_clerk | character(15) | not null + o_shippriority | integer | not null + o_comment | character varying(79) | not null +(9 rows) + +\c - mx_isolation_role_ent - :worker_1_port +SET search_path to "Tenant Isolation"; +-- check MX metadata 
+SELECT * FROM pg_dist_shard + WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass + ORDER BY shardminvalue::BIGINT, logicalrelid; + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- + lineitem_streaming | 1230046 | t | -2147483648 | -2147483648 + orders_streaming | 1230048 | t | -2147483648 | -2147483648 + lineitem_streaming | 1230047 | t | -2147483647 | -136164586 + orders_streaming | 1230049 | t | -2147483647 | -136164586 + lineitem_streaming | 1230041 | t | -136164585 | -136164585 + orders_streaming | 1230044 | t | -136164585 | -136164585 + lineitem_streaming | 1230042 | t | -136164584 | -85071815 + orders_streaming | 1230045 | t | -136164584 | -85071815 + lineitem_streaming | 1230011 | t | -85071814 | -85071814 + orders_streaming | 1230014 | t | -85071814 | -85071814 + lineitem_streaming | 1230012 | t | -85071813 | -1 + orders_streaming | 1230015 | t | -85071813 | -1 + lineitem_streaming | 1230004 | t | 0 | 108199380 + orders_streaming | 1230007 | t | 0 | 108199380 + lineitem_streaming | 1230005 | t | 108199381 | 108199381 + orders_streaming | 1230008 | t | 108199381 | 108199381 + lineitem_streaming | 1230034 | t | 108199382 | 412880111 + orders_streaming | 1230037 | t | 108199382 | 412880111 + lineitem_streaming | 1230035 | t | 412880112 | 412880112 + orders_streaming | 1230038 | t | 412880112 | 412880112 + lineitem_streaming | 1230050 | t | 412880113 | 2147483646 + orders_streaming | 1230052 | t | 412880113 | 2147483646 + lineitem_streaming | 1230051 | t | 2147483647 | 2147483647 + orders_streaming | 1230053 | t | 2147483647 | 2147483647 +(24 rows) + +-- return to master node +\c - mx_isolation_role_ent - :master_port +-- test a distribution type which does not have a sql hash function +SET search_path to "Tenant Isolation"; +SET citus.shard_replication_factor TO 2; +SET citus.shard_count to 2; +CREATE TABLE lineitem_date ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +SELECT create_distributed_table('lineitem_date', 'l_shipdate'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\COPY lineitem_date FROM STDIN WITH DELIMITER '|' +SELECT count(*) FROM lineitem_date; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-05-26'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-07-30'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-01-15'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT 
isolate_tenant_to_new_shard('lineitem_date', '1998-05-26'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230057 +(1 row) + +SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-07-30'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230060 +(1 row) + +SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-01-15'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230063 +(1 row) + +SELECT count(*) FROM lineitem_date; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-05-26'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-07-30'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-01-15'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08'; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- test with invalid shard placements +\c - postgres - :master_port +SET search_path to "Tenant Isolation"; +UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_1_port; +SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-08-08'); +ERROR: cannot isolate tenant because relation "lineitem_date" has an inactive shard placement for the shard xxxxx +HINT: Use master_copy_shard_placement UDF to repair the inactive shard placement. 
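The inactive placement this error refers to is the one the test marks with shardstate = 3 just above; a sketch of how such placements could be listed before repairing and retrying (illustrative only, not part of the diffed test file):

-- placements of lineitem_date shards currently marked inactive (shardstate 3)
SELECT p.shardid, p.nodename, p.nodeport, p.shardstate
FROM pg_dist_shard_placement p
JOIN pg_dist_shard s USING (shardid)
WHERE s.logicalrelid = 'lineitem_date'::regclass
  AND p.shardstate = 3
ORDER BY p.shardid;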
+UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE nodeport = :worker_1_port; +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; +DROP TABLE lineitem_date; +-- test on append distributed table +CREATE TABLE test_append ( + tenant_id integer +); +SELECT create_distributed_table('test_append', 'tenant_id', 'append'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT isolate_tenant_to_new_shard('test_append', 100); +ERROR: cannot isolate tenant because tenant isolation is only support for hash distributed tables +-- check metadata for comparison +SELECT * FROM pg_dist_shard + WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass + ORDER BY shardminvalue::BIGINT, logicalrelid; + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- + lineitem_streaming | 1230046 | t | -2147483648 | -2147483648 + orders_streaming | 1230048 | t | -2147483648 | -2147483648 + lineitem_streaming | 1230047 | t | -2147483647 | -136164586 + orders_streaming | 1230049 | t | -2147483647 | -136164586 + lineitem_streaming | 1230041 | t | -136164585 | -136164585 + orders_streaming | 1230044 | t | -136164585 | -136164585 + lineitem_streaming | 1230042 | t | -136164584 | -85071815 + orders_streaming | 1230045 | t | -136164584 | -85071815 + lineitem_streaming | 1230011 | t | -85071814 | -85071814 + orders_streaming | 1230014 | t | -85071814 | -85071814 + lineitem_streaming | 1230012 | t | -85071813 | -1 + orders_streaming | 1230015 | t | -85071813 | -1 + lineitem_streaming | 1230004 | t | 0 | 108199380 + orders_streaming | 1230007 | t | 0 | 108199380 + lineitem_streaming | 1230005 | t | 108199381 | 108199381 + orders_streaming | 1230008 | t | 108199381 | 108199381 + lineitem_streaming | 1230034 | t | 108199382 | 412880111 + orders_streaming | 1230037 | t | 108199382 | 412880111 + lineitem_streaming | 1230035 | t | 412880112 | 412880112 + orders_streaming | 1230038 | t | 412880112 | 412880112 + lineitem_streaming | 1230050 | t | 412880113 | 2147483646 + orders_streaming | 1230052 | t | 412880113 | 2147483646 + lineitem_streaming | 1230051 | t | 2147483647 | 2147483647 + orders_streaming | 1230053 | t | 2147483647 | 2147483647 +(24 rows) + +-- test failure scenarios with triggers on workers +\c - postgres - :worker_1_port +SET search_path to "Tenant Isolation"; +SET citus.enable_metadata_sync TO OFF; +CREATE OR REPLACE FUNCTION abort_any_command() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + RAISE EXCEPTION 'command % is disabled', tg_tag; +END; +$$; +RESET citus.enable_metadata_sync; +CREATE EVENT TRIGGER abort_ddl ON ddl_command_end + EXECUTE PROCEDURE abort_any_command(); +SET citus.override_table_visibility TO false; +\d + List of relations + Schema | Name | Type | Owner +--------------------------------------------------------------------- + Tenant Isolation | lineitem_streaming | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230011 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230012 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230028 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230029 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230030 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230041 | table | 
mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230042 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230046 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230047 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230014 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230015 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230031 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230032 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230033 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230044 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230045 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230048 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230049 | table | mx_isolation_role_ent +(20 rows) + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; +\set VERBOSITY terse +SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE'); +ERROR: command CREATE TABLE is disabled +\set VERBOSITY default +\c - postgres - :worker_1_port +SET search_path to "Tenant Isolation"; +SET citus.override_table_visibility TO false; +\d + List of relations + Schema | Name | Type | Owner +--------------------------------------------------------------------- + Tenant Isolation | lineitem_streaming | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230011 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230012 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230028 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230029 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230030 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230041 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230042 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230046 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230047 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230014 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230015 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230031 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230032 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230033 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230044 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230045 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230048 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230049 | table | mx_isolation_role_ent +(20 rows) + +DROP EVENT TRIGGER abort_ddl; +-- create a trigger for drops +SET citus.enable_metadata_sync TO OFF; +CREATE OR REPLACE FUNCTION abort_drop_command() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + RAISE EXCEPTION 'command % is disabled', tg_tag; +END; +$$; +RESET citus.enable_metadata_sync; +CREATE EVENT TRIGGER abort_drop ON sql_drop + EXECUTE PROCEDURE abort_drop_command(); +\c - mx_isolation_role_ent - :master_port +SET 
search_path to "Tenant Isolation"; +\set VERBOSITY terse +SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE'); +ERROR: command DROP TABLE is disabled +\set VERBOSITY default +-- check if metadata is changed +SELECT * FROM pg_dist_shard + WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass + ORDER BY shardminvalue::BIGINT, logicalrelid; + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue +--------------------------------------------------------------------- + lineitem_streaming | 1230046 | t | -2147483648 | -2147483648 + orders_streaming | 1230048 | t | -2147483648 | -2147483648 + lineitem_streaming | 1230047 | t | -2147483647 | -136164586 + orders_streaming | 1230049 | t | -2147483647 | -136164586 + lineitem_streaming | 1230041 | t | -136164585 | -136164585 + orders_streaming | 1230044 | t | -136164585 | -136164585 + lineitem_streaming | 1230042 | t | -136164584 | -85071815 + orders_streaming | 1230045 | t | -136164584 | -85071815 + lineitem_streaming | 1230011 | t | -85071814 | -85071814 + orders_streaming | 1230014 | t | -85071814 | -85071814 + lineitem_streaming | 1230012 | t | -85071813 | -1 + orders_streaming | 1230015 | t | -85071813 | -1 + lineitem_streaming | 1230004 | t | 0 | 108199380 + orders_streaming | 1230007 | t | 0 | 108199380 + lineitem_streaming | 1230005 | t | 108199381 | 108199381 + orders_streaming | 1230008 | t | 108199381 | 108199381 + lineitem_streaming | 1230034 | t | 108199382 | 412880111 + orders_streaming | 1230037 | t | 108199382 | 412880111 + lineitem_streaming | 1230035 | t | 412880112 | 412880112 + orders_streaming | 1230038 | t | 412880112 | 412880112 + lineitem_streaming | 1230050 | t | 412880113 | 2147483646 + orders_streaming | 1230052 | t | 412880113 | 2147483646 + lineitem_streaming | 1230051 | t | 2147483647 | 2147483647 + orders_streaming | 1230053 | t | 2147483647 | 2147483647 +(24 rows) + +\c - - - :worker_1_port +SET search_path to "Tenant Isolation"; +-- however, new tables are already created +SET citus.override_table_visibility TO false; +\d + List of relations + Schema | Name | Type | Owner +--------------------------------------------------------------------- + Tenant Isolation | lineitem_streaming | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230011 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230012 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230028 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230029 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230030 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230041 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230042 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230046 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230047 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230071 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230072 | table | mx_isolation_role_ent + Tenant Isolation | lineitem_streaming_1230073 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230014 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230015 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230031 | table | 
mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230032 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230033 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230044 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230045 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230048 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230049 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230074 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230075 | table | mx_isolation_role_ent + Tenant Isolation | orders_streaming_1230076 | table | mx_isolation_role_ent +(26 rows) + +\c - postgres - :worker_1_port +DROP EVENT TRIGGER abort_drop; +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; +-- tests for cluster health +SELECT count(*) FROM lineitem_streaming; + count +--------------------------------------------------------------------- + 22 +(1 row) + +SELECT count(*) FROM orders_streaming; + count +--------------------------------------------------------------------- + 7 +(1 row) + +SELECT + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate +FROM + orders_streaming, + lineitem_streaming +WHERE + l_orderkey = o_orderkey +GROUP BY + l_orderkey, + o_orderdate +ORDER BY + revenue DESC, + o_orderdate; + l_orderkey | revenue | o_orderdate +--------------------------------------------------------------------- + 100 | 181042.2683 | 02-28-1998 + 102 | 159639.9677 | 05-09-1997 + 101 | 124074.5328 | 03-17-1996 + 103 | 119741.5469 | 06-20-1996 + 99 | 109604.3256 | 03-13-1994 + -1995148554 | 16890.6816 | 05-08-1995 + -1686493264 | 1988.7134 | 09-05-1997 +(7 rows) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 99; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 100; + count +--------------------------------------------------------------------- + 5 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101; + count +--------------------------------------------------------------------- + 3 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 102; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 103; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 99; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 100; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 101; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 102; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- test composite types with tenant isolation +set search_path to default; +\c - postgres - :worker_1_port +SET search_path to "Tenant Isolation", public, pg_catalog; +-- ... 
create a test HASH function. Though it is a poor hash function, +-- it is acceptable for our tests +SET citus.enable_metadata_sync TO OFF; +CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int +AS 'SELECT hashtext( ($1.i + $1.i2)::text);' +LANGUAGE SQL +IMMUTABLE +RETURNS NULL ON NULL INPUT; +RESET citus.enable_metadata_sync; +CREATE OPERATOR CLASS cats_op_fam_class +DEFAULT FOR TYPE test_composite_type USING HASH AS +OPERATOR 1 = (test_composite_type, test_composite_type), +FUNCTION 1 test_composite_type_hash(test_composite_type); +\c - - - :worker_2_port +SET search_path to "Tenant Isolation", public, pg_catalog; +-- ... create a test HASH function. Though it is a poor hash function, +-- it is acceptable for our tests +SET citus.enable_metadata_sync TO OFF; +CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int +AS 'SELECT hashtext( ($1.i + $1.i2)::text);' +LANGUAGE SQL +IMMUTABLE +RETURNS NULL ON NULL INPUT; +RESET citus.enable_metadata_sync; +CREATE OPERATOR CLASS cats_op_fam_class +DEFAULT FOR TYPE test_composite_type USING HASH AS +OPERATOR 1 = (test_composite_type, test_composite_type), +FUNCTION 1 test_composite_type_hash(test_composite_type); +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation", public, pg_catalog; +CREATE TABLE composite_table ( + composite_key test_composite_type); +SELECT create_distributed_table('composite_table', 'composite_key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO composite_table VALUES ('(1, 2)'::test_composite_type); +INSERT INTO composite_table VALUES ('(1, 3)'::test_composite_type); +INSERT INTO composite_table VALUES ('(1, 4)'::test_composite_type); +SELECT isolate_tenant_to_new_shard('composite_table', '(1, 3)'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230082 +(1 row) + +SELECT count(*) FROM composite_table WHERE composite_key = '(1, 2)'::test_composite_type; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM composite_table WHERE composite_key = '(1, 3)'::test_composite_type; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM composite_table WHERE composite_key = '(1, 4)'::test_composite_type; + count +--------------------------------------------------------------------- + 1 +(1 row) + +DROP TABLE composite_table; +-- create foreign keys from a reference and distributed table +-- to another distributed table +SET search_path to "Tenant Isolation", public, pg_catalog; +SET citus.shard_replication_factor TO 1; +SET citus.shard_count to 8; +CREATE TABLE test_reference_table_fkey(id int PRIMARY KEY); +SELECT create_reference_table('test_reference_table_fkey'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE test_colocated_table_1(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES test_colocated_table_1(id)); +SELECT create_distributed_table('test_colocated_table_1', 'id', colocate_with => 'NONE'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE test_colocated_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id)); +SELECT 
create_distributed_table('test_colocated_table_2', 'id', colocate_with => 'test_colocated_table_1'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE test_colocated_table_3(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id), FOREIGN KEY(id) REFERENCES test_colocated_table_2(id)); +SELECT create_distributed_table('test_colocated_table_3', 'id', colocate_with => 'test_colocated_table_1'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO test_reference_table_fkey SELECT i FROM generate_series (0, 100) i; +INSERT INTO test_colocated_table_1 SELECT i, i FROM generate_series (0, 100) i; +INSERT INTO test_colocated_table_2 SELECT i, i FROM generate_series (0, 100) i; +INSERT INTO test_colocated_table_3 SELECT i, i FROM generate_series (0, 100) i; +SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 1230113 +(1 row) + +SELECT count(*) FROM test_colocated_table_2; + count +--------------------------------------------------------------------- + 101 +(1 row) + +\c - postgres - :worker_1_port +-- show the foreign keys of the main table & its colocated shard on other tables +SELECT tbl.relname, fk."Constraint", fk."Definition" +FROM pg_catalog.pg_class tbl +JOIN public.table_fkeys fk on tbl.oid = fk.relid +WHERE tbl.relname like 'test_colocated_table_%' +ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + test_colocated_table_1 | test_colocated_table_1_id_fkey | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1(id) + test_colocated_table_1_1230087 | test_colocated_table_1_id_fkey_1230087 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230087(id) + test_colocated_table_1_1230089 | test_colocated_table_1_id_fkey_1230089 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230089(id) + test_colocated_table_1_1230091 | test_colocated_table_1_id_fkey_1230091 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230091(id) + test_colocated_table_1_1230109 | test_colocated_table_1_id_fkey_1230109 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230109(id) + test_colocated_table_1_1230110 | test_colocated_table_1_id_fkey_1230110 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230110(id) + test_colocated_table_1_1230111 | test_colocated_table_1_id_fkey_1230111 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230111(id) + test_colocated_table_2 | test_colocated_table_2_id_fkey | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1(id) + test_colocated_table_2 | test_colocated_table_2_value_1_fkey | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey(id) + test_colocated_table_2_1230095 | test_colocated_table_2_id_fkey_1230095 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230087(id) + test_colocated_table_2_1230095 | test_colocated_table_2_value_1_fkey_1230095 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_2_1230097 | test_colocated_table_2_id_fkey_1230097 | FOREIGN KEY (id) 
REFERENCES "Tenant Isolation".test_colocated_table_1_1230089(id) + test_colocated_table_2_1230097 | test_colocated_table_2_value_1_fkey_1230097 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_2_1230099 | test_colocated_table_2_id_fkey_1230099 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230091(id) + test_colocated_table_2_1230099 | test_colocated_table_2_value_1_fkey_1230099 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_2_1230112 | test_colocated_table_2_id_fkey_1230112 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230109(id) + test_colocated_table_2_1230112 | test_colocated_table_2_value_1_fkey_1230112 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_2_1230113 | test_colocated_table_2_id_fkey_1230113 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230110(id) + test_colocated_table_2_1230113 | test_colocated_table_2_value_1_fkey_1230113 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_2_1230114 | test_colocated_table_2_id_fkey_1230114 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230111(id) + test_colocated_table_2_1230114 | test_colocated_table_2_value_1_fkey_1230114 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_3 | test_colocated_table_3_id_fkey | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1(id) + test_colocated_table_3 | test_colocated_table_3_id_fkey1 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2(id) + test_colocated_table_3 | test_colocated_table_3_value_1_fkey | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey(id) + test_colocated_table_3_1230103 | test_colocated_table_3_id_fkey1_1230103 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230095(id) + test_colocated_table_3_1230103 | test_colocated_table_3_id_fkey_1230103 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230087(id) + test_colocated_table_3_1230103 | test_colocated_table_3_value_1_fkey_1230103 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_3_1230105 | test_colocated_table_3_id_fkey1_1230105 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230097(id) + test_colocated_table_3_1230105 | test_colocated_table_3_id_fkey_1230105 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230089(id) + test_colocated_table_3_1230105 | test_colocated_table_3_value_1_fkey_1230105 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_3_1230107 | test_colocated_table_3_id_fkey1_1230107 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230099(id) + test_colocated_table_3_1230107 | test_colocated_table_3_id_fkey_1230107 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230091(id) + test_colocated_table_3_1230107 | test_colocated_table_3_value_1_fkey_1230107 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_3_1230115 | test_colocated_table_3_id_fkey1_1230115 | FOREIGN KEY (id) REFERENCES "Tenant 
Isolation".test_colocated_table_2_1230112(id) + test_colocated_table_3_1230115 | test_colocated_table_3_id_fkey_1230115 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230109(id) + test_colocated_table_3_1230115 | test_colocated_table_3_value_1_fkey_1230115 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_3_1230116 | test_colocated_table_3_id_fkey1_1230116 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230113(id) + test_colocated_table_3_1230116 | test_colocated_table_3_id_fkey_1230116 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230110(id) + test_colocated_table_3_1230116 | test_colocated_table_3_value_1_fkey_1230116 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) + test_colocated_table_3_1230117 | test_colocated_table_3_id_fkey1_1230117 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_2_1230114(id) + test_colocated_table_3_1230117 | test_colocated_table_3_id_fkey_1230117 | FOREIGN KEY (id) REFERENCES "Tenant Isolation".test_colocated_table_1_1230111(id) + test_colocated_table_3_1230117 | test_colocated_table_3_value_1_fkey_1230117 | FOREIGN KEY (value_1) REFERENCES "Tenant Isolation".test_reference_table_fkey_1230084(id) +(42 rows) + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; +-- +-- Make sure that isolate_tenant_to_new_shard() replicats reference tables +-- when replicate_reference_tables_on_activate is off. +-- +CREATE TABLE ref_table(a int); +SELECT create_reference_table('ref_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +\c - postgres - :master_port +SET search_path to "Tenant Isolation"; +SET citus.replicate_reference_tables_on_activate TO off; +SET client_min_messages TO WARNING; +SELECT 1 FROM master_add_node('localhost', :master_port, groupId=>0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass; + count +--------------------------------------------------------------------- + 2 +(1 row) + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; +SELECT 1 FROM isolate_tenant_to_new_shard('test_colocated_table_2', 2, 'CASCADE'); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass; + count +--------------------------------------------------------------------- + 3 +(1 row) + +\c - postgres - :master_port +SELECT 1 FROM master_remove_node('localhost', :master_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SET client_min_messages TO WARNING; +DROP SCHEMA "Tenant Isolation" CASCADE; +-- stop & resync and stop syncing metadata +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); + stop_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); + stop_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- restart metadata sync for rest of the tests +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- make sure there are no tables with non-zero colocationid +SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0; + count +--------------------------------------------------------------------- + 0 +(1 row) + +TRUNCATE TABLE pg_catalog.pg_dist_colocation; +ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1; +ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; diff --git a/src/test/regress/expected/multi_transaction_recovery.out b/src/test/regress/expected/multi_transaction_recovery.out index 938847576..ad5f5e699 100644 --- a/src/test/regress/expected/multi_transaction_recovery.out +++ b/src/test/regress/expected/multi_transaction_recovery.out @@ -361,7 +361,7 @@ SELECT COUNT(*) FROM pg_dist_transaction; BEGIN; SET LOCAL citus.defer_drop_after_shard_move TO OFF; -SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode := 'block_writes'); citus_move_shard_placement --------------------------------------------------------------------- @@ -380,7 +380,7 @@ SELECT recover_prepared_transactions(); 0 (1 row) -SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode := 'block_writes'); citus_move_shard_placement --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out index fd06e5d14..dca2466d6 100644 --- a/src/test/regress/expected/multi_transactional_drop_shards.out +++ b/src/test/regress/expected/multi_transactional_drop_shards.out @@ -696,13 +696,6 @@ SELECT master_remove_node('localhost', :master_port); DROP TABLE transactional_drop_shards, transactional_drop_reference; -- test DROP TABLE as a non-superuser in a transaction block CREATE USER try_drop_table WITH LOGIN; -SELECT run_command_on_workers('CREATE USER try_drop_table WITH LOGIN'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - GRANT ALL ON SCHEMA public TO try_drop_table; \c - try_drop_table - :master_port BEGIN; diff 
--git a/src/test/regress/expected/multi_utility_warnings.out b/src/test/regress/expected/multi_utility_warnings.out index 76d5a8325..4f50b2a72 100644 --- a/src/test/regress/expected/multi_utility_warnings.out +++ b/src/test/regress/expected/multi_utility_warnings.out @@ -2,26 +2,13 @@ -- MULTI_UTILITY_WARNINGS -- -- Tests to check if we inform the user about potential caveats of creating new --- databases, schemas, and roles. +-- databases, schemas. SET citus.next_shard_id TO 1010000; CREATE DATABASE new_database; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to workers HINT: You can manually create a database and its extensions on workers. -CREATE ROLE new_role; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -CREATE USER new_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -INSERT INTO pg_dist_authinfo VALUES (0, 'new_user', 'password=1234'); -ERROR: cannot write to pg_dist_authinfo -DETAIL: Citus Community Edition does not support the use of custom authentication options. -HINT: To learn more about using advanced authentication schemes with Citus, please contact us at https://citusdata.com/about/contact_us BEGIN; INSERT INTO pg_dist_node VALUES (1234567890, 1234567890, 'localhost', 5432); INSERT INTO pg_dist_poolinfo VALUES (1234567890, 'port=1234'); -ERROR: cannot write to pg_dist_poolinfo -DETAIL: Citus Community Edition does not support the use of pooler options. -HINT: To learn more about using advanced pooling schemes with Citus, please contact us at https://citusdata.com/about/contact_us ROLLBACK; diff --git a/src/test/regress/expected/partitioning_issue_3970.out b/src/test/regress/expected/partitioning_issue_3970.out index c5db76c47..1131bfc68 100644 --- a/src/test/regress/expected/partitioning_issue_3970.out +++ b/src/test/regress/expected/partitioning_issue_3970.out @@ -119,7 +119,7 @@ ALTER TABLE part_table RENAME CONSTRAINT my_seq TO my_seq_check; ERROR: renaming constraints belonging to distributed tables is currently unsupported ALTER TABLE part_table ALTER CONSTRAINT my_seq DEFERRABLE; ERROR: alter table command is currently unsupported -DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ATTACH|DETACH PARTITION and TYPE subcommands are supported. +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. -- verify that we can drop the constraints on partitioned tables ALTER TABLE part_table DROP CONSTRAINT my_seq; DROP TABLE part_table, dist CASCADE; diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out index 3c2298a4e..7a6f5e319 100644 --- a/src/test/regress/expected/pg12.out +++ b/src/test/regress/expected/pg12.out @@ -653,8 +653,6 @@ SELECT citus_remove_node('localhost', :master_port); CREATE TABLE superuser_columnar_table (a int) USING columnar; CREATE USER read_access; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
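The dropped "not propagating CREATE ROLE/USER commands" NOTICE/HINT pairs and the dropped pg_dist_authinfo/pg_dist_poolinfo errors above indicate that role DDL and the auth/pool catalogs are now exercised by these tests rather than rejected. A minimal sketch of how role propagation could be spot-checked, assuming a two-worker cluster and the run_command_on_workers() helper used throughout this suite; the role name propagation_check is illustrative only:
CREATE ROLE propagation_check WITH LOGIN;
-- the role should now exist on every worker as well as the coordinator
SELECT run_command_on_workers($$SELECT count(*) FROM pg_roles WHERE rolname = 'propagation_check'$$);
-- expected: one row per worker, each reporting success with a count of 1
DROP ROLE propagation_check;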
SET ROLE read_access; -- user shouldn't be able to execute alter_columnar_table_set -- or alter_columnar_table_reset for a columnar table that it diff --git a/src/test/regress/expected/pg13.out b/src/test/regress/expected/pg13.out index cceab4418..7dc52ee52 100644 --- a/src/test/regress/expected/pg13.out +++ b/src/test/regress/expected/pg13.out @@ -53,7 +53,7 @@ INSERT INTO generated_col_table VALUES (1); -- Make sure that we currently error out ALTER TABLE generated_col_table ALTER COLUMN b DROP EXPRESSION; ERROR: alter table command is currently unsupported -DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ATTACH|DETACH PARTITION and TYPE subcommands are supported. +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. -- alter view rename column works fine CREATE VIEW v AS SELECT * FROM dist_table; ALTER VIEW v RENAME age to new_age; diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out index 1cab59e0e..dcf21f097 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -234,13 +234,13 @@ SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regcla -- test column compression propagation in rebalance SELECT shardid INTO moving_shard FROM citus_shards WHERE table_name='col_compression'::regclass AND nodeport=:worker_1_port LIMIT 1; -SELECT citus_move_shard_placement((SELECT * FROM moving_shard), :'public_worker_1_host', :worker_1_port, :'public_worker_2_host', :worker_2_port); +SELECT citus_move_shard_placement((SELECT * FROM moving_shard), :'public_worker_1_host', :worker_1_port, :'public_worker_2_host', :worker_2_port, shard_transfer_mode := 'block_writes'); citus_move_shard_placement --------------------------------------------------------------------- (1 row) -SELECT rebalance_table_shards('col_compression', rebalance_strategy := 'by_shard_count'); +SELECT rebalance_table_shards('col_compression', rebalance_strategy := 'by_shard_count', shard_transfer_mode := 'block_writes'); NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ... rebalance_table_shards --------------------------------------------------------------------- @@ -826,18 +826,7 @@ SELECT create_distributed_table('st1','a'); (1 row) ALTER STATISTICS role_s1 OWNER TO CURRENT_ROLE; -SET citus.enable_ddl_propagation TO off; -- for enterprise CREATE ROLE role_1 WITH LOGIN SUPERUSER; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
-SET citus.enable_ddl_propagation TO on; -SELECT run_command_on_workers($$CREATE ROLE role_1 WITH LOGIN SUPERUSER;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - ALTER STATISTICS role_s1 OWNER TO CURRENT_ROLE; SELECT run_command_on_workers($$SELECT rolname FROM pg_roles WHERE oid IN (SELECT stxowner FROM pg_statistic_ext WHERE stxname LIKE 'role\_s1%');$$); run_command_on_workers diff --git a/src/test/regress/expected/shard_move_constraints.out b/src/test/regress/expected/shard_move_constraints.out new file mode 100644 index 000000000..8635f00b4 --- /dev/null +++ b/src/test/regress/expected/shard_move_constraints.out @@ -0,0 +1,461 @@ +CREATE SCHEMA "shard Move Fkeys Indexes"; +SET search_path TO "shard Move Fkeys Indexes"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +-- contrib module required to havve GIST exclude constraints +CREATE EXTENSION btree_gist; +-- create a non-superuser role +CREATE ROLE mx_rebalancer_role_ent WITH LOGIN; +GRANT ALL ON SCHEMA "shard Move Fkeys Indexes" TO mx_rebalancer_role_ent; +-- connect with this new role +\c - mx_rebalancer_role_ent - :master_port +SET search_path TO "shard Move Fkeys Indexes"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +CREATE TABLE sensors( +measureid integer, +eventdatetime date, +measure_data jsonb, +PRIMARY KEY (measureid, eventdatetime, measure_data)) +PARTITION BY RANGE(eventdatetime); +CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01'); +CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +CREATE TABLE sensors_news PARTITION OF sensors FOR VALUES FROM ('2020-05-01') TO ('2025-01-01'); +CREATE INDEX index_on_parent ON sensors(lower(measureid::text)); +CREATE INDEX index_on_child ON sensors_2020_01_01(lower(measure_data::text)); +CREATE INDEX hash_index ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime); +CREATE STATISTICS s1 (dependencies) ON measureid, eventdatetime FROM sensors; +CREATE STATISTICS s2 (dependencies) ON measureid, eventdatetime FROM sensors_2020_01_01; +ALTER INDEX index_on_parent ALTER COLUMN 1 SET STATISTICS 1000; +ALTER INDEX index_on_child ALTER COLUMN 1 SET STATISTICS 1000; +CLUSTER sensors_2020_01_01 USING index_on_child; +SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- due to https://github.com/citusdata/citus/issues/5121 +\c - postgres - :master_port +SET search_path TO "shard Move Fkeys Indexes"; +SELECT update_distributed_table_colocation('sensors_old', 'sensors'); + update_distributed_table_colocation +--------------------------------------------------------------------- + +(1 row) + +SELECT update_distributed_table_colocation('sensors_2020_01_01', 'sensors'); + update_distributed_table_colocation +--------------------------------------------------------------------- + +(1 row) + +SELECT update_distributed_table_colocation('sensors_news', 'sensors'); + update_distributed_table_colocation 
+--------------------------------------------------------------------- + +(1 row) + +\c - mx_rebalancer_role_ent - :master_port +SET search_path TO "shard Move Fkeys Indexes"; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 8970016; +SET citus.next_placement_id TO 8770016; +-- create a colocated distributed tables and create foreign keys FROM/TO +-- the partitions +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; +CREATE TABLE colocated_partitioned_table( + measureid integer, + eventdatetime date, + PRIMARY KEY (measureid, eventdatetime)) +PARTITION BY RANGE(eventdatetime); +CREATE TABLE colocated_partitioned_table_2020_01_01 PARTITION OF colocated_partitioned_table FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +SELECT create_distributed_table('colocated_partitioned_table', 'measureid', colocate_with:='sensors'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CLUSTER colocated_partitioned_table_2020_01_01 USING colocated_partitioned_table_2020_01_01_pkey; +CREATE TABLE reference_table (measureid integer PRIMARY KEY); +SELECT create_reference_table('reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +-- this table is used to make sure that index backed +-- replica identites can have clustered indexes +-- and no index statistics +CREATE TABLE index_backed_rep_identity(key int NOT NULL); +CREATE UNIQUE INDEX uqx ON index_backed_rep_identity(key); +ALTER TABLE index_backed_rep_identity REPLICA IDENTITY USING INDEX uqx; +CLUSTER index_backed_rep_identity USING uqx; +SELECT create_distributed_table('index_backed_rep_identity', 'key', colocate_with:='sensors'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- from parent to regular dist +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); +-- from parent to parent +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_parent FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table(measureid, eventdatetime); +-- from parent to child +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_child FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid, eventdatetime); +-- from parent to reference table +-- enable this test when https://github.com/citusdata/citus-enterprise/issues/284 is fixed +-- ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); +-- from child to regular dist +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); +-- from child to parent +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_parent FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table(measureid,eventdatetime); +-- from child to child +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_child FOREIGN KEY (measureid,eventdatetime) REFERENCES 
colocated_partitioned_table_2020_01_01(measureid,eventdatetime); +-- from child to reference table +-- enable this test when https://github.com/citusdata/citus-enterprise/issues/284 is fixed +-- ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); +-- load some data +INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i; +INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; +\c - postgres - :worker_1_port +SET search_path TO "shard Move Fkeys Indexes", public, pg_catalog; +-- show the current state of the constraints +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_8970000'::regclass ORDER BY 1,2; + Constraint | Definition +--------------------------------------------------------------------- + fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + sensors_8970000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) +(4 rows) + +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_2020_01_01_8970008'::regclass ORDER BY 1,2; + Constraint | Definition +--------------------------------------------------------------------- + fkey_from_child_to_child_8970008 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_child_to_dist_8970008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_child_to_parent_8970008 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + sensors_2020_01_01_8970008_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) +(7 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_8970000' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8970000 | CREATE INDEX hash_index_8970000 ON ONLY "shard Move Fkeys Indexes".sensors_8970000 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8970000 | CREATE INDEX index_on_parent_8970000 ON ONLY "shard Move Fkeys Indexes".sensors_8970000 USING btree (lower((measureid)::text)) + sensors_8970000 | CREATE INDEX index_with_include_8970000 ON ONLY "shard Move Fkeys Indexes".sensors_8970000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, 
eventdatetime) + sensors_8970000 | CREATE UNIQUE INDEX sensors_pkey_8970000 ON ONLY "shard Move Fkeys Indexes".sensors_8970000 USING btree (measureid, eventdatetime, measure_data) +(4 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_2020_01_01_8970008' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_2020_01_01_8970008 | CREATE INDEX index_on_child_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (lower((measure_data)::text)) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_expr_idx_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_expr_measure_data_eventdatetime_idx_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_lower_idx_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (lower((measureid)::text)) + sensors_2020_01_01_8970008 | CREATE UNIQUE INDEX sensors_2020_01_01_pkey_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (measureid, eventdatetime, measure_data) +(5 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='index_backed_rep_identity_8970029' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + index_backed_rep_identity_8970029 | CREATE UNIQUE INDEX uqx_8970029 ON "shard Move Fkeys Indexes".index_backed_rep_identity_8970029 USING btree (key) +(1 row) + +SELECT indisclustered FROM pg_index where indisclustered AND indrelid = 'index_backed_rep_identity_8970029'::regclass; + indisclustered +--------------------------------------------------------------------- + t +(1 row) + +SELECT stxname FROM pg_statistic_ext +WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('shard Move Fkeys Indexes') +) +ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + s1 + s1_8970000 + s1_8970002 + s2 + s2_8970008 + s2_8970010 +(6 rows) + +SELECT count(*) FROM pg_index +WHERE indisclustered + and +indrelid IN +('sensors_2020_01_01_8970008'::regclass, 'colocated_dist_table_8970016'::regclass, 'colocated_partitioned_table_2020_01_01_8970024'::regclass); + count +--------------------------------------------------------------------- + 3 +(1 row) + +\c - - - :master_port +-- make sure that constrainst are moved sanely with logical replication +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 8 orphaned shards +\c - postgres - :worker_2_port +SET search_path TO "shard Move Fkeys Indexes", public, pg_catalog; +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_8970000'::regclass ORDER BY 1,2; + Constraint | Definition +--------------------------------------------------------------------- + fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_parent_to_dist_8970000 | FOREIGN KEY 
(measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + sensors_8970000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) +(4 rows) + +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_2020_01_01_8970008'::regclass ORDER BY 1,2; + Constraint | Definition +--------------------------------------------------------------------- + fkey_from_child_to_child_8970008 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_child_to_dist_8970008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_child_to_parent_8970008 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + sensors_2020_01_01_8970008_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) +(7 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_8970000' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8970000 | CREATE INDEX hash_index_8970000 ON ONLY "shard Move Fkeys Indexes".sensors_8970000 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8970000 | CREATE INDEX index_on_parent_8970000 ON ONLY "shard Move Fkeys Indexes".sensors_8970000 USING btree (lower((measureid)::text)) + sensors_8970000 | CREATE INDEX index_with_include_8970000 ON ONLY "shard Move Fkeys Indexes".sensors_8970000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime) + sensors_8970000 | CREATE UNIQUE INDEX sensors_pkey_8970000 ON ONLY "shard Move Fkeys Indexes".sensors_8970000 USING btree (measureid, eventdatetime, measure_data) +(4 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_2020_01_01_8970008' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_2020_01_01_8970008 | CREATE INDEX index_on_child_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (lower((measure_data)::text)) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_expr_idx_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_expr_measure_data_eventdatetime_idx_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_lower_idx_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (lower((measureid)::text)) + sensors_2020_01_01_8970008 | CREATE UNIQUE INDEX 
sensors_2020_01_01_pkey_8970008 ON "shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (measureid, eventdatetime, measure_data) +(5 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='index_backed_rep_identity_8970029' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + index_backed_rep_identity_8970029 | CREATE UNIQUE INDEX uqx_8970029 ON "shard Move Fkeys Indexes".index_backed_rep_identity_8970029 USING btree (key) +(1 row) + +SELECT indisclustered FROM pg_index where indisclustered AND indrelid = 'index_backed_rep_identity_8970029'::regclass; + indisclustered +--------------------------------------------------------------------- + t +(1 row) + +SELECT stxname FROM pg_statistic_ext +WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('shard Move Fkeys Indexes') +) +ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + s1 + s1_8970000 + s1_8970001 + s1_8970003 + s2 + s2_8970008 + s2_8970009 + s2_8970011 +(8 rows) + +SELECT count(*) FROM pg_index +WHERE indisclustered + and +indrelid IN +('sensors_2020_01_01_8970008'::regclass, 'colocated_dist_table_8970016'::regclass, 'colocated_partitioned_table_2020_01_01_8970024'::regclass); + count +--------------------------------------------------------------------- + 3 +(1 row) + +\c - mx_rebalancer_role_ent - :master_port +-- verify that the data is consistent +SET search_path TO "shard Move Fkeys Indexes"; +SELECT count(*) FROM reference_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT count(*) FROM colocated_partitioned_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT count(*) FROM colocated_dist_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT count(*) FROM sensors; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +-- we should be able to change/drop constraints +ALTER INDEX index_on_parent RENAME TO index_on_parent_renamed; +ALTER INDEX index_on_child RENAME TO index_on_child_renamed; +ALTER INDEX index_on_parent_renamed ALTER COLUMN 1 SET STATISTICS 200; +ALTER INDEX index_on_child_renamed ALTER COLUMN 1 SET STATISTICS 200; +DROP STATISTICS s1,s2; +DROP INDEX index_on_parent_renamed; +DROP INDEX index_on_child_renamed; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_dist; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_parent; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_child; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_dist; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_parent; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_child; +-- another test with multiple constraints backed by indexes +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +create table multiple_unique_keys( + key int primary key, a int, b int, c int, d int, e int, f int, g int, h int, i int, + UNIQUE(key,a), UNIQUE(key,b),UNIQUE(key,c),UNIQUE(key,d),UNIQUE(key,e),UNIQUE(key,f),UNIQUE(key,g),UNIQUE(key,h), + UNIQUE(key,i),UNIQUE(key,a,b),UNIQUE(key,a,c),UNIQUE(key,a,d),UNIQUE(key,a,e),UNIQUE(key,a,f),UNIQUE(key,a,h), + UNIQUE(key,a,h), + EXCLUDE USING gist (a WITH =, key WITH =), + EXCLUDE USING gist (b WITH =, key WITH =), + EXCLUDE USING gist (c WITH =, key WITH 
=), + EXCLUDE USING gist (d WITH =, key WITH =), + EXCLUDE USING gist (e WITH =, key WITH =), + EXCLUDE USING gist (f WITH =, key WITH =), + EXCLUDE USING gist (g WITH =, key WITH =), + EXCLUDE USING gist (h WITH =, key WITH =), + EXCLUDE USING gist (i WITH =, key WITH =) +); +CREATE UNIQUE INDEX i1 ON multiple_unique_keys(key); +CREATE UNIQUE INDEX i2 ON multiple_unique_keys(key,b); +CREATE UNIQUE INDEX i3 ON multiple_unique_keys(key,c); +CREATE UNIQUE INDEX i4 ON multiple_unique_keys(key,d); +CREATE UNIQUE INDEX i5 ON multiple_unique_keys(key,a); +CREATE UNIQUE INDEX i6 ON multiple_unique_keys(key,a); +CREATE UNIQUE INDEX i7 ON multiple_unique_keys(key,a); +CREATE UNIQUE INDEX i8 ON multiple_unique_keys(key,a); +CREATE UNIQUE INDEX i9 ON multiple_unique_keys(key,a); +CREATE UNIQUE INDEX i10 ON multiple_unique_keys(key,b,c); +CREATE INDEX ii1 ON multiple_unique_keys(a); +CREATE INDEX ii2 ON multiple_unique_keys(b); +CREATE INDEX ii3 ON multiple_unique_keys(c); +CREATE INDEX ii4 ON multiple_unique_keys(d); +CREATE INDEX ii5 ON multiple_unique_keys(e); +CREATE INDEX ii6 ON multiple_unique_keys(f); +CREATE INDEX ii7 ON multiple_unique_keys(g); +CREATE INDEX ii8 ON multiple_unique_keys(h); +CREATE INDEX ii9 ON multiple_unique_keys(i); +CREATE INDEX ii10 ON multiple_unique_keys(a,b,c); +-- distribute table and load some data +SELECT create_distributed_table('multiple_unique_keys', 'key', colocate_with:='sensors'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO multiple_unique_keys SELECT i,i,i,i,i,i,i,i,i FROM generate_series(0,1000)i; +-- make sure that both online and offline rebalance operations succeed +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'block_writes'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- even on another schema +SET search_path TO public; +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'block_writes'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +\c - postgres - :master_port +-- stop and re-sync the metadata to make sure all works fine +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +NOTICE: dropping metadata on the node (localhost,57637) + stop_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); +NOTICE: dropping metadata on the node (localhost,57638) + stop_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); + start_metadata_sync_to_node 
+--------------------------------------------------------------------- + +(1 row) + +DROP SCHEMA "shard Move Fkeys Indexes" CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to extension btree_gist +drop cascades to table "shard Move Fkeys Indexes".sensors +drop cascades to table "shard Move Fkeys Indexes".colocated_dist_table +drop cascades to table "shard Move Fkeys Indexes".colocated_partitioned_table +drop cascades to table "shard Move Fkeys Indexes".reference_table +drop cascades to table "shard Move Fkeys Indexes".index_backed_rep_identity +drop cascades to table "shard Move Fkeys Indexes".multiple_unique_keys diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index 918f65147..b9d928acb 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -523,7 +523,7 @@ SELECT * FROM replication_test_table_placements_per_node; (2 rows) -- Replicate the remaining under-replicated shards -SELECT replicate_table_shards('replication_test_table'); +SELECT replicate_table_shards('replication_test_table', shard_transfer_mode:='block_writes'); replicate_table_shards --------------------------------------------------------------------- @@ -671,6 +671,8 @@ CREATE USER testrole; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. GRANT ALL ON SCHEMA public TO testrole; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. \c - - - :master_port SET client_min_messages TO WARNING; SET ROLE testrole; @@ -683,6 +685,8 @@ CREATE USER testrole; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. GRANT ALL ON SCHEMA public TO testrole; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. 
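The two new ERROR/HINT lines above come from issuing GRANT directly on a worker node; with GRANT now being a propagated command, it has to be run from the coordinator. A minimal sketch of the intended pattern, reusing the testrole user and the psql port variables from this suite:
\c - - - :worker_1_port
GRANT ALL ON SCHEMA public TO testrole;  -- rejected: operation is not allowed on this node
\c - - - :master_port
GRANT ALL ON SCHEMA public TO testrole;  -- accepted on the coordinator and propagated to the workers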
\c - - - :master_port SET client_min_messages TO WARNING; SET citus.next_shard_id TO 123010; @@ -692,6 +696,7 @@ SELECT rebalance_table_shards('rebalance_test_table', ERROR: must be owner of table rebalance_test_table CONTEXT: while executing command on localhost:xxxxx RESET ROLE; +CALL citus_cleanup_orphaned_shards(); -- Confirm no moves took place at all during these errors SELECT * FROM table_placements_per_node; nodeport | logicalrelid | count @@ -733,7 +738,7 @@ SELECT * FROM table_placements_per_node; (2 rows) -- Move the remaining shards using threshold=0 -SELECT rebalance_table_shards('rebalance_test_table', threshold := 0); +SELECT rebalance_table_shards('rebalance_test_table', threshold := 0, shard_transfer_mode:='block_writes'); rebalance_table_shards --------------------------------------------------------------------- @@ -889,10 +894,6 @@ SELECT COUNT(*) FROM imbalanced_table; 12 (1 row) --- Try force_logical -SELECT rebalance_table_shards('imbalanced_table', threshold:=0, shard_transfer_mode:='force_logical'); -ERROR: the force_logical transfer mode is currently unsupported -CALL citus_cleanup_orphaned_shards(); -- Test rebalance operation SELECT rebalance_table_shards('imbalanced_table', threshold:=0, shard_transfer_mode:='block_writes'); rebalance_table_shards @@ -1407,6 +1408,36 @@ SELECT * FROM public.table_placements_per_node; -- Drop some tables for clear consistent error DROP TABLE test_schema_support.colocated_rebalance_test2; +-- testing behaviour when a transfer fails when using master_drain_node +SELECT * from master_drain_node('localhost', :worker_2_port); +ERROR: cannot use logical replication to transfer shards of the relation colocated_rebalance_test since it doesn't have a REPLICA IDENTITY or PRIMARY KEY +DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY. +HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'. +CONTEXT: while executing command on localhost:xxxxx +-- Make sure shouldhaveshards is false +select shouldhaveshards from pg_dist_node where nodeport = :worker_2_port; + shouldhaveshards +--------------------------------------------------------------------- + f +(1 row) + +-- Make sure no actual nodes are moved +SELECT * FROM public.table_placements_per_node; + nodeport | logicalrelid | count +--------------------------------------------------------------------- + 57637 | colocated_rebalance_test | 2 + 57638 | colocated_rebalance_test | 2 + 57637 | non_colocated_rebalance_test | 2 + 57638 | non_colocated_rebalance_test | 2 +(4 rows) + +-- Make it a data node again +SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); + master_set_node_property +--------------------------------------------------------------------- + +(1 row) + -- Leave no trace on workers RESET search_path; \set VERBOSITY terse @@ -2212,12 +2243,9 @@ SELECT 1 from master_add_node('localhost', :worker_2_port); 1 (1 row) +-- since r2 has no replica identity we expect an error here SELECT rebalance_table_shards(); - rebalance_table_shards ---------------------------------------------------------------------- - -(1 row) - +ERROR: cannot use logical replication to transfer shards of the relation r2 since it doesn't have a REPLICA IDENTITY or PRIMARY KEY CALL citus_cleanup_orphaned_shards(); DROP TABLE t1, r1, r2; -- verify there are no distributed tables before we perform the following tests. 
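Several calls above now pass shard_transfer_mode := 'block_writes', and master_drain_node()/rebalance_table_shards() are expected to error out for tables that lack a REPLICA IDENTITY or PRIMARY KEY, because the default online transfer relies on logical replication. A minimal sketch of the two workarounds the HINT suggests; the table name drain_demo is illustrative only:
CREATE TABLE drain_demo (id int, payload text);
SELECT create_distributed_table('drain_demo', 'id');
-- option 1: add a primary key (or a replica identity) so online moves can replicate UPDATE/DELETE
ALTER TABLE drain_demo ADD PRIMARY KEY (id);
SELECT rebalance_table_shards('drain_demo');
-- option 2: skip logical replication entirely and do an offline transfer
SELECT rebalance_table_shards('drain_demo', shard_transfer_mode := 'block_writes');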
diff --git a/src/test/regress/expected/single_node_enterprise.out b/src/test/regress/expected/single_node_enterprise.out new file mode 100644 index 000000000..e59565f4b --- /dev/null +++ b/src/test/regress/expected/single_node_enterprise.out @@ -0,0 +1,523 @@ +-- we already have lots of tests targeting +-- single node citus clusters in sql/single_node.sql +-- in this file, we are testing enterprise features +CREATE SCHEMA single_node_ent; +SET search_path TO single_node_ent; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 90730500; +-- idempotently add node to allow this test to run without add_coordinator +SET client_min_messages TO WARNING; +SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +CREATE USER full_access_single_node; +CREATE USER read_access_single_node; +CREATE USER no_access_single_node; +CREATE TYPE new_type AS (n int, m text); +CREATE TABLE test(x int, y int, z new_type); +SELECT create_distributed_table('test','x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE ref(a int, b int); +SELECT create_reference_table('ref'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +-- we want to test replicate_table_shards() +-- which requires statement-based replication +CREATE TABLE statement_replicated(a int PRIMARY KEY); +SELECT create_distributed_table('statement_replicated','a', colocate_with:='none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='statement_replicated'::regclass; +-- We create this function to make sure +-- GRANT ALL ON ALL FUNCTIONS IN SCHEMA doesn't get stuck. 
+CREATE FUNCTION notice(text) +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE '%', $1; +END; +$$; +SELECT create_distributed_function('notice(text)'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- allow access to various users +GRANT ALL ON TABLE test,ref TO full_access_single_node; +GRANT USAGE ON SCHEMA single_node_ent TO full_access_single_node; +ALTER ROLE full_access_single_node WITH LOGIN; +GRANT ALL PRIVILEGES ON DATABASE postgres TO no_access_single_node; +REVOKE ALL PRIVILEGES ON DATABASE postgres FROM no_access_single_node; +REVOKE USAGE, CREATE ON SCHEMA single_node_ent FROM no_access_single_node; +GRANT SELECT ON ref,test TO no_access_single_node; +REVOKE SELECT ON ref,test FROM no_access_single_node; +-- we have to use local execution, otherwise we hit to a known issue +-- (see https://github.com/citusdata/citus-enterprise/ issues/474) +-- to force local execution, use transaction block +BEGIN; +GRANT USAGE ON SCHEMA single_node_ent TO read_access_single_node; +GRANT SELECT ON ALL TABLES IN SCHEMA single_node_ent TO read_access_single_node; +COMMIT; +-- revoke SELECT access for the next 3-4 tests +REVOKE SELECT ON test FROM read_access_single_node; +-- Make sure the access is revoked +SET ROLE read_access_single_node; +SELECT COUNT(*) FROM test; +ERROR: permission denied for table test +SET ROLE postgres; +BEGIN; +GRANT SELECT ON ALL TABLES IN SCHEMA single_node_ent TO read_access_single_node; +-- Make sure we can now read as read_access_single_node role +SET ROLE read_access_single_node; +SELECT COUNT(*) FROM test; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SET ROLE postgres; +-- Make sure REVOKE .. IN SCHEMA also works +REVOKE SELECT ON ALL TABLES IN SCHEMA single_node_ent FROM read_access_single_node; +SET ROLE read_access_single_node; +SELECT COUNT(*) FROM test; +ERROR: permission denied for table test +ROLLBACK; +GRANT ALL ON ALL FUNCTIONS IN SCHEMA single_node_ent to full_access_single_node; +GRANT SELECT ON ALL TABLES IN SCHEMA single_node_ent TO read_access_single_node; +GRANT SELECT ON test, ref TO read_access_single_node; +SET ROLE read_access_single_node; +-- Make sure we can now read as read_access_single_node role +SELECT COUNT(*) FROM test; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SET ROLE full_access_single_node; +INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type); +-- should fail as only read access is allowed +SET ROLE read_access_single_node; +INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type); +ERROR: permission denied for table test +SELECT nodeid AS coordinator_node_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :master_port +\gset +-- pg_dist_poolinfo should work fine for coordinator +-- put outright bad values +SET ROLE postgres; +INSERT INTO pg_dist_poolinfo VALUES (:coordinator_node_id, 'host=failhost'); +\c +SET search_path TO single_node_ent; +\set VERBOSITY terse +-- supress OS specific error message +DO $$ +BEGIN + BEGIN + -- we want to force remote execution + SET LOCAL citus.enable_local_execution TO false; + SELECT COUNT(*) FROM test; + EXCEPTION WHEN OTHERS THEN + IF SQLERRM LIKE 'connection to the remote node%%' THEN + RAISE 'failed to execute select'; + END IF; + END; +END; +$$; +ERROR: failed to execute select +TRUNCATE pg_dist_poolinfo; +-- using 127.0.0.1 should work fine +INSERT INTO pg_dist_poolinfo VALUES 
(:coordinator_node_id, 'host=127.0.0.1 port=' || :master_port); +\c +SET search_path TO single_node_ent; +SET citus.log_remote_commands TO ON; +SET client_min_messages TO DEBUG1; +-- force multi-shard query to be able to +-- have remote connections +SELECT COUNT(*) FROM test WHERE x = 1 OR x = 2; +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730500 test WHERE ((x OPERATOR(pg_catalog.=) 1) OR (x OPERATOR(pg_catalog.=) 2)) +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730503 test WHERE ((x OPERATOR(pg_catalog.=) 1) OR (x OPERATOR(pg_catalog.=) 2)) + count +--------------------------------------------------------------------- + 1 +(1 row) + +RESET citus.log_remote_commands; +RESET client_min_messages; +TRUNCATE pg_dist_poolinfo; +-- reconnect +\c +SET search_path TO single_node_ent; +-- now, create a colocated table +-- add a new node, and move the +-- shards to the new node +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 90731500; +CREATE TABLE colocated_table (x int PRIMARY KEY, y int); +SELECT create_distributed_table('colocated_table','x', colocate_with:='single_node_ent.test'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- create some foreign keys +TRUNCATE test, ref; +ALTER TABLE test ADD CONSTRAINT p_key PRIMARY KEY(x); +ALTER TABLE ref ADD CONSTRAINT p_key_2 PRIMARY KEY(a); +ALTER TABLE colocated_table ADD CONSTRAINT fkey FOREIGN KEY (x) REFERENCES test(x); +ALTER TABLE test ADD CONSTRAINT fkey FOREIGN KEY (x) REFERENCES ref(a); +-- load some data +INSERT INTO ref SELECT i, i*2 FROM generate_series(0,50)i; +INSERT INTO test SELECT i, i*2, (i, 'citus' || i)::new_type FROM generate_series(0,50)i; +INSERT INTO colocated_table SELECT i, i*2 FROM generate_series(0,50)i; +-- run a very basic query +SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); + count +--------------------------------------------------------------------- + 51 +(1 row) + +CREATE VIEW view_created_before_shard_moves AS + SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); +SELECT * FROM view_created_before_shard_moves; + count +--------------------------------------------------------------------- + 51 +(1 row) + +-- show that tenant isolation works fine +SELECT isolate_tenant_to_new_shard('test', 5, 'CASCADE'); + isolate_tenant_to_new_shard +--------------------------------------------------------------------- + 90731505 +(1 row) + +-- in the first iteration, have an +-- hybrid cluster meaning that +-- the shards exists on both the coordinator +-- and on the workers +SELECT 1 FROM master_add_node('localhost', :worker_1_port); +NOTICE: shards are still on the coordinator after adding the new node +NOTICE: Replicating reference table "ref" to the node localhost:xxxxx + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- make sure that we can replicate tables as well +select replicate_table_shards('statement_replicated', shard_replication_factor:=2, shard_transfer_mode:='block_writes'); +NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ... +NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ... +NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ... +NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ... 
+ replicate_table_shards +--------------------------------------------------------------------- + +(1 row) + +-- we don't need the table anymore, it complicates the output of rebalances +DROP TABLE statement_replicated; +-- move 1 set of colocated shards in non-blocking mode +-- and the other in block_writes +SELECT rebalance_table_shards(max_shard_moves:=1); +NOTICE: Stopped searching before we were out of moves. Please rerun the rebalancer after it's finished for a more optimal placement. +NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ... + rebalance_table_shards +--------------------------------------------------------------------- + +(1 row) + +SELECT rebalance_table_shards(shard_transfer_mode:='block_writes'); +NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ... +NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ... + rebalance_table_shards +--------------------------------------------------------------------- + +(1 row) + +-- should fail as only read access is allowed +SET ROLE read_access_single_node; +INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type); +ERROR: permission denied for table test +SET ROLE postgres; +\c +SET search_path TO single_node_ent; +-- the same query should work +SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); + count +--------------------------------------------------------------------- + 51 +(1 row) + +-- make sure that composite type is created +-- on the worker +SELECT * FROM test ORDER BY 1 DESC, 2, 3 LIMIT 1; + x | y | z +--------------------------------------------------------------------- + 50 | 100 | (50,citus50) +(1 row) + +-- make sure that we can execute with intermediate +-- results that are needed on all shards on the +-- final step +WITH cte_1 AS (SELECT * FROM test ORDER BY 1 DESC, 2, 3 LIMIT 5) +SELECT count(*) FROM colocated_table JOIN cte_1 USING (x); + count +--------------------------------------------------------------------- + 5 +(1 row) + +-- make sure that we can still query the view +SELECT * FROM view_created_before_shard_moves; + count +--------------------------------------------------------------------- + 51 +(1 row) + +-- we should be able to switch the cluster to CitusMX +SELECT start_metadata_sync_to_node('localhost', :master_port); +NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- sanity-check: the same queries should work +SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); + count +--------------------------------------------------------------------- + 51 +(1 row) + +SELECT * FROM test ORDER BY 1 DESC,2,3 LIMIT 1; + x | y | z +--------------------------------------------------------------------- + 50 | 100 | (50,citus50) +(1 row) + +WITH cte_1 AS (SELECT * FROM test ORDER BY 1 DESC,2,3 LIMIT 5) +SELECT count(*) FROM colocated_table JOIN cte_1 USING (x); + count +--------------------------------------------------------------------- + 5 +(1 row) + +-- all DDLs should work +ALTER TABLE colocated_table ADD COLUMN z single_node_ent.new_type; +UPDATE colocated_table SET z = (x, y::text)::new_type; +SELECT * FROM colocated_table ORDER 
BY 1 DESC,2,3 LIMIT 1; + x | y | z +--------------------------------------------------------------------- + 50 | 100 | (50,100) +(1 row) + +CREATE INDEX i_colocated_table ON colocated_table(y); +BEGIN; + CREATE INDEX i_colocated_table_2 ON colocated_table(x,y); +ROLLBACK; +-- sanity check: transaction blocks spanning both nodes should work fine +BEGIN; + UPDATE colocated_table SET y = y + 1; + UPDATE test SET y = y -1; + SELECT max(y) FROM colocated_table; + max +--------------------------------------------------------------------- + 101 +(1 row) + + SELECT max(y) FROM test; + max +--------------------------------------------------------------------- + 99 +(1 row) + +ROLLBACK; +-- generate data so that we can enforce fkeys +INSERT INTO ref SELECT i, i*2 FROM generate_series(100,150)i; +-- the first insert goes to a shard on the worker +-- the second insert goes to a shard on the coordinator +BEGIN; + SET LOCAL citus.log_remote_commands TO ON; + INSERT INTO test(x,y) VALUES (101,100); +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +NOTICE: issuing INSERT INTO single_node_ent.test_90730501 (x, y) VALUES (101, 100) + INSERT INTO test(x,y) VALUES (102,100); +NOTICE: issuing INSERT INTO single_node_ent.test_90730502 (x, y) VALUES (102, 100) + -- followed by a multi-shard command + SELECT count(*) FROM test; +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730501 test WHERE true +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730502 test WHERE true +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730503 test WHERE true +NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731504 test WHERE true +NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731505 test WHERE true +NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731506 test WHERE true + count +--------------------------------------------------------------------- + 53 +(1 row) + +ROLLBACK; +NOTICE: issuing ROLLBACK +-- the first insert goes to a shard on the coordinator +-- the second insert goes to a shard on the worker +BEGIN; + SET LOCAL citus.log_remote_commands TO ON; + INSERT INTO test(x,y) VALUES (102,100); +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +NOTICE: issuing INSERT INTO single_node_ent.test_90730502 (x, y) VALUES (102, 100) + INSERT INTO test(x,y) VALUES (101,100); +NOTICE: issuing INSERT INTO single_node_ent.test_90730501 (x, y) VALUES (101, 100) + -- followed by a multi-shard command + SELECT count(*) FROM test; +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730501 test WHERE true +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730502 test WHERE true +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730503 test WHERE true +NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731504 test WHERE true +NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731505 test WHERE true +NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node_ent.test_90731506 test WHERE true + count +--------------------------------------------------------------------- + 53 +(1 row) + +ROLLBACK; +NOTICE: issuing ROLLBACK +-- now, lets move all 
the shards of distributed tables out of the coordinator +-- block writes is much faster for the sake of the test timings we prefer it +SELECT master_drain_node('localhost', :master_port, shard_transfer_mode:='block_writes'); +NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ... +NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ... +NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ... + master_drain_node +--------------------------------------------------------------------- + +(1 row) + +-- should return false as master_drain_node had just set it to false for coordinator +SELECT shouldhaveshards FROM pg_dist_node WHERE nodeport = :master_port; + shouldhaveshards +--------------------------------------------------------------------- + f +(1 row) + +-- sanity-check: the same queries should work +SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); + count +--------------------------------------------------------------------- + 51 +(1 row) + +SELECT * FROM test ORDER BY 1 DESC,2 ,3 LIMIT 1; + x | y | z +--------------------------------------------------------------------- + 50 | 100 | (50,citus50) +(1 row) + +WITH cte_1 AS (SELECT * FROM test ORDER BY 1 DESC,2 , 3 LIMIT 5) +SELECT count(*) FROM colocated_table JOIN cte_1 USING (x); + count +--------------------------------------------------------------------- + 5 +(1 row) + +-- make sure that we can still query the view +SELECT * FROM view_created_before_shard_moves; + count +--------------------------------------------------------------------- + 51 +(1 row) + +-- and make sure that all the shards are remote +BEGIN; + SET LOCAL citus.log_remote_commands TO ON; + INSERT INTO test(x,y) VALUES (101,100); +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +NOTICE: issuing INSERT INTO single_node_ent.test_90730501 (x, y) VALUES (101, 100) + INSERT INTO test(x,y) VALUES (102,100); +NOTICE: issuing INSERT INTO single_node_ent.test_90730502 (x, y) VALUES (102, 100) + -- followed by a multi-shard command + SELECT count(*) FROM test; +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730501 test WHERE true +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730502 test WHERE true +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90730503 test WHERE true +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90731504 test WHERE true +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90731505 test WHERE true +NOTICE: issuing SELECT count(*) AS count FROM single_node_ent.test_90731506 test WHERE true + count +--------------------------------------------------------------------- + 53 +(1 row) + +ROLLBACK; +NOTICE: issuing ROLLBACK +-- should fail as only read access is allowed +SET ROLE read_access_single_node; +INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type); +ERROR: permission denied for table test +SET ROLE postgres; +\c +SET search_path TO single_node_ent; +-- Cleanup +RESET citus.log_remote_commands; +SET client_min_messages TO WARNING; +DROP SCHEMA single_node_ent CASCADE; +DROP OWNED BY full_access_single_node; +DROP OWNED BY read_access_single_node; +SELECT run_command_on_workers($$DROP OWNED BY full_access_single_node$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"DROP OWNED") +(1 row) + +SELECT 
run_command_on_workers($$DROP OWNED BY read_access_single_node$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"DROP OWNED") +(1 row) + +DROP ROLE full_access_single_node; +DROP ROLE read_access_single_node; +-- remove the nodes for next tests +SELECT 1 FROM master_remove_node('localhost', :master_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT 1 FROM master_remove_node('localhost', :worker_1_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- restart nodeid sequence so that multi_cluster_management still has the same +-- nodeids +ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 1; +ALTER SEQUENCE pg_dist_groupid_seq RESTART 1; diff --git a/src/test/regress/expected/stat_statements.out b/src/test/regress/expected/stat_statements.out new file mode 100644 index 000000000..1bfeba544 --- /dev/null +++ b/src/test/regress/expected/stat_statements.out @@ -0,0 +1,651 @@ +-- +-- stat_statements +-- +-- tests citus_stat_statements functionality +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 13 AS server_version_above_thirteen +\gset +\if :server_version_above_thirteen +SET compute_query_id = 'on'; +\endif +-- check if pg_stat_statements is available +SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements'; + name +--------------------------------------------------------------------- + pg_stat_statements +(1 row) + +SELECT regexp_split_to_array(setting, ',') @> ARRAY['pg_stat_statements'] AS stats_loaded +FROM pg_settings WHERE name = 'shared_preload_libraries'; + stats_loaded +--------------------------------------------------------------------- + t +(1 row) + +DROP EXTENSION IF EXISTS pg_stat_statements; +NOTICE: extension "pg_stat_statements" does not exist, skipping +-- verify it is not loaded yet +SELECT extname FROM pg_extension WHERE extname = 'pg_stat_statements'; + extname +--------------------------------------------------------------------- +(0 rows) + +-- this should error out since extension is not created yet +SELECT * FROM citus_stat_statements; +ERROR: pg_stat_statements is not installed +HINT: install pg_stat_statements extension and try again +CONTEXT: PL/pgSQL function citus_stat_statements() line XX at RAISE +-- create extension if available +SELECT CASE WHEN COUNT(*) > 0 THEN + 'CREATE EXTENSION pg_stat_statements' +ELSE 'SELECT false as pg_stat_statements_available' END +AS create_cmd FROM pg_available_extensions() +WHERE name = 'pg_stat_statements' +\gset +:create_cmd; +CREATE FUNCTION normalize_query_string(query_string text) + RETURNS TEXT + LANGUAGE plpgsql + AS $function$ +BEGIN + RETURN rtrim(regexp_replace(query_string, '\$\d+', '?', 'g'), ';'); +END; +$function$; +-- verify citus stat statements reset +SELECT citus_stat_statements_reset(); + citus_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE test(a int); +SELECT create_distributed_table('test','a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +insert into test values(1); +select query, calls from citus_stat_statements(); + query | calls +--------------------------------------------------------------------- + insert into test values($1) | 1 +(1 row) + +\if :server_version_above_thirteen +SET compute_query_id = 'off'; +\else +set 
citus.stat_statements_track = 'none'; +\endif +-- for pg >= 14, since compute_query_id is off, this insert +-- shouldn't be tracked +-- for pg < 14, we disable it explicitly so that we don't need +-- to add an alternative output file. +insert into test values(1); +select query, calls from citus_stat_statements(); + query | calls +--------------------------------------------------------------------- + insert into test values($1) | 1 +(1 row) + +\if :server_version_above_thirteen +SET compute_query_id = 'on'; +\else +RESET citus.stat_statements_track; +\endif +SELECT citus_stat_statements_reset(); + citus_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +-- it should now succeed, but with empty result +SELECT normalize_query_string(query) FROM citus_stat_statements; + normalize_query_string +--------------------------------------------------------------------- +(0 rows) + +-- run some queries +SELECT count(*) FROM lineitem_hash_part; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT l_orderkey FROM lineitem_hash_part; + l_orderkey +--------------------------------------------------------------------- +(0 rows) + +SELECT l_orderkey FROM lineitem_hash_part WHERE l_orderkey > 100; + l_orderkey +--------------------------------------------------------------------- +(0 rows) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 1; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 1200; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + SELECT count(*) FROM lineitem_hash_part | adaptive | | 2 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 1 | 1 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 1200 | 1 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 4 | 2 + SELECT l_orderkey FROM lineitem_hash_part | adaptive | | 1 + SELECT l_orderkey FROM lineitem_hash_part WHERE l_orderkey > ? 
| adaptive | | 1 +(6 rows) + +-- test GUC citus.stat_statements_track +SET citus.stat_statements_track TO 'none'; +-- this shouldn't increment the call count since tracking is disabled +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- this should give the same output as above, since the last query is not counted +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + SELECT count(*) FROM lineitem_hash_part | adaptive | | 2 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 1 | 1 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 1200 | 1 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 4 | 2 + SELECT l_orderkey FROM lineitem_hash_part | adaptive | | 1 + SELECT l_orderkey FROM lineitem_hash_part WHERE l_orderkey > ? | adaptive | | 1 +(6 rows) + +-- reset the GUC to track stats +SET citus.stat_statements_track TO 'all'; +-- reset pg_stat_statements and verify it also cleans citus_stat_statements output +-- verify that entries are actually removed from citus_stat_statements +SELECT pg_stat_statements_reset(); + pg_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM citus_stat_statements; + queryid | userid | dbid | query | executor | partition_key | calls +--------------------------------------------------------------------- +(0 rows) + +-- run some queries +SELECT count(*) FROM lineitem_hash_part; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT l_orderkey FROM lineitem_hash_part; + l_orderkey +--------------------------------------------------------------------- +(0 rows) + +SELECT l_orderkey FROM lineitem_hash_part WHERE l_orderkey > 100; + l_orderkey +--------------------------------------------------------------------- +(0 rows) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 1; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 1200; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- show current list, and reset pg_stat_statements +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + SELECT count(*) FROM lineitem_hash_part | adaptive | | 2 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 1 | 1 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 1200 | 1 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? 
| adaptive | 4 | 2 + SELECT l_orderkey FROM lineitem_hash_part | adaptive | | 1 + SELECT l_orderkey FROM lineitem_hash_part WHERE l_orderkey > ? | adaptive | | 1 +(6 rows) + +SELECT pg_stat_statements_reset(); + pg_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) FROM lineitem_hash_part; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- verify although pg_stat_statements was reset, some call counts are not +-- if a query is re-issued between pg_stat_statements_reset() and citus_stat_statements() +-- its call count is preserved. +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + SELECT count(*) FROM lineitem_hash_part | adaptive | | 3 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 1 | 1 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 1200 | 1 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? | adaptive | 4 | 3 +(4 rows) + +-- citus_stat_statements_reset() must be called to reset call counts +SELECT citus_stat_statements_reset(); + citus_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) FROM lineitem_hash_part; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- verify citus stats has only 2 rows +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + SELECT count(*) FROM lineitem_hash_part | adaptive | | 1 + SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ? 
| adaptive | 4 | 1 +(2 rows) + +-- create test tables to run update/delete scenarios +CREATE TABLE stat_test_text(user_id text, value int); +CREATE TABLE stat_test_bigint(user_id bigint, value int); +SELECT citus_stat_statements_reset(); + citus_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +-- verify regular tables are not included in citus_stat_statements +SELECT * FROM stat_test_text; + user_id | value +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM stat_test_bigint WHERE user_id = 1::bigint; + user_id | value +--------------------------------------------------------------------- +(0 rows) + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- +(0 rows) + +SELECT create_distributed_table('stat_test_text', 'user_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('stat_test_bigint', 'user_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM stat_test_text; + user_id | value +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM stat_test_text WHERE user_id = 'me'; + user_id | value +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM stat_test_bigint; + user_id | value +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM stat_test_bigint WHERE user_id = 2::bigint; + user_id | value +--------------------------------------------------------------------- +(0 rows) + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + SELECT * FROM stat_test_bigint | adaptive | | 1 + SELECT * FROM stat_test_bigint WHERE user_id = ?::bigint | adaptive | 2 | 1 + SELECT * FROM stat_test_text | adaptive | | 1 + SELECT * FROM stat_test_text WHERE user_id = ? | adaptive | me | 1 +(4 rows) + +-- insert some rows and check stats +INSERT INTO stat_test_bigint VALUES (1, 1); +INSERT INTO stat_test_bigint VALUES (7, 1); +INSERT INTO stat_test_bigint VALUES (7, 1), (2,3); +INSERT INTO stat_test_bigint VALUES (8, 1), (8,3); +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + INSERT INTO stat_test_bigint VALUES (?, ?) | adaptive | 1 | 1 + INSERT INTO stat_test_bigint VALUES (?, ?) | adaptive | 7 | 1 + INSERT INTO stat_test_bigint VALUES (?, ?), (?,?) | adaptive | 8 | 1 + INSERT INTO stat_test_bigint VALUES (?, ?), (?,?) 
| adaptive | | 1 + SELECT * FROM stat_test_bigint | adaptive | | 1 + SELECT * FROM stat_test_bigint WHERE user_id = ?::bigint | adaptive | 2 | 1 + SELECT * FROM stat_test_text | adaptive | | 1 + SELECT * FROM stat_test_text WHERE user_id = ? | adaptive | me | 1 +(8 rows) + +-- delete some rows and check stats +SELECT citus_stat_statements_reset(); + citus_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +DELETE FROM stat_test_bigint WHERE value > 1000; +DELETE FROM stat_test_bigint WHERE value > 1200; +DELETE FROM stat_test_bigint WHERE user_id > 1000; +DELETE FROM stat_test_bigint WHERE user_id = 1000; +DELETE FROM stat_test_bigint WHERE user_id = 1000; +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + DELETE FROM stat_test_bigint WHERE user_id = ? | adaptive | 1000 | 2 + DELETE FROM stat_test_bigint WHERE user_id > ? | adaptive | | 1 + DELETE FROM stat_test_bigint WHERE value > ? | adaptive | | 2 +(3 rows) + +-- update some rows and check stats +SELECT citus_stat_statements_reset(); + citus_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +UPDATE stat_test_bigint SET value = 300 WHERE value = 3000; +UPDATE stat_test_bigint SET value = 320 WHERE value = 3200; +UPDATE stat_test_bigint SET value = 300 WHERE user_id = 3; +UPDATE stat_test_bigint SET value = 300 WHERE user_id = 3; +UPDATE stat_test_bigint SET value = 3000 WHERE user_id > 500; +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + UPDATE stat_test_bigint SET value = ? WHERE user_id = ? | adaptive | 3 | 2 + UPDATE stat_test_bigint SET value = ? WHERE user_id > ? | adaptive | | 1 + UPDATE stat_test_bigint SET value = ? WHERE value = ? 
| adaptive | | 2 +(3 rows) + +-- test joins +CREATE TABLE stat_test_bigint_other(LIKE stat_test_bigint); +SELECT create_distributed_table('stat_test_bigint_other', 'user_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT citus_stat_statements_reset(); + citus_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO stat_test_bigint_other SELECT * FROM stat_test_bigint; +INSERT INTO stat_test_bigint_other SELECT * FROM stat_test_bigint WHERE user_id = 3; +INSERT INTO stat_test_bigint_other SELECT * FROM stat_test_bigint WHERE user_id = 3; +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id); + count +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id); + count +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id) +WHERE b.user_id = 3; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id) +WHERE b.user_id = 3; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id) +WHERE o.user_id = 3; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT normalize_query_string(query), executor, partition_key, calls FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + INSERT INTO stat_test_bigint_other SELECT * FROM stat_test_bigint | adaptive | | 1 + INSERT INTO stat_test_bigint_other SELECT * FROM stat_test_bigint WHERE user_id = ? | adaptive | | 2 + SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id) | adaptive | | 2 + SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id)+| adaptive | 3 | 2 + WHERE b.user_id = ? | | | + SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id)+| adaptive | 3 | 1 + WHERE o.user_id = ? 
| | | +(5 rows) + +-- test reference table +CREATE TABLE stat_test_reference(LIKE stat_test_bigint); +SELECT create_reference_table('stat_test_reference'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO stat_test_reference SELECT user_id, count(*) FROM stat_test_bigint GROUP BY user_id; +SELECT citus_stat_statements_reset(); + citus_stat_statements_reset +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) FROM stat_test_reference; + count +--------------------------------------------------------------------- + 4 +(1 row) + +SELECT count(*) FROM stat_test_reference WHERE user_id = 2; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM stat_test_reference WHERE user_id = 2; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id); + count +--------------------------------------------------------------------- + 6 +(1 row) + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id); + count +--------------------------------------------------------------------- + 6 +(1 row) + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id) +WHERE b.user_id = 1; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id) +WHERE b.user_id = 1 and r.value > 0; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id) +WHERE r.user_id = 1; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + normalize_query_string | executor | partition_key | calls +--------------------------------------------------------------------- + SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id) | adaptive | | 2 + SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id)+| adaptive | 1 | 1 + WHERE b.user_id = ? | | | + SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id)+| adaptive | 1 | 1 + WHERE b.user_id = ? and r.value > ? | | | + SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id)+| adaptive | 1 | 1 + WHERE r.user_id = ? | | | + SELECT count(*) FROM stat_test_reference | adaptive | | 1 + SELECT count(*) FROM stat_test_reference WHERE user_id = ? 
| adaptive | | 2 +(6 rows) + +-- non-stats role should only see its own entries, even when calling citus_query_stats directly +CREATE USER nostats; +GRANT SELECT ON TABLE lineitem_hash_part TO nostats; +SET ROLE nostats; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 2; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT partition_key FROM citus_query_stats(); + partition_key +--------------------------------------------------------------------- + 2 +(1 row) + +RESET ROLE; +-- stats-role/superuser should be able to see entries belonging to other users +SELECT partition_key FROM citus_query_stats() WHERE partition_key = '2'; + partition_key +--------------------------------------------------------------------- + 2 +(1 row) + +-- drop pg_stat_statements and verify citus_stat_statement does not work anymore +DROP extension pg_stat_statements; +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; +ERROR: pg_stat_statements is not installed +HINT: install pg_stat_statements extension and try again +CONTEXT: PL/pgSQL function citus_stat_statements() line XX at RAISE +-- drop created tables +DROP TABLE stat_test_text, stat_test_bigint, stat_test_bigint_other, stat_test_reference; +DROP FUNCTION normalize_query_string(text); +\if :server_version_above_thirteen +SET compute_query_id = 'off'; +\endif diff --git a/src/test/regress/expected/text_search.out b/src/test/regress/expected/text_search.out index 39e57326e..b9934a1d4 100644 --- a/src/test/regress/expected/text_search.out +++ b/src/test/regress/expected/text_search.out @@ -211,13 +211,6 @@ SELECT * FROM run_command_on_workers($$ SELECT 'text_search.french_noaccent'::re (2 rows) SET client_min_messages TO 'warning'; -SELECT * FROM run_command_on_workers($$CREATE ROLE text_search_owner;$$) ORDER BY 1,2; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | CREATE ROLE - localhost | 57638 | t | CREATE ROLE -(2 rows) - CREATE ROLE text_search_owner; RESET client_min_messages; CREATE TEXT SEARCH CONFIGURATION changed_owner ( PARSER = default ); diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index a93e45f81..4b9f26cec 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -2,6 +2,9 @@ -- MULTI_ALTER_TABLE_STATEMENTS -- +-- this test has different output per version +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; @@ -537,13 +540,38 @@ DROP TABLESPACE super_fast_ssd; -- Cleanup the table and its shards SET citus.enable_ddl_propagation to true; + +CREATE USER alter_table_owner WITH LOGIN; + +GRANT USAGE ON SCHEMA public TO alter_table_owner; + +\c - alter_table_owner - :master_port +-- should not be able to access table without permission +SELECT count(*) FROM lineitem_alter; + +-- should not be able to drop the table as non table owner DROP TABLE lineitem_alter; + +\c - postgres - :master_port +ALTER TABLE lineitem_alter OWNER TO alter_table_owner; + +\c - alter_table_owner - :master_port +-- should be able to query the table as table owner +SELECT count(*) FROM lineitem_alter; + +-- should be able to drop the table as table owner +DROP TABLE lineitem_alter; + 
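The new block above exercises owner-only DDL on a distributed table: a freshly created role can neither read nor drop lineitem_alter until ownership is transferred with ALTER TABLE ... OWNER TO. As a rough editorial sketch, not part of the patch and assuming the alter_table_owner role and lineitem_alter table defined in this test, propagation of ownership to the worker shards could be spot-checked like this:

ALTER TABLE lineitem_alter OWNER TO alter_table_owner;
-- every lineitem_alter_* shard on the workers should now report the new owner
SELECT * FROM run_command_on_workers($$
    SELECT string_agg(DISTINCT pg_get_userbyid(relowner), ',')
    FROM pg_class
    WHERE relname LIKE 'lineitem_alter%'
$$);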
-- check that nothing's left over on workers, other than the leftover shard created -- during the unsuccessful COPY -\c - - - :worker_1_port +\c - postgres - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; \c - - - :master_port +-- drop the roles created +REVOKE ALL ON SCHEMA PUBLIC FROM alter_table_owner; +DROP ROLE alter_table_owner; + -- Test alter table with drop table in the same transaction BEGIN; CREATE TABLE test_table_1(id int); diff --git a/src/test/regress/input/multi_copy.source b/src/test/regress/input/multi_copy.source index ba6ad675f..d68f10193 100644 --- a/src/test/regress/input/multi_copy.source +++ b/src/test/regress/input/multi_copy.source @@ -534,7 +534,6 @@ DROP TABLE numbers_append; -- Test copy failures against connection failures -- create and switch to test user CREATE USER test_user; -SELECT * FROM run_command_on_workers('CREATE USER test_user'); \c - test_user SET citus.shard_count to 4; @@ -660,7 +659,6 @@ SELECT shardid, shardstate, nodename, nodeport WHERE logicalrelid = 'numbers_hash'::regclass order by shardid, nodeport; DROP TABLE numbers_hash; -SELECT * FROM run_command_on_workers('DROP USER test_user'); DROP USER test_user; -- Test copy with built-in type without binary output function @@ -754,6 +752,22 @@ red {"r":255,"g":0,"b":0 \. TRUNCATE copy_jsonb; + +-- JSONB when there is a complex column should work. Complex columns force +-- non binary copy format between master and workers. +CREATE TYPE complex_for_copy_test AS (r double precision, i double precision); +CREATE TABLE copy_jsonb_with_complex(key int, value_1 jsonb, value_2 complex_for_copy_test); +SELECT create_distributed_table('copy_jsonb_with_complex', 'key'); + +\COPY copy_jsonb_with_complex FROM STDIN +1 {"f1": 3803, "f2": "v1", "f3": {"f4": 1}} (10,9) +2 {"f5": null, "f6": true} (20,19) +\. +SELECT * FROM copy_jsonb_with_complex ORDER BY key; + +DROP TABLE copy_jsonb_with_complex; +DROP TYPE complex_for_copy_test; + SET citus.skip_jsonb_validation_in_copy TO off; -- JSONB from text should work diff --git a/src/test/regress/input/multi_multiuser_load_data.source b/src/test/regress/input/multi_multiuser_load_data.source new file mode 100644 index 000000000..5681347c6 --- /dev/null +++ b/src/test/regress/input/multi_multiuser_load_data.source @@ -0,0 +1,41 @@ +-- +-- MULTI_MULTIUSER_LOAD_DATA +-- + +-- Tests for loading data in a distributed cluster. Please note that the number +-- of shards uploaded depends on two config values: citusdb.shard_replication_factor and +-- citusdb.shard_max_size. These values are manually set in pg_regress.c. We also set +-- the shard placement policy to the local-node-first policy as other regression +-- tests expect the placements to be in that order. 
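The header comment of this new test notes that later tests depend on the order in which shard placements are created under the local-node-first policy. Purely as an illustrative sketch, not part of the patch, the resulting placements for the lineitem table could be listed with the same catalog views other tests in this patch already query:

-- list each shard of lineitem together with the nodes holding its placements;
-- the schedule relies on this ordering staying stable across runs
SELECT shardid, nodename, nodeport
FROM pg_dist_shard_placement
WHERE shardid IN (SELECT shardid FROM pg_dist_shard
                  WHERE logicalrelid = 'lineitem'::regclass)
ORDER BY shardid, nodeport;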
+ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; + +SET citusdb.shard_placement_policy TO 'local-node-first'; + +-- load as superuser +\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' + +-- as user with ALL access +SET ROLE full_access; +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +RESET ROLE; + +-- as user with SELECT access, should fail +SET ROLE read_access; +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +RESET ROLE; + +-- as user with no access, should fail +SET ROLE no_access; +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +RESET ROLE; + +SET ROLE full_access; +\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' + +\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|' +\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 1897f7d6c..9d70f169e 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -35,6 +35,7 @@ test: distributed_triggers test: multi_test_catalog_views test: multi_table_ddl test: multi_sequence_default +test: grant_on_sequence_propagation test: multi_name_lengths test: multi_name_resolution test: multi_metadata_access @@ -229,6 +230,9 @@ test: multi_drop_extension test: multi_metadata_sync test: multi_unsupported_worker_operations +test: grant_on_function_propagation +test: grant_on_foreign_server_propagation + # ---------- # grant_on_schema_propagation tests if the GRANT ... ON SCHEMA queries are propagated correctly # multi_schema_support makes sure we can work with tables in schemas other than public with no problem diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 6a2785553..f9136008f 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -1,5 +1,6 @@ test: multi_test_helpers multi_test_helpers_superuser test: multi_cluster_management +test: create_role_propagation test: multi_create_fdw test: multi_test_catalog_views test: replicated_table_disable_node diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index 3707ab016..7a93bcb7b 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -1,6 +1,14 @@ -- -- MULTI_ALTER_TABLE_STATEMENTS -- +-- this test has different output per version +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; + version_above_ten +------------------- + t +(1 row) + ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; -- Check that we can run ALTER TABLE statements on distributed tables. 
-- We set the shardid sequence here so that the shardids in this test @@ -350,7 +358,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER, ALTER COLUMN int_column1 SET STATISTICS 10; ERROR: alter table command is currently unsupported -DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ATTACH|DETACH PARTITION and TYPE subcommands are supported. +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers @@ -382,12 +390,12 @@ ERROR: cannot execute ALTER TABLE command involving partition column -- Verify that we error out on unsupported statement types ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey SET STATISTICS 100; ERROR: alter table command is currently unsupported -DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ATTACH|DETACH PARTITION and TYPE subcommands are supported. +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. ALTER TABLE lineitem_alter DROP CONSTRAINT IF EXISTS non_existent_contraint; NOTICE: constraint "non_existent_contraint" of relation "lineitem_alter" does not exist, skipping ALTER TABLE lineitem_alter SET WITHOUT OIDS; ERROR: alter table command is currently unsupported -DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ATTACH|DETACH PARTITION and TYPE subcommands are supported. +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. -- Verify that we error out in case of postgres errors on supported statement -- types ALTER TABLE lineitem_alter ADD COLUMN new_column non_existent_type; @@ -1037,10 +1045,30 @@ HINT: Connect to worker nodes directly to manually move all tables. 
DROP TABLESPACE super_fast_ssd; -- Cleanup the table and its shards SET citus.enable_ddl_propagation to true; +CREATE USER alter_table_owner WITH LOGIN; +GRANT USAGE ON SCHEMA public TO alter_table_owner; +\c - alter_table_owner - :master_port +-- should not be able to access table without permission +SELECT count(*) FROM lineitem_alter; +ERROR: permission denied for table lineitem_alter +-- should not be able to drop the table as non table owner +DROP TABLE lineitem_alter; +ERROR: must be owner of table lineitem_alter +\c - postgres - :master_port +ALTER TABLE lineitem_alter OWNER TO alter_table_owner; +\c - alter_table_owner - :master_port +-- should be able to query the table as table owner +SELECT count(*) FROM lineitem_alter; + count +------- + 18000 +(1 row) + +-- should be able to drop the table as table owner DROP TABLE lineitem_alter; -- check that nothing's left over on workers, other than the leftover shard created -- during the unsuccessful COPY -\c - - - :worker_1_port +\c - postgres - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; relname ----------------------- @@ -1048,6 +1076,9 @@ SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; (1 row) \c - - - :master_port +-- drop the roles created +REVOKE ALL ON SCHEMA PUBLIC FROM alter_table_owner; +DROP ROLE alter_table_owner; -- Test alter table with drop table in the same transaction BEGIN; CREATE TABLE test_table_1(id int); diff --git a/src/test/regress/output/multi_alter_table_statements_0.source b/src/test/regress/output/multi_alter_table_statements_0.source new file mode 100644 index 000000000..c6a549052 --- /dev/null +++ b/src/test/regress/output/multi_alter_table_statements_0.source @@ -0,0 +1,1237 @@ +-- +-- MULTI_ALTER_TABLE_STATEMENTS +-- +-- this test has different output per version +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; + version_above_ten +------------------- + f +(1 row) + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; +-- Check that we can run ALTER TABLE statements on distributed tables. +-- We set the shardid sequence here so that the shardids in this test +-- aren't affected by changes to the previous tests. 
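This alternative expected-output file covers servers where version_above_ten is false, and it starts by pinning pg_dist_shardid_seq so shard names are deterministic regardless of what earlier tests created. A minimal sketch of that idea, outside the patch and using a hypothetical shardid_demo table, would be:

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000;
CREATE TABLE shardid_demo (key int);
SELECT create_distributed_table('shardid_demo', 'key');
-- shard identifiers for the new table start from the pinned value
SELECT min(shardid), max(shardid)
FROM pg_dist_shard
WHERE logicalrelid = 'shardid_demo'::regclass;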
+CREATE TABLE lineitem_alter ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null + ) + WITH ( fillfactor = 80 ); +SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); + create_distributed_table +-------------------------- + +(1 row) + +\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- verify that the storage options made it to the table definitions +SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; + relname | reloptions +----------------+----------------- + lineitem_alter | {fillfactor=80} +(1 row) + +\c - - - :worker_1_port +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' ORDER BY relname; + relname | reloptions +-----------------------+----------------- + lineitem_alter_220000 | {fillfactor=80} +(1 row) + +\c - - - :master_port +-- Verify that we can add columns +ALTER TABLE lineitem_alter ADD COLUMN float_column FLOAT; +ALTER TABLE lineitem_alter ADD COLUMN date_column DATE; +ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER DEFAULT 1; +ALTER TABLE lineitem_alter ADD COLUMN int_column2 INTEGER DEFAULT 2; +ALTER TABLE lineitem_alter ADD COLUMN null_column INTEGER; +-- show changed schema on one worker +\c - - - :worker_1_port +SELECT attname, atttypid::regtype +FROM + (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc + JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) +ORDER BY attnum; + attname | atttypid +-----------------+------------------- + tableoid | oid + cmax | cid + xmax | xid + cmin | cid + xmin | xid + ctid | tid + l_orderkey | bigint + l_partkey | integer + l_suppkey | integer + l_linenumber | integer + l_quantity | numeric + l_extendedprice | numeric + l_discount | numeric + l_tax | numeric + l_returnflag | character + l_linestatus | character + l_shipdate | date + l_commitdate | date + l_receiptdate | date + l_shipinstruct | character + l_shipmode | character + l_comment | character varying + float_column | double precision + date_column | date + int_column1 | integer + int_column2 | integer + null_column | integer +(27 rows) + +\c - - - :master_port +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; + Column | Type | Modifiers +-----------------+-----------------------+----------- + l_orderkey | bigint | not null + l_partkey | integer | not null + l_suppkey | integer | not null + l_linenumber | integer | not null + l_quantity | numeric(15,2) | not null + l_extendedprice | numeric(15,2) | not null + l_discount | numeric(15,2) | not null + l_tax | numeric(15,2) | not null + l_returnflag | character(1) | not null + l_linestatus | character(1) | not null + l_shipdate | date | not null + l_commitdate | date | not null + l_receiptdate | date | not null + l_shipinstruct | character(25) | not null + l_shipmode | character(10) | not null + l_comment | character varying(44) | not null + float_column | double precision | + date_column | date | + int_column1 | integer | default 1 + int_column2 | 
integer | default 2 + null_column | integer | +(21 rows) + +SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; + float_column | count +--------------+------- + | 6000 +(1 row) + +SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; + int_column1 | count +-------------+------- + 1 | 6000 +(1 row) + +-- Verify that SET|DROP DEFAULT works +ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; +ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; +-- \copy to verify that default values take effect +\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; + float_column | count +--------------+------- + | 6000 + 1 | 6000 +(2 rows) + +SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; + int_column1 | count +-------------+------- + | 6000 + 1 | 6000 +(2 rows) + +-- Verify that SET NOT NULL works +ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL; +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; + Column | Type | Modifiers +-----------------+-----------------------+-------------------- + l_orderkey | bigint | not null + l_partkey | integer | not null + l_suppkey | integer | not null + l_linenumber | integer | not null + l_quantity | numeric(15,2) | not null + l_extendedprice | numeric(15,2) | not null + l_discount | numeric(15,2) | not null + l_tax | numeric(15,2) | not null + l_returnflag | character(1) | not null + l_linestatus | character(1) | not null + l_shipdate | date | not null + l_commitdate | date | not null + l_receiptdate | date | not null + l_shipinstruct | character(25) | not null + l_shipmode | character(10) | not null + l_comment | character varying(44) | not null + float_column | double precision | default 1 + date_column | date | + int_column1 | integer | + int_column2 | integer | not null default 2 + null_column | integer | +(21 rows) + +-- Drop default so that NULLs will be inserted for this column +ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; +-- \copy should fail because it will try to insert NULLs for a NOT NULL column +-- Note, this operation will create a table on the workers but it won't be in the metadata +\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +ERROR: null value in column "int_column2" violates not-null constraint +DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). 
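The failing \copy above is deliberate: as the preceding comment says, the aborted append can leave a shard relation behind on a worker with no matching row in the coordinator metadata, which the end of this test checks for. A hedged sketch, not part of the patch, of how such leftovers could be compared against the metadata:

-- on a worker: physical shard relations that exist for lineitem_alter
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' ORDER BY relname;
-- back on the coordinator: the shards Citus actually has metadata for
\c - - - :master_port
SELECT shardid FROM pg_dist_shard
WHERE logicalrelid = 'lineitem_alter'::regclass ORDER BY shardid;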
+-- Verify that DROP NOT NULL works +ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; + Column | Type | Modifiers +-----------------+-----------------------+----------- + l_orderkey | bigint | not null + l_partkey | integer | not null + l_suppkey | integer | not null + l_linenumber | integer | not null + l_quantity | numeric(15,2) | not null + l_extendedprice | numeric(15,2) | not null + l_discount | numeric(15,2) | not null + l_tax | numeric(15,2) | not null + l_returnflag | character(1) | not null + l_linestatus | character(1) | not null + l_shipdate | date | not null + l_commitdate | date | not null + l_receiptdate | date | not null + l_shipinstruct | character(25) | not null + l_shipmode | character(10) | not null + l_comment | character varying(44) | not null + float_column | double precision | default 1 + date_column | date | + int_column1 | integer | + int_column2 | integer | + null_column | integer | +(21 rows) + +-- \copy should succeed now +\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +SELECT count(*) from lineitem_alter; + count +------- + 18000 +(1 row) + +-- Verify that SET DATA TYPE works +SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; + int_column2 | pg_typeof | count +-------------+-----------+------- + | integer | 6000 + 2 | integer | 12000 +(2 rows) + +ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT; +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; + Column | Type | Modifiers +-----------------+-----------------------+----------- + l_orderkey | bigint | not null + l_partkey | integer | not null + l_suppkey | integer | not null + l_linenumber | integer | not null + l_quantity | numeric(15,2) | not null + l_extendedprice | numeric(15,2) | not null + l_discount | numeric(15,2) | not null + l_tax | numeric(15,2) | not null + l_returnflag | character(1) | not null + l_linestatus | character(1) | not null + l_shipdate | date | not null + l_commitdate | date | not null + l_receiptdate | date | not null + l_shipinstruct | character(25) | not null + l_shipmode | character(10) | not null + l_comment | character varying(44) | not null + float_column | double precision | default 1 + date_column | date | + int_column1 | integer | + int_column2 | double precision | + null_column | integer | +(21 rows) + +SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; + int_column2 | pg_typeof | count +-------------+------------------+------- + | double precision | 6000 + 2 | double precision | 12000 +(2 rows) + +-- Verify that DROP COLUMN works +ALTER TABLE lineitem_alter DROP COLUMN int_column1; +ALTER TABLE lineitem_alter DROP COLUMN float_column; +ALTER TABLE lineitem_alter DROP COLUMN date_column; +-- Verify that RENAME COLUMN works +ALTER TABLE lineitem_alter RENAME COLUMN l_orderkey TO l_orderkey_renamed; +SELECT SUM(l_orderkey_renamed) FROM lineitem_alter; + sum +---------- + 53620791 +(1 row) + +-- Verify that IF EXISTS works as expected +ALTER TABLE non_existent_table ADD COLUMN new_column INTEGER; +ERROR: relation "non_existent_table" does not exist +ALTER TABLE IF 
EXISTS non_existent_table ADD COLUMN new_column INTEGER; +NOTICE: relation "non_existent_table" does not exist, skipping +ALTER TABLE IF EXISTS lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE INTEGER; +ALTER TABLE lineitem_alter DROP COLUMN non_existent_column; +ERROR: column "non_existent_column" of relation "lineitem_alter" does not exist +ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS non_existent_column; +NOTICE: column "non_existent_column" of relation "lineitem_alter" does not exist, skipping +ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS int_column2; +-- Verify with IF EXISTS for extant table +ALTER TABLE IF EXISTS lineitem_alter RENAME COLUMN l_orderkey_renamed TO l_orderkey; +SELECT SUM(l_orderkey) FROM lineitem_alter; + sum +---------- + 53620791 +(1 row) + +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; + Column | Type | Modifiers +-----------------+-----------------------+----------- + l_orderkey | bigint | not null + l_partkey | integer | not null + l_suppkey | integer | not null + l_linenumber | integer | not null + l_quantity | numeric(15,2) | not null + l_extendedprice | numeric(15,2) | not null + l_discount | numeric(15,2) | not null + l_tax | numeric(15,2) | not null + l_returnflag | character(1) | not null + l_linestatus | character(1) | not null + l_shipdate | date | not null + l_commitdate | date | not null + l_receiptdate | date | not null + l_shipinstruct | character(25) | not null + l_shipmode | character(10) | not null + l_comment | character varying(44) | not null + null_column | integer | +(17 rows) + +-- Verify that we can execute commands with multiple subcommands +ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER, + ADD COLUMN int_column2 INTEGER; +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; + Column | Type | Modifiers +-----------------+-----------------------+----------- + l_orderkey | bigint | not null + l_partkey | integer | not null + l_suppkey | integer | not null + l_linenumber | integer | not null + l_quantity | numeric(15,2) | not null + l_extendedprice | numeric(15,2) | not null + l_discount | numeric(15,2) | not null + l_tax | numeric(15,2) | not null + l_returnflag | character(1) | not null + l_linestatus | character(1) | not null + l_shipdate | date | not null + l_commitdate | date | not null + l_receiptdate | date | not null + l_shipinstruct | character(25) | not null + l_shipmode | character(10) | not null + l_comment | character varying(44) | not null + null_column | integer | + int_column1 | integer | + int_column2 | integer | +(19 rows) + +ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER, + ALTER COLUMN int_column1 SET STATISTICS 10; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. 
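The updated DETAIL text in this file spells out which ALTER TABLE subcommands Citus propagates, and it now includes the row level security toggles that this patch adds to the list. As a sketch outside the patch, reusing the test's lineitem_alter table, a mixed command can be split so that only supported subcommands are issued:

-- ADD COLUMN on its own is in the supported list and propagates to the shards
ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER;
-- the row level security forms named in the DETAIL message are likewise accepted
ALTER TABLE lineitem_alter ENABLE ROW LEVEL SECURITY;
ALTER TABLE lineitem_alter NO FORCE ROW LEVEL SECURITY;
-- SET STATISTICS is still not in the list, so this would keep erroring out:
-- ALTER TABLE lineitem_alter ALTER COLUMN int_column3 SET STATISTICS 10;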
+ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2; +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; + Column | Type | Modifiers +-----------------+-----------------------+----------- + l_orderkey | bigint | not null + l_partkey | integer | not null + l_suppkey | integer | not null + l_linenumber | integer | not null + l_quantity | numeric(15,2) | not null + l_extendedprice | numeric(15,2) | not null + l_discount | numeric(15,2) | not null + l_tax | numeric(15,2) | not null + l_returnflag | character(1) | not null + l_linestatus | character(1) | not null + l_shipdate | date | not null + l_commitdate | date | not null + l_receiptdate | date | not null + l_shipinstruct | character(25) | not null + l_shipmode | character(10) | not null + l_comment | character varying(44) | not null + null_column | integer | +(17 rows) + +-- Verify that we cannot execute alter commands on the distribution column +ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey DROP NOT NULL; +ERROR: cannot execute ALTER TABLE command involving partition column +ALTER TABLE lineitem_alter DROP COLUMN l_orderkey; +ERROR: cannot execute ALTER TABLE command involving partition column +-- Verify that we error out on unsupported statement types +ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey SET STATISTICS 100; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +ALTER TABLE lineitem_alter DROP CONSTRAINT IF EXISTS non_existent_contraint; +NOTICE: constraint "non_existent_contraint" of relation "lineitem_alter" does not exist, skipping +ALTER TABLE lineitem_alter SET WITHOUT OIDS; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +-- Verify that we error out in case of postgres errors on supported statement +-- types +ALTER TABLE lineitem_alter ADD COLUMN new_column non_existent_type; +ERROR: type "non_existent_type" does not exist +LINE 1: ALTER TABLE lineitem_alter ADD COLUMN new_column non_existen... 
+ ^ +ALTER TABLE lineitem_alter ALTER COLUMN null_column SET NOT NULL; +ERROR: column "null_column" contains null values +CONTEXT: while executing command on localhost:57638 +ALTER TABLE lineitem_alter ALTER COLUMN l_partkey SET DEFAULT 'a'; +ERROR: invalid input syntax for integer: "a" +-- Verify that we error out on RENAME CONSTRAINT statement +ALTER TABLE lineitem_alter RENAME CONSTRAINT constraint_a TO constraint_b; +ERROR: renaming constraints belonging to distributed tables is currently unsupported +-- Verify that IF EXISTS works as expected with RENAME statements +ALTER TABLE non_existent_table RENAME TO non_existent_table_renamed; +ERROR: relation "non_existent_table" does not exist +ALTER TABLE IF EXISTS non_existent_table RENAME TO non_existent_table_renamed; +NOTICE: relation "non_existent_table" does not exist, skipping +ALTER TABLE IF EXISTS non_existent_table RENAME COLUMN column1 TO column2; +NOTICE: relation "non_existent_table" does not exist, skipping +-- Verify that none of the failed alter table commands took effect on the master +-- node +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; + Column | Type | Modifiers +-----------------+-----------------------+----------- + l_orderkey | bigint | not null + l_partkey | integer | not null + l_suppkey | integer | not null + l_linenumber | integer | not null + l_quantity | numeric(15,2) | not null + l_extendedprice | numeric(15,2) | not null + l_discount | numeric(15,2) | not null + l_tax | numeric(15,2) | not null + l_returnflag | character(1) | not null + l_linestatus | character(1) | not null + l_shipdate | date | not null + l_commitdate | date | not null + l_receiptdate | date | not null + l_shipinstruct | character(25) | not null + l_shipmode | character(10) | not null + l_comment | character varying(44) | not null + null_column | integer | +(17 rows) + +-- verify that non-propagated ddl commands are allowed inside a transaction block +SET citus.enable_ddl_propagation to false; +BEGIN; +CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber); +COMMIT; +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +--------------+---------------- + temp_index_1 | lineitem_alter +(1 row) + +DROP INDEX temp_index_1; +-- verify that single distributed ddl commands are allowed inside a transaction block +SET citus.enable_ddl_propagation to true; +BEGIN; +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +COMMIT; +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +--------------+---------------- + temp_index_2 | lineitem_alter +(1 row) + +DROP INDEX temp_index_2; +-- and so are multiple ddl statements +BEGIN; +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +ALTER TABLE lineitem_alter ADD COLUMN first integer; +COMMIT; +SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; + Column | Type | Modifiers +-----------------+-----------------------+----------- + l_orderkey | bigint | not null + l_partkey | integer | not null + l_suppkey | integer | not null + l_linenumber | integer | not null + l_quantity | numeric(15,2) | not null + l_extendedprice | numeric(15,2) | not null + l_discount | numeric(15,2) | not null + l_tax | numeric(15,2) | not null + l_returnflag | character(1) | not null + l_linestatus | character(1) | not null + l_shipdate | date | not null + l_commitdate | date | not null + l_receiptdate | date | not null 
+ l_shipinstruct | character(25) | not null + l_shipmode | character(10) | not null + l_comment | character varying(44) | not null + null_column | integer | + first | integer | +(18 rows) + +SELECT "Column", "Type", "Definition" FROM index_attrs WHERE + relid = 'temp_index_2'::regclass; + Column | Type | Definition +------------+--------+------------ + l_orderkey | bigint | l_orderkey +(1 row) + +ALTER TABLE lineitem_alter DROP COLUMN first; +DROP INDEX temp_index_2; +-- ensure that user-specified rollback causes full rollback +BEGIN; +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); +ROLLBACK; +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +-----------+----------- +(0 rows) + +-- ensure that errors cause full rollback +BEGIN; +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +ERROR: relation "temp_index_2" already exists +ROLLBACK; +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +-----------+----------- +(0 rows) + +-- verify that SAVEPOINT is allowed... +BEGIN; +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +SAVEPOINT my_savepoint; +CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); +ROLLBACK; +-- and also rolling back to it is also allowed +BEGIN; +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +SAVEPOINT my_savepoint; +CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); +ROLLBACK TO my_savepoint; +COMMIT; +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +--------------+---------------- + temp_index_2 | lineitem_alter +(1 row) + +DROP INDEX temp_index_2; +-- Add column on only one worker... +\c - - - :worker_2_port +ALTER TABLE lineitem_alter_220000 ADD COLUMN first integer; +\c - - - :master_port +-- and try to add it in a multi-statement block, which fails +BEGIN; +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +ALTER TABLE lineitem_alter ADD COLUMN first integer; +ERROR: column "first" of relation "lineitem_alter_220000" already exists +CONTEXT: while executing command on localhost:57638 +COMMIT; +-- Nothing from the block should have committed +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +-----------+----------- +(0 rows) + +-- Create single-shard table (to avoid deadlocks in the upcoming test hackery) +CREATE TABLE single_shard_items (id integer NOT NULL, name text); +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 2; +SELECT create_distributed_table('single_shard_items', 'id', 'hash'); + create_distributed_table +-------------------------- + +(1 row) + +-- Verify that ALTER TABLE .. REPLICATION IDENTITY [USING INDEX]* .. 
works +CREATE UNIQUE INDEX replica_idx on single_shard_items(id); +SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; + relreplident +-------------- + d +(1 row) + +SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); + run_command_on_workers +------------------------ + (localhost,57637,t,d) + (localhost,57638,t,d) +(2 rows) + +ALTER TABLE single_shard_items REPLICA IDENTITY nothing; +SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; + relreplident +-------------- + n +(1 row) + +SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); + run_command_on_workers +------------------------ + (localhost,57637,t,n) + (localhost,57638,t,n) +(2 rows) + +ALTER TABLE single_shard_items REPLICA IDENTITY full; +SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; + relreplident +-------------- + f +(1 row) + +SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); + run_command_on_workers +------------------------ + (localhost,57637,t,f) + (localhost,57638,t,f) +(2 rows) + +ALTER TABLE single_shard_items REPLICA IDENTITY USING INDEX replica_idx; +SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; + relreplident +-------------- + i +(1 row) + +SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); + run_command_on_workers +------------------------ + (localhost,57637,t,i) + (localhost,57638,t,i) +(2 rows) + +ALTER TABLE single_shard_items REPLICA IDENTITY default, REPLICA IDENTITY USING INDEX replica_idx, REPLICA IDENTITY nothing; +SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; + relreplident +-------------- + n +(1 row) + +SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); + run_command_on_workers +------------------------ + (localhost,57637,t,n) + (localhost,57638,t,n) +(2 rows) + +ALTER TABLE single_shard_items ADD COLUMN test_col int, REPLICA IDENTITY full; +DROP INDEX replica_idx; +ALTER TABLE single_shard_items REPLICA IDENTITY default; +-- Drop the column from the worker... +\c - - - :worker_2_port +ALTER TABLE lineitem_alter_220000 DROP COLUMN first; +-- Create table to trigger at-xact-end (deferred) failure +CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED); +-- Use an event trigger to log all DDL event tags in it +CREATE FUNCTION log_ddl_tag() RETURNS event_trigger AS $ldt$ + BEGIN + INSERT INTO ddl_commands VALUES (tg_tag); + END; +$ldt$ LANGUAGE plpgsql; +CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag(); +\c - - - :master_port +-- The above trigger will cause failure at transaction end on one placement. +-- We'll test 2PC first, as it should handle this "best" (no divergence) +SET citus.multi_shard_commit_protocol TO '2pc'; +BEGIN; +CREATE INDEX single_index_2 ON single_shard_items(id); +CREATE INDEX single_index_3 ON single_shard_items(name); +COMMIT; +ERROR: duplicate key value violates unique constraint "ddl_commands_command_key" +DETAIL: Key (command)=(CREATE INDEX) already exists. 
+CONTEXT: while executing command on localhost:57638 +-- Nothing from the block should have committed +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; + indexname | tablename +-----------+----------- +(0 rows) + +-- Now try with 2pc off +SET citus.multi_shard_commit_protocol TO '1pc'; +BEGIN; +CREATE INDEX single_index_2 ON single_shard_items(id); +CREATE INDEX single_index_3 ON single_shard_items(name); +COMMIT; +WARNING: duplicate key value violates unique constraint "ddl_commands_command_key" +DETAIL: Key (command)=(CREATE INDEX) already exists. +CONTEXT: while executing command on localhost:57638 +WARNING: failed to commit transaction on localhost:57638 +-- The block should have committed with a warning +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; + indexname | tablename +----------------+-------------------- + single_index_2 | single_shard_items + single_index_3 | single_shard_items +(2 rows) + +\c - - - :worker_2_port +DROP EVENT TRIGGER log_ddl_tag; +DROP FUNCTION log_ddl_tag(); +DROP TABLE ddl_commands; +\c - - - :master_port +-- Distributed SELECTs may appear after ALTER +BEGIN; +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +SELECT count(*) FROM lineitem_alter; + count +------- + 18000 +(1 row) + +ROLLBACK; +-- and before +BEGIN; +SELECT count(*) FROM lineitem_alter; + count +------- + 18000 +(1 row) + +CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); +COMMIT; +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +--------------+---------------- + temp_index_2 | lineitem_alter +(1 row) + +DROP INDEX temp_index_2; +--- verify that distributed ddl commands can be used with 2pc +SET citus.multi_shard_commit_protocol TO '2pc'; +CREATE INDEX temp_index_3 ON lineitem_alter(l_orderkey); +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +--------------+---------------- + temp_index_3 | lineitem_alter +(1 row) + +DROP INDEX temp_index_3; +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +-----------+----------- +(0 rows) + +RESET citus.multi_shard_commit_protocol; +-- verify that not any of shard placements are marked as failed when a query failure occurs +CREATE TABLE test_ab (a int, b int); +SET citus.shard_count TO 8; +SELECT create_distributed_table('test_ab', 'a', 'hash'); + create_distributed_table +-------------------------- + +(1 row) + +INSERT INTO test_ab VALUES (2, 10); +INSERT INTO test_ab VALUES (2, 11); +CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a); +ERROR: could not create unique index "temp_unique_index_1_220011" +DETAIL: Key (a)=(2) is duplicated. 
+CONTEXT: while executing command on localhost:57638 +SELECT shardid FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard +WHERE logicalrelid='test_ab'::regclass AND shardstate=3; + shardid +--------- +(0 rows) + +-- Check that the schema on the worker still looks reasonable +\c - - - :worker_1_port +SELECT attname, atttypid::regtype +FROM + (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc + JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) +ORDER BY attnum; + attname | atttypid +-------------------------------+------------------- + tableoid | oid + cmax | cid + xmax | xid + cmin | cid + xmin | xid + ctid | tid + l_orderkey | bigint + l_partkey | integer + l_suppkey | integer + l_linenumber | integer + l_quantity | numeric + l_extendedprice | numeric + l_discount | numeric + l_tax | numeric + l_returnflag | character + l_linestatus | character + l_shipdate | date + l_commitdate | date + l_receiptdate | date + l_shipinstruct | character + l_shipmode | character + l_comment | character varying + ........pg.dropped.17........ | - + ........pg.dropped.18........ | - + ........pg.dropped.19........ | - + ........pg.dropped.20........ | - + null_column | integer + ........pg.dropped.22........ | - + ........pg.dropped.23........ | - + ........pg.dropped.24........ | - +(30 rows) + +\c - - - :master_port +-- verify that we can rename distributed tables +SHOW citus.enable_ddl_propagation; + citus.enable_ddl_propagation +------------------------------ + on +(1 row) + +ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; +-- verify rename is performed +SELECT relname FROM pg_class WHERE relname = 'lineitem_renamed'; + relname +------------------ + lineitem_renamed +(1 row) + +-- show rename worked on one worker, too +\c - - - :worker_1_port +SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_renamed%' ORDER BY relname; + relname +------------------------- + lineitem_renamed_220000 + lineitem_renamed_220001 + lineitem_renamed_220003 +(3 rows) + +\c - - - :master_port +-- revert it to original name +ALTER TABLE lineitem_renamed RENAME TO lineitem_alter; +-- show rename worked on one worker, too +\c - - - :worker_1_port +SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname; + relname +----------------------- + lineitem_alter_220000 + lineitem_alter_220001 + lineitem_alter_220003 +(3 rows) + +\c - - - :master_port +-- verify that we can set and reset storage parameters +ALTER TABLE lineitem_alter SET(fillfactor=40); +SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; + relname | reloptions +----------------+----------------- + lineitem_alter | {fillfactor=40} +(1 row) + +\c - - - :worker_1_port +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname; + relname | reloptions +-----------------------+----------------- + lineitem_alter_220000 | {fillfactor=40} + lineitem_alter_220001 | {fillfactor=40} + lineitem_alter_220003 | {fillfactor=40} +(3 rows) + +\c - - - :master_port +ALTER TABLE lineitem_alter RESET(fillfactor); +SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; + relname | reloptions +----------------+------------ + lineitem_alter | +(1 row) + +\c - - - :worker_1_port +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed 
copy trails */ ORDER BY relname; + relname | reloptions +-----------------------+------------ + lineitem_alter_220000 | + lineitem_alter_220001 | + lineitem_alter_220003 | +(3 rows) + +\c - - - :master_port +-- verify that we can rename indexes on distributed tables +CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber); +ALTER INDEX temp_index_1 RENAME TO idx_lineitem_linenumber; +-- verify rename is performed +SELECT relname FROM pg_class WHERE relname = 'idx_lineitem_linenumber'; + relname +------------------------- + idx_lineitem_linenumber +(1 row) + +-- show rename worked on one worker, too +\c - - - :worker_1_port +SELECT relname FROM pg_class WHERE relname LIKE 'idx_lineitem_linenumber%' ORDER BY relname; + relname +-------------------------------- + idx_lineitem_linenumber_220000 + idx_lineitem_linenumber_220001 + idx_lineitem_linenumber_220003 +(3 rows) + +\c - - - :master_port +-- now get rid of the index +DROP INDEX idx_lineitem_linenumber; +-- verify that we don't intercept DDL commands if propagation is turned off +SET citus.enable_ddl_propagation to false; +-- table rename statement can be performed on the coordinator only now +ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; +-- verify rename is performed +SELECT relname FROM pg_class WHERE relname = 'lineitem_alter' or relname = 'lineitem_renamed'; + relname +------------------ + lineitem_renamed +(1 row) + +-- revert it to original name +ALTER TABLE lineitem_renamed RENAME TO lineitem_alter; +-- this column is added to master table and not workers +ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int; +-- verify newly added column is not present in a worker shard +\c - - - :worker_1_port +SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0; +ERROR: column "column_only_added_to_master" does not exist +LINE 1: SELECT column_only_added_to_master FROM lineitem_alter_22000... + ^ +\c - - - :master_port +-- ddl propagation flag is reset to default, disable it again +SET citus.enable_ddl_propagation to false; +-- following query succeeds since it accesses an previously existing column +SELECT l_orderkey FROM lineitem_alter LIMIT 0; + l_orderkey +------------ +(0 rows) + +-- make master and workers have the same schema again +ALTER TABLE lineitem_alter DROP COLUMN column_only_added_to_master; +-- now this should succeed +SELECT * FROM lineitem_alter LIMIT 0; + l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment | null_column +------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+----------------+------------+-----------+------------- +(0 rows) + +-- previously unsupported statements are accepted by postgresql now +ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey SET STATISTICS 100; +ALTER TABLE lineitem_alter DROP CONSTRAINT IF EXISTS non_existent_contraint; +NOTICE: constraint "non_existent_contraint" of relation "lineitem_alter" does not exist, skipping +ALTER TABLE lineitem_alter SET WITHOUT OIDS; +-- distribution column still cannot be dropped. +ALTER TABLE lineitem_alter DROP COLUMN l_orderkey; +ERROR: cannot execute ALTER TABLE command dropping partition column +-- Even unique indexes on l_partkey (non-partition column) are allowed. +-- Citus would have prevented that. 
+CREATE UNIQUE INDEX unique_lineitem_partkey on lineitem_alter(l_partkey); +SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; + indexname | tablename +-------------------------+---------------- + unique_lineitem_partkey | lineitem_alter +(1 row) + +-- verify index is not created on worker +\c - - - :worker_1_port +SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alter_%'; + indexname | tablename +-----------+----------- +(0 rows) + +\c - - - :master_port +-- verify alter table and drop sequence in the same transaction does not cause deadlock +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 2; +CREATE TABLE sequence_deadlock_test (a serial, b serial); +SELECT create_distributed_table('sequence_deadlock_test', 'a'); + create_distributed_table +-------------------------- + +(1 row) + +BEGIN; +ALTER TABLE sequence_deadlock_test ADD COLUMN c int; +-- suppress notice message caused by DROP ... CASCADE to prevent pg version difference +SET client_min_messages TO 'WARNING'; +DROP SEQUENCE sequence_deadlock_test_b_seq CASCADE; +RESET client_min_messages; +END; +DROP TABLE sequence_deadlock_test; +-- verify enable/disable trigger all works +SET citus.shard_replication_factor TO 1; +SET citus.shard_count TO 1; +CREATE TABLE trigger_table ( + id int, + value text +); +SELECT create_distributed_table('trigger_table', 'id'); + create_distributed_table +-------------------------- + +(1 row) + +-- first set a trigger on a shard +\c - - - :worker_1_port +CREATE FUNCTION update_value() RETURNS trigger AS $up$ + BEGIN + NEW.value := 'trigger enabled'; + RETURN NEW; + END; +$up$ LANGUAGE plpgsql; +CREATE TRIGGER update_value +BEFORE INSERT ON trigger_table_220017 +FOR EACH ROW EXECUTE PROCEDURE update_value(); +\c - - - :master_port +INSERT INTO trigger_table VALUES (1, 'trigger disabled'); +SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; + value | count +-----------------+------- + trigger enabled | 1 +(1 row) + +ALTER TABLE trigger_table DISABLE TRIGGER ALL; +INSERT INTO trigger_table VALUES (1, 'trigger disabled'); +SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; + value | count +------------------+------- + trigger disabled | 1 + trigger enabled | 1 +(2 rows) + +ALTER TABLE trigger_table ENABLE TRIGGER ALL; +INSERT INTO trigger_table VALUES (1, 'trigger disabled'); +SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; + value | count +------------------+------- + trigger disabled | 1 + trigger enabled | 2 +(2 rows) + +DROP TABLE trigger_table; +-- test ALTER TABLE ALL IN TABLESPACE +-- we expect that it will warn out +CREATE TABLESPACE super_fast_ssd LOCATION '@abs_srcdir@/data'; +ALTER TABLE ALL IN TABLESPACE pg_default SET TABLESPACE super_fast_ssd; +WARNING: not propagating ALTER TABLE ALL IN TABLESPACE commands to worker nodes +HINT: Connect to worker nodes directly to manually move all tables. +ALTER TABLE ALL IN TABLESPACE super_fast_ssd SET TABLESPACE pg_default; +WARNING: not propagating ALTER TABLE ALL IN TABLESPACE commands to worker nodes +HINT: Connect to worker nodes directly to manually move all tables. 
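The WARNING above comes with a HINT to connect to the worker nodes directly. A sketch of what that manual step could look like for the first worker in this test setup, assuming an equivalent tablespace had also been created on that worker:

\c - - - :worker_1_port
ALTER TABLE ALL IN TABLESPACE pg_default SET TABLESPACE super_fast_ssd;
\c - - - :master_port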
+DROP TABLESPACE super_fast_ssd; +-- Cleanup the table and its shards +SET citus.enable_ddl_propagation to true; +CREATE USER alter_table_owner WITH LOGIN; +GRANT USAGE ON SCHEMA public TO alter_table_owner; +\c - alter_table_owner - :master_port +-- should not be able to access table without permission +SELECT count(*) FROM lineitem_alter; +ERROR: permission denied for relation lineitem_alter +-- should not be able to drop the table as non table owner +DROP TABLE lineitem_alter; +ERROR: must be owner of relation lineitem_alter +\c - postgres - :master_port +ALTER TABLE lineitem_alter OWNER TO alter_table_owner; +\c - alter_table_owner - :master_port +-- should be able to query the table as table owner +SELECT count(*) FROM lineitem_alter; + count +------- + 18000 +(1 row) + +-- should be able to drop the table as table owner +DROP TABLE lineitem_alter; +-- check that nothing's left over on workers, other than the leftover shard created +-- during the unsuccessful COPY +\c - postgres - :worker_1_port +SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; + relname +----------------------- + lineitem_alter_220002 +(1 row) + +\c - - - :master_port +-- drop the roles created +REVOKE ALL ON SCHEMA PUBLIC FROM alter_table_owner; +DROP ROLE alter_table_owner; +SELECT run_command_on_workers('DROP ROLE alter_table_owner'); + run_command_on_workers +--------------------------------- + (localhost,57637,t,"DROP ROLE") + (localhost,57638,t,"DROP ROLE") +(2 rows) + +-- Test alter table with drop table in the same transaction +BEGIN; +CREATE TABLE test_table_1(id int); +SELECT create_distributed_table('test_table_1','id'); + create_distributed_table +-------------------------- + +(1 row) + +ALTER TABLE test_table_1 ADD CONSTRAINT u_key UNIQUE(id); +DROP TABLE test_table_1; +END; +-- There should be no test_table_1 shard on workers +\c - - - :worker_1_port +SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%'; + relname +--------- +(0 rows) + +\c - - - :master_port +-- Test WITH options on a normal simple hash-distributed table +CREATE TABLE hash_dist(id bigint primary key, f1 text) WITH (fillfactor=40); +SELECT create_distributed_table('hash_dist','id'); + create_distributed_table +-------------------------- + +(1 row) + +-- verify that the storage options made it to the table definitions +SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist'; + relname | reloptions +-----------+----------------- + hash_dist | {fillfactor=40} +(1 row) + +\c - - - :worker_1_port +SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist%' ORDER BY relname; + relname | reloptions +------------------+----------------- + hash_dist_220022 | {fillfactor=40} + hash_dist_220023 | {fillfactor=40} + hash_dist_220024 | {fillfactor=40} + hash_dist_220025 | {fillfactor=40} +(4 rows) + +\c - - - :master_port +-- verify that we can set and reset index storage parameters +ALTER INDEX hash_dist_pkey SET(fillfactor=40); +SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; + relname | reloptions +----------------+----------------- + hash_dist_pkey | {fillfactor=40} +(1 row) + +\c - - - :worker_1_port +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; + relname | reloptions +-----------------------+----------------- + hash_dist_pkey_220022 | {fillfactor=40} + hash_dist_pkey_220023 | {fillfactor=40} + hash_dist_pkey_220024 | {fillfactor=40} + hash_dist_pkey_220025 | {fillfactor=40} +(4 rows) + +\c - - - 
:master_port +ALTER INDEX hash_dist_pkey RESET(fillfactor); +SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; + relname | reloptions +----------------+------------ + hash_dist_pkey | +(1 row) + +\c - - - :worker_1_port +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; + relname | reloptions +-----------------------+------------ + hash_dist_pkey_220022 | + hash_dist_pkey_220023 | + hash_dist_pkey_220024 | + hash_dist_pkey_220025 | +(4 rows) + +\c - - - :master_port +-- verify error message on ALTER INDEX, SET TABLESPACE is unsupported +ALTER INDEX hash_dist_pkey SET TABLESPACE foo; +ERROR: alter index ... set tablespace ... is currently unsupported +DETAIL: Only RENAME TO, SET (), and RESET () are supported. +-- verify that we can add indexes with new storage options +CREATE UNIQUE INDEX another_index ON hash_dist(id) WITH (fillfactor=50); +-- show the index and its storage options on coordinator, then workers +SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index'; + relname | reloptions +---------------+----------------- + another_index | {fillfactor=50} +(1 row) + +\c - - - :worker_1_port +SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index%' ORDER BY relname; + relname | reloptions +----------------------+----------------- + another_index_220022 | {fillfactor=50} + another_index_220023 | {fillfactor=50} + another_index_220024 | {fillfactor=50} + another_index_220025 | {fillfactor=50} +(4 rows) + +\c - - - :master_port +-- get rid of the index +DROP INDEX another_index; +-- check if we fail properly when a column with un-supported constraint is added +-- UNIQUE, PRIMARY KEY on non-distribution column is not supported +-- CHECK, FOREIGN KEY, UNIQE, PRIMARY KEY cannot be added together with ADD COLUMN +SET citus.shard_replication_factor TO 1; +CREATE TABLE test_table_1(id int); +SELECT create_distributed_table('test_table_1', 'id'); + create_distributed_table +-------------------------- + +(1 row) + +ALTER TABLE test_table_1 ADD COLUMN test_col int UNIQUE; +ERROR: cannot create constraint on "test_table_1" +DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). +ALTER TABLE test_table_1 ADD COLUMN test_col int PRIMARY KEY; +ERROR: cannot create constraint on "test_table_1" +DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). 
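Both attempts above are rejected because the new constraint would not cover the distribution column. As the earlier u_key example on test_table_1 shows, a uniqueness constraint on the distribution column itself is accepted when added separately with an explicit name; a sketch, using the hypothetical constraint name test_table_1_id_key:

ALTER TABLE test_table_1 ADD CONSTRAINT test_table_1_id_key UNIQUE (id);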
+ALTER TABLE test_table_1 ADD COLUMN test_col int CHECK (test_col > 3); +ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints +DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names +HINT: You can issue each command separately such as ALTER TABLE test_table_1 ADD COLUMN test_col data_type; ALTER TABLE test_table_1 ADD CONSTRAINT constraint_name CHECK (check_expression); +CREATE TABLE reference_table(i int UNIQUE); +SELECT create_reference_table('reference_table'); + create_reference_table +------------------------ + +(1 row) + +ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES reference_table(i) ON DELETE CASCADE; +ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints +DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names +HINT: You can issue each command separately such as ALTER TABLE test_table_1 ADD COLUMN test_col data_type; ALTER TABLE test_table_1 ADD CONSTRAINT constraint_name FOREIGN KEY (test_col) REFERENCES reference_table(i) ON DELETE CASCADE; +ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES reference_table(i) ON DELETE CASCADE ON UPDATE SET NULL; +ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints +DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names +HINT: You can issue each command separately such as ALTER TABLE test_table_1 ADD COLUMN test_col data_type; ALTER TABLE test_table_1 ADD CONSTRAINT constraint_name FOREIGN KEY (test_col) REFERENCES reference_table(i) ON DELETE CASCADE ON UPDATE SET NULL; +DROP TABLE reference_table; +CREATE TABLE referenced_table(i int UNIQUE); +SELECT create_distributed_table('referenced_table', 'i'); + create_distributed_table +-------------------------- + +(1 row) + +ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES referenced_table(i); +ERROR: cannot create foreign key constraint +DETAIL: Foreign keys are supported in two cases, either in between two colocated tables including partition column in the same ordinal in the both tables or from distributed to reference tables +DROP TABLE referenced_table, test_table_1; diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index 5692ed347..07eebd2d5 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -669,15 +669,6 @@ DROP TABLE numbers_append; -- Test copy failures against connection failures -- create and switch to test user CREATE USER test_user; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
-SELECT * FROM run_command_on_workers('CREATE USER test_user'); - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | CREATE ROLE - localhost | 57638 | t | CREATE ROLE -(2 rows) - \c - test_user SET citus.shard_count to 4; CREATE TABLE numbers_hash (a int, b int); @@ -855,13 +846,6 @@ SELECT shardid, shardstate, nodename, nodeport (8 rows) DROP TABLE numbers_hash; -SELECT * FROM run_command_on_workers('DROP USER test_user'); - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | DROP ROLE - localhost | 57638 | t | DROP ROLE -(2 rows) - DROP USER test_user; -- Test copy with built-in type without binary output function CREATE TABLE test_binaryless_builtin ( @@ -990,9 +974,27 @@ SELECT * FROM copy_jsonb ORDER BY key; \COPY copy_jsonb (key, value) FROM STDIN ERROR: invalid input syntax for json DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: {"r":255,"g":0,"b":0 -COPY copy_jsonb, line 1, column value: "{"r":255,"g":0,"b":0" TRUNCATE copy_jsonb; +-- JSONB when there is a complex column should work. Complex columns force +-- non binary copy format between master and workers. +CREATE TYPE complex_for_copy_test AS (r double precision, i double precision); +CREATE TABLE copy_jsonb_with_complex(key int, value_1 jsonb, value_2 complex_for_copy_test); +SELECT create_distributed_table('copy_jsonb_with_complex', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\COPY copy_jsonb_with_complex FROM STDIN +SELECT * FROM copy_jsonb_with_complex ORDER BY key; + key | value_1 | value_2 +--------------------------------------------------------------------- + 1 | {"f1": 3803, "f2": "v1", "f3": {"f4": 1}} | (10,9) + 2 | {"f5": null, "f6": true} | (20,19) +(2 rows) + +DROP TABLE copy_jsonb_with_complex; +DROP TYPE complex_for_copy_test; SET citus.skip_jsonb_validation_in_copy TO off; -- JSONB from text should work \COPY copy_jsonb (key, value) FROM STDIN diff --git a/src/test/regress/output/multi_multiuser_load_data.source b/src/test/regress/output/multi_multiuser_load_data.source new file mode 100644 index 000000000..90a77fb2b --- /dev/null +++ b/src/test/regress/output/multi_multiuser_load_data.source @@ -0,0 +1,39 @@ +-- +-- MULTI_MULTIUSER_LOAD_DATA +-- +-- Tests for loading data in a distributed cluster. Please note that the number +-- of shards uploaded depends on two config values: citusdb.shard_replication_factor and +-- citusdb.shard_max_size. These values are manually set in pg_regress.c. We also set +-- the shard placement policy to the local-node-first policy as other regression +-- tests expect the placements to be in that order. 
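A sketch of the configuration the header comment refers to, with hypothetical values; in the regression harness these settings come from pg_regress.c rather than from per-session SET commands:

SET citusdb.shard_replication_factor TO 2;
SET citusdb.shard_max_size TO '1MB';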
+SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; + version_above_ten +------------------- + t +(1 row) + +SET citusdb.shard_placement_policy TO 'local-node-first'; +-- load as superuser +\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- as user with ALL access +SET ROLE full_access; +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +RESET ROLE; +-- as user with SELECT access, should fail +SET ROLE read_access; +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +ERROR: permission denied for table lineitem +RESET ROLE; +-- as user with no access, should fail +SET ROLE no_access; +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +ERROR: permission denied for table lineitem +RESET ROLE; +SET ROLE full_access; +\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|' +\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' diff --git a/src/test/regress/output/multi_multiuser_load_data_0.source b/src/test/regress/output/multi_multiuser_load_data_0.source new file mode 100644 index 000000000..7a4429d32 --- /dev/null +++ b/src/test/regress/output/multi_multiuser_load_data_0.source @@ -0,0 +1,39 @@ +-- +-- MULTI_MULTIUSER_LOAD_DATA +-- +-- Tests for loading data in a distributed cluster. Please note that the number +-- of shards uploaded depends on two config values: citusdb.shard_replication_factor and +-- citusdb.shard_max_size. These values are manually set in pg_regress.c. We also set +-- the shard placement policy to the local-node-first policy as other regression +-- tests expect the placements to be in that order. 
+SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; + version_above_ten +------------------- + f +(1 row) + +SET citusdb.shard_placement_policy TO 'local-node-first'; +-- load as superuser +\copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +-- as user with ALL access +SET ROLE full_access; +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +RESET ROLE; +-- as user with SELECT access, should fail +SET ROLE read_access; +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +ERROR: permission denied for relation lineitem +RESET ROLE; +-- as user with no access, should fail +SET ROLE no_access; +\copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' +ERROR: permission denied for relation lineitem +RESET ROLE; +SET ROLE full_access; +\copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' +\copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' +\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' +\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|' +\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index ff67ce433..aec7e71d5 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -285,6 +285,19 @@ sub revert_replace_postgres } } +sub generate_hba +{ + my $nodename = shift; + + open(my $fh, ">", catfile($TMP_CHECKDIR, $nodename, "data", "pg_hba.conf")) + or die "could not open pg_hba.conf"; + print $fh "host all alice,bob localhost md5\n"; + print $fh "host all all 127.0.0.1/32 trust\n"; + print $fh "host all all ::1/128 trust\n"; + print $fh "host replication postgres localhost trust\n"; + close $fh; +} + # always want to call initdb under normal postgres, so revert from a # partial run, even if we're now not using valgrind. 
revert_replace_postgres(); @@ -465,6 +478,7 @@ push(@pgOptions, "citus.node_connection_timeout=${connectionTimeout}"); push(@pgOptions, "citus.explain_analyze_sort_method='taskId'"); push(@pgOptions, "citus.enable_manual_changes_to_shards=on"); push(@pgOptions, "citus.allow_unsafe_locks_from_workers=on"); +push(@pgOptions, "citus.stat_statements_track = 'all'"); # Some tests look at shards in pg_class, make sure we can usually see them: push(@pgOptions, "citus.show_shards_for_app_name_prefixes='pg_regress'"); @@ -673,12 +687,15 @@ if (!$conninfo) system(catfile("$bindir", "initdb"), ("--no-sync", "--allow-group-access", "-U", $user, "--encoding", "UTF8", catfile($TMP_CHECKDIR, $MASTERDIR, "data"))) == 0 or die "Could not create $MASTERDIR data directory"; + generate_hba("master"); + if ($usingWindows) { for my $port (@workerPorts) { system(catfile("$bindir", "initdb"), ("--no-sync", "--allow-group-access", "-U", $user, "--encoding", "UTF8", catfile($TMP_CHECKDIR, "worker.$port", "data"))) == 0 or die "Could not create worker data directory"; + generate_hba("worker.$port"); } } else diff --git a/src/test/regress/spec/isolation_get_all_active_transactions.spec b/src/test/regress/spec/isolation_get_all_active_transactions.spec index 685046e63..6aa7e1828 100644 --- a/src/test/regress/spec/isolation_get_all_active_transactions.spec +++ b/src/test/regress/spec/isolation_get_all_active_transactions.spec @@ -1,31 +1,26 @@ setup { + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET citus.shard_replication_factor TO 1; CREATE TABLE test_table(column1 int, column2 int); SELECT create_distributed_table('test_table', 'column1'); CREATE USER test_user_1; - SELECT run_command_on_workers('CREATE USER test_user_1'); CREATE USER test_user_2; - SELECT run_command_on_workers('CREATE USER test_user_2'); CREATE USER test_readonly; - SELECT run_command_on_workers('CREATE USER test_readonly'); CREATE USER test_monitor; - SELECT run_command_on_workers('CREATE USER test_monitor'); GRANT pg_monitor TO test_monitor; - SELECT run_command_on_workers('GRANT pg_monitor TO test_monitor'); } teardown { DROP TABLE test_table; DROP USER test_user_1, test_user_2, test_readonly, test_monitor; - SELECT run_command_on_workers('DROP USER test_user_1, test_user_2, test_readonly, test_monitor'); } session "s1" @@ -34,10 +29,7 @@ session "s1" step "s1-grant" { GRANT ALL ON test_table TO test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1'); - GRANT ALL ON test_table TO test_user_2; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); } step "s1-begin-insert" diff --git a/src/test/regress/spec/isolation_logical_replication_multi_shard_commands.spec b/src/test/regress/spec/isolation_logical_replication_multi_shard_commands.spec new file mode 100644 index 000000000..82fc0be72 --- /dev/null +++ b/src/test/regress/spec/isolation_logical_replication_multi_shard_commands.spec @@ -0,0 +1,157 @@ +// we use 15 as partition key values through out the test +// so setting the corresponding shard here is useful + +setup +{ + SELECT citus_internal.replace_isolation_tester_func(); + SELECT citus_internal.refresh_isolation_tester_prepared_statement(); + + SET citus.shard_count TO 8; + SET citus.shard_replication_factor TO 1; + CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int); + SELECT create_distributed_table('logical_replicate_placement', 'x'); + + SELECT 
get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard; + +} + +teardown +{ + SELECT citus_internal.restore_isolation_tester_func(); + + DROP TABLE selected_shard; + DROP TABLE logical_replicate_placement; +} + + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-move-placement" +{ + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; +} + +step "s1-end" +{ + COMMIT; +} + +step "s1-select" +{ + SELECT * FROM logical_replicate_placement order by y; +} + +step "s1-insert" +{ + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); +} + +step "s1-get-shard-distribution" +{ + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-select" +{ + SELECT * FROM logical_replicate_placement ORDER BY y; +} + +step "s2-insert" +{ + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); +} + +step "s2-delete" +{ + DELETE FROM logical_replicate_placement; +} + +step "s2-update" +{ + UPDATE logical_replicate_placement SET y = y + 1; +} + +step "s2-upsert" +{ + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); + + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; +} + +step "s2-copy" +{ + COPY logical_replicate_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5\n15,30"' WITH CSV; +} + +step "s2-truncate" +{ + TRUNCATE logical_replicate_placement; +} + +step "s2-alter-table" +{ + ALTER TABLE logical_replicate_placement ADD COLUMN z INT; +} + +step "s2-end" +{ + COMMIT; +} + +session "s3" + +// this advisory lock with (almost) random values are only used +// for testing purposes. 
For details, check Citus' logical replication +// source code +step "s3-acquire-advisory-lock" +{ + SELECT pg_advisory_lock(44000, 55152); +} + +step "s3-release-advisory-lock" +{ + SELECT pg_advisory_unlock(44000, 55152); +} + +##// nonblocking tests lie below ### + +// move placement first +// the following tests show the non-blocking modifications while shard is being moved +// in fact, the shard move blocks the writes for a very short duration of time +// by using an advisory and allowing the other commands continue to run, we prevent +// the modifications to block on that blocking duration + +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-insert" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-upsert" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-update" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-delete" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-select" "s3-release-advisory-lock" "s1-end" "s1-get-shard-distribution" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-copy" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" + +// below two permutations are blocked by move-placement, as expected +permutation "s1-insert" "s1-begin" "s1-move-placement" "s2-truncate" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-alter-table" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" + +// move placement second +// force shard-move to be a blocking call intentionally +permutation "s1-begin" "s2-begin" "s2-insert" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" # +permutation "s1-begin" "s2-begin" "s2-upsert" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s1-begin" "s2-begin" "s2-update" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s1-begin" "s2-begin" "s2-delete" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s1-begin" "s2-begin" "s2-select" "s1-move-placement" "s2-end" "s1-end" "s1-get-shard-distribution" +permutation "s1-begin" "s2-begin" "s2-copy" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s1-begin" "s2-begin" "s2-truncate" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-begin" "s2-begin" "s2-alter-table" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" + diff --git a/src/test/regress/spec/isolation_logical_replication_multi_shard_commands_on_mx.spec b/src/test/regress/spec/isolation_logical_replication_multi_shard_commands_on_mx.spec new file mode 100644 index 000000000..512eb3af7 --- /dev/null +++ b/src/test/regress/spec/isolation_logical_replication_multi_shard_commands_on_mx.spec @@ -0,0 +1,133 @@ +#include "isolation_mx_common.include.spec" + +// we use 15 as partition key values through out the test +// so setting the 
corresponding shard here is useful + +setup +{ + SET citus.shard_count TO 8; + SET citus.shard_replication_factor TO 1; + CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int); + SELECT create_distributed_table('logical_replicate_placement', 'x'); + + SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard; +} + +teardown +{ + DROP TABLE selected_shard; + DROP TABLE logical_replicate_placement; + + SELECT citus_internal.restore_isolation_tester_func(); +} + + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-move-placement" +{ + SELECT master_move_shard_placement(get_shard_id_for_distribution_column, 'localhost', 57637, 'localhost', 57638) FROM selected_shard; +} + +step "s1-commit" +{ + COMMIT; +} + +step "s1-select" +{ + SELECT * FROM logical_replicate_placement order by y; +} + +step "s1-insert" +{ + INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172); +} + +step "s1-get-shard-distribution" +{ + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; +} + +session "s2" + +step "s2-start-session-level-connection" +{ + SELECT start_session_level_connection_to_node('localhost', 57638); +} + +step "s2-begin-on-worker" +{ + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +} + +step "s2-select" +{ + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y'); +} + +step "s2-insert" +{ + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15), (172, 172)'); +} + +step "s2-delete" +{ + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement'); +} + +step "s2-update" +{ + SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1'); +} + +step "s2-commit-worker" +{ + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +} + +step "s2-stop-connection" +{ + SELECT stop_session_level_connection_to_node(); +} + +session "s3" + +// this advisory lock with (almost) random values are only used +// for testing purposes. 
For details, check Citus' logical replication +// source code +step "s3-acquire-advisory-lock" +{ + SELECT pg_advisory_lock(44000, 55152); +} + +step "s3-release-advisory-lock" +{ + SELECT pg_advisory_unlock(44000, 55152); +} + +##// nonblocking tests lie below ### + +// move placement first +// the following tests show the non-blocking modifications while shard is being moved +// in fact, the shard move blocks the writes for a very short duration of time +// by using an advisory and allowing the other commands continue to run, we prevent +// the modifications to block on that blocking duration + +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert" "s2-commit-worker" "s3-release-advisory-lock" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update" "s2-commit-worker" "s3-release-advisory-lock" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete" "s2-commit-worker" "s3-release-advisory-lock" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s2-commit-worker" "s3-release-advisory-lock" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection" + +// move placement second +// force shard-move to be a blocking call intentionally +permutation "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection" + diff --git a/src/test/regress/spec/isolation_logical_replication_single_shard_commands.spec b/src/test/regress/spec/isolation_logical_replication_single_shard_commands.spec new file mode 100644 index 000000000..2bca7b9ad --- /dev/null +++ b/src/test/regress/spec/isolation_logical_replication_single_shard_commands.spec @@ -0,0 +1,148 @@ +// we use 15 as the partition key value through out the test +// so setting the corresponding shard here is useful +setup +{ + SELECT citus_internal.replace_isolation_tester_func(); + SELECT citus_internal.refresh_isolation_tester_prepared_statement(); + + SET citus.shard_count TO 8; + SET citus.shard_replication_factor TO 1; + CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int); + SELECT create_distributed_table('logical_replicate_placement', 'x'); + + SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard; +} + +teardown +{ + SELECT 
citus_internal.restore_isolation_tester_func(); + + DROP TABLE selected_shard; + DROP TABLE logical_replicate_placement; +} + + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-move-placement" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); +} + +step "s1-end" +{ + COMMIT; +} + +step "s1-select" +{ + SELECT * FROM logical_replicate_placement order by y; +} + +step "s1-insert" +{ + INSERT INTO logical_replicate_placement VALUES (15, 15); +} + +step "s1-get-shard-distribution" +{ + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-move-placement" +{ + SELECT master_move_shard_placement( + get_shard_id_for_distribution_column('logical_replicate_placement', 4), + 'localhost', 57637, 'localhost', 57638); +} + +step "s2-select" +{ + SELECT * FROM logical_replicate_placement ORDER BY y; +} + +step "s2-insert" +{ + INSERT INTO logical_replicate_placement VALUES (15, 15); +} + +step "s2-select-for-update" +{ + SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE; +} + +step "s2-delete" +{ + DELETE FROM logical_replicate_placement WHERE x = 15; +} + +step "s2-update" +{ + UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15; +} + +step "s2-upsert" +{ + INSERT INTO logical_replicate_placement VALUES (15, 15); + + INSERT INTO logical_replicate_placement VALUES (15, 15) ON CONFLICT (x) DO UPDATE SET y = logical_replicate_placement.y + 1; +} + +step "s2-end" +{ + COMMIT; +} + +session "s3" + +// this advisory lock with (almost) random values are only used +// for testing purposes. 
For details, check Citus' logical replication +// source code +step "s3-acquire-advisory-lock" +{ + SELECT pg_advisory_lock(44000, 55152); +} + +step "s3-release-advisory-lock" +{ + SELECT pg_advisory_unlock(44000, 55152); +} + +##// nonblocking tests lie below ### + +// move placement first +// the following tests show the non-blocking modifications while shard is being moved +// in fact, the shard move blocks the writes for a very short duration of time +// by using an advisory and allowing the other commands continue to run, we prevent +// the modifications to block on that blocking duration +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-insert" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-upsert" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-update" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-delete" "s3-release-advisory-lock" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-select" "s3-release-advisory-lock" "s1-end" "s1-get-shard-distribution" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-select-for-update" "s3-release-advisory-lock" "s1-end" "s1-get-shard-distribution" + +// move placement second +// force shard-move to be a blocking call intentionally +permutation "s1-begin" "s2-begin" "s2-insert" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-begin" "s2-begin" "s2-upsert" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s1-begin" "s2-begin" "s2-update" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s1-begin" "s2-begin" "s2-delete" "s1-move-placement" "s2-end" "s1-end" "s1-select" "s1-get-shard-distribution" +permutation "s1-insert" "s1-begin" "s2-begin" "s2-select" "s1-move-placement" "s2-end" "s1-end" "s1-get-shard-distribution" +permutation "s1-insert" "s1-begin" "s2-begin" "s2-select-for-update" "s1-move-placement" "s2-end" "s1-end" "s1-get-shard-distribution" + + +// This test actually blocks because we don't want two non blocking shard moves at the same time +permutation "s1-begin" "s2-begin" "s1-move-placement" "s2-move-placement" "s1-end" "s2-end" diff --git a/src/test/regress/spec/isolation_logical_replication_single_shard_commands_on_mx.spec b/src/test/regress/spec/isolation_logical_replication_single_shard_commands_on_mx.spec new file mode 100644 index 000000000..5f2a9cc93 --- /dev/null +++ b/src/test/regress/spec/isolation_logical_replication_single_shard_commands_on_mx.spec @@ -0,0 +1,137 @@ +#include "isolation_mx_common.include.spec" + +// 15 as the partition key value through out the test +// so setting the corresponding shard here is useful +setup +{ + SET citus.shard_count TO 8; + CREATE TABLE logical_replicate_placement (x int PRIMARY KEY, y int); + SELECT create_distributed_table('logical_replicate_placement', 'x'); + + SELECT get_shard_id_for_distribution_column('logical_replicate_placement', 15) INTO selected_shard; +} + +teardown +{ + DROP TABLE selected_shard; + DROP TABLE logical_replicate_placement; + + SELECT 
citus_internal.restore_isolation_tester_func(); +} + + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-move-placement" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); +} + +step "s1-commit" +{ + COMMIT; +} + +step "s1-select" +{ + SELECT * FROM logical_replicate_placement order by y; +} + +step "s1-insert" +{ + INSERT INTO logical_replicate_placement VALUES (15, 15); +} + +step "s1-get-shard-distribution" +{ + select nodeport from pg_dist_placement inner join pg_dist_node on(pg_dist_placement.groupid = pg_dist_node.groupid) where shardstate != 4 AND shardid in (SELECT * FROM selected_shard) order by nodeport; +} + +session "s2" + +step "s2-start-session-level-connection" +{ + SELECT start_session_level_connection_to_node('localhost', 57638); +} + +step "s2-begin-on-worker" +{ + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +} + +step "s2-select" +{ + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement ORDER BY y'); +} + +step "s2-insert" +{ + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO logical_replicate_placement VALUES (15, 15)'); +} + +step "s2-select-for-update" +{ + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM logical_replicate_placement WHERE x=15 FOR UPDATE'); +} + +step "s2-delete" +{ + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM logical_replicate_placement WHERE x = 15'); +} + +step "s2-update" +{ + SELECT run_commands_on_session_level_connection_to_node('UPDATE logical_replicate_placement SET y = y + 1 WHERE x = 15'); +} + +step "s2-commit-worker" +{ + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +} + +step "s2-stop-connection" +{ + SELECT stop_session_level_connection_to_node(); +} + +session "s3" + +// this advisory lock with (almost) random values are only used +// for testing purposes. 
For details, check Citus' logical replication +// source code +step "s3-acquire-advisory-lock" +{ + SELECT pg_advisory_lock(44000, 55152); +} + +step "s3-release-advisory-lock" +{ + SELECT pg_advisory_unlock(44000, 55152); +} + +##// nonblocking tests lie below ### + +// move placement first +// the following tests show the non-blocking modifications while shard is being moved +// in fact, the shard move blocks the writes for a very short duration of time +// by using an advisory and allowing the other commands continue to run, we prevent +#// the modifications to block on that blocking duration +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert" "s2-commit-worker" "s3-release-advisory-lock" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update" "s2-commit-worker" "s3-release-advisory-lock" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete" "s2-commit-worker" "s3-release-advisory-lock" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s2-commit-worker" "s3-release-advisory-lock" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s2-commit-worker" "s3-release-advisory-lock" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection" + +#// move placement second +// force shard-move to be a blocking call intentionally +permutation "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-select" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection" +permutation "s1-insert" "s1-begin" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-move-placement" "s2-commit-worker" "s1-commit" "s1-get-shard-distribution" "s2-stop-connection" + diff --git a/src/test/regress/spec/isolation_logical_replication_with_partitioning.spec b/src/test/regress/spec/isolation_logical_replication_with_partitioning.spec new file mode 100644 index 000000000..aac8e5b71 --- /dev/null +++ b/src/test/regress/spec/isolation_logical_replication_with_partitioning.spec @@ -0,0 +1,140 @@ +// This file is adapted from isolation_logical_replication_single_shard_commands.spec +// in order to separate testing of partitinoned and 
non-partitioned +// tables. We do that to enabled testing of logical replication +// on older postgres versions that do not support partitioning yet. +// When we drop support for PG 96 we should consider merging this +// file with isolation_logical_replication_single_shard_commands.spec +// We used 5 as the partition key value through out the test +// so setting the corresponding shard here is useful +setup +{ + SET citus.shard_count TO 8; + SET citus.shard_replication_factor TO 1; + + CREATE TABLE logical_replicate_partitioned(x int, y int, PRIMARY KEY (x,y) ) PARTITION BY RANGE(y); + SELECT create_distributed_table('logical_replicate_partitioned', 'x'); + CREATE TABLE logical_replicate_partitioned_1 PARTITION OF logical_replicate_partitioned + FOR VALUES FROM (0) TO (100); + CREATE TABLE logical_replicate_partitioned_2 PARTITION OF logical_replicate_partitioned + FOR VALUES FROM (100) TO (200); + + SELECT get_shard_id_for_distribution_column('logical_replicate_partitioned', 5) INTO selected_partitioned_shard; + SELECT get_shard_id_for_distribution_column('logical_replicate_partitioned_1', 5) INTO selected_single_partition_shard; +} + +teardown +{ + DROP TABLE selected_partitioned_shard; + DROP TABLE selected_single_partition_shard; + DROP TABLE logical_replicate_partitioned; +} + + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-move-placement-partitioned" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_partitioned_shard), 'localhost', 57638, 'localhost', 57637); +} + +step "s1-move-placement-single-partition" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_single_partition_shard), 'localhost', 57638, 'localhost', 57637); +} + +step "s1-end" +{ + COMMIT; +} + +session "s2" + +step "s2-insert-partitioned" +{ + INSERT INTO logical_replicate_partitioned VALUES (5, 15); +} + +step "s2-delete-partitioned" +{ + DELETE FROM logical_replicate_partitioned WHERE x = 5; +} + +step "s2-update-partitioned" +{ + UPDATE logical_replicate_partitioned SET y = y + 1 WHERE x = 5; +} + +step "s2-upsert-partitioned" +{ + INSERT INTO logical_replicate_partitioned VALUES (5, 15); + + INSERT INTO logical_replicate_partitioned VALUES (5, 15) ON CONFLICT (x, y) DO UPDATE SET y = logical_replicate_partitioned.y + 1; +} + +step "s2-copy-partitioned" +{ + COPY logical_replicate_partitioned FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; +} + +step "s2-truncate-partitioned" +{ + TRUNCATE logical_replicate_partitioned; +} + +step "s2-alter-table-partitioned" +{ + ALTER TABLE logical_replicate_partitioned ADD COLUMN z INT; +} + + +session "s3" + +// this advisory lock with (almost) random values are only used +// for testing purposes. 
For details, check Citus' logical replication +// source code +step "s3-acquire-advisory-lock" +{ + SELECT pg_advisory_lock(44000, 55152); +} + +step "s3-release-advisory-lock" +{ + SELECT pg_advisory_unlock(44000, 55152); +} + + +// the following tests show the non-blocking modifications while shard is being moved +// in fact, the shard move blocks the writes for a very short duration of time +// by using an advisory and allowing the other commands continue to run, we prevent +// the modifications to block on that blocking duration +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-partitioned" "s2-insert-partitioned" "s3-release-advisory-lock" "s1-end" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-partitioned" "s2-upsert-partitioned" "s3-release-advisory-lock" "s1-end" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-partitioned" "s2-update-partitioned" "s3-release-advisory-lock" "s1-end" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-partitioned" "s2-delete-partitioned" "s3-release-advisory-lock" "s1-end" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-partitioned" "s2-copy-partitioned" "s3-release-advisory-lock" "s1-end" + + +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-single-partition" "s2-insert-partitioned" "s3-release-advisory-lock" "s1-end" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-single-partition" "s2-upsert-partitioned" "s3-release-advisory-lock" "s1-end" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-single-partition" "s2-update-partitioned" "s3-release-advisory-lock" "s1-end" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-single-partition" "s2-delete-partitioned" "s3-release-advisory-lock" "s1-end" +permutation "s3-acquire-advisory-lock" "s1-begin" "s1-move-placement-single-partition" "s2-copy-partitioned" "s3-release-advisory-lock" "s1-end" + + +// now show that DDLs and truncate are blocked by move placement +permutation "s1-begin" "s1-move-placement-partitioned" "s2-truncate-partitioned" "s1-end" +permutation "s1-begin" "s1-move-placement-partitioned" "s2-alter-table-partitioned" "s1-end" +permutation "s1-begin" "s2-truncate-partitioned" "s1-move-placement-partitioned" "s1-end" +permutation "s1-begin" "s2-alter-table-partitioned" "s1-move-placement-partitioned" "s1-end" + +permutation "s1-begin" "s1-move-placement-single-partition" "s2-truncate-partitioned" "s1-end" +permutation "s1-begin" "s1-move-placement-single-partition" "s2-alter-table-partitioned" "s1-end" +permutation "s1-begin" "s2-truncate-partitioned" "s1-move-placement-single-partition" "s1-end" +permutation "s1-begin" "s2-alter-table-partitioned" "s1-move-placement-single-partition" "s1-end" + + diff --git a/src/test/regress/spec/isolation_move_placement_vs_modification.spec b/src/test/regress/spec/isolation_move_placement_vs_modification.spec new file mode 100644 index 000000000..964ef689e --- /dev/null +++ b/src/test/regress/spec/isolation_move_placement_vs_modification.spec @@ -0,0 +1,140 @@ +// we use 5 as the partition key value through out the test +// so setting the corresponding shard here is useful +setup +{ + SET citus.shard_count TO 8; + SET citus.shard_replication_factor TO 1; + CREATE TABLE test_move_placement (x int, y int); + SELECT create_distributed_table('test_move_placement', 'x'); + + SELECT get_shard_id_for_distribution_column('test_move_placement', 5) INTO selected_shard; +} + 
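A note on the setup pattern that closes above (illustrative commentary, not part of the patch): get_shard_id_for_distribution_column() returns the id of the shard that a given partition key value hashes to, and SELECT ... INTO materializes that id into a one-row table (here named selected_shard) so later steps can reference the exact same shard without hard-coding its id. A minimal sketch of the same idea outside the isolation tester, using only calls that already appear in these specs (57637/57638 are the regression cluster's worker ports):

    -- resolve the shard that holds partition key value 5 and remember it
    SELECT get_shard_id_for_distribution_column('test_move_placement', 5) INTO selected_shard;

    -- later, move exactly that shard between the two workers
    SELECT master_move_shard_placement(
        (SELECT * FROM selected_shard),
        'localhost', 57638, 'localhost', 57637,
        'force_logical');

Keeping the shard id in a table rather than a psql variable also lets the teardown block simply DROP TABLE selected_shard.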
+teardown +{ + DROP TABLE test_move_placement; + DROP TABLE selected_shard; +} + +session "s1" + +step "s1-begin" +{ + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; +} + +// since test_move_placement has rep > 1 simple select query doesn't hit all placements +// hence not all placements are cached +step "s1-load-cache" +{ + TRUNCATE test_move_placement; +} + +step "s1-insert" +{ + INSERT INTO test_move_placement VALUES (5, 10); +} + +step "s1-update" +{ + UPDATE test_move_placement SET y = 5 WHERE x = 5; +} + +step "s1-delete" +{ + DELETE FROM test_move_placement WHERE x = 5; +} + +step "s1-select" +{ + SELECT count(*) FROM test_move_placement WHERE x = 5; +} + +step "s1-ddl" +{ + CREATE INDEX test_move_placement_index ON test_move_placement(x); +} + +step "s1-copy" +{ + COPY test_move_placement FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; +} + +step "s1-commit" +{ + COMMIT; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + + +step "s2-move-placement" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637, 'force_logical'); +} + +step "s2-commit" +{ + COMMIT; +} + +step "s2-print-content" +{ + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select y from %s WHERE x = 5') + WHERE + shardid IN (SELECT * FROM selected_shard) + ORDER BY + nodeport; +} + +step "s2-print-index-count" +{ + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_move_placement', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; +} + +step "s2-print-placements" +{ + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_placement'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; +} + +// move a placement while concurrently performing an update/delete/insert/copy +// note that at some points we use "s1-select" just after "s1-begin" given that BEGIN +// may invalidate cache at certain cases +permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content" "s2-print-placements" +permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content" "s2-print-placements" +permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content" "s2-print-placements" +permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-copy" "s2-commit" "s1-commit" "s2-print-content" "s2-print-placements" +permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-ddl" "s2-commit" "s1-commit" "s2-print-index-count" "s2-print-placements" + + +// the same tests without loading the cache at first +permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content" "s2-print-placements" +permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content" "s2-print-placements" +permutation "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content" "s2-print-placements" 
+permutation "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-copy" "s2-commit" "s1-commit" "s2-print-content" "s2-print-placements" +permutation "s1-begin" "s1-select" "s2-begin" "s2-move-placement" "s1-ddl" "s2-commit" "s1-commit" "s2-print-index-count" "s2-print-placements" diff --git a/src/test/regress/spec/isolation_move_placement_vs_modification_fk.spec b/src/test/regress/spec/isolation_move_placement_vs_modification_fk.spec new file mode 100644 index 000000000..9987a5fec --- /dev/null +++ b/src/test/regress/spec/isolation_move_placement_vs_modification_fk.spec @@ -0,0 +1,132 @@ +setup +{ + SET citus.shard_count to 2; + SET citus.shard_replication_factor to 1; + SELECT setval('pg_dist_shardid_seq', + CASE WHEN nextval('pg_dist_shardid_seq') > 1699999 OR nextval('pg_dist_shardid_seq') < 1600000 + THEN 1600000 + ELSE nextval('pg_dist_shardid_seq')-2 + END); + + CREATE TABLE referenced_table (id int PRIMARY KEY, value int); + SELECT create_reference_table('referenced_table'); + + CREATE TABLE referencing_table (id int PRIMARY KEY, value int); + SELECT create_distributed_table('referencing_table', 'id'); + + SELECT get_shard_id_for_distribution_column('referencing_table', 2) INTO selected_shard_for_test_table; +} + +teardown +{ + DROP TABLE referencing_table; + DROP TABLE referenced_table; + DROP TABLE selected_shard_for_test_table; +} + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-insert-referenced" +{ + INSERT INTO referenced_table SELECT x,x FROM generate_series(1,10) as f(x); +} + +step "s1-insert-referencing" +{ + INSERT INTO referencing_table SELECT x,x FROM generate_series(1,10) as f(x); +} + +step "s1-delete" +{ + DELETE FROM referenced_table WHERE id < 5; +} + +step "s1-update" +{ + UPDATE referenced_table SET value = 5 WHERE id = 5; +} + +step "s1-ddl" +{ + CREATE INDEX referenced_table_index ON referenced_table(id); +} + +step "s1-commit" +{ + COMMIT; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-add-fkey" +{ + ALTER TABLE referencing_table ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES referenced_table(id) ON DELETE CASCADE; +} + +step "s2-move-placement-blocking" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637, shard_transfer_mode:='block_writes'); +} + +step "s2-move-placement-nonblocking" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57638, 'localhost', 57637); +} + +step "s2-print-cluster" +{ + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('referencing_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT * FROM referencing_table ORDER BY 1; +} + +step "s2-commit" +{ + COMMIT; +} + +session "s3" + +// this advisory lock with (almost) random values are only used +// for testing purposes. For details, check Citus' logical replication +// source code +step "s3-acquire-advisory-lock" +{ + SELECT pg_advisory_lock(44000, 55152); +} + +step "s3-release-advisory-lock" +{ + SELECT pg_advisory_unlock(44000, 55152); +} + +// run master_move_shard_placement while concurrently performing an DML and index creation +// we expect shard_rebalancer on referencing table to get blocked by the DML operation +// on the referenced table. 
+permutation "s2-add-fkey" "s1-insert-referenced" "s1-insert-referencing" "s1-begin" "s2-begin" "s2-move-placement-blocking" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-insert-referenced" "s1-insert-referencing" "s1-begin" "s2-begin" "s2-move-placement-blocking" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-insert-referenced" "s1-insert-referencing" "s1-begin" "s2-begin" "s2-move-placement-blocking" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-insert-referenced" "s1-begin" "s2-begin" "s2-move-placement-blocking" "s1-insert-referencing" "s2-commit" "s1-commit" "s2-print-cluster" + +permutation "s2-add-fkey" "s3-acquire-advisory-lock" "s1-insert-referenced" "s1-insert-referencing" "s2-begin" "s2-move-placement-nonblocking" "s1-delete" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s3-acquire-advisory-lock" "s1-insert-referenced" "s1-insert-referencing" "s2-begin" "s2-move-placement-nonblocking" "s1-update" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s3-acquire-advisory-lock" "s1-insert-referenced" "s1-insert-referencing" "s2-begin" "s2-move-placement-nonblocking" "s1-ddl" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s3-acquire-advisory-lock" "s1-insert-referenced" "s2-begin" "s2-move-placement-nonblocking" "s1-insert-referencing" "s3-release-advisory-lock" "s2-commit" "s2-print-cluster" + diff --git a/src/test/regress/spec/isolation_move_placement_vs_move_placement.spec b/src/test/regress/spec/isolation_move_placement_vs_move_placement.spec new file mode 100644 index 000000000..ebb2e1761 --- /dev/null +++ b/src/test/regress/spec/isolation_move_placement_vs_move_placement.spec @@ -0,0 +1,70 @@ +// we use 5 as the partition key value through out the test +// so setting the corresponding shard here is useful +setup +{ + SET citus.shard_count TO 2; + SET citus.shard_replication_factor TO 1; + CREATE TABLE test_move_table (x int, y int); + SELECT create_distributed_table('test_move_table', 'x'); + + SELECT get_shard_id_for_distribution_column('test_move_table', 5) INTO selected_shard_for_test_table; +} + +teardown +{ + DROP TABLE test_move_table; + DROP TABLE selected_shard_for_test_table; +} + +session "s1" + +// with copy all placements are cached +step "s1-load-cache" +{ + COPY test_move_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; +} + +step "s1-move-placement" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-move-placement" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); +} + +step "s2-commit" +{ + COMMIT; +} + +step "s2-print-placements" +{ + SELECT + nodename, nodeport, count(*) + FROM + pg_dist_shard_placement + WHERE + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_move_table'::regclass) + AND + shardstate = 1 + GROUP BY + nodename, nodeport; +} + +// two concurrent shard moves on the same shard +// note that "s1-move-placement" errors out but that is expected +// given that "s2-move-placement" succeeds and the placement is +// already moved +permutation "s1-load-cache" "s2-begin" "s2-move-placement" "s1-move-placement" "s2-commit" "s2-print-placements" + 
+// the same test without the load caches +permutation "s2-begin" "s2-move-placement" "s1-move-placement" "s2-commit" "s2-print-placements" diff --git a/src/test/regress/spec/isolation_multiuser_locking.spec b/src/test/regress/spec/isolation_multiuser_locking.spec index 8303e9459..177b0f778 100644 --- a/src/test/regress/spec/isolation_multiuser_locking.spec +++ b/src/test/regress/spec/isolation_multiuser_locking.spec @@ -1,20 +1,18 @@ setup { + SET citus.max_cached_conns_per_worker to 0; SELECT citus_internal.replace_isolation_tester_func(); SELECT citus_internal.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE USER test_user_1; - SELECT run_command_on_workers('CREATE USER test_user_1'); CREATE USER test_user_2; - SELECT run_command_on_workers('CREATE USER test_user_2'); - SET ROLE test_user_1; CREATE TABLE test_table(column1 int, column2 int); + ALTER TABLE test_table OWNER TO test_user_1; SELECT create_distributed_table('test_table', 'column1'); - RESET ROLE; } teardown @@ -24,19 +22,24 @@ teardown BEGIN; DROP TABLE IF EXISTS test_table; DROP USER test_user_1, test_user_2; - SELECT run_command_on_workers('DROP USER test_user_1, test_user_2'); COMMIT; } session "s1" -// run_command_on_placements is done in a separate step because the setup is executed as a single transaction +// due to bug #3785 a second permutation of the isolation test would reuse a cached +// connection bound to the deleted user. This causes the tests to fail with unexplainable +// permission denied errors. +// By setting the cached connections to zero we prevent the use of cached conncetions. +// These steps can be removed once the root cause is solved +step "s1-no-connection-cache" { + SET citus.max_cached_conns_per_worker to 0; +} + step "s1-grant" { SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; - SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); } step "s1-begin" @@ -77,6 +80,15 @@ step "s1-commit" session "s2" +// due to bug #3785 a second permutation of the isolation test would reuse a cached +// connection bound to the deleted user. This causes the tests to fail with unexplainable +// permission denied errors. +// By setting the cached connections to zero we prevent the use of cached conncetions. 
+// These steps can be removed once the root cause is solved +step "s2-no-connection-cache" { + SET citus.max_cached_conns_per_worker to 0; +} + step "s2-begin" { BEGIN; @@ -114,17 +126,17 @@ step "s2-commit" } // REINDEX -permutation "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-commit" "s1-commit" -permutation "s1-grant" "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-insert" "s2-commit" "s1-commit" -permutation "s1-grant" "s1-begin" "s2-begin" "s1-reindex" "s2-insert" "s1-insert" "s1-commit" "s2-commit" +permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-commit" "s1-commit" +permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-grant" "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-insert" "s2-commit" "s1-commit" +permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-grant" "s1-begin" "s2-begin" "s1-reindex" "s2-insert" "s1-insert" "s1-commit" "s2-commit" // CREATE INDEX -permutation "s1-begin" "s2-begin" "s2-index" "s1-insert" "s2-commit" "s1-commit" "s2-drop-index" -permutation "s1-grant" "s1-begin" "s2-begin" "s2-insert" "s1-index" "s2-insert" "s2-commit" "s1-commit" "s1-drop-index" -permutation "s1-grant" "s1-begin" "s2-begin" "s1-index" "s2-index" "s1-insert" "s1-commit" "s2-commit" "s1-drop-index" "s2-drop-index" +permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-begin" "s2-begin" "s2-index" "s1-insert" "s2-commit" "s1-commit" "s2-drop-index" +permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-grant" "s1-begin" "s2-begin" "s2-insert" "s1-index" "s2-insert" "s2-commit" "s1-commit" "s1-drop-index" +permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-grant" "s1-begin" "s2-begin" "s1-index" "s2-index" "s1-insert" "s1-commit" "s2-commit" "s1-drop-index" "s2-drop-index" // TRUNCATE -permutation "s1-begin" "s2-begin" "s2-truncate" "s1-insert" "s2-commit" "s1-commit" -permutation "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-insert" "s1-insert" "s1-commit" "s2-commit" -permutation "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" +permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-begin" "s2-begin" "s2-truncate" "s1-insert" "s2-commit" "s1-commit" +permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-insert" "s1-insert" "s1-commit" "s2-commit" +permutation "s1-no-connection-cache" "s2-no-connection-cache" "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" diff --git a/src/test/regress/spec/isolation_pg_send_cancellation.spec b/src/test/regress/spec/isolation_pg_send_cancellation.spec new file mode 100644 index 000000000..46c6a0539 --- /dev/null +++ b/src/test/regress/spec/isolation_pg_send_cancellation.spec @@ -0,0 +1,65 @@ +setup +{ + CREATE FUNCTION run_pg_send_cancellation(int,int) + RETURNS void + AS 'citus' + LANGUAGE C STRICT; + + CREATE FUNCTION get_cancellation_key() + RETURNS int + AS 'citus' + LANGUAGE C STRICT; + + CREATE TABLE cancel_table (pid int, cancel_key int); +} + +teardown +{ + DROP TABLE IF EXISTS cancel_table; +} + +session "s1" + +/* store the PID and cancellation key of session 1 */ +step "s1-register" +{ + INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key()); +} + +/* lock the table from session 1, will block and get cancelled */ +step "s1-lock" +{ + BEGIN; + LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE; + END; +} + +session "s2" + +/* lock the table 
from session 2 to block session 1 */ +step "s2-lock" +{ + BEGIN; + LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE; +} + +/* PID mismatch */ +step "s2-wrong-cancel-1" +{ + SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table; +} + +/* cancellation key mismatch */ +step "s2-wrong-cancel-2" +{ + SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table; +} + +/* cancel the LOCK statement in session 1 */ +step "s2-cancel" +{ + SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table; + END; +} + +permutation "s1-register" "s2-lock" "s1-lock" "s2-wrong-cancel-1" "s2-wrong-cancel-2" "s2-cancel" diff --git a/src/test/regress/spec/isolation_ref2ref_foreign_keys_enterprise.spec b/src/test/regress/spec/isolation_ref2ref_foreign_keys_enterprise.spec new file mode 100644 index 000000000..e283e25fd --- /dev/null +++ b/src/test/regress/spec/isolation_ref2ref_foreign_keys_enterprise.spec @@ -0,0 +1,107 @@ +setup +{ + SET citus.shard_count TO 2; + SET citus.shard_replication_factor TO 1; + SELECT citus_internal.replace_isolation_tester_func(); + SELECT citus_internal.refresh_isolation_tester_prepared_statement(); + + CREATE TABLE ref_table_1(id int PRIMARY KEY, value int); + SELECT create_reference_table('ref_table_1'); + + CREATE TABLE ref_table_2(id int PRIMARY KEY REFERENCES ref_table_1(id) ON DELETE CASCADE ON UPDATE CASCADE); + SELECT create_reference_table('ref_table_2'); + + CREATE TABLE dist_table(id int PRIMARY KEY, value int REFERENCES ref_table_2(id) ON DELETE CASCADE ON UPDATE CASCADE); + SELECT create_distributed_table('dist_table', 'id'); + + INSERT INTO ref_table_1 VALUES (1, 1), (3, 3), (5, 5), (7, 7), (9, 9), (11, 11); + INSERT INTO ref_table_2 VALUES (1), (3), (7), (9), (11); + INSERT INTO dist_table VALUES (1, 1), (3, 3), (7, 7), (9, 9), (11, 11); + + SELECT get_shard_id_for_distribution_column('dist_table', 5) INTO selected_shard_for_dist_table; +} + +teardown +{ + DROP TABLE ref_table_1, ref_table_2, dist_table, selected_shard_for_dist_table; + SELECT citus_internal.restore_isolation_tester_func(); +} + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-delete-table-1" +{ + DELETE FROM ref_table_1 WHERE value = 5; +} + +step "s1-update-table-1" +{ + UPDATE ref_table_1 SET id = id + 1 WHERE value = 5; +} + +step "s1-insert-table-1" +{ + INSERT INTO ref_table_2 VALUES (5); + INSERT INTO dist_table VALUES (5, 5); +} + +step "s1-select-table-1" +{ + SELECT * FROM ref_table_1 ORDER BY id, value; +} + +step "s1-select-dist-table" +{ + SELECT * FROM dist_table ORDER BY id, value; +} + +step "s1-commit" +{ + COMMIT; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-move-shards" +{ + SELECT master_move_shard_placement((SELECT * FROM selected_shard_for_dist_table), 'localhost', 57637, 'localhost', 57638, 'force_logical'); +} + +step "s2-commit" +{ + COMMIT; +} + +session "s3" + +// this advisory lock with (almost) random values are only used +// for testing purposes. 
For details, check Citus' logical replication +// source code +step "s3-acquire-advisory-lock" +{ + SELECT pg_advisory_lock(44000, 55152); +} + +step "s3-release-advisory-lock" +{ + SELECT pg_advisory_unlock(44000, 55152); +} + +permutation "s1-insert-table-1" "s3-acquire-advisory-lock" "s2-begin" "s2-move-shards" "s1-delete-table-1" "s3-release-advisory-lock" "s2-commit" "s1-select-table-1" "s1-select-dist-table" +permutation "s1-insert-table-1" "s3-acquire-advisory-lock" "s2-begin" "s2-move-shards" "s1-update-table-1" "s3-release-advisory-lock" "s2-commit" "s1-select-table-1" "s1-select-dist-table" +permutation "s3-acquire-advisory-lock" "s2-begin" "s2-move-shards" "s1-insert-table-1" "s3-release-advisory-lock" "s2-commit" "s1-select-table-1" "s1-select-dist-table" +permutation "s1-insert-table-1" "s3-acquire-advisory-lock" "s2-begin" "s2-move-shards" "s1-select-table-1" "s3-release-advisory-lock" "s2-commit" + +permutation "s1-insert-table-1" "s2-begin" "s1-begin" "s1-delete-table-1" "s2-move-shards" "s1-commit" "s2-commit" "s1-select-table-1" "s1-select-dist-table" +permutation "s1-insert-table-1" "s2-begin" "s1-begin" "s1-update-table-1" "s2-move-shards" "s1-commit" "s2-commit" "s1-select-table-1" "s1-select-dist-table" +permutation "s2-begin" "s1-begin" "s1-insert-table-1" "s2-move-shards" "s1-commit" "s2-commit" "s1-select-table-1" "s1-select-dist-table" diff --git a/src/test/regress/spec/isolation_shard_move_vs_start_metadata_sync.spec b/src/test/regress/spec/isolation_shard_move_vs_start_metadata_sync.spec new file mode 100644 index 000000000..701fbe8dd --- /dev/null +++ b/src/test/regress/spec/isolation_shard_move_vs_start_metadata_sync.spec @@ -0,0 +1,60 @@ +setup +{ + SET citus.shard_replication_factor TO 1; + SET citus.shard_count TO 4; + + CREATE TABLE dist_table (id int, value int); + SELECT create_distributed_table('dist_table', 'id'); + + SELECT shardid INTO selected_shard FROM pg_dist_shard WHERE logicalrelid='dist_table'::regclass LIMIT 1; + +} + +teardown +{ + DROP TABLE dist_table; + DROP TABLE selected_shard; +} + +session "s1" + +step "s1-count-shards-by-worker" +{ + SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=1 AND shardstate != 4; + SELECT * FROM run_command_on_workers('SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=1') ORDER BY 1, 2, 3, 4; + SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=2 AND shardstate != 4; + SELECT * FROM run_command_on_workers('SELECT COUNT(*) FROM pg_dist_placement WHERE groupid=2') ORDER BY 1, 2, 3, 4; +} + +step "s1-move-shard-force-logical" +{ + SELECT * FROM master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, 'force_logical'); +} + +step "s1-move-shard-block-writes" +{ + SELECT * FROM master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638, 'block_writes'); +} + +step "s1-begin" +{ + BEGIN; +} + +step "s1-commit" +{ + COMMIT; +} + + +session "s2" + +step "s2-start-metadata-sync" +{ + SELECT * FROM start_metadata_sync_to_node('localhost', 57637); +} + +// It is expected to observe different number of placements on coordinator and worker +// since we call that spec while mx is off. 
+permutation "s1-begin" "s1-move-shard-force-logical" "s2-start-metadata-sync" "s1-commit" "s1-count-shards-by-worker" +permutation "s1-begin" "s1-move-shard-block-writes" "s2-start-metadata-sync" "s1-commit" "s1-count-shards-by-worker" diff --git a/src/test/regress/spec/isolation_tenant_isolation.spec b/src/test/regress/spec/isolation_tenant_isolation.spec new file mode 100644 index 000000000..d7722fb5f --- /dev/null +++ b/src/test/regress/spec/isolation_tenant_isolation.spec @@ -0,0 +1,139 @@ +setup +{ + SET citus.shard_count to 2; + SET citus.shard_replication_factor to 1; + SELECT setval('pg_dist_shardid_seq', + CASE WHEN nextval('pg_dist_shardid_seq') > 1599999 OR nextval('pg_dist_shardid_seq') < 1500000 + THEN 1500000 + ELSE nextval('pg_dist_shardid_seq')-2 + END); + + CREATE TABLE isolation_table (id int, value int); + SELECT create_distributed_table('isolation_table', 'id'); +} + +teardown +{ + DROP TABLE isolation_table; +} + +session "s1" + +step "s1-begin" +{ + BEGIN; + + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; +} + +// cache all placements +step "s1-load-cache" +{ + TRUNCATE isolation_table; +} + +step "s1-insert" +{ + INSERT INTO isolation_table VALUES (5, 10); +} + +step "s1-update" +{ + UPDATE isolation_table SET value = 5 WHERE id = 5; +} + +step "s1-delete" +{ + DELETE FROM isolation_table WHERE id = 5; +} + +step "s1-select" +{ + SELECT count(*) FROM isolation_table WHERE id = 5; +} + +step "s1-ddl" +{ + CREATE INDEX test_table_index ON isolation_table(id); +} + +step "s1-copy" +{ + COPY isolation_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; +} + +step "s1-isolate-tenant" +{ + SELECT isolate_tenant_to_new_shard('isolation_table', 2); +} + +step "s1-commit" +{ + COMMIT; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-isolate-tenant" +{ + SELECT isolate_tenant_to_new_shard('isolation_table', 5); +} + +step "s2-commit" +{ + COMMIT; +} + +step "s2-print-cluster" +{ + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; +} + +step "s2-print-index-count" +{ + SELECT + nodeport, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; +} + +// run tenant isolation while concurrently performing an DML and index creation +// we expect DML/DDL queries to fail because the shard they are waiting for is destroyed +permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" + + +// the same tests without loading the cache at first +permutation "s1-insert" "s1-begin" "s1-select" 
"s2-begin" "s2-isolate-tenant" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s1-begin" "s1-select" "s2-begin" "s2-isolate-tenant" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" + + +// concurrent tenant isolation blocks on different shards of the same table (or any colocated table) +permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-isolate-tenant" "s2-isolate-tenant" "s1-commit" "s2-print-cluster" + +// the same test above without loading the cache at first +permutation "s1-insert" "s1-begin" "s1-isolate-tenant" "s2-isolate-tenant" "s1-commit" "s2-print-cluster" diff --git a/src/test/regress/spec/isolation_tenant_isolation_with_fkey_to_reference.spec b/src/test/regress/spec/isolation_tenant_isolation_with_fkey_to_reference.spec new file mode 100644 index 000000000..d30859343 --- /dev/null +++ b/src/test/regress/spec/isolation_tenant_isolation_with_fkey_to_reference.spec @@ -0,0 +1,106 @@ +setup +{ + -- set it to a predefined value as we check shard id's as well in output of this test + SELECT setval('pg_dist_shardid_seq', + CASE WHEN nextval('pg_dist_shardid_seq') > 1399999 OR nextval('pg_dist_shardid_seq') < 1300000 + THEN 1300000 + ELSE nextval('pg_dist_shardid_seq')-2 + END); + + SET citus.shard_count to 2; + SET citus.shard_replication_factor to 1; + + CREATE TABLE reference_table (id int PRIMARY KEY, value int); + SELECT create_reference_table('reference_table'); + + CREATE TABLE isolation_table (id int, value int); + SELECT create_distributed_table('isolation_table', 'id'); +} + +teardown +{ + DROP TABLE isolation_table CASCADE; + DROP TABLE reference_table CASCADE; +} + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-insert" +{ + INSERT INTO reference_table VALUES (5, 10); +} + +step "s1-update" +{ + UPDATE reference_table SET value = 5 WHERE id = 5; +} + +step "s1-delete" +{ + DELETE FROM reference_table WHERE id = 5; +} + +step "s1-ddl" +{ + CREATE INDEX reference_table_index ON reference_table(id); +} + +step "s1-copy" +{ + COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; +} + +step "s1-commit" +{ + COMMIT; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-isolate-tenant" +{ + SELECT isolate_tenant_to_new_shard('isolation_table', 5); +} + +step "s2-add-fkey" +{ + ALTER TABLE isolation_table ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); +} + +step "s2-commit" +{ + COMMIT; +} + +step "s2-print-cluster" +{ + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('isolation_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT id, value FROM isolation_table ORDER BY id, value; +} + +// run tenant isolation while concurrently performing an DML and index creation on the +// reference table which the distributed table have a foreign key to +// all modifications should block tenant isolation +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-isolate-tenant" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-isolate-tenant" "s1-update" 
"s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-isolate-tenant" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-isolate-tenant" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-isolate-tenant" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" diff --git a/src/test/regress/sql/.gitignore b/src/test/regress/sql/.gitignore index 684e83b1b..52ef890a6 100644 --- a/src/test/regress/sql/.gitignore +++ b/src/test/regress/sql/.gitignore @@ -12,6 +12,7 @@ /multi_load_data.sql /multi_load_data_superuser.sql /multi_load_more_data.sql +/multi_multiuser_load_data.sql /multi_mx_copy_data.sql /multi_outer_join.sql /multi_outer_join_reference.sql diff --git a/src/test/regress/sql/aggregate_support.sql b/src/test/regress/sql/aggregate_support.sql index b2cd063c7..a991d856e 100644 --- a/src/test/regress/sql/aggregate_support.sql +++ b/src/test/regress/sql/aggregate_support.sql @@ -355,7 +355,6 @@ select array_collect_sort(val) from aggdata; -- Test multiuser scenario create user notsuper; -select run_command_on_workers($$create user notsuper$$); grant all on schema aggregate_support to notsuper; grant all on all tables in schema aggregate_support to notsuper; select 1 from run_command_on_workers($$ diff --git a/src/test/regress/sql/alter_database_owner.sql b/src/test/regress/sql/alter_database_owner.sql index ae8418468..a6e8c70bb 100644 --- a/src/test/regress/sql/alter_database_owner.sql +++ b/src/test/regress/sql/alter_database_owner.sql @@ -3,11 +3,11 @@ SET search_path TO alter_database_owner, public; CREATE USER database_owner_1; CREATE USER database_owner_2; -SELECT run_command_on_workers('CREATE USER database_owner_1'); -SELECT run_command_on_workers('CREATE USER database_owner_2'); -- make sure the propagation of ALTER DATABASE ... OWNER TO ... 
is on -SET citus.enable_alter_database_owner TO on; +-- in enterprise we default to on, to verify this we don't set it explicitly to on and run +-- the rest of the tests exactly as on community +-- SET citus.enable_alter_database_owner TO on; -- list the owners of the current database on all nodes SELECT run_command_on_workers($$ @@ -89,6 +89,29 @@ SELECT run_command_on_workers($$ WHERE d.datname = current_database(); $$); +-- verify that a user not present on the newly added node will automatically be propagated +SELECT master_remove_node('localhost', :worker_2_port); +CREATE USER database_owner_3; +ALTER DATABASE regression OWNER TO database_owner_3; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + +-- reset back to original owner to not break subsequent tests and remove enterprise test user +ALTER DATABASE regression OWNER TO database_owner_1; +DROP USER database_owner_3; CREATE TABLE t (a int PRIMARY KEY); SELECT create_distributed_table('t', 'a'); @@ -169,7 +192,5 @@ $$); DROP USER database_owner_1; DROP USER database_owner_2; -SELECT run_command_on_workers('DROP USER database_owner_1'); -SELECT run_command_on_workers('DROP USER database_owner_2'); SET client_min_messages TO warning; DROP SCHEMA alter_database_owner CASCADE; diff --git a/src/test/regress/sql/alter_distributed_table.sql b/src/test/regress/sql/alter_distributed_table.sql index f9209eaba..8867f660b 100644 --- a/src/test/regress/sql/alter_distributed_table.sql +++ b/src/test/regress/sql/alter_distributed_table.sql @@ -199,6 +199,39 @@ ALTER TABLE par_table ATTACH PARTITION par_table_1 FOR VALUES FROM (1) TO (5); SELECT alter_distributed_table('par_table', distribution_column:='b', colocate_with:='col_table'); + +-- test changing shard count into a default colocation group with shard split +-- ensure there is no colocation group with 23 shards +SELECT count(*) FROM pg_dist_colocation WHERE shardcount = 23; +SET citus.shard_count TO 23; + +CREATE TABLE shard_split_table (a int, b int); +SELECT create_distributed_table ('shard_split_table', 'a'); +SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5); + +-- show the difference in pg_dist_colocation and citus_tables shard counts +SELECT + ( + SELECT shardcount FROM pg_dist_colocation WHERE colocationid IN + ( + SELECT colocation_id FROM public.citus_tables WHERE table_name = 'shard_split_table'::regclass + ) + ) AS "pg_dist_colocation", + (SELECT shard_count FROM public.citus_tables WHERE table_name = 'shard_split_table'::regclass) AS "citus_tables"; + +SET citus.shard_count TO 4; + +-- distribute another table and then change shard count to 23 +CREATE TABLE shard_split_table_2 (a int, b int); +SELECT create_distributed_table ('shard_split_table_2', 'a'); +SELECT alter_distributed_table ('shard_split_table_2', shard_count:=23, cascade_to_colocated:=false); + +SELECT a.colocation_id = b.colocation_id FROM public.citus_tables a, public.citus_tables b + WHERE a.table_name = 'shard_split_table'::regclass AND b.table_name = 'shard_split_table_2'::regclass; + +SELECT shard_count FROM public.citus_tables WHERE table_name = 'shard_split_table_2'::regclass; + + -- test messages -- test nothing to change SELECT 
alter_distributed_table('dist_table'); diff --git a/src/test/regress/sql/auto_undist_citus_local.sql b/src/test/regress/sql/auto_undist_citus_local.sql index 8fe3d642c..7c7814a46 100644 --- a/src/test/regress/sql/auto_undist_citus_local.sql +++ b/src/test/regress/sql/auto_undist_citus_local.sql @@ -646,18 +646,12 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre -- test DROP OWNED BY --- Citus does not support "ALTER TABLE OWNER TO" commands. Also, not to deal with tests output --- difference between community and enterprise, let's disable enable_ddl_propagation here. -SET citus.enable_ddl_propagation to OFF; - CREATE USER another_user; -SELECT run_command_on_workers('CREATE USER another_user'); +-- Citus does not support "ALTER TABLE OWNER TO" commands. ALTER TABLE reference_table_1 OWNER TO another_user; SELECT run_command_on_placements('reference_table_1', 'ALTER TABLE %s OWNER TO another_user'); -SET citus.enable_ddl_propagation to ON; - BEGIN; DROP OWNED BY another_user cascade; SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid IN ( 'citus_local_table_1'::regclass, 'citus_local_table_2'::regclass, 'citus_local_table_3'::regclass) ORDER BY logicalrelid; diff --git a/src/test/regress/sql/citus_local_tables.sql b/src/test/regress/sql/citus_local_tables.sql index ae3db0060..57f93b076 100644 --- a/src/test/regress/sql/citus_local_tables.sql +++ b/src/test/regress/sql/citus_local_tables.sql @@ -133,7 +133,7 @@ BEGIN; SELECT * FROM citus_local_table_3; ROLLBACK; --- show that we do not support policies in citus community -- +-- show that we support policies in citus enterprise -- BEGIN; CREATE TABLE citus_local_table_3 (table_user text); @@ -144,7 +144,6 @@ BEGIN; CREATE POLICY table_policy ON citus_local_table_3 TO table_users USING (table_user = current_user); - -- this should error out SELECT citus_add_local_table_to_metadata('citus_local_table_3'); ROLLBACK; diff --git a/src/test/regress/sql/citus_local_tables_ent.sql b/src/test/regress/sql/citus_local_tables_ent.sql new file mode 100644 index 000000000..15c4371ac --- /dev/null +++ b/src/test/regress/sql/citus_local_tables_ent.sql @@ -0,0 +1,74 @@ +\set VERBOSITY terse + +SET citus.next_shard_id TO 1511000; +SET citus.shard_replication_factor TO 1; +SET citus.enable_local_execution TO ON; +SET citus.log_local_commands TO ON; + +CREATE SCHEMA citus_local_tables_ent; +SET search_path TO citus_local_tables_ent; + +-- ensure that coordinator is added to pg_dist_node +SET client_min_messages to ERROR; +SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); +RESET client_min_messages; + +CREATE TABLE citus_local_table (a int, b int); +SELECT citus_add_local_table_to_metadata('citus_local_table'); + +-- isolate_tenant_to_new_shard is not supported +SELECT isolate_tenant_to_new_shard('citus_local_table', 100); + +-- master_copy_shard_placement is not supported +SELECT master_copy_shard_placement(shardid, 'localhost', :master_port, 'localhost', :worker_1_port, false) +FROM (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='citus_local_table'::regclass) as shardid; + +-- master_move_shard_placement is not supported +SELECT master_move_shard_placement(shardid, 'localhost', :master_port, 'localhost', :worker_1_port) +FROM (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='citus_local_table'::regclass) as shardid; + +-- replicate_table_shards is not suported +SELECT replicate_table_shards('citus_local_table'::regclass, 2); + +-- rebalance_table_shards is not 
supported +SELECT rebalance_table_shards('citus_local_table'); + +-- get_rebalance_table_shards_plan is not supported +SELECT get_rebalance_table_shards_plan('citus_local_table'); + +-- test a policy defined after creating a citus local table + +-- create another user for policy test +CREATE USER user_can_select_a_1; +ALTER ROLE user_can_select_a_1 SET search_path TO citus_local_tables_ent; +GRANT USAGE ON SCHEMA citus_local_tables_ent TO user_can_select_a_1; + +INSERT INTO citus_local_table VALUES (1,1); +INSERT INTO citus_local_table VALUES (2,2); + +-- grant access +GRANT SELECT ON TABLE citus_local_table TO user_can_select_a_1; + +-- enable row level security +ALTER TABLE citus_local_table ENABLE ROW LEVEL SECURITY; + +-- switch user, it should not be able to see any rows since row level security is enabled +SET ROLE user_can_select_a_1; +SELECT * FROM citus_local_table ORDER BY 1, 2; +RESET ROLE; + +-- create policy for user to read access for rows with a=1 +CREATE POLICY user_mod ON citus_local_table +FOR SELECT +TO user_can_select_a_1 +USING (current_user = 'user_can_select_a_1' and a=1); + +-- switch user, it should be able to see rows with a=1 +SET ROLE user_can_select_a_1; +SELECT * FROM citus_local_table ORDER BY 1, 2; + +-- reset role +RESET ROLE; + +-- cleanup at exit +DROP SCHEMA citus_local_tables_ent CASCADE; diff --git a/src/test/regress/sql/columnar_truncate.sql b/src/test/regress/sql/columnar_truncate.sql index 9cdc44d55..f9532c00d 100644 --- a/src/test/regress/sql/columnar_truncate.sql +++ b/src/test/regress/sql/columnar_truncate.sql @@ -128,7 +128,6 @@ set columnar.compression = 'pglz'; INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100); set columnar.compression to default; -- create a user that can not truncate -SELECT run_command_on_workers($$CREATE USER truncate_user;$$); CREATE USER truncate_user; GRANT USAGE ON SCHEMA truncate_schema TO truncate_user; GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user; diff --git a/src/test/regress/sql/create_role_propagation.sql b/src/test/regress/sql/create_role_propagation.sql new file mode 100644 index 000000000..7b73c2aed --- /dev/null +++ b/src/test/regress/sql/create_role_propagation.sql @@ -0,0 +1,274 @@ +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + +\c - - - :worker_1_port + +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + +\c - - - :master_port + +CREATE ROLE create_role; +CREATE ROLE create_role_2; +CREATE USER create_user; +CREATE USER create_user_2; +CREATE GROUP create_group; +CREATE GROUP create_group_2; + +SELECT master_remove_node('localhost', :worker_2_port); + +CREATE ROLE create_role_with_everything SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 105 PASSWORD 'strong_password123^' VALID UNTIL '2045-05-05 00:00:00.00+00' IN ROLE create_role, create_group 
ROLE create_user, create_group_2 ADMIN create_role_2, create_user_2; +CREATE ROLE create_role_with_nothing NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 3 PASSWORD 'weakpassword' VALID UNTIL '2015-05-05 00:00:00.00+00'; + +-- show that creating role from worker node is only allowed when create role +-- propagation is off +\c - - - :worker_1_port +CREATE ROLE role_on_worker; + +BEGIN; +SET citus.enable_create_role_propagation TO off; +CREATE ROLE role_on_worker; +ROLLBACK; + +\c - - - :master_port + +-- edge case role names +CREATE ROLE "create_role'edge"; +CREATE ROLE "create_role""edge"; + +-- test grant role +GRANT create_group TO create_role; +GRANT create_group TO create_role_2 WITH ADMIN OPTION; + +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + +\c - - - :worker_1_port + +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + +\c - - - :master_port + +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + +\c - - - :worker_2_port + +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + +\c - - - :master_port + +DROP ROLE create_role_with_everything; +REVOKE create_group FROM create_role; +REVOKE ADMIN OPTION FOR create_group FROM create_role_2; + +\c - - - :master_port + +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + +\c - - - :worker_1_port + +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + +\c - - - :master_port + +-- test grants with distributed and non-distributed roles + +SELECT master_remove_node('localhost', :worker_2_port); + +CREATE ROLE dist_role_1 SUPERUSER; +CREATE ROLE dist_role_2; +CREATE ROLE dist_role_3; +CREATE ROLE dist_role_4; + +SET citus.enable_create_role_propagation TO OFF; + 
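With citus.enable_create_role_propagation set to off, the CREATE ROLE statements that follow stay local to the coordinator, which is what makes the non_dist_* roles non-distributed in the grant tests below. As a rough aside (not part of the test file), the same pg_dist_object lookup the test runs later can tell the two kinds apart: a propagated role shows up in the catalog, a coordinator-only one does not. The role name here is a placeholder.

-- illustrative only: check whether a given role was recorded as a distributed object
SELECT objid::regrole
FROM pg_catalog.pg_dist_object
WHERE classid = 'pg_authid'::regclass::oid
  AND objid::regrole::text = 'some_role';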
+CREATE ROLE non_dist_role_1 SUPERUSER; +CREATE ROLE non_dist_role_2; +CREATE ROLE non_dist_role_3; +CREATE ROLE non_dist_role_4; + +SET citus.enable_create_role_propagation TO ON; + +SET ROLE dist_role_1; + +GRANT non_dist_role_1 TO non_dist_role_2; + +SET citus.enable_create_role_propagation TO OFF; + +SET ROLE non_dist_role_1; + +GRANT dist_role_1 TO dist_role_2; + +RESET ROLE; + +SET citus.enable_create_role_propagation TO ON; + +GRANT dist_role_3 TO non_dist_role_3; +GRANT non_dist_role_4 TO dist_role_4; + +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1; + +\c - - - :worker_1_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; + +\c - - - :worker_2_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; +\c - - - :master_port + +DROP ROLE dist_role_3, non_dist_role_3, dist_role_4, non_dist_role_4; + +-- test grant with multiple mixed roles +CREATE ROLE dist_mixed_1; +CREATE ROLE dist_mixed_2; +CREATE ROLE dist_mixed_3; +CREATE ROLE dist_mixed_4; + +SET citus.enable_create_role_propagation TO OFF; + +CREATE ROLE nondist_mixed_1; +CREATE ROLE nondist_mixed_2; + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1; +\c - - - :worker_1_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_mixed%' ORDER BY 1; + +\c - - - :master_port + +SELECT master_remove_node('localhost', :worker_2_port); +GRANT dist_mixed_1, dist_mixed_2, nondist_mixed_1 TO dist_mixed_3, dist_mixed_4, nondist_mixed_2; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1; +\c - - - :worker_1_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_mixed%' ORDER BY 1; +\c - - - :worker_2_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; +SELECT rolname FROM 
pg_authid WHERE rolname LIKE '%dist\_mixed%' ORDER BY 1; + +\c - - - :master_port +DROP ROLE dist_mixed_1, dist_mixed_2, dist_mixed_3, dist_mixed_4, nondist_mixed_1, nondist_mixed_2; + +-- test drop multiple roles with non-distributed roles + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist%' ORDER BY 1; +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist%' ORDER BY 1; + +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist%' ORDER BY 1; +\c - - - :master_port + +DROP ROLE dist_role_1, non_dist_role_1, dist_role_2, non_dist_role_2; + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist%' ORDER BY 1; +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist%' ORDER BY 1; + +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist%' ORDER BY 1; +\c - - - :master_port + +-- test alter part of create or alter role + +SELECT master_remove_node('localhost', :worker_2_port); +DROP ROLE create_role, create_role_2; + +\c - - - :worker_2_port +SELECT rolname, rolcanlogin FROM pg_authid WHERE rolname = 'create_role' OR rolname = 'create_role_2' ORDER BY rolname; + +\c - - - :master_port + +CREATE ROLE create_role LOGIN; +SELECT 1 FROM master_add_node('localhost', :worker_2_port); +CREATE ROLE create_role_2 LOGIN; + +\c - - - :worker_2_port +SELECT rolname, rolcanlogin FROM pg_authid WHERE rolname = 'create_role' OR rolname = 'create_role_2' ORDER BY rolname; + +\c - - - :master_port + +-- test cascading grants + +SET citus.enable_create_role_propagation TO OFF; + +CREATE ROLE nondist_cascade_1; +CREATE ROLE nondist_cascade_2; +CREATE ROLE nondist_cascade_3; + +SET citus.enable_create_role_propagation TO ON; + +CREATE ROLE dist_cascade; + +GRANT nondist_cascade_1 TO nondist_cascade_2; +GRANT nondist_cascade_2 TO nondist_cascade_3; + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%cascade%' ORDER BY 1; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%cascade%' ORDER BY 1; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; +\c - - - :master_port + +SELECT master_remove_node('localhost', :worker_2_port); + +GRANT nondist_cascade_3 TO dist_cascade; + +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%cascade%' ORDER BY 1; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%cascade%' ORDER BY 1; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; +\c - - - :worker_2_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%cascade%' ORDER BY 1; +SELECT roleid::regrole::text AS role, member::regrole::text, 
grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%cascade%' ORDER BY 1, 2; + +\c - - - :master_port +DROP ROLE create_role, create_role_2, create_group, create_group_2, create_user, create_user_2, create_role_with_nothing, "create_role'edge", "create_role""edge"; + + +-- test grant non-existing roles + +CREATE ROLE existing_role_1; +CREATE ROLE existing_role_2; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%existing%' ORDER BY 1, 2; + +GRANT existing_role_1, nonexisting_role_1 TO existing_role_2, nonexisting_role_2; + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%existing%' ORDER BY 1, 2; + + +-- test drop non-existing roles +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%existing%' ORDER BY 1; +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; +\c - - - :master_port + +DROP ROLE existing_role_1, existing_role_2, nonexisting_role_1, nonexisting_role_2; + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%existing%' ORDER BY 1; +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; +\c - - - :master_port + +DROP ROLE IF EXISTS existing_role_1, existing_role_2, nonexisting_role_1, nonexisting_role_2; + +SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%existing%' ORDER BY 1; +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; +\c - - - :worker_1_port +SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; +\c - - - :master_port diff --git a/src/test/regress/sql/distributed_collations.sql b/src/test/regress/sql/distributed_collations.sql index 6e6e35263..31464443d 100644 --- a/src/test/regress/sql/distributed_collations.sql +++ b/src/test/regress/sql/distributed_collations.sql @@ -1,7 +1,6 @@ SET citus.next_shard_id TO 20050000; CREATE USER collationuser; -SELECT run_command_on_workers($$CREATE USER collationuser;$$); CREATE SCHEMA collation_tests AUTHORIZATION collationuser; CREATE SCHEMA collation_tests2 AUTHORIZATION collationuser; @@ -92,7 +91,6 @@ SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA collation_tests CASCADE; DROP SCHEMA collation_tests2 CASCADE; DROP USER collationuser; -SELECT run_command_on_workers($$DROP USER collationuser;$$); \c - - - :worker_1_port -- test creating a collation on a worker diff --git a/src/test/regress/sql/distributed_types.sql b/src/test/regress/sql/distributed_types.sql index f1ada4e3a..9f1ad5e9d 100644 --- a/src/test/regress/sql/distributed_types.sql +++ b/src/test/regress/sql/distributed_types.sql @@ -1,7 +1,6 @@ SET citus.next_shard_id TO 20010000; CREATE USER typeuser; -SELECT run_command_on_workers($$CREATE USER typeuser;$$); CREATE SCHEMA type_tests AUTHORIZATION typeuser; CREATE SCHEMA type_tests2 AUTHORIZATION typeuser; -- to test creation in a specific schema and moving to schema @@ -356,4 +355,3 @@ SET client_min_messages TO error; -- suppress 
cascading objects dropping DROP SCHEMA type_tests CASCADE; DROP SCHEMA type_tests2 CASCADE; DROP USER typeuser; -SELECT run_command_on_workers($$DROP USER typeuser;$$); diff --git a/src/test/regress/sql/failure_offline_move_shard_placement.sql b/src/test/regress/sql/failure_offline_move_shard_placement.sql new file mode 100644 index 000000000..81683398b --- /dev/null +++ b/src/test/regress/sql/failure_offline_move_shard_placement.sql @@ -0,0 +1,99 @@ +-- +-- failure_offline_move_shard_placement +-- + +-- The tests cover moving shard placements without using logical replication. + +CREATE SCHEMA IF NOT EXISTS move_shard_offline; +SET SEARCH_PATH = move_shard_offline; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 200; +SET citus.shard_replication_factor TO 1; +SELECT pg_backend_pid() as pid \gset + +SELECT citus.mitmproxy('conn.allow()'); + +CREATE TABLE t(id int PRIMARY KEY, int_data int); + +SELECT create_distributed_table('t', 'id'); + +CREATE VIEW shards_in_workers AS + SELECT shardid, + (CASE WHEN nodeport = :worker_1_port THEN 'worker1' ELSE 'worker2' END) AS worker + FROM pg_dist_placement NATURAL JOIN pg_dist_node + WHERE shardstate != 4 + ORDER BY 1,2 ASC; + +CREATE VIEW indices_on_shard_201 AS + SELECT * FROM run_command_on_workers( $cmd$ + SELECT CASE WHEN COUNT(*) > 0 THEN TRUE ELSE FALSE END + FROM pg_index WHERE indrelid = 'move_shard_offline.t_201'::regclass + $cmd$); + +CREATE VIEW find_index_for_shard_201_in_workers AS + SELECT CASE nodeport WHEN :worker_1_port THEN 'worker1' ELSE 'worker2' END + FROM indices_on_shard_201 WHERE result = 't'; + +-- Insert some data +INSERT INTO t SELECT x, x+1 FROM generate_series(1,100) AS f(x); + +-- Initial shard placements +SELECT * FROM shards_in_workers; +SELECT * FROM find_index_for_shard_201_in_workers; + +-- failure on sanity checks +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard_offline.t CASCADE").kill()'); +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); + +-- cancellation on sanity checks +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard_offline.t CASCADE").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); + +-- failure on move_shard table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard_offline.t").kill()'); +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); + +-- cancellation on move_shard table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard_offline.t").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); + +-- failure on blocking append_table_to_shard operation on target node +SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").kill()'); +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); + +-- cancellation on blocking append_table_to_shard operation on target node +SELECT citus.mitmproxy('conn.onQuery(query="worker_append_table_to_shard").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); + +-- failure on adding constraints on target node +SELECT 
citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT").kill()'); +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); + +-- cancellation on adding constraints on target node +SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); + +-- failure on CopyData operation on source node +SELECT citus.mitmproxy('conn.onCopyData().kill()'); +SELECT master_move_shard_placement(200, 'localhost', :worker_2_proxy_port, 'localhost', :worker_1_port, 'block_writes'); + +-- cancellation on CopyData operation on source node +SELECT citus.mitmproxy('conn.onCopyData().cancel(' || :pid || ')'); +SELECT master_move_shard_placement(200, 'localhost', :worker_2_proxy_port, 'localhost', :worker_1_port, 'block_writes'); + +CALL citus_cleanup_orphaned_shards(); + +-- Verify that the shard is not moved and the number of rows is still 100 +SELECT citus.mitmproxy('conn.allow()'); +SELECT * FROM shards_in_workers; +SELECT count(*) FROM t; +SELECT * FROM find_index_for_shard_201_in_workers; + +-- Verify that shard can be moved after a temporary failure +SELECT master_move_shard_placement(201, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port, 'block_writes'); +CALL citus_cleanup_orphaned_shards(); +SELECT * FROM shards_in_workers; +SELECT count(*) FROM t; +SELECT * FROM find_index_for_shard_201_in_workers; + +DROP SCHEMA move_shard_offline CASCADE; diff --git a/src/test/regress/sql/failure_on_create_subscription.sql b/src/test/regress/sql/failure_on_create_subscription.sql new file mode 100644 index 000000000..b5a649008 --- /dev/null +++ b/src/test/regress/sql/failure_on_create_subscription.sql @@ -0,0 +1,49 @@ +-- +-- failure_on_create_subscription +-- + +-- Since the result of these tests depends on the success of the background +-- process that creates the replication slot on the publisher, these +-- tests are kept in a separate file.
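Since the outcome of the CREATE SUBSCRIPTION failure depends on the timing of that background process, a quick manual check can help when debugging flaky runs. The following is only an illustrative aside using standard PostgreSQL catalog views, not part of the regression script:

-- on the source (publisher) worker: has the logical replication slot been created yet?
SELECT slot_name, slot_type, active FROM pg_replication_slots;
-- on the target (subscriber) worker: is the subscription registered and enabled?
SELECT subname, subenabled FROM pg_subscription;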
+ +CREATE SCHEMA IF NOT EXISTS move_shard; +SET SEARCH_PATH = move_shard; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 100; +SET citus.shard_replication_factor TO 1; +SELECT pg_backend_pid() as pid \gset + +SELECT citus.mitmproxy('conn.allow()'); + +CREATE TABLE t(id int PRIMARY KEY, int_data int, data text); + +SELECT create_distributed_table('t', 'id'); + +CREATE VIEW shards_in_workers AS +SELECT shardid, + (CASE WHEN nodeport = :worker_1_port THEN 'worker1' ELSE 'worker2' END) AS worker +FROM pg_dist_placement NATURAL JOIN pg_dist_node +WHERE shardstate != 4 +ORDER BY 1,2 ASC; + +-- Insert some data +INSERT INTO t SELECT x, x+1, MD5(random()::text) FROM generate_series(1,100000) AS f(x); + +-- Initial shard placements +SELECT * FROM shards_in_workers; + +-- failure on creating the subscription +SELECT citus.mitmproxy('conn.onQuery(query="CREATE SUBSCRIPTION").kill()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- Verify that the shard is not moved and the number of rows is still 100k +SELECT * FROM shards_in_workers; +SELECT count(*) FROM t; + +-- Verify that shard can be moved after a temporary failure +SELECT citus.mitmproxy('conn.allow()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +SELECT * FROM shards_in_workers; +SELECT count(*) FROM t; + +DROP SCHEMA move_shard CASCADE; diff --git a/src/test/regress/sql/failure_online_move_shard_placement.sql b/src/test/regress/sql/failure_online_move_shard_placement.sql new file mode 100644 index 000000000..714c7b3b1 --- /dev/null +++ b/src/test/regress/sql/failure_online_move_shard_placement.sql @@ -0,0 +1,129 @@ +-- +-- failure_online_move_shard_placement +-- + +-- The tests cover moving shard placements using logical replication.
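The moves in this file leave the transfer mode argument out and end up on the logical replication path, while the offline variant above pins the mode explicitly. A minimal sketch of the two explicit forms, reusing the shard id and port variables from these tests:

-- force the logical replication path
SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
-- block writes and copy the data without logical replication
SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'block_writes');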
+ +CREATE SCHEMA IF NOT EXISTS move_shard; +SET SEARCH_PATH = move_shard; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 100; +SET citus.shard_replication_factor TO 1; +SET citus.max_adaptive_executor_pool_size TO 1; +SELECT pg_backend_pid() as pid \gset + +SELECT citus.mitmproxy('conn.allow()'); + +CREATE TABLE t(id int PRIMARY KEY, int_data int, data text); +CREATE INDEX index_failure ON t(id); +SELECT create_distributed_table('t', 'id'); + +CREATE VIEW shards_in_workers AS +SELECT shardid, + (CASE WHEN nodeport = :worker_1_port THEN 'worker1' ELSE 'worker2' END) AS worker +FROM pg_dist_placement NATURAL JOIN pg_dist_node +WHERE shardstate != 4 +ORDER BY 1,2 ASC; + +-- Insert some data +INSERT INTO t SELECT x, x+1, MD5(random()::text) FROM generate_series(1,100000) AS f(x); + +-- Initial shard placements +SELECT * FROM shards_in_workers; + +-- failure on sanity checks +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard.t CASCADE").kill()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- cancellation on sanity checks +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS move_shard.t CASCADE").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- failure on move_shard table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard.t").kill()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- cancellation on move_shard table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE move_shard.t").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- failure on polling subscription state +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT count\(\*\) FROM pg_subscription_rel").kill()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- cancellation on polling subscription state +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT count\(\*\) FROM pg_subscription_rel").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- failure on getting subscriber state +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT sum").kill()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- cancellation on getting subscriber state +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT sum").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- failure on polling last write-ahead log location reported to origin WAL sender +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT min\(latest_end_lsn").kill()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- cancellation on polling last write-ahead log location reported to origin WAL sender +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT min\(latest_end_lsn").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- failure on dropping subscription +SELECT citus.mitmproxy('conn.onQuery(query="^DROP SUBSCRIPTION").kill()'); +SELECT master_move_shard_placement(101, 
'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- cancellation on dropping subscription +SELECT citus.mitmproxy('conn.onQuery(query="^DROP SUBSCRIPTION").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + + +-- failure on creating the primary key +SELECT citus.mitmproxy('conn.onQuery(query="t_pkey").kill()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- cancellation on creating the primary key +SELECT citus.mitmproxy('conn.onQuery(query="t_pkey").cancel(' || :pid || ')'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- failure on create index +SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +SELECT citus.mitmproxy('conn.allow()'); +-- let's create a few more indexes and fail in both +-- parallel mode and sequential mode +CREATE INDEX index_failure_2 ON t(id); +CREATE INDEX index_failure_3 ON t(id); +CREATE INDEX index_failure_4 ON t(id); +CREATE INDEX index_failure_5 ON t(id); + +-- failure on the third create index +ALTER SYSTEM SET citus.max_adaptive_executor_pool_size TO 1; +SELECT pg_reload_conf(); + +SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +SELECT citus.mitmproxy('conn.allow()'); + +-- failure on parallel create index +ALTER SYSTEM RESET citus.max_adaptive_executor_pool_size; +SELECT pg_reload_conf(); +SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); + +-- Verify that the shard is not moved and the number of rows is still 100k +SELECT citus.mitmproxy('conn.allow()'); +SELECT * FROM shards_in_workers; +SELECT count(*) FROM t; + +-- Verify that shard can be moved after a temporary failure +SELECT citus.mitmproxy('conn.allow()'); +SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); +SELECT * FROM shards_in_workers; +SELECT count(*) FROM t; + +DROP SCHEMA move_shard CASCADE; diff --git a/src/test/regress/sql/failure_tenant_isolation.sql b/src/test/regress/sql/failure_tenant_isolation.sql new file mode 100644 index 000000000..c183beee5 --- /dev/null +++ b/src/test/regress/sql/failure_tenant_isolation.sql @@ -0,0 +1,206 @@ +-- +-- failure_tenant_isolation +-- + +-- due to different libpq versions +-- some warning messages differ +-- between local and CI +SET client_min_messages TO ERROR; + +CREATE SCHEMA IF NOT EXISTS tenant_isolation; +SET SEARCH_PATH = tenant_isolation; +SET citus.shard_count TO 2; +SET citus.next_shard_id TO 300; +SET citus.shard_replication_factor TO 1; +SELECT pg_backend_pid() as pid \gset +SELECT citus.mitmproxy('conn.allow()'); + +CREATE TABLE table_1 (id int PRIMARY KEY); +CREATE TABLE table_2 (ref_id int REFERENCES table_1(id) UNIQUE, data int); + +SELECT create_distributed_table('table_1', 'id'); +SELECT create_distributed_table('table_2', 'ref_id'); + +CREATE VIEW shard_sizes AS + SELECT shardid, result AS row_count + FROM run_command_on_placements('table_1', 'SELECT count(*) FROM %s'); + +INSERT INTO table_1 +SELECT x +FROM generate_series(1, 100) AS f (x); + +INSERT INTO table_2 +SELECT x, x +FROM
generate_series(1, 100) AS f (x); + +-- initial shard sizes +SELECT * FROM shard_sizes ORDER BY 1; + +-- failure on colocated table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- cancellation on colocated table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_2").after(1).cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on colocated table population +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_2").after(2).kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- cancellation on colocated table population +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_2").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on colocated table constraints +SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(1).kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- cancellation on colocated table constraints +SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + + +-- failure on table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- cancellation on table creation +SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE tenant_isolation.table_1").after(1).cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on table population +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_1").after(2).kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- cancellation on table population +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO tenant_isolation.table_1").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on table constraints +SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(1).kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- cancellation on table constraints +SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + + +-- failure on dropping old colocated shard +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- cancellation on dropping old colocated shard +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_2").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on dropping old shard +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- cancellation on dropping old shard +SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS tenant_isolation.table_1").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 
'CASCADE'); + + +-- failure on foreign key creation +SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on foreign key creation +SELECT citus.mitmproxy('conn.onQuery(query="ADD CONSTRAINT table_2_ref_id_fkey FOREIGN KEY").after(2).cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + + +-- failure on shard split transaction +SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on shard split transaction +SELECT citus.mitmproxy('conn.onQuery(query="BEGIN").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on shard split transaction commit +SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on shard split transaction commit +SELECT citus.mitmproxy('conn.onQuery(query="COMMIT").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on transaction for dropping old tables +SELECT citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on transaction for dropping old tables +SELECT citus.mitmproxy('conn.after(1).onQuery(query="BEGIN").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on transaction for foreign key creation +SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on transaction for foreign key creation +SELECT citus.mitmproxy('conn.after(2).onQuery(query="BEGIN").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on transaction commit for foreign key creation +SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on transaction commit for foreign key creation +SELECT citus.mitmproxy('conn.after(1).onQuery(query="COMMIT").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on transaction prepare for dropping old tables +SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()'); + +-- due to libpq version differences, the output might change +-- hence use code block to catch the error +\set VERBOSITY terse +DO LANGUAGE plpgsql +$$ +BEGIN + SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + EXCEPTION WHEN OTHERS THEN + RAISE 'Command failed to execute'; +END; +$$; +\set VERBOSITY default + +-- failure on transaction prepare for dropping old tables +SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on transaction commit for dropping old tables +SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").kill()'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + +-- failure on transaction commit for dropping old tables +SELECT citus.mitmproxy('conn.after(2).onQuery(query="COMMIT").cancel(' || :pid || ')'); +SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE'); + + +-- verify that the tenant is not isolated +SELECT * FROM shard_sizes ORDER BY 1; + +-- Verify that tenant can be 
isolated after unsuccessful attempts +SELECT citus.mitmproxy('conn.allow()'); + +-- shard sizes after successful tenant isolation +CREATE TABLE old_shards AS SELECT shardid FROM pg_dist_shard; +WITH new_shard AS ( + SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE') AS shardid +) +SELECT row_count +FROM shard_sizes +JOIN new_shard ON shard_sizes.shardid = new_shard.shardid; + +SELECT row_count +FROM shard_sizes +WHERE shard_sizes.shardid NOT IN (SELECT * FROM old_shards) +ORDER BY 1; + +\set VERBOSITY terse +DROP SCHEMA tenant_isolation CASCADE; +\set VERBOSITY default diff --git a/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql b/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql index 546eeb89e..bd6c9a1ab 100644 --- a/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql +++ b/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql @@ -41,7 +41,7 @@ INSERT INTO referenced_table SELECT i,i FROM generate_series (0, 100) i; INSERT INTO referencing_table SELECT i,i FROM generate_series (0, 100) i; INSERT INTO referencing_table2 SELECT i,i FROM generate_series (0, 100) i; -SELECT master_move_shard_placement(15000009, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(15000009, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); SELECT count(*) FROM referencing_table2; @@ -71,7 +71,7 @@ ALTER TABLE referenceing_dist_table ADD CONSTRAINT very_very_very_very_very_very SELECT * FROM get_foreign_key_to_reference_table_commands('referenceing_dist_table'::regclass); -- and show that rebalancer works fine -SELECT master_move_shard_placement(15000018, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(15000018, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); \c - - - :worker_2_port diff --git a/src/test/regress/sql/grant_on_foreign_server_propagation.sql b/src/test/regress/sql/grant_on_foreign_server_propagation.sql new file mode 100644 index 000000000..75504ebb5 --- /dev/null +++ b/src/test/regress/sql/grant_on_foreign_server_propagation.sql @@ -0,0 +1,145 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve +\gset +\if :server_version_above_twelve +\else +\q +\endif +-- +-- GRANT_ON_FOREIGN_SERVER_PROPAGATION +-- We can't execute this file for PG12, as 'password_required' option for USER MAPPING +-- is introduced in PG13. 
+-- +CREATE SCHEMA "grant on server"; +SET search_path TO "grant on server"; + +-- remove one of the worker nodes to test adding a new node later +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); +select 1 from citus_add_node('localhost',:master_port,groupId=>0); + +SET citus.use_citus_managed_tables TO ON; + +-- create target table and insert some data +CREATE TABLE foreign_table_test (id integer NOT NULL, data text); +INSERT INTO foreign_table_test VALUES (1, 'text_test'); + +CREATE EXTENSION postgres_fdw; + +CREATE ROLE ownerrole WITH LOGIN; +GRANT ALL ON FOREIGN DATA WRAPPER postgres_fdw TO ownerrole WITH GRANT OPTION; +SET ROLE ownerrole; +-- verify we can create a server using the privilege on FDW granted to non-superuser role +CREATE SERVER "Foreign Server" + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'localhost', port :'master_port', dbname 'regression'); +RESET ROLE; + +CREATE USER MAPPING FOR CURRENT_USER + SERVER "Foreign Server" + OPTIONS (user 'postgres'); + +-- foreign table owned by superuser +CREATE FOREIGN TABLE foreign_table_owned_by_superuser ( + id integer NOT NULL, + data text +) + SERVER "Foreign Server" + OPTIONS (schema_name 'grant on server', table_name 'foreign_table_test'); + +-- create a non-superuser role +CREATE ROLE role_test_servers; +ALTER ROLE role_test_servers WITH LOGIN; +CREATE ROLE role_test_servers_2 WITH LOGIN; + +SET ROLE ownerrole; +-- verify that non-superuser role can GRANT on other non-superuser roles, on FDWs +-- if WITH GRANT OPTION is provided +GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO role_test_servers_2; +RESET ROLE; + +-- grant privileges +GRANT ALL ON SCHEMA "grant on server" TO role_test_servers WITH GRANT OPTION; +GRANT ALL ON foreign_table_owned_by_superuser TO role_test_servers WITH GRANT OPTION; +GRANT ALL ON foreign_table_test TO role_test_servers WITH GRANT OPTION; +GRANT ALL ON foreign server "Foreign Server" TO role_test_servers, postgres WITH GRANT OPTION; + +SET ROLE role_test_servers; + +GRANT ALL ON SCHEMA "grant on server" TO role_test_servers_2 WITH GRANT OPTION; +GRANT ALL ON foreign_table_owned_by_superuser TO role_test_servers_2 WITH GRANT OPTION; +GRANT ALL ON foreign_table_test TO role_test_servers_2 WITH GRANT OPTION; +GRANT ALL ON foreign server "Foreign Server" TO role_test_servers_2, postgres WITH GRANT OPTION; + +RESET ROLE; + +-- add user mapping for the role +CREATE USER MAPPING FOR role_test_servers + SERVER "Foreign Server" + OPTIONS (user 'role_test_servers', password_required 'false'); +CREATE USER MAPPING FOR role_test_servers_2 + SERVER "Foreign Server" + OPTIONS (user 'role_test_servers_2', password_required 'false'); + +SET ROLE role_test_servers_2; + +-- foreign table owned by non-superuser +CREATE FOREIGN TABLE foreign_table_owned_by_regular_user ( + id integer NOT NULL, + data text +) + SERVER "Foreign Server" + OPTIONS (schema_name 'grant on server', table_name 'foreign_table_test'); + +RESET ROLE; + +-- now add the node and verify that everything propagated correctly +select 1 from citus_add_node('localhost', :'worker_2_port'); + +\c - - - :worker_2_port +SET search_path TO "grant on server"; +SET ROLE role_test_servers_2; +SELECT * from foreign_table_owned_by_superuser; +SELECT * from foreign_table_owned_by_regular_user; + +SET ROLE postgres; +SELECT * from foreign_table_owned_by_superuser; +SELECT * from foreign_table_owned_by_regular_user; + +\c - - - :master_port + +-- verify that the non-superuser privileges have been propagated to the existing worker, +-- and also granted
on the newly added worker as well +SELECT run_command_on_workers($$ + SELECT fdwacl FROM pg_foreign_data_wrapper WHERE fdwname = 'postgres_fdw';$$); + +SELECT run_command_on_workers($$ + SELECT rolname FROM pg_roles WHERE oid IN (SELECT srvowner FROM pg_foreign_server WHERE srvname = 'Foreign Server');$$); + +REVOKE GRANT OPTION FOR ALL ON FOREIGN SERVER "Foreign Server" FROM role_test_servers CASCADE; +REVOKE GRANT OPTION FOR ALL ON FOREIGN DATA WRAPPER postgres_fdw FROM ownerrole CASCADE; + +SELECT run_command_on_workers($$ + SELECT fdwacl FROM pg_foreign_data_wrapper WHERE fdwname = 'postgres_fdw';$$); + +SELECT run_command_on_workers($$ + SELECT srvacl FROM pg_foreign_server WHERE srvname = 'Foreign Server';$$); + +REVOKE ALL ON FOREIGN DATA WRAPPER postgres_fdw FROM role_test_servers_2, ownerrole CASCADE; + +SELECT run_command_on_workers($$ + SELECT fdwacl FROM pg_foreign_data_wrapper WHERE fdwname = 'postgres_fdw';$$); + +REVOKE ALL ON FOREIGN SERVER "Foreign Server" FROM role_test_servers, postgres CASCADE; + +SELECT run_command_on_workers($$ + SELECT srvacl FROM pg_foreign_server WHERE srvname = 'Foreign Server';$$); + +REVOKE ALL ON SCHEMA "grant on server" FROM role_test_servers CASCADE; + +-- cleanup +SET client_min_messages TO ERROR; +DROP SERVER "Foreign Server" CASCADE; +DROP SCHEMA "grant on server" CASCADE; +DROP ROLE role_test_servers, role_test_servers_2, ownerrole; + +SELECT 1 FROM citus_remove_node('localhost', :master_port); diff --git a/src/test/regress/sql/grant_on_function_propagation.sql b/src/test/regress/sql/grant_on_function_propagation.sql new file mode 100644 index 000000000..b34cf1308 --- /dev/null +++ b/src/test/regress/sql/grant_on_function_propagation.sql @@ -0,0 +1,315 @@ +-- +-- GRANT_ON_FUNCTION_PROPAGATION +-- +CREATE SCHEMA grant_on_function; +SET search_path TO grant_on_function, public; + +-- remove one of the worker nodes to test adding a new node later +SET citus.shard_replication_factor TO 1; +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + +-- create some simple functions +CREATE OR REPLACE FUNCTION function_notice(text) +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE '%', $1; +END; +$$; + +CREATE OR REPLACE FUNCTION function_notice() +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Constant Notice'; +END; +$$; + +CREATE OR REPLACE FUNCTION function_hello() +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Hello World'; +END; +$$; + +SET citus.enable_metadata_sync TO OFF; +CREATE OR REPLACE FUNCTION not_distributed_function() +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'not_distributed_function'; +END; +$$; +RESET citus.enable_metadata_sync; + +-- create some users and grant them permission on grant_on_function schema +CREATE USER function_user_1; +CREATE USER function_user_2; +CREATE USER function_user_3; +GRANT ALL ON SCHEMA grant_on_function TO function_user_1, function_user_2, function_user_3; + +-- do some varying grants +GRANT EXECUTE ON FUNCTION function_notice() TO function_user_1; +GRANT EXECUTE ON FUNCTION function_notice() TO function_user_2 WITH GRANT OPTION; +SET ROLE function_user_2; +GRANT EXECUTE ON FUNCTION function_notice() TO function_user_3; +RESET ROLE; + +SELECT create_distributed_function('function_notice()'); + +-- re-distributing the same function with GRANTs should be fine +SELECT create_distributed_function('function_notice()'); + + +-- check grants propagated correctly after create_distributed_function +SELECT proname, pronargs, proacl FROM pg_proc 
WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :master_port + +-- do some varying revokes +REVOKE EXECUTE ON FUNCTION grant_on_function.function_notice() FROM function_user_1, function_user_3; +REVOKE GRANT OPTION FOR EXECUTE ON FUNCTION grant_on_function.function_notice() FROM function_user_2 CASCADE; + +-- check revokes propagated correctly for the distributed function function_notice() +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :master_port + +REVOKE EXECUTE ON FUNCTION grant_on_function.function_notice() FROM function_user_2; + +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :master_port + +-- distribute another function +SET search_path TO grant_on_function, public; +SELECT create_distributed_function('function_notice(text)'); + +-- GRANT .. ON ALL FUNCTIONS IN SCHEMA .. with multiple roles +GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA grant_on_function TO function_user_1, function_user_3; + +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :master_port + +-- REVOKE .. ON ALL FUNCTIONS IN SCHEMA .. with multiple roles +REVOKE EXECUTE ON ALL FUNCTIONS IN SCHEMA grant_on_function FROM function_user_1, function_user_3; + +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'function_notice' ORDER BY 1, 2; +\c - - - :master_port + +-- distribute another function +SET search_path TO grant_on_function, public; +SELECT create_distributed_function('function_hello()'); + +-- GRANT with multiple functions and multiple roles +-- function_hello needs no arguments since no other function has that name +GRANT EXECUTE ON FUNCTION grant_on_function.function_hello, grant_on_function.function_notice(), grant_on_function.function_notice(text), grant_on_function.not_distributed_function() TO function_user_2 WITH GRANT OPTION; +SET ROLE function_user_2; +GRANT EXECUTE ON FUNCTION grant_on_function.function_hello, grant_on_function.function_notice(), grant_on_function.function_notice(text), grant_on_function.not_distributed_function() TO function_user_1, function_user_3; +RESET ROLE; + +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('function_notice', 'function_hello', 'not_distributed_function') ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('function_notice', 'function_hello', 'not_distributed_function') ORDER BY 1, 2; +\c - - - :master_port + +-- add the previously removed node +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +-- check if the grants are propagated correctly +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('function_notice', 'function_hello') ORDER BY 1, 2; +\c - - - :worker_2_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('function_notice', 'function_hello') ORDER BY 1, 2; +\c 
- - - :master_port + +-- check that it works correctly with a user that is not distributed +CREATE OR REPLACE FUNCTION not_propagated_function_user_test() +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'not_propagated_function_user_test'; +END; +$$; +SELECT create_distributed_function('not_propagated_function_user_test()'); + +SET citus.enable_ddl_propagation TO off; +CREATE USER not_propagated_function_user_4; +SET citus.enable_ddl_propagation TO on; + +GRANT EXECUTE ON FUNCTION not_propagated_function_user_test TO function_user_1, not_propagated_function_user_4; + +-- check if the grants are propagated correctly +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('not_propagated_function_user_test') ORDER BY 1, 2; +\c - - - :worker_2_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('not_propagated_function_user_test') ORDER BY 1, 2; +\c - - - :master_port + +SET search_path TO grant_on_function, public; + +-- the following should fail as in plain PG +GRANT EXECUTE ON FUNCTION function_notice(), non_existent_function TO function_user_1; +GRANT EXECUTE ON FUNCTION function_notice() TO function_user_1, non_existent_user; +GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA grant_on_function, non_existent_schema TO function_user_1; + +DROP FUNCTION function_notice(), function_notice(text), function_hello, not_distributed_function, not_propagated_function_user_test; + +-- add similar tests for procedures + +-- remove one of the worker nodes to test adding a new node later +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + +-- create some simple procedures +CREATE OR REPLACE PROCEDURE procedure_notice(text) +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE '%', $1; +END; +$proc$; + +CREATE OR REPLACE PROCEDURE procedure_notice() +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE 'Constant Notice'; +END; +$proc$; + +CREATE OR REPLACE PROCEDURE procedure_hello() +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE 'Hello World'; +END; +$proc$; + +SET citus.enable_metadata_sync TO OFF; +CREATE OR REPLACE PROCEDURE not_distributed_procedure() +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE 'not_distributed_procedure'; +END; +$proc$; +RESET citus.enable_metadata_sync; + +-- create some users and grant them permission on grant_on_function schema +CREATE USER procedure_user_1; +CREATE USER procedure_user_2; +CREATE USER procedure_user_3; +GRANT ALL ON SCHEMA grant_on_function TO procedure_user_1, procedure_user_2, procedure_user_3; + +-- do some varying grants +GRANT EXECUTE ON PROCEDURE procedure_notice() TO procedure_user_1; +GRANT EXECUTE ON PROCEDURE procedure_notice() TO procedure_user_2 WITH GRANT OPTION; +SET ROLE procedure_user_2; +GRANT EXECUTE ON PROCEDURE procedure_notice() TO procedure_user_3; +RESET ROLE; + +SELECT create_distributed_function('procedure_notice()'); + +-- check grants propagated correctly after create_distributed_function +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :master_port + +-- do some varying revokes +REVOKE EXECUTE ON PROCEDURE grant_on_function.procedure_notice() FROM procedure_user_1, procedure_user_3; +REVOKE GRANT OPTION FOR EXECUTE ON PROCEDURE grant_on_function.procedure_notice() FROM procedure_user_2 CASCADE; + +-- check revokes propagated correctly for the distributed procedure procedure_notice() +SELECT proname, pronargs,
proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :master_port + +REVOKE EXECUTE ON PROCEDURE grant_on_function.procedure_notice() FROM procedure_user_2; + +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :master_port + +-- distribute another procedure +SET search_path TO grant_on_function, public; +SELECT create_distributed_function('procedure_notice(text)'); + +-- GRANT .. ON ALL PROCEDURES IN SCHEMA .. with multiple roles +GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA grant_on_function TO procedure_user_1, procedure_user_3; + +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :master_port + +-- REVOKE .. ON ALL PROCEDURES IN SCHEMA .. with multiple roles +REVOKE EXECUTE ON ALL PROCEDURES IN SCHEMA grant_on_function FROM procedure_user_1, procedure_user_3; + +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname = 'procedure_notice' ORDER BY 1, 2; +\c - - - :master_port + +-- distribute another procedure +SET search_path TO grant_on_function, public; +SELECT create_distributed_function('procedure_hello()'); + +-- GRANT with multiple procedures and multiple roles +-- procedure_hello needs no arguments since no other procedure has that name +GRANT EXECUTE ON PROCEDURE grant_on_function.procedure_hello, grant_on_function.procedure_notice(), grant_on_function.procedure_notice(text), grant_on_function.not_distributed_procedure() TO procedure_user_2 WITH GRANT OPTION; +SET ROLE procedure_user_2; +GRANT EXECUTE ON PROCEDURE grant_on_function.procedure_hello, grant_on_function.procedure_notice(), grant_on_function.procedure_notice(text), grant_on_function.not_distributed_procedure() TO procedure_user_1, procedure_user_3; +RESET ROLE; + +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('procedure_notice', 'procedure_hello', 'not_distributed_procedure') ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('procedure_notice', 'procedure_hello', 'not_distributed_procedure') ORDER BY 1, 2; +\c - - - :master_port + +-- add the previously removed node +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +-- check if the grants are propagated correctly +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('procedure_notice', 'procedure_hello') ORDER BY 1, 2; +\c - - - :worker_2_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('procedure_notice', 'procedure_hello') ORDER BY 1, 2; +\c - - - :master_port + +-- check that it works correctly with a user that is not distributed +CREATE OR REPLACE PROCEDURE not_propagated_procedure_user_test() +LANGUAGE PLPGSQL AS $proc$ +BEGIN + RAISE NOTICE 'not_propagated_procedure_user_test'; +END; +$proc$; +SELECT create_distributed_function('not_propagated_procedure_user_test()'); + +SET citus.enable_ddl_propagation TO off; +CREATE USER not_propagated_procedure_user_4; +SET citus.enable_ddl_propagation TO on; + +GRANT EXECUTE ON PROCEDURE 
not_propagated_procedure_user_test TO procedure_user_1, not_propagated_procedure_user_4; + +-- check if the grants are propagated correctly +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('not_propagated_procedure_user_test') ORDER BY 1, 2; +\c - - - :worker_2_port +SELECT proname, pronargs, proacl FROM pg_proc WHERE proname IN ('not_propagated_procedure_user_test') ORDER BY 1, 2; +\c - - - :master_port + +SET search_path TO grant_on_function, public; + +-- the following should fail as in plain PG +GRANT EXECUTE ON PROCEDURE procedure_notice(), non_existent_procedure TO procedure_user_1; +GRANT EXECUTE ON PROCEDURE procedure_notice() TO procedure_user_1, non_existent_user; +GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA grant_on_function, non_existent_schema TO procedure_user_1; + +DROP PROCEDURE procedure_notice(), procedure_notice(text), procedure_hello, not_distributed_procedure, not_propagated_procedure_user_test; + +SELECT run_command_on_coordinator_and_workers('DROP SCHEMA grant_on_function CASCADE'); +DROP USER function_user_1, function_user_2, function_user_3, not_propagated_function_user_4; +DROP USER procedure_user_1, procedure_user_2, procedure_user_3, not_propagated_procedure_user_4; diff --git a/src/test/regress/sql/grant_on_schema_propagation.sql b/src/test/regress/sql/grant_on_schema_propagation.sql index cf8832309..8769180df 100644 --- a/src/test/regress/sql/grant_on_schema_propagation.sql +++ b/src/test/regress/sql/grant_on_schema_propagation.sql @@ -12,9 +12,9 @@ CREATE SCHEMA non_dist_schema; SET citus.enable_ddl_propagation TO on; -- create roles on all nodes -SELECT run_command_on_coordinator_and_workers('CREATE USER role_1'); -SELECT run_command_on_coordinator_and_workers('CREATE USER role_2'); -SELECT run_command_on_coordinator_and_workers('CREATE USER role_3'); +CREATE USER role_1; +CREATE USER role_2; +CREATE USER role_3; -- do some varying grants GRANT USAGE, CREATE ON SCHEMA dist_schema TO role_1 WITH GRANT OPTION; @@ -220,4 +220,4 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspna DROP TABLE public_schema_table; -SELECT run_command_on_coordinator_and_workers('DROP ROLE role_1, role_2, role_3'); +DROP ROLE role_1, role_2, role_3; diff --git a/src/test/regress/sql/grant_on_sequence_propagation.sql b/src/test/regress/sql/grant_on_sequence_propagation.sql new file mode 100644 index 000000000..80a30ae5a --- /dev/null +++ b/src/test/regress/sql/grant_on_sequence_propagation.sql @@ -0,0 +1,168 @@ +-- +-- GRANT_ON_SEQUENCE_PROPAGATION +-- +SET citus.shard_replication_factor TO 1; +CREATE SCHEMA grant_on_sequence; +SET search_path TO grant_on_sequence, public; + +-- create some simple sequences +CREATE SEQUENCE dist_seq_0; +CREATE SEQUENCE dist_seq_1; +CREATE SEQUENCE non_dist_seq_0; + +-- create some users and grant them permission on grant_on_sequence schema +CREATE USER seq_user_0; +CREATE USER seq_user_1; +CREATE USER seq_user_2; +GRANT ALL ON SCHEMA grant_on_sequence TO seq_user_0, seq_user_1, seq_user_2; + +-- do some varying grants +GRANT SELECT ON SEQUENCE dist_seq_0 TO seq_user_0; +GRANT USAGE ON SEQUENCE dist_seq_0 TO seq_user_1 WITH GRANT OPTION; +SET ROLE seq_user_1; +GRANT USAGE ON SEQUENCE dist_seq_0 TO seq_user_2; +RESET ROLE; + +-- distribute a sequence +-- reminder: a sequence is distributed when used in a distributed table AND the cluster has metadata workers +CREATE TABLE seq_test_0 (a int, b bigint DEFAULT nextval('dist_seq_0')); +SELECT create_distributed_table('seq_test_0', 'a'); +SELECT
start_metadata_sync_to_node('localhost', :worker_1_port); + +-- check grants propagated correctly after sequence is distributed +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- do some varying revokes +REVOKE SELECT ON SEQUENCE dist_seq_0 FROM seq_user_0, seq_user_2; +REVOKE GRANT OPTION FOR USAGE ON SEQUENCE dist_seq_0 FROM seq_user_1 CASCADE; + +-- check revokes propagated correctly for the distributed sequence dist_seq_0 +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +REVOKE USAGE ON SEQUENCE dist_seq_0 FROM seq_user_1; + +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_0' ORDER BY 1; +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- distribute another sequence +CREATE TABLE seq_test_1 (a int, b bigint DEFAULT nextval('dist_seq_1')); +SELECT create_distributed_table('seq_test_1', 'a'); + +-- GRANT .. ON ALL SEQUENCES IN SCHEMA .. with multiple roles +GRANT ALL ON ALL SEQUENCES IN SCHEMA grant_on_sequence TO seq_user_0, seq_user_2; + +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1') ORDER BY 1; +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1') ORDER BY 1; +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- REVOKE .. ON ALL SEQUENCES IN SCHEMA .. 
with multiple roles +REVOKE ALL ON ALL SEQUENCES IN SCHEMA grant_on_sequence FROM seq_user_0, seq_user_2; + +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1') ORDER BY 1; +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1') ORDER BY 1; +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- GRANT with multiple sequences and multiple roles +GRANT UPDATE ON SEQUENCE dist_seq_0, dist_seq_1, non_dist_seq_0 TO seq_user_1 WITH GRANT OPTION; +SET ROLE seq_user_1; +GRANT UPDATE ON SEQUENCE dist_seq_0, dist_seq_1, non_dist_seq_0 TO seq_user_0, seq_user_2; +RESET ROLE; + +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1', 'non_dist_seq_0') ORDER BY 1; +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1', 'non_dist_seq_0') ORDER BY 1; +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- sync metadata to another node +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); + +-- check if the grants are propagated correctly +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1', 'non_dist_seq_0') ORDER BY 1; +\c - - - :worker_2_port +SELECT relname, relacl FROM pg_class WHERE relname IN ('dist_seq_0', 'dist_seq_1', 'non_dist_seq_0') ORDER BY 1; +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- check that it works correctly with a user that is not distributed +CREATE SEQUENCE dist_seq_2; +ALTER TABLE seq_test_1 ALTER COLUMN b SET DEFAULT nextval('dist_seq_2'); + +SET citus.enable_ddl_propagation TO off; +CREATE USER not_propagated_sequence_user_4; +SET citus.enable_ddl_propagation TO on; + +-- when running below command, not_propagated_sequence_user_4 should be propagated +-- to the worker nodes as part of dist_seq_2's dependencies +GRANT USAGE ON sequence dist_seq_2 TO seq_user_0, not_propagated_sequence_user_4; + +-- check if the grants are propagated correctly +-- check that we can see the not_propagated_sequence_user_4 +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_2' ORDER BY 1; +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_2' ORDER BY 1; +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- the following should fail as in plain PG +GRANT USAGE ON sequence dist_seq_0, non_existent_sequence TO seq_user_0; +GRANT UPDATE ON sequence dist_seq_0 TO seq_user_0, non_existent_user; +GRANT SELECT ON ALL SEQUENCES IN SCHEMA grant_on_sequence, non_existent_schema TO seq_user_0; + +-- check that GRANT ON TABLE that redirects to sequences works properly +CREATE SEQUENCE dist_seq_3; +ALTER TABLE seq_test_1 ALTER COLUMN b SET DEFAULT nextval('dist_seq_3'); + +GRANT UPDATE ON TABLE seq_test_1, dist_seq_3 TO seq_user_0; + +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_3' ORDER BY 1; +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_3' ORDER BY 1; +\c - - - :master_port +SET 
citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +REVOKE ALL ON TABLE seq_test_1, dist_seq_3 FROM seq_user_0; +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_3' ORDER BY 1; +\c - - - :worker_1_port +SELECT relname, relacl FROM pg_class WHERE relname = 'dist_seq_3' ORDER BY 1; +\c - - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path = grant_on_sequence, public; +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); + +DROP SCHEMA grant_on_sequence CASCADE; +SET search_path TO public; diff --git a/src/test/regress/sql/ignoring_orphaned_shards.sql b/src/test/regress/sql/ignoring_orphaned_shards.sql index 2b56269ef..774d7cd19 100644 --- a/src/test/regress/sql/ignoring_orphaned_shards.sql +++ b/src/test/regress/sql/ignoring_orphaned_shards.sql @@ -63,7 +63,7 @@ SELECT 1 FROM citus_add_node('localhost', :master_port, 0); SELECT 1 FROM citus_set_node_property('localhost', :master_port, 'shouldhaveshards', true); SELECT logicalrelid FROM pg_dist_partition WHERE colocationid = 92448300 ORDER BY 1; -SELECT citus_move_shard_placement(92448300, 'localhost', :worker_1_port, 'localhost', :master_port); +SELECT citus_move_shard_placement(92448300, 'localhost', :worker_1_port, 'localhost', :master_port, 'block_writes'); SELECT shardid, shardstate, nodeport FROM pg_dist_shard_placement WHERE shardid = 92448300 ORDER BY placementid; -- Add a new table that should get colocated with rep1 automatically, but @@ -101,7 +101,7 @@ INSERT INTO rep1 VALUES (1); ROLLBACK; -- Cause the orphaned shard to be local -SELECT 1 FROM citus_drain_node('localhost', :master_port); +SELECT 1 FROM citus_drain_node('localhost', :master_port, 'block_writes'); SELECT shardid, shardstate, nodeport FROM pg_dist_shard_placement WHERE shardid = 92448300 ORDER BY placementid; -- Make sure we don't send a query to the orphaned shard if it's local diff --git a/src/test/regress/sql/intermediate_results.sql b/src/test/regress/sql/intermediate_results.sql index a2b0cf686..096d2451a 100644 --- a/src/test/regress/sql/intermediate_results.sql +++ b/src/test/regress/sql/intermediate_results.sql @@ -254,7 +254,6 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], -- test refreshing mat views SET client_min_messages TO ERROR; -SELECT run_command_on_workers($$CREATE USER some_other_user;$$); CREATE USER some_other_user; SELECT run_command_on_workers($$GRANT ALL ON DATABASE regression TO some_other_user;$$); GRANT ALL ON DATABASE regression TO some_other_user; diff --git a/src/test/regress/sql/logical_replication.sql b/src/test/regress/sql/logical_replication.sql new file mode 100644 index 000000000..e379edf36 --- /dev/null +++ b/src/test/regress/sql/logical_replication.sql @@ -0,0 +1,61 @@ +SET citus.next_shard_id TO 6830000; + +CREATE SCHEMA logical_replication; +SET search_path TO logical_replication; + +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; + +CREATE TABLE dist ( + id bigserial PRIMARY KEY +); + +SELECT oid AS postgres_oid FROM pg_roles where rolname = 'postgres' \gset + +SELECT create_distributed_table('dist', 'id'); + +SELECT 1 from citus_add_node('localhost', :master_port, groupId := 0); + +\c - - - :worker_1_port +SET search_path TO logical_replication; +CREATE PUBLICATION citus_shard_move_publication_:postgres_oid FOR TABLE dist_6830000; + +\c - - - :master_port 
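The publication created just above (and the subscription created next) reuses the citus_shard_move_publication_/citus_shard_move_subscription_ prefix that the shard-move machinery itself uses, presumably so that the later citus_move_shard_placement(..., 'force_logical') call can be shown to clean such leftovers up. As a rough, illustrative aside only (not part of the test), leftovers of that kind could be listed by name pattern on any node:

SELECT pubname FROM pg_publication WHERE pubname LIKE 'citus_shard_move_publication_%';
SELECT subname FROM pg_subscription WHERE subname LIKE 'citus_shard_move_subscription_%';
SELECT slot_name, active FROM pg_replication_slots ORDER BY slot_name;

The count(*) checks that follow in the test are the compact form of the same idea.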
+SET search_path TO logical_replication; +\set connection_string '\'user=postgres host=localhost port=' :worker_1_port '\'' +CREATE SUBSCRIPTION citus_shard_move_subscription_:postgres_oid CONNECTION :connection_string PUBLICATION citus_shard_move_publication_:postgres_oid; + +SELECT count(*) from pg_subscription; +SELECT count(*) from pg_publication; +SELECT count(*) from pg_replication_slots; + +\c - - - :worker_1_port + +SELECT count(*) from pg_subscription; +SELECT count(*) from pg_publication; +SELECT count(*) from pg_replication_slots; + +\c - - - :master_port + +select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); + +SELECT citus_remove_node('localhost', :master_port); + +SELECT count(*) from pg_subscription; +SELECT count(*) from pg_publication; +SELECT count(*) from pg_replication_slots; + +\c - - - :worker_1_port + +SELECT count(*) from pg_subscription; +SELECT count(*) from pg_publication; +SELECT count(*) from pg_replication_slots; +\c - - - :worker_2_port + +SELECT count(*) from pg_subscription; +SELECT count(*) from pg_publication; +SELECT count(*) from pg_replication_slots; + +\c - - - :master_port +SET search_path TO public; +DROP SCHEMA logical_replication CASCADE; diff --git a/src/test/regress/sql/master_copy_shard_placement.sql b/src/test/regress/sql/master_copy_shard_placement.sql index 8d5ed13b3..30f36d56d 100644 --- a/src/test/regress/sql/master_copy_shard_placement.sql +++ b/src/test/regress/sql/master_copy_shard_placement.sql @@ -95,7 +95,9 @@ SET citus.shard_replication_factor TO 1; -- metadata sync will succeed even if we have rep > 1 tables INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles_single_shard'::regclass::oid, 0); +SET client_min_messages TO warning; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +RESET client_min_messages; CREATE TABLE mx_table(a int); SELECT create_distributed_table('mx_table', 'a'); diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index 91641f22c..1f0ad09d0 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -6,7 +6,6 @@ SET citus.next_placement_id TO 1500000; -- supress notice messages to make sure that the tests -- do not diverge with enterprise SET client_min_messages TO WARNING; -SELECT run_command_on_workers($$CREATE ROLE metadata_sync_helper_role WITH LOGIN;$$); CREATE ROLE metadata_sync_helper_role WITH LOGIN; GRANT ALL ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; RESET client_min_messages; diff --git a/src/test/regress/sql/multi_alter_table_row_level_security.sql b/src/test/regress/sql/multi_alter_table_row_level_security.sql new file mode 100644 index 000000000..88f9629d8 --- /dev/null +++ b/src/test/regress/sql/multi_alter_table_row_level_security.sql @@ -0,0 +1,486 @@ +-- +-- MULTI_ALTER_TABLE_ROW_LEVEL_SECURITY +-- +-- Test checks whether row level security can be enabled via +-- ALTER TABLE ... ENABLE | DISABLE ROW LEVEL SECURITY +SET citus.next_shard_id TO 1800000; +SET citus.shard_replication_factor TO 1; + +-- Setup user accounts and schema for tests. 
+CREATE USER rls_table_owner; +CREATE USER rls_tenant_1; +CREATE USER rls_tenant_2; + +SET client_min_messages TO WARNING; + +CREATE SCHEMA alter_table_rls; + +SET search_path TO alter_table_rls; +ALTER ROLE rls_table_owner SET search_path TO alter_table_rls; +ALTER ROLE rls_tenant_1 SET search_path TO alter_table_rls; +ALTER ROLE rls_tenant_2 SET search_path TO alter_table_rls; + +GRANT USAGE ON SCHEMA alter_table_rls TO rls_table_owner; +GRANT USAGE ON SCHEMA alter_table_rls TO rls_tenant_1; +GRANT USAGE ON SCHEMA alter_table_rls TO rls_tenant_2; + +-- +-- The first phase tests enabling Row Level Security only after the table has been +-- turned into a distributed table. +-- +-- This demonstrates that enabling Row Level Security on a distributed table correctly +-- enables Row Level Security on all shards that were in the system. +-- +CREATE TABLE events ( + tenant_id int, + id int, + type text +); + +SELECT create_distributed_table('events','tenant_id'); + +-- running ALTER TABLE ... OWNER TO ... only after distribution, otherwise ownership +-- information is lost. +ALTER TABLE events OWNER TO rls_table_owner; + +INSERT INTO events VALUES (1,1,'push'); +INSERT INTO events VALUES (2,2,'push'); + +-- grant access for tenants to table and shards +GRANT SELECT ON TABLE events TO rls_tenant_1; +GRANT SELECT ON TABLE events TO rls_tenant_2; + +-- Base line test to verify all rows are visible +SELECT * FROM events ORDER BY 1; + +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Enable row level security +ALTER TABLE events ENABLE ROW LEVEL SECURITY; + +-- Switch user to owner, all rows should be visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Switch user that has been granted rights, should not be able to see any rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Create policy for tenants to read access their own rows +CREATE POLICY user_mod ON events FOR SELECT TO rls_tenant_1, rls_tenant_2 USING (current_user = 'rls_tenant_' || tenant_id::text); + +-- Switch user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Switch other user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- We will test both renaming and deletion of Policies by renaming the user_mod policy +-- and drop the renamed version. 
If tenants cannot see rows afterwards, both the RENAME and +-- the DROP have worked correctly +ALTER POLICY user_mod ON events RENAME TO user_mod_renamed; +DROP POLICY user_mod_renamed ON events; +-- Switch to tenant user, should not see any rows after the above DDLs +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Test Force Row Level Security, should also apply RLS to table owner +ALTER TABLE events FORCE ROW LEVEL SECURITY; + +-- Verify all rows are still visible for admin +SELECT * FROM events ORDER BY 1; + +-- Switch user to owner, no rows should be visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Allow admins to read all rows +CREATE POLICY owner_mod ON events TO rls_table_owner USING (true) WITH CHECK (true); + +-- Verify all rows are visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; +-- Verify the owner can insert a record +INSERT INTO events VALUES (3,3,'push'); +-- See that it is in the table +SELECT * FROM events ORDER BY 1; +-- Verify the owner can delete a record +DELETE FROM events WHERE tenant_id = 3; +-- Verify the record is gone +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Alter the policy and verify no rows are visible for admin +ALTER POLICY owner_mod ON events USING (false); + +-- Verify no rows are visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Test No Force Row Level Security, owner will not go through RLS anymore +ALTER TABLE events NO FORCE ROW LEVEL SECURITY; + +-- Verify all rows are visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Disable row level security +ALTER TABLE events DISABLE ROW LEVEL SECURITY; + +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Clean up test +DROP TABLE events; + +-- +-- The second phase tests enabling Row Level Security before the table is turned into +-- a distributed table. +-- +-- This demonstrates that tables that have Row Level Security enabled before they are +-- distributed correctly end up with Row Level Security enabled on the shards after +-- distribution. +-- +CREATE TABLE events ( + tenant_id int, + id int, + type text +); + +INSERT INTO events VALUES (1,1,'push'); +INSERT INTO events VALUES (2,2,'push'); + +-- grant access for tenants to table +GRANT SELECT ON TABLE events TO rls_tenant_1; +GRANT SELECT ON TABLE events TO rls_tenant_2; + +-- Baseline test to verify all rows are visible +SELECT * FROM events ORDER BY 1; + +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Enable row level security +ALTER TABLE events ENABLE ROW LEVEL SECURITY; +ALTER TABLE events FORCE ROW LEVEL SECURITY; + + +-- create all types of policies before distribution +CREATE POLICY owner_read ON events FOR SELECT TO rls_table_owner USING (true); +CREATE POLICY owner_insert ON events FOR INSERT TO rls_table_owner WITH CHECK (false); +CREATE POLICY owner_delete ON events FOR DELETE TO rls_table_owner USING (false); +CREATE POLICY owner_update ON events FOR UPDATE TO rls_table_owner WITH CHECK (false); + +-- Distribute table +SELECT create_distributed_table('events','tenant_id'); + +-- running ALTER TABLE ... OWNER TO ... only after distribution, otherwise ownership +-- information is lost. 
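As an aside, one way to confirm that the ownership change below actually reaches the shard placements is sketched here; it is illustrative only and assumes the usual <table>_<shardid> naming of shard relations on the workers:

SELECT result FROM run_command_on_workers($$
    SELECT string_agg(DISTINCT pg_get_userbyid(relowner), ',')
    FROM pg_class WHERE relname LIKE 'events_%'
$$);

run_command_on_workers() returns one row per worker, so a single distinct owner per node is the expected outcome.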
+ALTER TABLE events OWNER TO rls_table_owner; + +SET ROLE rls_table_owner; + +-- Verify owner can see all rows +SELECT * FROM events ORDER BY 1; +-- Verify owner cannot insert anything +INSERT INTO events VALUES (3,3,'push'); +-- Verify owner cannot delete anything +DELETE FROM events WHERE tenant_id = 1; +-- Verify owner cannot update anything +UPDATE events SET id = 10 WHERE tenant_id = 2; +-- Double check the table content +SELECT * FROM events ORDER BY 1; + +RESET ROLE; + +-- Switch user that has been granted rights, should not be able to see any rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Create policy for tenants to read their own rows +CREATE POLICY user_mod ON events TO PUBLIC USING (current_user = 'rls_tenant_' || tenant_id::text) WITH CHECK (false); + +-- Switch user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Switch other user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +ALTER POLICY user_mod ON events TO rls_tenant_1; + +-- Switch user that has been allowed, should be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Switch other user that has been disallowed +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +ALTER POLICY user_mod ON events TO rls_tenant_1, rls_tenant_2; + +-- Switch user that stayed allowed, should be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Switch other user that got allowed +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Set RLS to NO FORCE +ALTER TABLE events NO FORCE ROW LEVEL SECURITY; + +-- Switch to owner to verify all rows are visible +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Disable row level security +ALTER TABLE events DISABLE ROW LEVEL SECURITY; + +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Clean up test +DROP TABLE events; + +-- +-- The third phase tests creating policies before distributing the tables. Only after +-- distribution will RLS be enabled. +-- +-- This test demonstrates that all RLS policy/FORCE settings are configured the same way even +-- if RLS is not enabled at the time of distribution. +-- +CREATE TABLE events ( + tenant_id int, + id int, + type text +); + +INSERT INTO events VALUES + (1,1,'push'), + (2,2,'push'); + +-- grant access for tenants to table +GRANT ALL ON TABLE events TO rls_tenant_1; +GRANT ALL ON TABLE events TO rls_tenant_2; + +-- Baseline test to verify all rows are visible +SELECT * FROM events ORDER BY 1; + +-- Switch user that has been granted rights and read table +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Configure FORCE and some policies before distribution +ALTER TABLE events FORCE ROW LEVEL SECURITY; +CREATE POLICY user_mod ON events TO rls_tenant_1, rls_tenant_2 + USING (current_user = 'rls_tenant_' || tenant_id::text) + WITH CHECK (current_user = 'rls_tenant_' || tenant_id::text AND id = 2); + +-- Distribute table +SELECT create_distributed_table('events','tenant_id'); +ALTER TABLE events ENABLE ROW LEVEL SECURITY; + +-- running ALTER TABLE ... OWNER TO ... only after distribution, otherwise ownership +-- information is lost. 
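Similarly, whether the ENABLE/FORCE row level security settings reach the shard placements can be spot-checked straight from pg_class; relrowsecurity and relforcerowsecurity are the standard PostgreSQL catalog columns. A sketch only, again assuming the <table>_<shardid> shard naming on the workers:

SELECT result FROM run_command_on_workers($$
    SELECT bool_and(relrowsecurity)::text || ' ' || bool_and(relforcerowsecurity)::text
    FROM pg_class WHERE relname LIKE 'events_%'
$$);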
+ALTER TABLE events OWNER TO rls_table_owner; + +-- Verify owner cannot see any rows due to FORCE RLS +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- owner_mod will only be created after distribution so that it can test whether FORCE has been +-- propagated during distribution +CREATE POLICY owner_mod ON events TO rls_table_owner USING (true); + +-- Verify owner now can see rows +SET ROLE rls_table_owner; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Switch user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_1; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- Switch other user that has been granted rights, should be able to see their own rows +SET ROLE rls_tenant_2; +SELECT * FROM events ORDER BY 1; +RESET ROLE; + +-- We will test two different updates to the database. +-- tenant 1 should not be able to update its record because its id is not set to 2 as +-- required by the WITH CHECK clause, tenant 2 should be able to update its record. +SET ROLE rls_tenant_1; +UPDATE events SET type = 'pull'; +RESET ROLE; +SET ROLE rls_tenant_2; +UPDATE events SET type = 'pull'; +RESET ROLE; +-- only items for tenant 2 should read pull in the result. +SELECT * FROM events ORDER BY 1; + +-- allow all users to update their own values +ALTER POLICY user_mod ON events WITH CHECK (true); +SET ROLE rls_tenant_1; +UPDATE events SET type = 'pull tenant 1'; +RESET ROLE; +SET ROLE rls_tenant_2; +UPDATE events SET type = 'pull tenant 2'; +RESET ROLE; + +-- records should read their own tenant pull items +SELECT * FROM events ORDER BY 1; + +-- Clean up test +DROP TABLE events; + +-- +-- The fourth phase tests some disallowed policies for distributed tables, +-- loosely based on the example at https://www.postgresql.org/docs/9.5/static/ddl-rowsecurity.html +-- +CREATE TABLE groups ( + group_id int PRIMARY KEY, + group_name text NOT NULL +); + +INSERT INTO groups VALUES + (1, 'low'), + (2, 'medium'), + (5, 'high'); + +-- definition of users' privilege levels +CREATE TABLE users ( + user_name text PRIMARY KEY, + group_id int NOT NULL +); + +INSERT INTO users VALUES + ('alice', 5), + ('bob', 2), + ('mallory', 2); + +-- table holding the information to be protected +CREATE TABLE information ( + info text, + group_id int NOT NULL +); + +INSERT INTO information VALUES + ('barely secret', 1), + ('slightly secret', 2), + ('very secret', 5); + +ALTER TABLE information ENABLE ROW LEVEL SECURITY; + +-- this policy is disallowed because it has a subquery in it +CREATE POLICY fp_s ON information FOR SELECT + USING (group_id <= (SELECT group_id FROM users WHERE user_name = current_user)); + +-- this attempt at distribution fails because the table has a disallowed expression +SELECT create_distributed_table('information', 'group_id'); + +-- DROP the expression so we can distribute the table +DROP POLICY fp_s ON information; +SELECT create_distributed_table('information', 'group_id'); + +-- Try to create the expression on a distributed table, this should also fail +CREATE POLICY fp_s ON information FOR SELECT + USING (group_id <= (SELECT group_id FROM users WHERE user_name = current_user)); + +-- Clean up test +DROP TABLE information, groups, users; + +SET citus.next_shard_id TO 1810000; +CREATE TABLE test(x int, y int); +SELECT create_distributed_table('test','x'); + +GRANT SELECT ON TABLE test TO rls_tenant_2; + +ALTER TABLE test ENABLE ROW LEVEL SECURITY; +CREATE POLICY id_2_only +ON test +FOR SELECT TO rls_tenant_2 +USING (x = 2); + +INSERT INTO test SELECT i,i FROM 
generate_series(0,100)i; +SELECT count(*) FROM test ; + +SET ROLE rls_tenant_2; +SELECT count(*) FROM test ; +RESET ROLE; + +SELECT master_move_shard_placement(get_shard_id_for_distribution_column('test', 2), + 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode:='block_writes'); + +SET ROLE rls_tenant_2; +SELECT count(*) FROM test ; +RESET ROLE; + +-- Show that having nondistributed table via policy is checked +BEGIN; + CREATE TABLE table_1_check_policy ( + tenant_id int, + id int + ); + + CREATE TABLE table_2_check_policy ( + tenant_id int, + id int + ); + + ALTER TABLE table_1_check_policy ENABLE ROW LEVEL SECURITY; + ALTER TABLE table_1_check_policy FORCE ROW LEVEL SECURITY; + + CREATE OR REPLACE FUNCTION func_in_transaction(param_1 int, param_2 table_2_check_policy) + RETURNS boolean + LANGUAGE plpgsql AS + $$ + BEGIN + return param_1 > 5; + END; + $$; + + CREATE POLICY owner_read ON table_1_check_policy FOR SELECT USING (func_in_transaction(id, NULL::table_2_check_policy)); + CREATE POLICY owner_insert ON table_1_check_policy FOR INSERT WITH CHECK (func_in_transaction(id, NULL::table_2_check_policy)); + + -- It should error out + SELECT create_distributed_table('table_1_check_policy', 'tenant_id'); +ROLLBACK; + +-- Clean up test suite +DROP SCHEMA alter_table_rls CASCADE; + +DROP USER rls_table_owner; +DROP USER rls_tenant_1; +DROP USER rls_tenant_2; diff --git a/src/test/regress/sql/multi_alter_table_row_level_security_escape.sql b/src/test/regress/sql/multi_alter_table_row_level_security_escape.sql new file mode 100644 index 000000000..8f5c0e224 --- /dev/null +++ b/src/test/regress/sql/multi_alter_table_row_level_security_escape.sql @@ -0,0 +1,21 @@ +-- +-- MULTI_ALTER_TABLE_ROW_LEVEL_SECURITY_ESCAPE +-- +-- Test set that checks all row level security commands for +-- accepting identifiers that require escaping +SET citus.next_shard_id TO 1900000; + +CREATE SCHEMA alter_table_rls_quote; + +SET search_path TO alter_table_rls_quote; + +CREATE TABLE "t1""" (id int, name text); +CREATE POLICY "policy1""" ON "t1""" USING (true); + +SELECT create_distributed_table('t1"', 'id'); + +ALTER POLICY "policy1""" ON "t1""" RENAME TO "policy2"""; +ALTER POLICY "policy2""" ON "t1""" USING (false); +DROP POLICY "policy2""" ON "t1"""; + +DROP SCHEMA alter_table_rls_quote CASCADE; diff --git a/src/test/regress/sql/multi_colocated_shard_rebalance.sql b/src/test/regress/sql/multi_colocated_shard_rebalance.sql index 0c6d5da17..9d05170cc 100644 --- a/src/test/regress/sql/multi_colocated_shard_rebalance.sql +++ b/src/test/regress/sql/multi_colocated_shard_rebalance.sql @@ -26,7 +26,7 @@ SELECT master_create_empty_shard('table6_append'); -- Mark tables as non-mx tables, in order to be able to test master_copy_shard_placement UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN - ('table1_group1'::regclass, 'table2_group1'::regclass, 'table5_groupX'::regclass); + ('table1_group1'::regclass, 'table2_group1'::regclass, 'table5_groupX'::regclass); -- test copy @@ -41,9 +41,12 @@ WHERE colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; --- copy colocated shards +-- try to copy colocated shards without a replica identity SELECT master_copy_shard_placement(13000000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false); +-- copy colocated shards +SELECT master_copy_shard_placement(13000000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false, 'force_logical'); + -- 
status after shard copy SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport FROM @@ -77,7 +80,7 @@ WHERE ORDER BY s.shardid, sp.nodeport; -- copy NOT colocated shard -SELECT master_copy_shard_placement(13000012, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false); +SELECT master_copy_shard_placement(13000012, 'localhost', :worker_1_port, 'localhost', :worker_2_port, false, 'force_logical'); -- status after shard copy SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport @@ -128,11 +131,8 @@ WHERE colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; --- try force_logical -SELECT master_move_shard_placement(13000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); - -- move colocated shards -SELECT master_move_shard_placement(13000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT master_move_shard_placement(13000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); -- status after shard move SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport @@ -166,7 +166,7 @@ WHERE ORDER BY s.shardid, sp.nodeport; -- move NOT colocated shard -SELECT master_move_shard_placement(13000013, 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT master_move_shard_placement(13000013, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); -- status after shard move SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport @@ -193,7 +193,7 @@ WHERE ORDER BY s.shardid, sp.nodeport; -- move shard in append distributed table -SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); -- status after shard move SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport @@ -208,7 +208,7 @@ ORDER BY s.shardid, sp.nodeport; -- try to move shard from wrong node -SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(13000021, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); -- test shard move with foreign constraints @@ -226,7 +226,7 @@ SELECT create_distributed_table('table2_group1', 'table1_id', 'hash'); -- Mark the tables as non-mx tables UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN - ('table1_group1'::regclass, 'table2_group1'::regclass); + ('table1_group1'::regclass, 'table2_group1'::regclass); -- status before shard rebalance SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport @@ -283,7 +283,8 @@ INSERT INTO serial_move_test (key) VALUES (15) RETURNING *; -- confirm the shard id SELECT * FROM run_command_on_placements('serial_move_test', 'SELECT DISTINCT key FROM %s WHERE key = 15') WHERE result = '15' AND shardid = 13000034; -SELECT master_move_shard_placement(13000034, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT master_move_shard_placement(13000034, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); +CALL citus_cleanup_orphaned_shards(); -- confirm the successfull move SELECT * FROM run_command_on_placements('serial_move_test', 'SELECT DISTINCT key FROM %s WHERE key = 15') WHERE result = '15' AND shardid = 13000034; @@ -292,12 +293,133 @@ SELECT * FROM run_command_on_placements('serial_move_test', 'SELECT DISTINCT key INSERT INTO serial_move_test (key) VALUES 
(15) RETURNING *; INSERT INTO serial_move_test (key) VALUES (15) RETURNING *; + +-- lets do some failure testing +CREATE TABLE logical_failure_test (key int); +SET citus.shard_replication_factor TO 1; +SET citus.shard_count TO 4; +SELECT create_distributed_table('logical_failure_test', 'key'); + +-- ensure that the shard is created for this user +\c - - - :worker_2_port +\dt logical_failure_test_13000038 + +DROP TABLE logical_failure_test_13000038; + +-- should fail since the command wouldn't be able to connect to the worker_1 +\c - - - :master_port +SELECT master_move_shard_placement(13000038, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); +CALL citus_cleanup_orphaned_shards(); + +DROP TABLE logical_failure_test; + +-- lets test the logical replication modes +CREATE TABLE test_with_pkey (key int PRIMARY KEY, value int NOT NULL); +SET citus.shard_replication_factor TO 1; +SET citus.shard_count TO 4; +SELECT create_distributed_table('test_with_pkey', 'key', colocate_with => 'none'); + +-- should succeed since there is a replica identity defined +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +CALL citus_cleanup_orphaned_shards(); + +-- should succeed since we still have a replica identity +ALTER TABLE test_with_pkey REPLICA IDENTITY FULL; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'auto'); +CALL citus_cleanup_orphaned_shards(); + +-- make sure we have the replica identity after the move +SELECT result FROM run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + +-- this time should fail since we don't have replica identity any more +ALTER TABLE test_with_pkey REPLICA IDENTITY NOTHING; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'auto'); +CALL citus_cleanup_orphaned_shards(); + +-- make sure we have the replica identity after the move +SELECT result FROM run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + +-- should succeed since we still have a replica identity +ALTER TABLE test_with_pkey REPLICA IDENTITY USING INDEX test_with_pkey_pkey; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +CALL citus_cleanup_orphaned_shards(); + +-- make sure we have the replica identity after the move +SELECT result FROM run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + +-- one final test with shard_transfer_mode auto +CREATE UNIQUE INDEX req_rep_idx ON test_with_pkey(key, value); +ALTER TABLE test_with_pkey REPLICA IDENTITY USING INDEX req_rep_idx; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'auto'); +CALL citus_cleanup_orphaned_shards(); + +-- make sure we have the replica identity after the move +SELECT result FROM run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + +ALTER TABLE test_with_pkey REPLICA IDENTITY NOTHING; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); +CALL citus_cleanup_orphaned_shards(); + +-- make sure we have the replica identity after the move +SELECT result FROM 
run_command_on_placements( 'test_with_pkey', 'SELECT relreplident FROM pg_class WHERE relname = ''%s''') WHERE shardid = 13000042; + +-- should succeed but not use logical replication +ALTER TABLE test_with_pkey REPLICA IDENTITY NOTHING; + +SET client_min_messages TO DEBUG1; +SELECT master_move_shard_placement(13000042, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'block_writes'); +CALL citus_cleanup_orphaned_shards(); + +SET client_min_messages TO DEFAULT; + +-- we don't support multiple shard moves in a single transaction +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical') +FROM + pg_dist_shard_placement where nodeport = :worker_1_port AND + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_with_pkey'::regclass); +CALL citus_cleanup_orphaned_shards(); + +-- similar test with explicit transaction block +BEGIN; + + SELECT master_move_shard_placement(13000042, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); + SELECT master_move_shard_placement(13000044, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); +COMMIT; + CALL citus_cleanup_orphaned_shards(); + +-- we do support the same with block writes +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes') +FROM + pg_dist_shard_placement where nodeport = :worker_1_port AND + shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_with_pkey'::regclass); +CALL citus_cleanup_orphaned_shards(); + +-- we should be able to move shard placements after COMMIT/ABORT +BEGIN; + + SELECT master_move_shard_placement(13000043, 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode:='force_logical'); +COMMIT; +CALL citus_cleanup_orphaned_shards(); + +SELECT master_move_shard_placement(13000045, 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode:='force_logical'); +CALL citus_cleanup_orphaned_shards(); + +BEGIN; + + SELECT master_move_shard_placement(13000043, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); +ABORT; + +SELECT master_move_shard_placement(13000045, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); +CALL citus_cleanup_orphaned_shards(); + -- we should be able to move shard placements of partitioend tables CREATE SCHEMA move_partitions; CREATE TABLE move_partitions.events ( - id serial, - t timestamptz default now(), - payload text + id serial, + t timestamptz default now(), + payload text ) PARTITION BY RANGE(t); @@ -318,6 +440,14 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port AND shardstate != 4 ORDER BY shardid LIMIT 1; +CALL citus_cleanup_orphaned_shards(); + +-- force logical replication +SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical') +FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port +ORDER BY shardid LIMIT 1; +CALL citus_cleanup_orphaned_shards(); SELECT count(*) FROM move_partitions.events; @@ -329,6 +459,7 @@ SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localh FROM 
pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port AND shardstate != 4 ORDER BY shardid LIMIT 1; +CALL citus_cleanup_orphaned_shards(); SELECT count(*) FROM move_partitions.events; @@ -337,6 +468,7 @@ SELECT master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localh FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'move_partitions.events'::regclass AND nodeport = :worker_2_port AND shardstate != 4 ORDER BY shardid LIMIT 1; +CALL citus_cleanup_orphaned_shards(); SELECT count(*) FROM move_partitions.events; @@ -346,3 +478,7 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid::text LIKE 'move_partitions.events%' AND nodeport = :worker_1_port; DROP TABLE move_partitions.events; + +-- set back to the defaults and drop the table +SET client_min_messages TO DEFAULT; +DROP TABLE test_with_pkey; diff --git a/src/test/regress/sql/multi_create_role_dependency.sql b/src/test/regress/sql/multi_create_role_dependency.sql new file mode 100644 index 000000000..ed7782801 --- /dev/null +++ b/src/test/regress/sql/multi_create_role_dependency.sql @@ -0,0 +1,22 @@ +-- Show that dependent user/role objects can be created safely +CREATE USER non_super_user_test_user; +CREATE SCHEMA role_dependency_schema; + +CREATE TYPE role_dependency_schema.superusers_type AS (a int, b int); +GRANT CREATE ON SCHEMA role_dependency_schema to non_super_user_test_user; +GRANT USAGE ON SCHEMA role_dependency_schema to non_super_user_test_user; +GRANT USAGE ON TYPE role_dependency_schema.superusers_type TO non_super_user_test_user; + +SET ROLE non_super_user_test_user; +CREATE TABLE role_dependency_schema.non_super_user_table(a int, b role_dependency_schema.superusers_type); +SELECT create_distributed_table('role_dependency_schema.non_super_user_table','a'); + +-- Show that table and superuser's type is marked as distributed +RESET ROLE; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where pg_identify_object_as_address(classid, objid, objsubid)::text like '%non_super_user_table%'; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where pg_identify_object_as_address(classid, objid, objsubid)::text like '%non_super_user_table%';$$) ORDER BY 1,2; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where pg_identify_object_as_address(classid, objid, objsubid)::text like '%superusers_type%'; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where pg_identify_object_as_address(classid, objid, objsubid)::text like '%superusers_type%';$$) ORDER BY 1,2; + +DROP SCHEMA role_dependency_schema CASCADE; diff --git a/src/test/regress/sql/multi_create_table.sql b/src/test/regress/sql/multi_create_table.sql index 4a1070514..82febb0fa 100644 --- a/src/test/regress/sql/multi_create_table.sql +++ b/src/test/regress/sql/multi_create_table.sql @@ -252,6 +252,49 @@ SELECT create_distributed_table('shard_count_table_2', 'a', shard_count:=12, col DROP TABLE shard_count_table, shard_count_table_2; + +-- test shard splitting doesn't break shard_count parameter +-- when shard count is given table needs to have exactly that +-- many shards, regardless of shard splitting on other tables + +-- ensure there is no colocation group 
with 9 shards +SELECT count(*) FROM pg_dist_colocation WHERE shardcount = 9; +SET citus.shard_count TO 9; + +CREATE TABLE shard_split_table (a int, b int); +SELECT create_distributed_table ('shard_split_table', 'a'); +SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5); + +-- show the difference in pg_dist_colocation and citus_tables shard counts +SELECT + ( + SELECT shardcount FROM pg_dist_colocation WHERE colocationid IN + ( + SELECT colocation_id FROM citus_tables WHERE table_name = 'shard_split_table'::regclass + ) + ) AS "pg_dist_colocation", + (SELECT shard_count FROM citus_tables WHERE table_name = 'shard_split_table'::regclass) AS "citus_tables"; + +CREATE TABLE shard_split_table_2 (a int, b int); +SELECT create_distributed_table ('shard_split_table_2', 'a', shard_count:=9); + +SELECT a.colocation_id = b.colocation_id FROM citus_tables a, citus_tables b + WHERE a.table_name = 'shard_split_table'::regclass AND b.table_name = 'shard_split_table_2'::regclass; + +SELECT shard_count FROM citus_tables WHERE table_name = 'shard_split_table_2'::regclass; + +-- also check we don't break regular behaviour +CREATE TABLE shard_split_table_3 (a int, b int); +SELECT create_distributed_table ('shard_split_table_3', 'a'); + +SELECT a.colocation_id = b.colocation_id FROM citus_tables a, citus_tables b + WHERE a.table_name = 'shard_split_table'::regclass AND b.table_name = 'shard_split_table_3'::regclass; + +SELECT shard_count FROM citus_tables WHERE table_name = 'shard_split_table_3'::regclass; + +DROP TABLE shard_split_table, shard_split_table_2, shard_split_table_3; + + -- test a shard count with an empty default colocation group -- ensure there is no colocation group with 13 shards SELECT count(*) FROM pg_dist_colocation WHERE shardcount = 13; diff --git a/src/test/regress/sql/multi_create_users.sql b/src/test/regress/sql/multi_create_users.sql new file mode 100644 index 000000000..fa002483e --- /dev/null +++ b/src/test/regress/sql/multi_create_users.sql @@ -0,0 +1,14 @@ +-- +-- MULTI_CREATE_USERS +-- +-- Create users on all nodes, they're currently automatically +-- replicated. 
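Because CREATE USER is now propagated by Citus itself (this diff removes the per-worker CREATE USER calls in several test files), the roles created below should exist on every node. A hedged sketch of how that could be verified, not part of the test:

SELECT rolname FROM pg_roles WHERE rolname = 'full_access';
SELECT result FROM run_command_on_workers($$SELECT count(*) FROM pg_roles WHERE rolname = 'full_access'$$);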
+-- + +CREATE USER full_access; +CREATE USER read_access; +CREATE USER no_access; + +-- allow access to various users +GRANT ALL ON TABLE lineitem, orders, lineitem, customer, nation, part, supplier TO full_access; +GRANT SELECT ON TABLE lineitem, orders, lineitem, customer, nation, part, supplier TO read_access; diff --git a/src/test/regress/sql/multi_deparse_function.sql b/src/test/regress/sql/multi_deparse_function.sql index 2b8450b4f..ee8781daf 100644 --- a/src/test/regress/sql/multi_deparse_function.sql +++ b/src/test/regress/sql/multi_deparse_function.sql @@ -196,8 +196,9 @@ $cmd$); -- Rename the function back to the original name in the coordinator ALTER FUNCTION summation RENAME TO add; +SET citus.enable_ddl_propagation TO on; CREATE ROLE function_role; -SELECT run_command_on_workers('CREATE ROLE function_role'); +SET citus.enable_ddl_propagation TO off; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION add OWNER TO function_role diff --git a/src/test/regress/sql/multi_deparse_procedure.sql b/src/test/regress/sql/multi_deparse_procedure.sql index 2f582ed00..826789d60 100644 --- a/src/test/regress/sql/multi_deparse_procedure.sql +++ b/src/test/regress/sql/multi_deparse_procedure.sql @@ -176,8 +176,9 @@ ALTER PROCEDURE summation RENAME TO raise_info; $cmd$); ALTER PROCEDURE summation RENAME TO raise_info; +SET citus.enable_ddl_propagation TO on; CREATE ROLE procedure_role; -SELECT run_command_on_workers($$CREATE ROLE procedure_role;$$); +SET citus.enable_ddl_propagation TO off; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info OWNER TO procedure_role @@ -222,4 +223,3 @@ $cmd$); SET client_min_messages TO WARNING; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; DROP ROLE procedure_role; -SELECT run_command_on_workers($$DROP ROLE procedure_role;$$); diff --git a/src/test/regress/sql/multi_index_statements.sql b/src/test/regress/sql/multi_index_statements.sql index 0d876d246..02222c17d 100644 --- a/src/test/regress/sql/multi_index_statements.sql +++ b/src/test/regress/sql/multi_index_statements.sql @@ -111,16 +111,31 @@ CREATE INDEX CONCURRENTLY lineitem_concurrently_index ON public.lineitem (l_orde CREATE TABLE local_table (id integer, name text); CREATE INDEX CONCURRENTLY ON local_table(id); --- Verify that we warn out on CLUSTER command for distributed tables and no parameter -CLUSTER index_test_hash USING index_test_hash_index_a; -CLUSTER; - -- Vefify we don't warn out on CLUSTER command for local tables CREATE INDEX CONCURRENTLY local_table_index ON local_table(id); CLUSTER local_table USING local_table_index; DROP TABLE local_table; +-- Verify that we can run CLUSTER command +CLUSTER index_test_hash USING index_test_hash_index_a; + +-- Verify that we ERROR on CLUSTER VERBOSE +CLUSTER VERBOSE index_test_hash USING index_test_hash_index_a; + +-- Verify that we WARN on CLUSTER ALL +CLUSTER; + +-- Verify that all indexes got created on the master node and one of the workers +SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; +\c - - - :worker_1_port +SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_%' ORDER BY relname LIMIT 1); +SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash_%'; +SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range_%'; +SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append_%'; + +-- Verify that we actually run the CLUSTER COMMAND +SELECT 
sum(indisclustered::integer) FROM pg_index WHERE indrelid::regclass::text SIMILAR TO '%\d'; \c - - - :master_port SET search_path TO multi_index_statements, public; diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql index 0d67bb68b..0cf828391 100644 --- a/src/test/regress/sql/multi_metadata_sync.sql +++ b/src/test/regress/sql/multi_metadata_sync.sql @@ -553,10 +553,6 @@ SELECT master_remove_node('localhost', :worker_2_port); -- the master user needs superuser permissions to change the replication model CREATE USER mx_user WITH SUPERUSER; -\c - - - :worker_1_port -CREATE USER mx_user; -\c - - - :worker_2_port -CREATE USER mx_user; \c - mx_user - :master_port -- Create an mx table as a different user @@ -605,10 +601,6 @@ UPDATE pg_dist_placement \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -DROP USER mx_user; -\c - - - :worker_1_port -DROP USER mx_user; -\c - - - :worker_2_port DROP USER mx_user; -- Check that create_reference_table creates the metadata on workers diff --git a/src/test/regress/sql/multi_modifying_xacts.sql b/src/test/regress/sql/multi_modifying_xacts.sql index a53153b9b..72cff9d68 100644 --- a/src/test/regress/sql/multi_modifying_xacts.sql +++ b/src/test/regress/sql/multi_modifying_xacts.sql @@ -906,10 +906,6 @@ SELECT count(*) FROM pg_dist_transaction; -- first create the new user on all nodes CREATE USER test_user; -\c - - - :worker_1_port -CREATE USER test_user; -\c - - - :worker_2_port -CREATE USER test_user; -- now connect back to the master with the new user \c - test_user - :master_port @@ -1014,7 +1010,6 @@ SELECT * FROM run_command_on_workers('ALTER USER test_user_new RENAME TO test_us DROP TABLE reference_modifying_xacts, hash_modifying_xacts, hash_modifying_xacts_second, reference_failure_test, numbers_hash_failure_test; -SELECT * FROM run_command_on_workers('DROP USER test_user'); DROP USER test_user; -- set up foreign keys to test transactions with co-located and reference tables diff --git a/src/test/regress/sql/multi_move_mx.sql b/src/test/regress/sql/multi_move_mx.sql index 740d19462..f9f8072d0 100644 --- a/src/test/regress/sql/multi_move_mx.sql +++ b/src/test/regress/sql/multi_move_mx.sql @@ -68,7 +68,7 @@ LIMIT 1; -- Move a shard from worker 1 to worker 2 SELECT - master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port) + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE @@ -78,6 +78,46 @@ ORDER BY shardid LIMIT 1; +-- Test changing citus.node_conninfo on the target node affects the +-- CREATE SUBSCRIPTION command for shard move + +\c - - - :worker_2_port + +ALTER SYSTEM SET citus.node_conninfo TO 'sslrootcert=/non/existing/certificate.crt sslmode=verify-full'; +SELECT pg_reload_conf(); + +\c - - - :master_port + +BEGIN; +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_1_port +ORDER BY + shardid +LIMIT 1; +ROLLBACK; + +\c - - - :worker_2_port + +-- before reseting citus.node_conninfo, check that CREATE SUBSCRIPTION +-- with citus_use_authinfo takes into account node_conninfo even when +-- one of host, port, or user parameters are not specified. 
+-- +-- We need to specify host and port to not get an hba error, so we test +-- only with ommitting user. + +CREATE SUBSCRIPTION subs_01 CONNECTION 'host=''localhost'' port=57637' +PUBLICATION pub_01 WITH (citus_use_authinfo=true); + +ALTER SYSTEM RESET citus.node_conninfo; +SELECT pg_reload_conf(); + +\c - - - :master_port + -- Check that the shard and its colocated shard is moved, but not the other shards SELECT logicalrelid, shardid, nodename, nodeport diff --git a/src/test/regress/sql/multi_multiuser.sql b/src/test/regress/sql/multi_multiuser.sql index f379c75f1..fdb20e783 100644 --- a/src/test/regress/sql/multi_multiuser.sql +++ b/src/test/regress/sql/multi_multiuser.sql @@ -19,8 +19,6 @@ CREATE TABLE singleshard (id integer, val integer); SELECT create_distributed_table('singleshard', 'id'); -- turn off propagation to avoid Enterprise processing the following section -SET citus.enable_ddl_propagation TO off; - CREATE USER full_access; CREATE USER usage_access; CREATE USER read_access; @@ -29,6 +27,8 @@ CREATE ROLE some_role; GRANT some_role TO full_access; GRANT some_role TO read_access; +SET citus.enable_ddl_propagation TO off; + GRANT ALL ON TABLE test TO full_access; GRANT SELECT ON TABLE test TO read_access; @@ -40,14 +40,6 @@ GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access; SET citus.enable_ddl_propagation TO DEFAULT; \c - - - :worker_1_port -CREATE USER full_access; -CREATE USER usage_access; -CREATE USER read_access; -CREATE USER no_access; -CREATE ROLE some_role; -GRANT some_role TO full_access; -GRANT some_role TO read_access; - GRANT ALL ON TABLE test_1420000 TO full_access; GRANT SELECT ON TABLE test_1420000 TO read_access; @@ -55,14 +47,6 @@ GRANT ALL ON TABLE test_1420002 TO full_access; GRANT SELECT ON TABLE test_1420002 TO read_access; \c - - - :worker_2_port -CREATE USER full_access; -CREATE USER usage_access; -CREATE USER read_access; -CREATE USER no_access; -CREATE ROLE some_role; -GRANT some_role TO full_access; -GRANT some_role TO read_access; - GRANT ALL ON TABLE test_1420001 TO full_access; GRANT SELECT ON TABLE test_1420001 TO read_access; @@ -472,6 +456,9 @@ DROP TABLE test, test_coloc, colocation_table; +SELECT run_command_on_workers($$DROP OWNED BY full_access$$); +SELECT run_command_on_workers($$DROP OWNED BY some_role$$); +SELECT run_command_on_workers($$DROP OWNED BY read_access$$); DROP USER full_access; DROP USER read_access; DROP USER no_access; diff --git a/src/test/regress/sql/multi_multiuser_auth.sql b/src/test/regress/sql/multi_multiuser_auth.sql new file mode 100644 index 000000000..2d9681f9b --- /dev/null +++ b/src/test/regress/sql/multi_multiuser_auth.sql @@ -0,0 +1,154 @@ +-- +-- MULTI_MULTIUSER_AUTH +-- +-- Test authentication logic +-- + +-- We'll test different passwords for every user/host combo + +\set alice_master_pw mug-loth-oaf-mailman-languor +\set alice_worker_1_pw ratline-belabor-fatigue-chive-esdras +\set alice_worker_2_pw boogie-chick-asterisk-nevus-sonny +\set alice_fallback_pw :alice_worker_2_pw + +\set bob_master_pw nimbly-milepost-sandbag-cucumber-marksman +\set bob_worker_1_pw triplex-royalty-warranty-stand-cheek +\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile +\set bob_fallback_pw :bob_worker_1_pw + +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; +\gset +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; +\gset + +-- alice is a superuser so she can update own password +CREATE USER alice 
PASSWORD :'alice_master_pw' SUPERUSER; +CREATE USER bob PASSWORD :'bob_master_pw'; + +-- note we enter a wrong password for Alice to test cache invalidation +INSERT INTO pg_dist_authinfo (nodeid, rolename, authinfo) VALUES +(-1, 'alice', 'password=' || :'alice_master_pw'), +(:worker_1_id, 'alice', 'password=' || 'wrong_password'), +(0, 'alice', 'password=' || :'alice_fallback_pw'), +(-1, 'bob', 'password=' || :'bob_master_pw'), +(0, 'bob', 'password=' || :'bob_fallback_pw'), +(:worker_2_id, 'bob', 'password=' || :'bob_worker_2_pw'); + +\c - - - :worker_1_port +set citus.enable_ddl_propagation to off; +ALTER ROLE alice PASSWORD :'alice_worker_1_pw' SUPERUSER; +ALTER ROLE bob PASSWORD :'bob_worker_1_pw'; +reset citus.enable_ddl_propagation; + +-- note the wrong password for loopbacks here; task-tracker will fail +INSERT INTO pg_dist_authinfo (nodeid, rolename, authinfo) VALUES +(0, 'alice', 'password=dummy'), +(-1, 'alice', 'password=' || 'wrong_password'), +(-1, 'bob', 'password=' || :'bob_worker_1_pw'), +(0, 'bob', 'password=' || :'bob_worker_2_pw') +; + +\c - - - :worker_2_port +set citus.enable_ddl_propagation to off; +ALTER ROLE alice PASSWORD :'alice_worker_2_pw' SUPERUSER; +ALTER ROLE bob PASSWORD :'bob_worker_2_pw'; +reset citus.enable_ddl_propagation; + +INSERT INTO pg_dist_authinfo (nodeid, rolename, authinfo) VALUES +(0, 'alice', 'password=dummy'), +(-1, 'alice', 'password=' || 'wrong_password'), +(-1, 'bob', 'password=' || :'bob_worker_2_pw'), +(0, 'bob', 'password=' || :'bob_worker_1_pw') +; + +\c - - - :master_port +-- build format strings to specify PW +SELECT format('user=%s host=localhost port=%s password=%s dbname=regression', + 'alice', :master_port, :'alice_master_pw') AS alice_conninfo; +\gset +SELECT format('user=%s host=localhost port=%s password=%s dbname=regression', + 'bob', :master_port, :'bob_master_pw') AS bob_conninfo; +\gset + +GRANT ALL ON TABLE lineitem, orders, lineitem, customer, nation, part, supplier TO alice, bob; + +\c :alice_conninfo + +-- router query (should break because of bad password) +INSERT INTO customer VALUES (12345, 'name', NULL, 5, 'phone', 123.45, 'segment', 'comment'); + +-- fix alice's worker1 password ... 
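Before the fix below, it can be useful to see which pg_dist_authinfo rows exist for the failing role; an illustrative query only (not part of the test), reusing the worker_1_id variable set earlier in this file:

SELECT nodeid, rolename, authinfo
FROM pg_dist_authinfo
WHERE rolename = 'alice' AND nodeid IN (-1, 0, :worker_1_id)
ORDER BY nodeid;

Per the \set ..._fallback_pw naming above, the nodeid 0 entry acts as the fallback row, while the worker-specific row still carries the deliberately wrong password at this point.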
+UPDATE pg_dist_authinfo +SET authinfo = ('password=' || :'alice_worker_1_pw') +WHERE nodeid = :worker_1_id AND rolename = 'alice'; + +-- and try again because cache should clear, should +-- just get invalid constraint this time, no bad pw +INSERT INTO customer VALUES (12345, 'name', NULL, 5, 'phone', 123.45, 'segment', 'comment'); + +SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders + WHERE l_orderkey = o_orderkey AND l_orderkey > 9030 + GROUP BY o_orderstatus + HAVING sum(l_linenumber) > 1000 + ORDER BY o_orderstatus; + +-- fix worker passwords, which should invalidate task tracker caches +\c - postgres - :worker_1_port + +UPDATE pg_dist_authinfo +SET authinfo = ('password=' || :'alice_worker_1_pw') +WHERE nodeid = -1 AND rolename = 'alice'; + +\c - postgres - :worker_2_port + +UPDATE pg_dist_authinfo +SET authinfo = ('password=' || :'alice_worker_2_pw') +WHERE nodeid = -1 AND rolename = 'alice'; + +\c :alice_conninfo +SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders + WHERE l_orderkey = o_orderkey AND l_orderkey > 9030 + GROUP BY o_orderstatus + HAVING sum(l_linenumber) > 1000 + ORDER BY o_orderstatus; + +-- adaptive query +RESET citus.task_executor_type; +SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders + WHERE l_orderkey = o_orderkey AND l_orderkey > 9030 + GROUP BY o_orderstatus + HAVING sum(l_linenumber) > 1000 + ORDER BY o_orderstatus; + +-- create and distribute table +CREATE TABLE wonderland (id integer, name text); +SELECT create_distributed_table('wonderland', 'id'); + +-- copy +COPY wonderland FROM STDIN WITH (FORMAT 'csv'); +1,White Rabbit +2,Mad Hatter +3,Queen of Hearts +\. + +SELECT COUNT(*) FROM wonderland; + +DROP TABLE wonderland; + +\c :bob_conninfo + +-- bob can't change authinfo: not a superuser + +DELETE FROM pg_dist_authinfo WHERE rolename = 'bob'; + +CREATE TABLE bob_lineitem (LIKE lineitem); + +SELECT create_distributed_table('bob_lineitem', 'l_orderkey', 'hash'); +INSERT INTO bob_lineitem SELECT * FROM lineitem; + +SET citus.enable_repartition_joins TO ON; +SELECT count(*) > 1 from bob_lineitem b , lineitem l where b.l_orderkey = l.l_orderkey LIMIT 10; + +SELECT COUNT(*) FROM bob_lineitem; + +DROP TABLE bob_lineitem; diff --git a/src/test/regress/sql/multi_multiuser_basic_queries.sql b/src/test/regress/sql/multi_multiuser_basic_queries.sql new file mode 100644 index 000000000..42ca859d6 --- /dev/null +++ b/src/test/regress/sql/multi_multiuser_basic_queries.sql @@ -0,0 +1,118 @@ +-- +-- MULTI_MULTIUSER_BASIC_QUERIES +-- + +SET ROLE full_access; + +-- Execute simple sum, average, and count queries on data recently uploaded to +-- our partitioned table. 
+ +SELECT count(*) FROM lineitem; + +SELECT sum(l_extendedprice) FROM lineitem; + +SELECT avg(l_extendedprice) FROM lineitem; + +RESET ROLE; + +-- and again, to check a read-only user can query +SET ROLE read_access; +SET citus.task_executor_type TO 'task-tracker'; +SELECT count(*) FROM lineitem; +RESET citus.task_executor_type; +SELECT count(*) FROM lineitem; + +-- and yet again, to prove we're failing when a user doesn't have permissions +SET ROLE no_access; +SET citus.task_executor_type TO 'task-tracker'; +SELECT count(*) FROM lineitem; +RESET citus.task_executor_type; +SELECT count(*) FROM lineitem; +RESET ROLE; + +-- verify that broadcast joins work +SET citus.large_table_shard_count TO 2; + +SET ROLE read_access; + +SELECT + l_partkey, o_orderkey, count(*) +FROM + lineitem, part, orders, customer +WHERE + l_orderkey = o_orderkey AND + l_partkey = p_partkey AND + c_custkey = o_custkey AND + (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND + p_size > 8 AND o_totalprice > 10 AND + c_acctbal < 5000.0 +GROUP BY + l_partkey, o_orderkey +ORDER BY + l_partkey, o_orderkey +LIMIT 30; +RESET ROLE; + +SET ROLE no_access; + +SELECT + l_partkey, o_orderkey, count(*) +FROM + lineitem, part, orders, customer +WHERE + l_orderkey = o_orderkey AND + l_partkey = p_partkey AND + c_custkey = o_custkey AND + (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND + p_size > 8 AND o_totalprice > 10 AND + c_acctbal < 5000.0 +GROUP BY + l_partkey, o_orderkey +ORDER BY + l_partkey, o_orderkey +LIMIT 30; +RESET ROLE; + +-- verify that re-partition queries work +SET citus.large_table_shard_count TO 1; +SET citus.task_executor_type TO 'task-tracker'; + +SET ROLE read_access; + +SELECT + l_partkey, o_orderkey, count(*) +FROM + lineitem, part, orders, customer +WHERE + l_orderkey = o_orderkey AND + l_partkey = p_partkey AND + c_custkey = o_custkey AND + (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND + p_size > 8 AND o_totalprice > 10 AND + c_acctbal < 5000.0 +GROUP BY + l_partkey, o_orderkey +ORDER BY + l_partkey, o_orderkey +LIMIT 30; +RESET ROLE; + +SET ROLE no_access; + +SELECT + l_partkey, o_orderkey, count(*) +FROM + lineitem, part, orders, customer +WHERE + l_orderkey = o_orderkey AND + l_partkey = p_partkey AND + c_custkey = o_custkey AND + (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND + p_size > 8 AND o_totalprice > 10 AND + c_acctbal < 5000.0 +GROUP BY + l_partkey, o_orderkey +ORDER BY + l_partkey, o_orderkey +LIMIT 30; +RESET ROLE; diff --git a/src/test/regress/sql/multi_multiuser_copy.sql b/src/test/regress/sql/multi_multiuser_copy.sql new file mode 100644 index 000000000..a0e219e7c --- /dev/null +++ b/src/test/regress/sql/multi_multiuser_copy.sql @@ -0,0 +1,65 @@ +-- +-- MULTI_MULTIUSER_COPY +-- + +-- Create a new hash-partitioned table into which to COPY +CREATE TABLE customer_copy_hash ( + c_custkey integer, + c_name varchar(25) not null, + c_address varchar(40), + c_nationkey integer, + c_phone char(15), + c_acctbal decimal(15,2), + c_mktsegment char(10), + c_comment varchar(117), + primary key (c_custkey)); +SELECT create_distributed_table('customer_copy_hash', 'c_custkey', 'hash'); +GRANT ALL ON TABLE customer_copy_hash TO full_access; +GRANT SELECT ON TABLE customer_copy_hash TO read_access; + +-- COPY FROM as superuser +COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; +1 customer1 +\. + +-- COPY FROM as user with ALL access +SET ROLE full_access; +COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; +2 customer2 +\. 
+;
+RESET ROLE;
+
+-- COPY FROM as user with SELECT access, should fail
+SET ROLE read_access;
+COPY customer_copy_hash (c_custkey,c_name) FROM STDIN;
+3 customer3
+\.
+;
+RESET ROLE;
+
+-- COPY FROM as user with no access, should fail
+SET ROLE no_access;
+COPY customer_copy_hash (c_custkey,c_name) FROM STDIN;
+4 customer4
+\.
+;
+RESET ROLE;
+
+
+-- COPY TO as superuser
+COPY (SELECT * FROM customer_copy_hash ORDER BY 1) TO STDOUT;
+
+SET ROLE full_access;
+COPY (SELECT * FROM customer_copy_hash ORDER BY 1) TO STDOUT;
+RESET ROLE;
+
+-- COPY TO as user with SELECT access, should work
+SET ROLE read_access;
+COPY (SELECT * FROM customer_copy_hash ORDER BY 1) TO STDOUT;
+RESET ROLE;
+
+-- COPY TO as user with no access, should fail
+SET ROLE no_access;
+COPY (SELECT * FROM customer_copy_hash ORDER BY 1) TO STDOUT;
+RESET ROLE;
diff --git a/src/test/regress/sql/multi_multiuser_grant.sql b/src/test/regress/sql/multi_multiuser_grant.sql
new file mode 100644
index 000000000..c565d3b0a
--- /dev/null
+++ b/src/test/regress/sql/multi_multiuser_grant.sql
@@ -0,0 +1,36 @@
+--
+-- MULTI_MULTIUSER_GRANT
+--
+-- check that after-the-fact REVOKE/GRANTs are taken into account
+--
+
+-- check that permissions are checked on the master and workers
+REVOKE ALL ON TABLE customer FROM full_access;
+SET ROLE full_access;
+SELECT count(*) FROM customer; -- should fail
+SELECT has_table_privilege('customer', 'SELECT');
+RESET ROLE;
+\c - - - :worker_1_port
+SET ROLE full_access;
+SELECT oid, relname, relacl FROM pg_class WHERE relkind = 'r' AND relname LIKE 'customer_%' AND has_table_privilege(oid, 'SELECT');
+SELECT count(*) FROM pg_class WHERE relkind = 'r' AND relname LIKE 'customer_%' AND NOT has_table_privilege(oid, 'SELECT');
+RESET ROLE;
+
+-- check that GRANT command obeys citus.enable_ddl_propagation setting
+\c - - - :master_port
+SET citus.enable_ddl_propagation TO 'off';
+GRANT ALL ON TABLE customer TO full_access;
+SET ROLE full_access;
+SELECT has_table_privilege('customer', 'SELECT'); -- should be true
+
+\c - - - :worker_1_port
+SET ROLE full_access;
+SELECT oid, relname, relacl FROM pg_class WHERE relkind = 'r' AND relname LIKE 'customer_%' AND has_table_privilege(oid, 'SELECT');
+SELECT count(*) FROM pg_class WHERE relkind = 'r' AND relname LIKE 'customer_%' AND NOT has_table_privilege(oid, 'SELECT');
+
+\c - - - :master_port
+SET citus.enable_ddl_propagation TO 'on';
+GRANT ALL ON TABLE customer TO full_access;
+SET ROLE full_access;
+SELECT count(*) FROM customer; -- should work again
+RESET ROLE;
diff --git a/src/test/regress/sql/multi_multiuser_master_protocol.sql b/src/test/regress/sql/multi_multiuser_master_protocol.sql
new file mode 100644
index 000000000..ff060cc06
--- /dev/null
+++ b/src/test/regress/sql/multi_multiuser_master_protocol.sql
@@ -0,0 +1,197 @@
+--
+-- MULTI_MULTIUSER_MASTER_PROTOCOL
+--
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 109079;
+
+-- Tests that check the metadata returned by the master node. At the
+-- same time ensure that any user, not just a superuser, can call
+-- these. Note that, for now at least, any user can call these. That's
+-- OK-ish, since the schema is visible from the catalogs anyway, and
+-- exhausting shardids doesn't seem like a super viable attack path.
+SET ROLE no_access; + +SELECT * FROM master_get_table_ddl_events('lineitem') order by 1; + +SELECT * FROM master_get_new_shardid(); + +SELECT * FROM master_get_active_worker_nodes(); + +RESET ROLE; + +-- ensure GRANT/REVOKE's do something sane for creating shards of +CREATE TABLE checkperm(key int); +SELECT master_create_distributed_table('checkperm', 'key', 'append'); +SELECT * FROM master_get_table_ddl_events('checkperm'); + +REVOKE ALL ON checkperm FROM PUBLIC; +SELECT * FROM master_get_table_ddl_events('checkperm'); + +GRANT SELECT ON checkperm TO read_access; +GRANT ALL ON checkperm TO full_access; +SELECT * FROM master_get_table_ddl_events('checkperm'); + +REVOKE ALL ON checkperm FROM read_access; +GRANT SELECT ON checkperm TO PUBLIC; +SELECT * FROM master_get_table_ddl_events('checkperm'); + +GRANT ALL ON checkperm TO full_access WITH GRANT OPTION; +SELECT * FROM master_get_table_ddl_events('checkperm'); + +-- create table as superuser/postgres +CREATE TABLE trivial_postgres (id int); +SELECT master_create_distributed_table('trivial_postgres', 'id', 'append'); +GRANT ALL ON trivial_postgres TO full_access; + +SET ROLE full_access; +CREATE TABLE trivial_full_access (id int); +SELECT master_create_distributed_table('trivial_full_access', 'id', 'append'); +RESET ROLE; + +SELECT relname, rolname, relacl FROM pg_class JOIN pg_roles ON (pg_roles.oid = pg_class.relowner) WHERE relname LIKE 'trivial%' ORDER BY relname; + +SET citus.shard_replication_factor = 2; -- on all workers... + +-- create shards as each user, verify ownership + +SELECT master_create_empty_shard('trivial_postgres'); +SELECT master_create_empty_shard('trivial_full_access'); +SET ROLE full_access; +SELECT master_create_empty_shard('trivial_postgres'); +SELECT master_create_empty_shard('trivial_full_access'); +RESET ROLE; + +SET ROLE full_access; +SELECT master_create_empty_shard('trivial_postgres'); +SELECT master_create_empty_shard('trivial_full_access'); +RESET ROLE; + +\c - - - :worker_1_port +SELECT relname, rolname, relacl FROM pg_class JOIN pg_roles ON (pg_roles.oid = pg_class.relowner) WHERE relname LIKE 'trivial%' ORDER BY relname; +\c - - - :worker_2_port +SELECT relname, rolname, relacl FROM pg_class JOIN pg_roles ON (pg_roles.oid = pg_class.relowner) WHERE relname LIKE 'trivial%' ORDER BY relname; +\c - - - :master_port + +-- ensure COPY into append tables works +CREATE TABLE stage_postgres(id) AS SELECT 2; +GRANT ALL ON stage_postgres TO full_access; +SET ROLE full_access; +CREATE TABLE stage_full_access(id) AS SELECT 1; +RESET ROLE; + +SELECT master_create_empty_shard('trivial_postgres') AS shardid \gset +COPY trivial_postgres FROM STDIN WITH (append_to_shard :shardid); +1 +2 +\. + +SELECT master_create_empty_shard('trivial_full_access') AS shardid \gset +COPY trivial_full_access FROM STDIN WITH (append_to_shard :shardid); +1 +2 +\. + +SET ROLE full_access; + +SELECT master_create_empty_shard('trivial_postgres') AS shardid \gset +COPY trivial_postgres FROM STDIN WITH (append_to_shard :shardid); +1 +2 +\. + +SELECT master_create_empty_shard('trivial_full_access') AS shardid \gset +COPY trivial_full_access FROM STDIN WITH (append_to_shard :shardid); +1 +2 +\. 
+RESET ROLE; + +SELECT * FROM trivial_postgres ORDER BY id; +SELECT * FROM trivial_full_access ORDER BY id; +SET ROLE full_access; +SELECT * FROM trivial_postgres ORDER BY id; +SELECT * FROM trivial_full_access ORDER BY id; +RESET ROLE; + +-- verify column level grants are not supported +GRANT UPDATE (id) ON trivial_postgres TO read_access; + +DROP TABLE trivial_full_access; +DROP TABLE trivial_postgres; +DROP TABLE stage_full_access; +DROP TABLE stage_postgres; + +-- test GRANT/REVOKE on all tables in schema +CREATE SCHEMA multiuser_schema; +CREATE TABLE multiuser_schema.hash_table(a int, b int); +CREATE TABLE multiuser_schema.reference_table(a int, b int); + +SELECT create_distributed_table('multiuser_schema.hash_table', 'a', colocate_with => 'none'); + + +-- usage right must be granted to user +GRANT USAGE ON SCHEMA multiuser_schema TO read_access; + +-- verify test user (read_access) does not have select privilege on both tables +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + +-- grant select +GRANT SELECT ON ALL TABLES IN SCHEMA multiuser_schema TO read_access; + +-- verify select is granted +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + +-- distribute the second table +SELECT create_reference_table('multiuser_schema.reference_table'); + +-- verify select is also granted +SELECT * FROM run_command_on_placements('multiuser_schema.reference_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + +-- create another table in the schema, verify select is not granted +CREATE TABLE multiuser_schema.another_table(a int, b int); +SELECT create_distributed_table('multiuser_schema.another_table', 'a', colocate_with => 'none'); + +SELECT * FROM run_command_on_placements('multiuser_schema.another_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + +-- grant select again, verify it is granted +GRANT SELECT ON ALL TABLES IN SCHEMA multiuser_schema TO read_access; +SELECT * FROM run_command_on_placements('multiuser_schema.another_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + +-- verify isolate tenant carries grants +SELECT isolate_tenant_to_new_shard('multiuser_schema.hash_table', 5); +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + +-- revoke select +REVOKE SELECT ON ALL TABLES IN SCHEMA multiuser_schema FROM read_access; +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + +-- test multi-schema grants +CREATE SCHEMA multiuser_second_schema; +CREATE TABLE multiuser_second_schema.hash_table(a int, b int); +SELECT create_distributed_table('multiuser_second_schema.hash_table', 'a'); + +GRANT ALL ON ALL TABLES IN SCHEMA multiuser_schema, multiuser_second_schema TO read_access; + +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; +SELECT * FROM 
run_command_on_placements('multiuser_second_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + +-- revoke from multiple schemas, verify result +REVOKE SELECT ON ALL TABLES IN SCHEMA multiuser_schema, multiuser_second_schema FROM read_access; + +SELECT * FROM run_command_on_placements('multiuser_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; +SELECT * FROM run_command_on_placements('multiuser_second_schema.hash_table', $$ select has_table_privilege('read_access', '%s', 'select') $$) +ORDER BY nodename, nodeport, shardid; + +DROP SCHEMA multiuser_schema CASCADE; +DROP SCHEMA multiuser_second_schema CASCADE; + diff --git a/src/test/regress/sql/multi_mx_add_coordinator.sql b/src/test/regress/sql/multi_mx_add_coordinator.sql index 338fbc651..47053cd28 100644 --- a/src/test/regress/sql/multi_mx_add_coordinator.sql +++ b/src/test/regress/sql/multi_mx_add_coordinator.sql @@ -3,7 +3,6 @@ SET search_path TO mx_add_coordinator,public; SET client_min_messages TO WARNING; CREATE USER reprefuser WITH LOGIN; -SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN'); SET citus.enable_alter_role_propagation TO ON; -- alter role for other than the extension owner works in enterprise, output differs accordingly ALTER ROLE reprefuser WITH CREATEDB; @@ -41,17 +40,24 @@ SELECT verify_metadata('localhost', :worker_1_port), CREATE TABLE ref(groupid int); SELECT create_reference_table('ref'); --- alter role from mx worker isn't propagated \c - - - :worker_1_port +-- alter role from mx worker isn't allowed when alter role propagation is on SET citus.enable_alter_role_propagation TO ON; ALTER ROLE reprefuser WITH CREATEROLE; -select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; +-- to alter role locally disable alter role propagation first +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE reprefuser WITH CREATEROLE; +SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; +RESET citus.enable_alter_role_propagation; + \c - - - :worker_2_port -select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; +-- show that altering role locally on worker doesn't propagated to other worker +SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; + \c - - - :master_port SET search_path TO mx_add_coordinator,public; -SET client_min_messages TO WARNING; -select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser'; +-- show that altering role locally on worker doesn't propagated to coordinator +SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; SET citus.log_local_commands TO ON; SET client_min_messages TO DEBUG; diff --git a/src/test/regress/sql/multi_mx_metadata.sql b/src/test/regress/sql/multi_mx_metadata.sql index 169f3aa8b..5ac5176c4 100644 --- a/src/test/regress/sql/multi_mx_metadata.sql +++ b/src/test/regress/sql/multi_mx_metadata.sql @@ -188,7 +188,6 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; \c - - - :master_port CREATE USER no_access_mx; -SELECT run_command_on_workers($$CREATE USER no_access_mx;$$); SET ROLE no_access_mx; diff --git a/src/test/regress/sql/multi_poolinfo_usage.sql b/src/test/regress/sql/multi_poolinfo_usage.sql new file mode 100644 index 000000000..1261c9965 --- /dev/null +++ b/src/test/regress/sql/multi_poolinfo_usage.sql @@ -0,0 +1,60 @@ +-- +-- MULTI_POOLINFO_USAGE +-- +-- Test 
pooler info logic +-- +-- Test of ability to override host/port for a node +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 20000000; + +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; +\gset +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; +\gset + +CREATE TABLE lotsa_connections (id integer, name text); +SELECT create_distributed_table('lotsa_connections', 'id'); + +INSERT INTO lotsa_connections VALUES (1, 'user'), (2, 'user'), (3, 'user'), (4, 'user'); + +SELECT COUNT(*) FROM lotsa_connections; + +-- put outright bad values +\set VERBOSITY terse +INSERT INTO pg_dist_poolinfo VALUES (:worker_1_id, 'host=failhost'), + (:worker_2_id, 'port=9999'); +\c + +-- supress OS specific error message +DO $$ +BEGIN + BEGIN + SELECT COUNT(*) FROM lotsa_connections; + EXCEPTION WHEN OTHERS THEN + IF SQLERRM LIKE 'connection to the remote node%%' THEN + RAISE 'failed to execute select'; + END IF; + END; +END; +$$; + +-- "re-route" worker one to node two and vice-versa +DELETE FROM pg_dist_poolinfo; +INSERT INTO pg_dist_poolinfo VALUES (:worker_1_id, 'port=' || :worker_2_port), + (:worker_2_id, 'port=' || :worker_1_port); +\c + +-- this fails because the shards of one worker won't exist on the other and shards +-- are still looked up using the node name, not the effective connection host +INSERT INTO lotsa_connections VALUES (1, 'user'), (2, 'user'), (3, 'user'), (4, 'user'); + +-- tweak poolinfo to use 127.0.0.1 instead of localhost; should work! +DELETE FROM pg_dist_poolinfo; +INSERT INTO pg_dist_poolinfo VALUES (:worker_1_id, 'host=127.0.0.1 port=' || :worker_1_port), + (:worker_2_id, 'host=127.0.0.1 port=' || :worker_2_port); +\c + +DELETE FROM lotsa_connections; +DROP TABLE lotsa_connections; + +DELETE FROM pg_dist_poolinfo; diff --git a/src/test/regress/sql/multi_real_time_transaction.sql b/src/test/regress/sql/multi_real_time_transaction.sql index f2d77e801..f5032f780 100644 --- a/src/test/regress/sql/multi_real_time_transaction.sql +++ b/src/test/regress/sql/multi_real_time_transaction.sql @@ -245,14 +245,10 @@ ROLLBACK; -- test propagation of SET LOCAL -- gonna need a non-superuser as we'll use RLS to test GUC propagation CREATE USER rls_user; -SELECT run_command_on_workers('CREATE USER rls_user'); GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user; GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user; -SELECT run_command_on_workers('GRANT ALL ON SCHEMA multi_real_time_transaction TO rls_user'); -SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user'); - -- create trigger on one worker to reject access if GUC not \c - - - :worker_1_port SET search_path = 'multi_real_time_transaction'; diff --git a/src/test/regress/sql/multi_router_planner.sql b/src/test/regress/sql/multi_router_planner.sql index a1bed0093..a8a2fd748 100644 --- a/src/test/regress/sql/multi_router_planner.sql +++ b/src/test/regress/sql/multi_router_planner.sql @@ -1190,6 +1190,7 @@ SET citus.enable_ddl_propagation TO off; CREATE USER router_user; GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; \c - - - :worker_1_port +SET citus.enable_ddl_propagation TO off; CREATE USER router_user; GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; \c - router_user - :master_port diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql index 
2f7e040e0..403a1180b 100644 --- a/src/test/regress/sql/multi_schema_support.sql +++ b/src/test/regress/sql/multi_schema_support.sql @@ -827,7 +827,7 @@ DROP SCHEMA "cItuS.T E E N'sSchema", "citus-teen's scnd schm." CASCADE; -- test schema propagation with user other than current user -SELECT run_command_on_coordinator_and_workers('CREATE USER "test-user"'); +CREATE USER "test-user"; SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE postgres to "test-user"'); CREATE SCHEMA schema_with_user AUTHORIZATION "test-user"; @@ -845,7 +845,7 @@ SET citus.next_shard_id TO 1197000; -- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock DROP OWNED BY "test-user" CASCADE; SELECT run_command_on_workers('DROP OWNED BY "test-user" CASCADE'); -SELECT run_command_on_coordinator_and_workers('DROP USER "test-user"'); +DROP USER "test-user"; DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text); diff --git a/src/test/regress/sql/multi_sequence_default.sql b/src/test/regress/sql/multi_sequence_default.sql index a185d5cff..3b1dd188b 100644 --- a/src/test/regress/sql/multi_sequence_default.sql +++ b/src/test/regress/sql/multi_sequence_default.sql @@ -277,8 +277,6 @@ CREATE ROLE seq_role_0; CREATE ROLE seq_role_1; ALTER SEQUENCE seq_10 OWNER TO seq_role_0; SELECT sequencename, sequenceowner FROM pg_sequences WHERE sequencename = 'seq_10' ORDER BY 1, 2; -SELECT run_command_on_workers('CREATE ROLE seq_role_0'); -SELECT run_command_on_workers('CREATE ROLE seq_role_1'); ALTER TABLE seq_test_9 ALTER COLUMN y SET DEFAULT nextval('seq_10'); ALTER SEQUENCE seq_10 OWNER TO seq_role_1; SELECT sequencename, sequenceowner FROM pg_sequences WHERE sequencename = 'seq_10' ORDER BY 1, 2; @@ -299,7 +297,6 @@ SET search_path = sequence_default, public; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); DROP SEQUENCE seq_10 CASCADE; DROP ROLE seq_role_0, seq_role_1; -SELECT run_command_on_workers('DROP ROLE IF EXISTS seq_role_0, seq_role_1'); -- Check some cases when default is defined by diff --git a/src/test/regress/sql/multi_tenant_isolation.sql b/src/test/regress/sql/multi_tenant_isolation.sql new file mode 100644 index 000000000..853f05a58 --- /dev/null +++ b/src/test/regress/sql/multi_tenant_isolation.sql @@ -0,0 +1,596 @@ +-- +-- MULTI_TENANT_ISOLATION +-- +-- Tests tenant isolation feature +-- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000; + +SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id +\gset +ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000; + + +CREATE SCHEMA "Tenant Isolation"; +SET search_path to "Tenant Isolation"; + +CREATE ROLE mx_isolation_role_ent WITH LOGIN; +GRANT ALL ON SCHEMA "Tenant Isolation", public TO mx_isolation_role_ent; + +-- connect with this new role +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; + +SET citus.shard_replication_factor TO 1; +SET citus.shard_count to 2; + +CREATE TABLE lineitem_streaming ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not 
null, + l_comment varchar(44) not null); +SELECT create_distributed_table('lineitem_streaming', 'l_orderkey'); + +CREATE TABLE orders_streaming ( + o_orderkey bigint not null primary key, + o_custkey integer not null, + o_orderstatus char(1) not null, + o_totalprice decimal(15,2) not null, + o_orderdate date not null, + o_orderpriority char(15) not null, + o_clerk char(15) not null, + o_shippriority integer not null, + o_comment varchar(79) not null); +SELECT create_distributed_table('orders_streaming', 'o_orderkey'); + +\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|' +99|87114|4639|1|10|11011.10|0.02|0.01|A|F|1994-05-18|1994-06-03|1994-05-23|COLLECT COD|RAIL|kages. requ +99|123766|3767|2|5|8948.80|0.02|0.07|R|F|1994-05-06|1994-05-28|1994-05-20|TAKE BACK RETURN|RAIL|ests cajole fluffily waters. blithe +99|134082|1622|3|42|46875.36|0.02|0.02|A|F|1994-04-19|1994-05-18|1994-04-20|NONE|RAIL|kages are fluffily furiously ir +99|108338|849|4|36|48467.88|0.09|0.02|A|F|1994-07-04|1994-04-17|1994-07-30|DELIVER IN PERSON|AIR|slyly. slyly e +100|62029|2030|1|28|27748.56|0.04|0.05|N|O|1998-05-08|1998-05-13|1998-06-07|COLLECT COD|TRUCK|sts haggle. slowl +100|115979|8491|2|22|43889.34|0.00|0.07|N|O|1998-06-24|1998-04-12|1998-06-29|DELIVER IN PERSON|SHIP|nto beans alongside of the fi +100|46150|8655|3|46|50422.90|0.03|0.04|N|O|1998-05-02|1998-04-10|1998-05-22|TAKE BACK RETURN|SHIP|ular accounts. even +100|38024|3031|4|14|13468.28|0.06|0.03|N|O|1998-05-22|1998-05-01|1998-06-03|COLLECT COD|MAIL|y. furiously ironic ideas gr +100|53439|955|5|37|51519.91|0.05|0.00|N|O|1998-03-06|1998-04-16|1998-03-31|TAKE BACK RETURN|TRUCK|nd the quickly s +101|118282|5816|1|49|63713.72|0.10|0.00|N|O|1996-06-21|1996-05-27|1996-06-29|DELIVER IN PERSON|REG AIR|ts +101|163334|883|2|36|50303.88|0.00|0.01|N|O|1996-05-19|1996-05-01|1996-06-04|DELIVER IN PERSON|AIR|tes. blithely pending dolphins x-ray f +101|138418|5958|3|12|17476.92|0.06|0.02|N|O|1996-03-29|1996-04-20|1996-04-12|COLLECT COD|MAIL|. quickly regular +102|88914|3931|1|37|70407.67|0.06|0.00|N|O|1997-07-24|1997-08-02|1997-08-07|TAKE BACK RETURN|SHIP|ully across the ideas. final deposit +102|169238|6787|2|34|44445.82|0.03|0.08|N|O|1997-08-09|1997-07-28|1997-08-26|TAKE BACK RETURN|SHIP|eposits cajole across +102|182321|4840|3|25|35083.00|0.01|0.01|N|O|1997-07-31|1997-07-24|1997-08-17|NONE|RAIL|bits. ironic accoun +102|61158|8677|4|15|16787.25|0.07|0.07|N|O|1997-06-02|1997-07-13|1997-06-04|DELIVER IN PERSON|SHIP|final packages. carefully even excu +103|194658|2216|1|6|10515.90|0.03|0.05|N|O|1996-10-11|1996-07-25|1996-10-28|NONE|FOB|cajole. carefully ex +103|10426|2928|2|37|49447.54|0.02|0.07|N|O|1996-09-17|1996-07-27|1996-09-20|TAKE BACK RETURN|MAIL|ies. quickly ironic requests use blithely +103|28431|8432|3|23|31266.89|0.01|0.04|N|O|1996-09-11|1996-09-18|1996-09-26|NONE|FOB|ironic accou +103|29022|4027|4|32|30432.64|0.01|0.07|N|O|1996-07-30|1996-08-06|1996-08-04|NONE|RAIL|kages doze. special, regular deposit +-1995148554|112942|2943|1|9|17594.46|0.04|0.04|N|O|1996-08-03|1996-05-31|1996-08-04|DELIVER IN PERSON|TRUCK|c realms print carefully car +-1686493264|15110|113|5|2|2050.22|0.03|0.08|R|F|1994-04-26|1994-03-15|1994-05-15|TAKE BACK RETURN|MAIL|e final, regular requests. carefully +\. + +\COPY orders_streaming FROM STDIN WITH DELIMITER '|' +99|890|F|108594.87|1994-03-13|4-NOT SPECIFIED|Clerk#000000973|0|e carefully ironic packages. 
pending +100|1471|O|198978.27|1998-02-28|4-NOT SPECIFIED|Clerk#000000577|0|heodolites detect slyly alongside of the ent +101|280|O|118448.39|1996-03-17|3-MEDIUM|Clerk#000000419|0|ding accounts above the slyly final asymptote +102|8|O|184806.58|1997-05-09|2-HIGH|Clerk#000000596|0| slyly according to the asymptotes. carefully final packages integrate furious +103|292|O|118745.16|1996-06-20|4-NOT SPECIFIED|Clerk#000000090|0|ges. carefully unusual instructions haggle quickly regular f +-1995148554|142|O|3553.15|1995-05-08|3-MEDIUM|Clerk#000000378|0|nts hinder fluffily ironic instructions. express, express excuses +-1686493264|878|O|177809.13|1997-09-05|3-MEDIUM|Clerk#000000379|0|y final packages. final foxes since the quickly even +\. + +ALTER TABLE lineitem_streaming ADD CONSTRAINT test_constraint + FOREIGN KEY(l_orderkey) REFERENCES orders_streaming(o_orderkey); + +-- test failing foreign constraints +\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|' +128|106828|9339|1|38|69723.16|0.06|0.01|A|F|1992-09-01|1992-08-27|1992-10-01|TAKE BACK RETURN|FOB| cajole careful +\. + +-- tests for cluster health +SELECT count(*) FROM lineitem_streaming; +SELECT count(*) FROM orders_streaming; + +SELECT + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate +FROM + orders_streaming, + lineitem_streaming +WHERE + l_orderkey = o_orderkey +GROUP BY + l_orderkey, + o_orderdate +ORDER BY + revenue DESC, + o_orderdate; + +-- Checks to see if metadata and data are isolated properly. If there are problems in +-- metadata and/or data on workers, these queries should return different results below +-- after tenant isolation operations are applied. +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 99; +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 100; +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101; +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 102; +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 103; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 99; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 100; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 101; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 102; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103; + +SELECT * FROM pg_dist_shard + WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass + ORDER BY shardminvalue::BIGINT, logicalrelid; + +-- check without cascade option +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100); + +-- check with an input not castable to bigint +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 'abc', 'CASCADE'); + +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE'); +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 101, 'CASCADE'); + +-- add an explain check to see if we hit the new isolated shard +EXPLAIN (COSTS false) SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101; + +-- create an MX node +\c - postgres - :master_port +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; + +-- test a failing transaction block +BEGIN; +SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE'); +SELECT isolate_tenant_to_new_shard('lineitem_streaming', 102, 'CASCADE'); +COMMIT; + +-- test a rollback transaction block +BEGIN; +SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 
'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE');
+ROLLBACK;
+
+-- test a successful transaction block
+BEGIN;
+SELECT isolate_tenant_to_new_shard('orders_streaming', 102, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 103, 'CASCADE');
+COMMIT;
+
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', 101, 'CASCADE');
+
+-- test corner cases: hash(-1995148554) = -2147483648 and hash(-1686493264) = 2147483647
+SELECT isolate_tenant_to_new_shard('lineitem_streaming', -1995148554, 'CASCADE');
+SELECT isolate_tenant_to_new_shard('orders_streaming', -1686493264, 'CASCADE');
+
+SELECT count(*) FROM orders_streaming WHERE o_orderkey = -1995148554;
+SELECT count(*) FROM orders_streaming WHERE o_orderkey = -1686493264;
+
+-- tests for cluster health
+SELECT count(*) FROM lineitem_streaming;
+SELECT count(*) FROM orders_streaming;
+
+SELECT
+    l_orderkey,
+    sum(l_extendedprice * (1 - l_discount)) as revenue,
+    o_orderdate
+FROM
+    orders_streaming,
+    lineitem_streaming
+WHERE
+    l_orderkey = o_orderkey
+GROUP BY
+    l_orderkey,
+    o_orderdate
+ORDER BY
+    revenue DESC,
+    o_orderdate;
+
+SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 99;
+SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 100;
+SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101;
+SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 102;
+SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 103;
+SELECT count(*) FROM orders_streaming WHERE o_orderkey = 99;
+SELECT count(*) FROM orders_streaming WHERE o_orderkey = 100;
+SELECT count(*) FROM orders_streaming WHERE o_orderkey = 101;
+SELECT count(*) FROM orders_streaming WHERE o_orderkey = 102;
+SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103;
+
+SELECT * FROM pg_dist_shard
+    WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass
+    ORDER BY shardminvalue::BIGINT, logicalrelid;
+
+SELECT * FROM pg_dist_shard_placement WHERE shardid >= 1230000 ORDER BY nodeport, shardid;
+
+-- test failing foreign constraints after multiple tenant isolation
+\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|'
+128|106828|9339|1|38|69723.16|0.06|0.01|A|F|1992-09-01|1992-08-27|1992-10-01|TAKE BACK RETURN|FOB| cajole careful
+\.
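+
+-- A minimal sketch, assuming the get_shard_id_for_distribution_column() UDF is
+-- available in this Citus version: look up the shard and hash range that one of
+-- the corner-case tenants isolated above now maps to.
+SELECT shardid, shardminvalue, shardmaxvalue
+FROM pg_dist_shard
+WHERE shardid = get_shard_id_for_distribution_column('orders_streaming', -1686493264::bigint);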
+ +-- connect to the worker node with metadata +\c - mx_isolation_role_ent - :worker_1_port +SET search_path to "Tenant Isolation"; + +-- check mx tables +SELECT count(*) FROM lineitem_streaming; +SELECT count(*) FROM orders_streaming; + +SELECT + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate +FROM + orders_streaming, + lineitem_streaming +WHERE + l_orderkey = o_orderkey +GROUP BY + l_orderkey, + o_orderdate +ORDER BY + revenue DESC, + o_orderdate; + +-- check shards +SET citus.override_table_visibility TO false; +\d + +\c - postgres - :worker_1_port +SET search_path to "Tenant Isolation"; +SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='orders_streaming_1230045'::regclass; + +\c - mx_isolation_role_ent - :worker_1_port +SET search_path to "Tenant Isolation"; + +-- check MX metadata +SELECT * FROM pg_dist_shard + WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass + ORDER BY shardminvalue::BIGINT, logicalrelid; + +-- return to master node +\c - mx_isolation_role_ent - :master_port + +-- test a distribution type which does not have a sql hash function +SET search_path to "Tenant Isolation"; + +SET citus.shard_replication_factor TO 2; +SET citus.shard_count to 2; + +CREATE TABLE lineitem_date ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +SELECT create_distributed_table('lineitem_date', 'l_shipdate'); + +\COPY lineitem_date FROM STDIN WITH DELIMITER '|' +390|106523|9034|1|10|15295.20|0.02|0.05|N|O|1998-05-26|1998-07-06|1998-06-23|TAKE BACK RETURN|SHIP| requests. final accounts x-ray beside the +1347|112077|4589|4|28|30493.96|0.01|0.00|N|O|1997-07-30|1997-07-22|1997-08-18|TAKE BACK RETURN|FOB|foxes after the blithely special i +1794|116434|1457|5|47|68170.21|0.10|0.06|N|O|1998-01-15|1997-11-30|1998-02-14|DELIVER IN PERSON|TRUCK| haggle slyly. furiously express orbit +1859|74969|4970|1|18|34991.28|0.10|0.00|N|O|1997-08-08|1997-06-30|1997-08-26|TAKE BACK RETURN|SHIP|e carefully a +\. 
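+
+-- A minimal sketch, assuming the same get_shard_id_for_distribution_column() UDF:
+-- the lookup also accepts non-integer tenant values, such as the date keys loaded
+-- above, before isolating them below.
+SELECT get_shard_id_for_distribution_column('lineitem_date', '1998-05-26'::date);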
+ +SELECT count(*) FROM lineitem_date; +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-05-26'; +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-07-30'; +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-01-15'; +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08'; + +SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-05-26'); +SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-07-30'); +SELECT isolate_tenant_to_new_shard('lineitem_date', '1998-01-15'); + +SELECT count(*) FROM lineitem_date; +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-05-26'; +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-07-30'; +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1998-01-15'; +SELECT count(*) FROM lineitem_date WHERE l_shipdate = '1997-08-08'; + +-- test with invalid shard placements +\c - postgres - :master_port +SET search_path to "Tenant Isolation"; + +UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_1_port; +SELECT isolate_tenant_to_new_shard('lineitem_date', '1997-08-08'); + +UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE nodeport = :worker_1_port; + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; +DROP TABLE lineitem_date; + +-- test on append distributed table +CREATE TABLE test_append ( + tenant_id integer +); + +SELECT create_distributed_table('test_append', 'tenant_id', 'append'); +SELECT isolate_tenant_to_new_shard('test_append', 100); + +-- check metadata for comparison +SELECT * FROM pg_dist_shard + WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass + ORDER BY shardminvalue::BIGINT, logicalrelid; + +-- test failure scenarios with triggers on workers +\c - postgres - :worker_1_port +SET search_path to "Tenant Isolation"; + +SET citus.enable_metadata_sync TO OFF; +CREATE OR REPLACE FUNCTION abort_any_command() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + RAISE EXCEPTION 'command % is disabled', tg_tag; +END; +$$; +RESET citus.enable_metadata_sync; + +CREATE EVENT TRIGGER abort_ddl ON ddl_command_end + EXECUTE PROCEDURE abort_any_command(); + +SET citus.override_table_visibility TO false; +\d + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; + +\set VERBOSITY terse +SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE'); + +\set VERBOSITY default + +\c - postgres - :worker_1_port +SET search_path to "Tenant Isolation"; + +SET citus.override_table_visibility TO false; +\d + +DROP EVENT TRIGGER abort_ddl; + +-- create a trigger for drops +SET citus.enable_metadata_sync TO OFF; +CREATE OR REPLACE FUNCTION abort_drop_command() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ +BEGIN + RAISE EXCEPTION 'command % is disabled', tg_tag; +END; +$$; +RESET citus.enable_metadata_sync; + +CREATE EVENT TRIGGER abort_drop ON sql_drop + EXECUTE PROCEDURE abort_drop_command(); + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; + +\set VERBOSITY terse +SELECT isolate_tenant_to_new_shard('orders_streaming', 104, 'CASCADE'); + +\set VERBOSITY default + +-- check if metadata is changed +SELECT * FROM pg_dist_shard + WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass + ORDER BY shardminvalue::BIGINT, logicalrelid; + +\c - - - :worker_1_port +SET search_path to "Tenant Isolation"; + +-- however, new tables are already created +SET 
citus.override_table_visibility TO false; +\d + +\c - postgres - :worker_1_port + +DROP EVENT TRIGGER abort_drop; + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; + +-- tests for cluster health +SELECT count(*) FROM lineitem_streaming; +SELECT count(*) FROM orders_streaming; + +SELECT + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate +FROM + orders_streaming, + lineitem_streaming +WHERE + l_orderkey = o_orderkey +GROUP BY + l_orderkey, + o_orderdate +ORDER BY + revenue DESC, + o_orderdate; + +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 99; +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 100; +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 101; +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 102; +SELECT count(*) FROM lineitem_streaming WHERE l_orderkey = 103; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 99; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 100; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 101; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 102; +SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103; + +-- test composite types with tenant isolation +set search_path to default; + +\c - postgres - :worker_1_port +SET search_path to "Tenant Isolation", public, pg_catalog; + +-- ... create a test HASH function. Though it is a poor hash function, +-- it is acceptable for our tests +SET citus.enable_metadata_sync TO OFF; +CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int +AS 'SELECT hashtext( ($1.i + $1.i2)::text);' +LANGUAGE SQL +IMMUTABLE +RETURNS NULL ON NULL INPUT; +RESET citus.enable_metadata_sync; + +CREATE OPERATOR CLASS cats_op_fam_class +DEFAULT FOR TYPE test_composite_type USING HASH AS +OPERATOR 1 = (test_composite_type, test_composite_type), +FUNCTION 1 test_composite_type_hash(test_composite_type); + +\c - - - :worker_2_port +SET search_path to "Tenant Isolation", public, pg_catalog; + +-- ... create a test HASH function. 
Though it is a poor hash function, +-- it is acceptable for our tests +SET citus.enable_metadata_sync TO OFF; +CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int +AS 'SELECT hashtext( ($1.i + $1.i2)::text);' +LANGUAGE SQL +IMMUTABLE +RETURNS NULL ON NULL INPUT; +RESET citus.enable_metadata_sync; + +CREATE OPERATOR CLASS cats_op_fam_class +DEFAULT FOR TYPE test_composite_type USING HASH AS +OPERATOR 1 = (test_composite_type, test_composite_type), +FUNCTION 1 test_composite_type_hash(test_composite_type); + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation", public, pg_catalog; + +CREATE TABLE composite_table ( + composite_key test_composite_type); + +SELECT create_distributed_table('composite_table', 'composite_key'); + +INSERT INTO composite_table VALUES ('(1, 2)'::test_composite_type); +INSERT INTO composite_table VALUES ('(1, 3)'::test_composite_type); +INSERT INTO composite_table VALUES ('(1, 4)'::test_composite_type); + +SELECT isolate_tenant_to_new_shard('composite_table', '(1, 3)'); + +SELECT count(*) FROM composite_table WHERE composite_key = '(1, 2)'::test_composite_type; +SELECT count(*) FROM composite_table WHERE composite_key = '(1, 3)'::test_composite_type; +SELECT count(*) FROM composite_table WHERE composite_key = '(1, 4)'::test_composite_type; + +DROP TABLE composite_table; + +-- create foreign keys from a reference and distributed table +-- to another distributed table +SET search_path to "Tenant Isolation", public, pg_catalog; +SET citus.shard_replication_factor TO 1; +SET citus.shard_count to 8; +CREATE TABLE test_reference_table_fkey(id int PRIMARY KEY); +SELECT create_reference_table('test_reference_table_fkey'); + +CREATE TABLE test_colocated_table_1(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES test_colocated_table_1(id)); +SELECT create_distributed_table('test_colocated_table_1', 'id', colocate_with => 'NONE'); + +CREATE TABLE test_colocated_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id)); +SELECT create_distributed_table('test_colocated_table_2', 'id', colocate_with => 'test_colocated_table_1'); + +CREATE TABLE test_colocated_table_3(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id), FOREIGN KEY(id) REFERENCES test_colocated_table_2(id)); +SELECT create_distributed_table('test_colocated_table_3', 'id', colocate_with => 'test_colocated_table_1'); + +INSERT INTO test_reference_table_fkey SELECT i FROM generate_series (0, 100) i; +INSERT INTO test_colocated_table_1 SELECT i, i FROM generate_series (0, 100) i; +INSERT INTO test_colocated_table_2 SELECT i, i FROM generate_series (0, 100) i; +INSERT INTO test_colocated_table_3 SELECT i, i FROM generate_series (0, 100) i; + +SELECT isolate_tenant_to_new_shard('test_colocated_table_2', 1, 'CASCADE'); + +SELECT count(*) FROM test_colocated_table_2; + +\c - postgres - :worker_1_port + +-- show the foreign keys of the main table & its colocated shard on other tables +SELECT tbl.relname, fk."Constraint", fk."Definition" +FROM pg_catalog.pg_class tbl +JOIN public.table_fkeys fk on tbl.oid = fk.relid +WHERE tbl.relname like 'test_colocated_table_%' +ORDER BY 1, 2; + +\c - mx_isolation_role_ent - :master_port + +SET search_path to "Tenant Isolation"; + +-- +-- Make sure that isolate_tenant_to_new_shard() replicats reference tables +-- when 
replicate_reference_tables_on_activate is off. +-- + + +CREATE TABLE ref_table(a int); +SELECT create_reference_table('ref_table'); + +\c - postgres - :master_port +SET search_path to "Tenant Isolation"; + +SET citus.replicate_reference_tables_on_activate TO off; +SET client_min_messages TO WARNING; + +SELECT 1 FROM master_add_node('localhost', :master_port, groupId=>0); + +SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass; + +\c - mx_isolation_role_ent - :master_port +SET search_path to "Tenant Isolation"; + +SELECT 1 FROM isolate_tenant_to_new_shard('test_colocated_table_2', 2, 'CASCADE'); + +SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass; + +\c - postgres - :master_port +SELECT 1 FROM master_remove_node('localhost', :master_port); +SET client_min_messages TO WARNING; +DROP SCHEMA "Tenant Isolation" CASCADE; + +-- stop & resync and stop syncing metadata +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); + +-- restart metadata sync for rest of the tests +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- make sure there are no tables with non-zero colocationid +SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0; +TRUNCATE TABLE pg_catalog.pg_dist_colocation; +ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1; + +ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; diff --git a/src/test/regress/sql/multi_transaction_recovery.sql b/src/test/regress/sql/multi_transaction_recovery.sql index 39b90ce3d..5156c83e3 100644 --- a/src/test/regress/sql/multi_transaction_recovery.sql +++ b/src/test/regress/sql/multi_transaction_recovery.sql @@ -197,12 +197,12 @@ SELECT shardid INTO selected_shard FROM pg_dist_shard WHERE logicalrelid='test_2 SELECT COUNT(*) FROM pg_dist_transaction; BEGIN; SET LOCAL citus.defer_drop_after_shard_move TO OFF; -SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode := 'block_writes'); COMMIT; SELECT COUNT(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); -SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :worker_2_port, 'localhost', :worker_1_port, shard_transfer_mode := 'block_writes'); -- for the following test, ensure that 6 and 7 go to different shards on different workers diff --git a/src/test/regress/sql/multi_transactional_drop_shards.sql b/src/test/regress/sql/multi_transactional_drop_shards.sql index 25c69031e..dd7ba43a5 100644 --- a/src/test/regress/sql/multi_transactional_drop_shards.sql +++ b/src/test/regress/sql/multi_transactional_drop_shards.sql @@ -392,7 +392,6 @@ DROP TABLE transactional_drop_shards, transactional_drop_reference; -- test DROP TABLE as a non-superuser in a transaction block CREATE USER try_drop_table WITH LOGIN; -SELECT run_command_on_workers('CREATE USER try_drop_table WITH LOGIN'); GRANT ALL ON SCHEMA public TO try_drop_table; \c - try_drop_table - :master_port diff --git 
a/src/test/regress/sql/multi_utility_warnings.sql b/src/test/regress/sql/multi_utility_warnings.sql index 3a7b0a910..282641aeb 100644 --- a/src/test/regress/sql/multi_utility_warnings.sql +++ b/src/test/regress/sql/multi_utility_warnings.sql @@ -3,7 +3,7 @@ -- -- Tests to check if we inform the user about potential caveats of creating new --- databases, schemas, and roles. +-- databases, schemas. SET citus.next_shard_id TO 1010000; @@ -11,12 +11,6 @@ SET citus.next_shard_id TO 1010000; CREATE DATABASE new_database; -CREATE ROLE new_role; - -CREATE USER new_user; - -INSERT INTO pg_dist_authinfo VALUES (0, 'new_user', 'password=1234'); - BEGIN; INSERT INTO pg_dist_node VALUES (1234567890, 1234567890, 'localhost', 5432); INSERT INTO pg_dist_poolinfo VALUES (1234567890, 'port=1234'); diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql index d64076b26..bda6c5b0d 100644 --- a/src/test/regress/sql/pg14.sql +++ b/src/test/regress/sql/pg14.sql @@ -71,8 +71,8 @@ SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regcla -- test column compression propagation in rebalance SELECT shardid INTO moving_shard FROM citus_shards WHERE table_name='col_compression'::regclass AND nodeport=:worker_1_port LIMIT 1; -SELECT citus_move_shard_placement((SELECT * FROM moving_shard), :'public_worker_1_host', :worker_1_port, :'public_worker_2_host', :worker_2_port); -SELECT rebalance_table_shards('col_compression', rebalance_strategy := 'by_shard_count'); +SELECT citus_move_shard_placement((SELECT * FROM moving_shard), :'public_worker_1_host', :worker_1_port, :'public_worker_2_host', :worker_2_port, shard_transfer_mode := 'block_writes'); +SELECT rebalance_table_shards('col_compression', rebalance_strategy := 'by_shard_count', shard_transfer_mode := 'block_writes'); CALL citus_cleanup_orphaned_shards(); SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY( SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1 @@ -394,10 +394,7 @@ CREATE TABLE st1 (a int, b int); CREATE STATISTICS role_s1 ON a, b FROM st1; SELECT create_distributed_table('st1','a'); ALTER STATISTICS role_s1 OWNER TO CURRENT_ROLE; -SET citus.enable_ddl_propagation TO off; -- for enterprise CREATE ROLE role_1 WITH LOGIN SUPERUSER; -SET citus.enable_ddl_propagation TO on; -SELECT run_command_on_workers($$CREATE ROLE role_1 WITH LOGIN SUPERUSER;$$); ALTER STATISTICS role_s1 OWNER TO CURRENT_ROLE; SELECT run_command_on_workers($$SELECT rolname FROM pg_roles WHERE oid IN (SELECT stxowner FROM pg_statistic_ext WHERE stxname LIKE 'role\_s1%');$$); SET ROLE role_1; diff --git a/src/test/regress/sql/shard_move_constraints.sql b/src/test/regress/sql/shard_move_constraints.sql new file mode 100644 index 000000000..b66dd84b9 --- /dev/null +++ b/src/test/regress/sql/shard_move_constraints.sql @@ -0,0 +1,269 @@ +CREATE SCHEMA "shard Move Fkeys Indexes"; +SET search_path TO "shard Move Fkeys Indexes"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; + +-- contrib module required to havve GIST exclude constraints +CREATE EXTENSION btree_gist; + +-- create a non-superuser role +CREATE ROLE mx_rebalancer_role_ent WITH LOGIN; +GRANT ALL ON SCHEMA "shard Move Fkeys Indexes" TO mx_rebalancer_role_ent; + +-- connect with this new role +\c - mx_rebalancer_role_ent - :master_port +SET search_path TO "shard Move Fkeys 
Indexes"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; + +CREATE TABLE sensors( +measureid integer, +eventdatetime date, +measure_data jsonb, +PRIMARY KEY (measureid, eventdatetime, measure_data)) +PARTITION BY RANGE(eventdatetime); + +CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01'); +CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +CREATE TABLE sensors_news PARTITION OF sensors FOR VALUES FROM ('2020-05-01') TO ('2025-01-01'); + +CREATE INDEX index_on_parent ON sensors(lower(measureid::text)); +CREATE INDEX index_on_child ON sensors_2020_01_01(lower(measure_data::text)); +CREATE INDEX hash_index ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime); + +CREATE STATISTICS s1 (dependencies) ON measureid, eventdatetime FROM sensors; +CREATE STATISTICS s2 (dependencies) ON measureid, eventdatetime FROM sensors_2020_01_01; + +ALTER INDEX index_on_parent ALTER COLUMN 1 SET STATISTICS 1000; +ALTER INDEX index_on_child ALTER COLUMN 1 SET STATISTICS 1000; + +CLUSTER sensors_2020_01_01 USING index_on_child; +SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); + +-- due to https://github.com/citusdata/citus/issues/5121 +\c - postgres - :master_port +SET search_path TO "shard Move Fkeys Indexes"; + +SELECT update_distributed_table_colocation('sensors_old', 'sensors'); +SELECT update_distributed_table_colocation('sensors_2020_01_01', 'sensors'); +SELECT update_distributed_table_colocation('sensors_news', 'sensors'); + +\c - mx_rebalancer_role_ent - :master_port +SET search_path TO "shard Move Fkeys Indexes"; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 8970016; +SET citus.next_placement_id TO 8770016; + +-- create a colocated distributed tables and create foreign keys FROM/TO +-- the partitions +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); + +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; + +CREATE TABLE colocated_partitioned_table( + measureid integer, + eventdatetime date, + PRIMARY KEY (measureid, eventdatetime)) +PARTITION BY RANGE(eventdatetime); + +CREATE TABLE colocated_partitioned_table_2020_01_01 PARTITION OF colocated_partitioned_table FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +SELECT create_distributed_table('colocated_partitioned_table', 'measureid', colocate_with:='sensors'); + +CLUSTER colocated_partitioned_table_2020_01_01 USING colocated_partitioned_table_2020_01_01_pkey; + +CREATE TABLE reference_table (measureid integer PRIMARY KEY); +SELECT create_reference_table('reference_table'); + +-- this table is used to make sure that index backed +-- replica identites can have clustered indexes +-- and no index statistics +CREATE TABLE index_backed_rep_identity(key int NOT NULL); +CREATE UNIQUE INDEX uqx ON index_backed_rep_identity(key); +ALTER TABLE index_backed_rep_identity REPLICA IDENTITY USING INDEX uqx; +CLUSTER index_backed_rep_identity USING uqx; +SELECT create_distributed_table('index_backed_rep_identity', 'key', colocate_with:='sensors'); + +-- from parent to regular dist +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_dist FOREIGN KEY 
(measureid) REFERENCES colocated_dist_table(measureid); + +-- from parent to parent +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_parent FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table(measureid, eventdatetime); + +-- from parent to child +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_child FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid, eventdatetime); + +-- from parent to reference table +-- enable this test when https://github.com/citusdata/citus-enterprise/issues/284 is fixed +-- ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); + + +-- from child to regular dist +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); + +-- from child to parent +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_parent FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table(measureid,eventdatetime); + +-- from child to child +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_child FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid,eventdatetime); + +-- from child to reference table +-- enable this test when https://github.com/citusdata/citus-enterprise/issues/284 is fixed +-- ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); + +-- load some data +INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i; +INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; + +\c - postgres - :worker_1_port +SET search_path TO "shard Move Fkeys Indexes", public, pg_catalog; + +-- show the current state of the constraints +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_8970000'::regclass ORDER BY 1,2; +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_2020_01_01_8970008'::regclass ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_8970000' ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_2020_01_01_8970008' ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='index_backed_rep_identity_8970029' ORDER BY 1,2; +SELECT indisclustered FROM pg_index where indisclustered AND indrelid = 'index_backed_rep_identity_8970029'::regclass; + +SELECT stxname FROM pg_statistic_ext +WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('shard Move Fkeys Indexes') +) +ORDER BY stxname ASC; + +SELECT count(*) FROM pg_index +WHERE indisclustered + and +indrelid IN +('sensors_2020_01_01_8970008'::regclass, 'colocated_dist_table_8970016'::regclass, 'colocated_partitioned_table_2020_01_01_8970024'::regclass); +\c - - - :master_port +-- make sure that constraints are moved sanely with logical replication +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); +CALL citus_cleanup_orphaned_shards(); + + +\c - postgres - :worker_2_port +SET search_path TO "shard Move Fkeys Indexes", public, pg_catalog; +SELECT "Constraint", "Definition" FROM table_fkeys WHERE 
relid='sensors_8970000'::regclass ORDER BY 1,2; +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_2020_01_01_8970008'::regclass ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_8970000' ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_2020_01_01_8970008' ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='index_backed_rep_identity_8970029' ORDER BY 1,2; +SELECT indisclustered FROM pg_index where indisclustered AND indrelid = 'index_backed_rep_identity_8970029'::regclass; + +SELECT stxname FROM pg_statistic_ext +WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('shard Move Fkeys Indexes') +) +ORDER BY stxname ASC; + +SELECT count(*) FROM pg_index +WHERE indisclustered + and +indrelid IN +('sensors_2020_01_01_8970008'::regclass, 'colocated_dist_table_8970016'::regclass, 'colocated_partitioned_table_2020_01_01_8970024'::regclass); + +\c - mx_rebalancer_role_ent - :master_port +-- verify that the data is consistent +SET search_path TO "shard Move Fkeys Indexes"; +SELECT count(*) FROM reference_table; +SELECT count(*) FROM colocated_partitioned_table; +SELECT count(*) FROM colocated_dist_table; +SELECT count(*) FROM sensors; + +-- we should be able to change/drop constraints +ALTER INDEX index_on_parent RENAME TO index_on_parent_renamed; +ALTER INDEX index_on_child RENAME TO index_on_child_renamed; + +ALTER INDEX index_on_parent_renamed ALTER COLUMN 1 SET STATISTICS 200; +ALTER INDEX index_on_child_renamed ALTER COLUMN 1 SET STATISTICS 200; + +DROP STATISTICS s1,s2; + +DROP INDEX index_on_parent_renamed; +DROP INDEX index_on_child_renamed; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_dist; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_parent; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_child; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_dist; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_parent; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_child; + +-- another test with multiple constraints backed by indexes +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +create table multiple_unique_keys( + key int primary key, a int, b int, c int, d int, e int, f int, g int, h int, i int, + UNIQUE(key,a), UNIQUE(key,b),UNIQUE(key,c),UNIQUE(key,d),UNIQUE(key,e),UNIQUE(key,f),UNIQUE(key,g),UNIQUE(key,h), + UNIQUE(key,i),UNIQUE(key,a,b),UNIQUE(key,a,c),UNIQUE(key,a,d),UNIQUE(key,a,e),UNIQUE(key,a,f),UNIQUE(key,a,h), + UNIQUE(key,a,h), + + EXCLUDE USING gist (a WITH =, key WITH =), + EXCLUDE USING gist (b WITH =, key WITH =), + EXCLUDE USING gist (c WITH =, key WITH =), + EXCLUDE USING gist (d WITH =, key WITH =), + EXCLUDE USING gist (e WITH =, key WITH =), + EXCLUDE USING gist (f WITH =, key WITH =), + EXCLUDE USING gist (g WITH =, key WITH =), + EXCLUDE USING gist (h WITH =, key WITH =), + EXCLUDE USING gist (i WITH =, key WITH =) +); + + +CREATE UNIQUE INDEX i1 ON multiple_unique_keys(key); +CREATE UNIQUE INDEX i2 ON multiple_unique_keys(key,b); +CREATE UNIQUE INDEX i3 ON multiple_unique_keys(key,c); +CREATE UNIQUE INDEX i4 ON multiple_unique_keys(key,d); +CREATE UNIQUE INDEX i5 ON multiple_unique_keys(key,a); +CREATE UNIQUE INDEX i6 ON multiple_unique_keys(key,a); +CREATE UNIQUE INDEX i7 ON multiple_unique_keys(key,a); +CREATE UNIQUE INDEX i8 ON multiple_unique_keys(key,a); +CREATE UNIQUE INDEX i9 ON multiple_unique_keys(key,a); +CREATE UNIQUE 
INDEX i10 ON multiple_unique_keys(key,b,c); + +CREATE INDEX ii1 ON multiple_unique_keys(a); +CREATE INDEX ii2 ON multiple_unique_keys(b); +CREATE INDEX ii3 ON multiple_unique_keys(c); +CREATE INDEX ii4 ON multiple_unique_keys(d); +CREATE INDEX ii5 ON multiple_unique_keys(e); +CREATE INDEX ii6 ON multiple_unique_keys(f); +CREATE INDEX ii7 ON multiple_unique_keys(g); +CREATE INDEX ii8 ON multiple_unique_keys(h); +CREATE INDEX ii9 ON multiple_unique_keys(i); +CREATE INDEX ii10 ON multiple_unique_keys(a,b,c); + +-- distribute table and load some data +SELECT create_distributed_table('multiple_unique_keys', 'key', colocate_with:='sensors'); +INSERT INTO multiple_unique_keys SELECT i,i,i,i,i,i,i,i,i FROM generate_series(0,1000)i; + +-- make sure that both online and offline rebalance operations succeed +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'block_writes'); + +-- even on another schema +SET search_path TO public; +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'block_writes'); + +\c - postgres - :master_port + +-- stop and re-sync the metadata to make sure all works fine +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); + +DROP SCHEMA "shard Move Fkeys Indexes" CASCADE; diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql index bad28f1e9..b16356a4a 100644 --- a/src/test/regress/sql/shard_rebalancer.sql +++ b/src/test/regress/sql/shard_rebalancer.sql @@ -373,7 +373,7 @@ SELECT * FROM replication_test_table_placements_per_node; -- Replicate the remaining under-replicated shards -SELECT replicate_table_shards('replication_test_table'); +SELECT replicate_table_shards('replication_test_table', shard_transfer_mode:='block_writes'); SELECT * FROM replication_test_table_placements_per_node; @@ -499,6 +499,7 @@ SET ROLE testrole; SELECT rebalance_table_shards('rebalance_test_table', shard_transfer_mode:='block_writes'); RESET ROLE; +CALL citus_cleanup_orphaned_shards(); -- Confirm no moves took place at all during these errors SELECT * FROM table_placements_per_node; CALL citus_cleanup_orphaned_shards(); @@ -519,7 +520,7 @@ SELECT * FROM table_placements_per_node; -- Move the remaining shards using threshold=0 -SELECT rebalance_table_shards('rebalance_test_table', threshold := 0); +SELECT rebalance_table_shards('rebalance_test_table', threshold := 0, shard_transfer_mode:='block_writes'); CALL citus_cleanup_orphaned_shards(); SELECT * FROM table_placements_per_node; @@ -640,10 +641,6 @@ SELECT * FROM public.table_placements_per_node; -- Row count in imbalanced table before rebalance SELECT COUNT(*) FROM imbalanced_table; --- Try force_logical -SELECT rebalance_table_shards('imbalanced_table', threshold:=0, shard_transfer_mode:='force_logical'); -CALL citus_cleanup_orphaned_shards(); - -- Test rebalance operation SELECT rebalance_table_shards('imbalanced_table', threshold:=0, shard_transfer_mode:='block_writes'); CALL citus_cleanup_orphaned_shards(); @@ -826,6 +823,16 @@ SELECT * FROM 
public.table_placements_per_node; -- Drop some tables for clear consistent error DROP TABLE test_schema_support.colocated_rebalance_test2; +-- testing behaviour when a transfer fails while using master_drain_node +SELECT * from master_drain_node('localhost', :worker_2_port); +-- Make sure shouldhaveshards is false +select shouldhaveshards from pg_dist_node where nodeport = :worker_2_port; +-- Make sure no shards were actually moved +SELECT * FROM public.table_placements_per_node; + +-- Make it a data node again +SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); + -- Leave no trace on workers RESET search_path; @@ -1324,6 +1331,7 @@ INSERT INTO r2 VALUES (1,2), (3,4); SELECT 1 from master_add_node('localhost', :worker_2_port); +-- since r2 has no replica identity we expect an error here SELECT rebalance_table_shards(); CALL citus_cleanup_orphaned_shards(); diff --git a/src/test/regress/sql/single_node_enterprise.sql b/src/test/regress/sql/single_node_enterprise.sql new file mode 100644 index 000000000..8c7764835 --- /dev/null +++ b/src/test/regress/sql/single_node_enterprise.sql @@ -0,0 +1,328 @@ +-- we already have lots of tests targeting +-- single node citus clusters in sql/single_node.sql +-- in this file, we are testing enterprise features +CREATE SCHEMA single_node_ent; +SET search_path TO single_node_ent; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 90730500; + +-- idempotently add node to allow this test to run without add_coordinator +SET client_min_messages TO WARNING; +SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); +RESET client_min_messages; + +SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); + +CREATE USER full_access_single_node; +CREATE USER read_access_single_node; +CREATE USER no_access_single_node; + +CREATE TYPE new_type AS (n int, m text); +CREATE TABLE test(x int, y int, z new_type); +SELECT create_distributed_table('test','x'); + +CREATE TABLE ref(a int, b int); +SELECT create_reference_table('ref'); + +-- we want to test replicate_table_shards() +-- which requires statement-based replication +CREATE TABLE statement_replicated(a int PRIMARY KEY); +SELECT create_distributed_table('statement_replicated','a', colocate_with:='none'); +UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='statement_replicated'::regclass; + +-- We create this function to make sure +-- GRANT ALL ON ALL FUNCTIONS IN SCHEMA doesn't get stuck. 
+CREATE FUNCTION notice(text) +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE '%', $1; +END; +$$; +SELECT create_distributed_function('notice(text)'); + +-- allow access to various users +GRANT ALL ON TABLE test,ref TO full_access_single_node; +GRANT USAGE ON SCHEMA single_node_ent TO full_access_single_node; +ALTER ROLE full_access_single_node WITH LOGIN; +GRANT ALL PRIVILEGES ON DATABASE postgres TO no_access_single_node; +REVOKE ALL PRIVILEGES ON DATABASE postgres FROM no_access_single_node; +REVOKE USAGE, CREATE ON SCHEMA single_node_ent FROM no_access_single_node; +GRANT SELECT ON ref,test TO no_access_single_node; +REVOKE SELECT ON ref,test FROM no_access_single_node; + +-- we have to use local execution, otherwise we hit a known issue +-- (see https://github.com/citusdata/citus-enterprise/issues/474) +-- to force local execution, use a transaction block +BEGIN; +GRANT USAGE ON SCHEMA single_node_ent TO read_access_single_node; +GRANT SELECT ON ALL TABLES IN SCHEMA single_node_ent TO read_access_single_node; +COMMIT; + +-- revoke SELECT access for the next 3-4 tests +REVOKE SELECT ON test FROM read_access_single_node; + +-- Make sure the access is revoked +SET ROLE read_access_single_node; +SELECT COUNT(*) FROM test; + +SET ROLE postgres; + +BEGIN; +GRANT SELECT ON ALL TABLES IN SCHEMA single_node_ent TO read_access_single_node; +-- Make sure we can now read as read_access_single_node role +SET ROLE read_access_single_node; +SELECT COUNT(*) FROM test; +SET ROLE postgres; + +-- Make sure REVOKE .. IN SCHEMA also works +REVOKE SELECT ON ALL TABLES IN SCHEMA single_node_ent FROM read_access_single_node; +SET ROLE read_access_single_node; +SELECT COUNT(*) FROM test; +ROLLBACK; + +GRANT ALL ON ALL FUNCTIONS IN SCHEMA single_node_ent to full_access_single_node; +GRANT SELECT ON ALL TABLES IN SCHEMA single_node_ent TO read_access_single_node; +GRANT SELECT ON test, ref TO read_access_single_node; +SET ROLE read_access_single_node; +-- Make sure we can now read as read_access_single_node role +SELECT COUNT(*) FROM test; + +SET ROLE full_access_single_node; + +INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type); + +-- should fail as only read access is allowed +SET ROLE read_access_single_node; +INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type); + +SELECT nodeid AS coordinator_node_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :master_port +\gset + + +-- pg_dist_poolinfo should work fine for coordinator +-- put outright bad values +SET ROLE postgres; +INSERT INTO pg_dist_poolinfo VALUES (:coordinator_node_id, 'host=failhost'); +\c +SET search_path TO single_node_ent; + +\set VERBOSITY terse + +-- suppress OS-specific error message +DO $$ +BEGIN + BEGIN + -- we want to force remote execution + SET LOCAL citus.enable_local_execution TO false; + SELECT COUNT(*) FROM test; + EXCEPTION WHEN OTHERS THEN + IF SQLERRM LIKE 'connection to the remote node%%' THEN + RAISE 'failed to execute select'; + END IF; + END; +END; +$$; + +TRUNCATE pg_dist_poolinfo; + +-- using 127.0.0.1 should work fine +INSERT INTO pg_dist_poolinfo VALUES (:coordinator_node_id, 'host=127.0.0.1 port=' || :master_port); +\c +SET search_path TO single_node_ent; +SET citus.log_remote_commands TO ON; +SET client_min_messages TO DEBUG1; +-- force multi-shard query to be able to +-- have remote connections +SELECT COUNT(*) FROM test WHERE x = 1 OR x = 2; +RESET citus.log_remote_commands; +RESET client_min_messages; +TRUNCATE pg_dist_poolinfo; + +-- reconnect +\c +SET search_path TO 
single_node_ent; + +-- now, create a colocated table +-- add a new node, and move the +-- shards to the new node +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 90731500; + +CREATE TABLE colocated_table (x int PRIMARY KEY, y int); +SELECT create_distributed_table('colocated_table','x', colocate_with:='single_node_ent.test'); + +-- create some foreign keys +TRUNCATE test, ref; + +ALTER TABLE test ADD CONSTRAINT p_key PRIMARY KEY(x); +ALTER TABLE ref ADD CONSTRAINT p_key_2 PRIMARY KEY(a); + +ALTER TABLE colocated_table ADD CONSTRAINT fkey FOREIGN KEY (x) REFERENCES test(x); +ALTER TABLE test ADD CONSTRAINT fkey FOREIGN KEY (x) REFERENCES ref(a); + +-- load some data +INSERT INTO ref SELECT i, i*2 FROM generate_series(0,50)i; +INSERT INTO test SELECT i, i*2, (i, 'citus' || i)::new_type FROM generate_series(0,50)i; +INSERT INTO colocated_table SELECT i, i*2 FROM generate_series(0,50)i; + +-- run a very basic query +SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); + +CREATE VIEW view_created_before_shard_moves AS + SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); + +SELECT * FROM view_created_before_shard_moves; + +-- show that tenant isolation works fine +SELECT isolate_tenant_to_new_shard('test', 5, 'CASCADE'); + +-- in the first iteration, have a +-- hybrid cluster, meaning that +-- the shards exist on both the coordinator +-- and on the workers +SELECT 1 FROM master_add_node('localhost', :worker_1_port); + +-- make sure that we can replicate tables as well +select replicate_table_shards('statement_replicated', shard_replication_factor:=2, shard_transfer_mode:='block_writes'); +-- we don't need the table anymore, it complicates the output of rebalances +DROP TABLE statement_replicated; + +-- move 1 set of colocated shards in non-blocking mode +-- and the other in block_writes +SELECT rebalance_table_shards(max_shard_moves:=1); +SELECT rebalance_table_shards(shard_transfer_mode:='block_writes'); + +-- should fail as only read access is allowed +SET ROLE read_access_single_node; +INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type); + +SET ROLE postgres; +\c +SET search_path TO single_node_ent; + +-- the same query should work +SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); + +-- make sure that the composite type is created +-- on the worker +SELECT * FROM test ORDER BY 1 DESC, 2, 3 LIMIT 1; + +-- make sure that we can execute with intermediate +-- results that are needed on all shards on the +-- final step +WITH cte_1 AS (SELECT * FROM test ORDER BY 1 DESC, 2, 3 LIMIT 5) +SELECT count(*) FROM colocated_table JOIN cte_1 USING (x); + +-- make sure that we can still query the view +SELECT * FROM view_created_before_shard_moves; + +-- we should be able to switch the cluster to CitusMX +SELECT start_metadata_sync_to_node('localhost', :master_port); +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- sanity-check: the same queries should work +SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); +SELECT * FROM test ORDER BY 1 DESC,2,3 LIMIT 1; +WITH cte_1 AS (SELECT * FROM test ORDER BY 1 DESC,2,3 LIMIT 5) +SELECT count(*) FROM colocated_table JOIN cte_1 USING (x); + +-- all DDLs should work +ALTER TABLE colocated_table ADD COLUMN z single_node_ent.new_type; +UPDATE colocated_table SET z = (x, y::text)::new_type; +SELECT * FROM colocated_table 
ORDER BY 1 DESC,2,3 LIMIT 1; +CREATE INDEX i_colocated_table ON colocated_table(y); +BEGIN; + CREATE INDEX i_colocated_table_2 ON colocated_table(x,y); +ROLLBACK; + +-- sanity check: transaction blocks spanning both nodes should work fine +BEGIN; + UPDATE colocated_table SET y = y + 1; + UPDATE test SET y = y -1; + SELECT max(y) FROM colocated_table; + SELECT max(y) FROM test; +ROLLBACK; + +-- generate data so that we can enforce fkeys +INSERT INTO ref SELECT i, i*2 FROM generate_series(100,150)i; + +-- the first insert goes to a shard on the worker +-- the second insert goes to a shard on the coordinator +BEGIN; + SET LOCAL citus.log_remote_commands TO ON; + INSERT INTO test(x,y) VALUES (101,100); + INSERT INTO test(x,y) VALUES (102,100); + + -- followed by a multi-shard command + SELECT count(*) FROM test; +ROLLBACK; + +-- the first insert goes to a shard on the coordinator +-- the second insert goes to a shard on the worker +BEGIN; + SET LOCAL citus.log_remote_commands TO ON; + INSERT INTO test(x,y) VALUES (102,100); + INSERT INTO test(x,y) VALUES (101,100); + + -- followed by a multi-shard command + SELECT count(*) FROM test; +ROLLBACK; + +-- now, let's move all the shards of distributed tables out of the coordinator +-- block_writes is much faster, so we prefer it for the sake of test timings +SELECT master_drain_node('localhost', :master_port, shard_transfer_mode:='block_writes'); + +-- should return false as master_drain_node had just set it to false for the coordinator +SELECT shouldhaveshards FROM pg_dist_node WHERE nodeport = :master_port; + +-- sanity-check: the same queries should work +SELECT count(*) FROM (test JOIN colocated_table USING (x)) as foo LEFT JOIN ref ON(foo.x = a); +SELECT * FROM test ORDER BY 1 DESC,2 ,3 LIMIT 1; +WITH cte_1 AS (SELECT * FROM test ORDER BY 1 DESC,2 , 3 LIMIT 5) +SELECT count(*) FROM colocated_table JOIN cte_1 USING (x); + +-- make sure that we can still query the view +SELECT * FROM view_created_before_shard_moves; + +-- and make sure that all the shards are remote +BEGIN; + SET LOCAL citus.log_remote_commands TO ON; + INSERT INTO test(x,y) VALUES (101,100); + INSERT INTO test(x,y) VALUES (102,100); + + -- followed by a multi-shard command + SELECT count(*) FROM test; +ROLLBACK; + +-- should fail as only read access is allowed +SET ROLE read_access_single_node; +INSERT INTO test VALUES (1, 1, (95, 'citus9.5')::new_type); + +SET ROLE postgres; +\c + +SET search_path TO single_node_ent; + +-- Cleanup +RESET citus.log_remote_commands; +SET client_min_messages TO WARNING; +DROP SCHEMA single_node_ent CASCADE; + +DROP OWNED BY full_access_single_node; +DROP OWNED BY read_access_single_node; +SELECT run_command_on_workers($$DROP OWNED BY full_access_single_node$$); +SELECT run_command_on_workers($$DROP OWNED BY read_access_single_node$$); +DROP ROLE full_access_single_node; +DROP ROLE read_access_single_node; + +-- remove the nodes for next tests +SELECT 1 FROM master_remove_node('localhost', :master_port); +SELECT 1 FROM master_remove_node('localhost', :worker_1_port); + +-- restart nodeid sequence so that multi_cluster_management still has the same +-- nodeids +ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 1; +ALTER SEQUENCE pg_dist_groupid_seq RESTART 1; diff --git a/src/test/regress/sql/stat_statements.sql b/src/test/regress/sql/stat_statements.sql new file mode 100644 index 000000000..1f15a82c4 --- /dev/null +++ b/src/test/regress/sql/stat_statements.sql @@ -0,0 +1,295 @@ +-- +-- stat_statements +-- +-- tests citus_stat_statements functionality + 
+SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 13 AS server_version_above_thirteen +\gset +\if :server_version_above_thirteen +SET compute_query_id = 'on'; +\endif + +-- check if pg_stat_statements is available +SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements'; + +SELECT regexp_split_to_array(setting, ',') @> ARRAY['pg_stat_statements'] AS stats_loaded +FROM pg_settings WHERE name = 'shared_preload_libraries'; + +DROP EXTENSION IF EXISTS pg_stat_statements; +-- verify it is not loaded yet +SELECT extname FROM pg_extension WHERE extname = 'pg_stat_statements'; + +-- this should error out since extension is not created yet +SELECT * FROM citus_stat_statements; + +-- create extension if available +SELECT CASE WHEN COUNT(*) > 0 THEN + 'CREATE EXTENSION pg_stat_statements' +ELSE 'SELECT false as pg_stat_statements_available' END +AS create_cmd FROM pg_available_extensions() +WHERE name = 'pg_stat_statements' +\gset + +:create_cmd; + +CREATE FUNCTION normalize_query_string(query_string text) + RETURNS TEXT + LANGUAGE plpgsql + AS $function$ +BEGIN + RETURN rtrim(regexp_replace(query_string, '\$\d+', '?', 'g'), ';'); +END; +$function$; + +-- verify citus stat statements reset +SELECT citus_stat_statements_reset(); + +CREATE TABLE test(a int); +SELECT create_distributed_table('test','a'); +insert into test values(1); + +select query, calls from citus_stat_statements(); +\if :server_version_above_thirteen +SET compute_query_id = 'off'; +\else +set citus.stat_statements_track = 'none'; +\endif + +-- for pg >= 14, since compute_query_id is off, this insert +-- shouldn't be tracked +-- for pg < 14, we disable it explicitly so that we don't need +-- to add an alternative output file. +insert into test values(1); +select query, calls from citus_stat_statements(); + + +\if :server_version_above_thirteen +SET compute_query_id = 'on'; +\else +RESET citus.stat_statements_track; +\endif + + +SELECT citus_stat_statements_reset(); + + +-- it should now succeed, but with empty result +SELECT normalize_query_string(query) FROM citus_stat_statements; + +-- run some queries +SELECT count(*) FROM lineitem_hash_part; +SELECT count(*) FROM lineitem_hash_part; +SELECT l_orderkey FROM lineitem_hash_part; +SELECT l_orderkey FROM lineitem_hash_part WHERE l_orderkey > 100; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 1; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 1200; + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +-- test GUC citus.stat_statements_track +SET citus.stat_statements_track TO 'none'; +-- this shouldn't increment the call count since tracking is disabled +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; +-- this should give the same output as above, since the last query is not counted +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; +-- reset the GUC to track stats +SET citus.stat_statements_track TO 'all'; + +-- reset pg_stat_statements and verify it also cleans citus_stat_statements output +-- verify that entries are actually removed from citus_stat_statements +SELECT pg_stat_statements_reset(); +SELECT * FROM citus_stat_statements; + +-- run some queries +SELECT count(*) FROM lineitem_hash_part; +SELECT count(*) FROM lineitem_hash_part; 
+SELECT l_orderkey FROM lineitem_hash_part; +SELECT l_orderkey FROM lineitem_hash_part WHERE l_orderkey > 100; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 1; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 1200; + +-- show current list, and reset pg_stat_statements +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +SELECT pg_stat_statements_reset(); + +SELECT count(*) FROM lineitem_hash_part; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; + +-- verify although pg_stat_statements was reset, some call counts are not +-- if a query is re-issued between pg_stat_statements_reset() and citus_stat_statements() +-- its call count is preserved. +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + + +-- citus_stat_statements_reset() must be called to reset call counts +SELECT citus_stat_statements_reset(); + +SELECT count(*) FROM lineitem_hash_part; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 4; + +-- verify citus stats has only 2 rows +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +-- create test tables to run update/delete scenarios +CREATE TABLE stat_test_text(user_id text, value int); +CREATE TABLE stat_test_bigint(user_id bigint, value int); + +SELECT citus_stat_statements_reset(); + +-- verify regular tables are not included in citus_stat_statements +SELECT * FROM stat_test_text; +SELECT * FROM stat_test_bigint WHERE user_id = 1::bigint; + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +SELECT create_distributed_table('stat_test_text', 'user_id'); +SELECT create_distributed_table('stat_test_bigint', 'user_id'); + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +SELECT * FROM stat_test_text; +SELECT * FROM stat_test_text WHERE user_id = 'me'; + +SELECT * FROM stat_test_bigint; +SELECT * FROM stat_test_bigint WHERE user_id = 2::bigint; + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +-- insert some rows and check stats +INSERT INTO stat_test_bigint VALUES (1, 1); +INSERT INTO stat_test_bigint VALUES (7, 1); +INSERT INTO stat_test_bigint VALUES (7, 1), (2,3); +INSERT INTO stat_test_bigint VALUES (8, 1), (8,3); + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +-- delete some rows and check stats +SELECT citus_stat_statements_reset(); + +DELETE FROM stat_test_bigint WHERE value > 1000; +DELETE FROM stat_test_bigint WHERE value > 1200; +DELETE FROM stat_test_bigint WHERE user_id > 1000; +DELETE FROM stat_test_bigint WHERE user_id = 1000; +DELETE FROM stat_test_bigint WHERE user_id = 1000; + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +-- update some rows and check stats +SELECT citus_stat_statements_reset(); + +UPDATE stat_test_bigint SET value = 300 WHERE value = 3000; +UPDATE stat_test_bigint SET value = 320 WHERE value = 3200; +UPDATE stat_test_bigint SET value = 300 WHERE user_id = 3; +UPDATE stat_test_bigint SET value = 300 WHERE 
user_id = 3; +UPDATE stat_test_bigint SET value = 3000 WHERE user_id > 500; + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +-- test joins +CREATE TABLE stat_test_bigint_other(LIKE stat_test_bigint); +SELECT create_distributed_table('stat_test_bigint_other', 'user_id'); + +SELECT citus_stat_statements_reset(); + +INSERT INTO stat_test_bigint_other SELECT * FROM stat_test_bigint; +INSERT INTO stat_test_bigint_other SELECT * FROM stat_test_bigint WHERE user_id = 3; +INSERT INTO stat_test_bigint_other SELECT * FROM stat_test_bigint WHERE user_id = 3; + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id); +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id); + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id) +WHERE b.user_id = 3; + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id) +WHERE b.user_id = 3; + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_bigint_other o USING (user_id) +WHERE o.user_id = 3; + +SELECT normalize_query_string(query), executor, partition_key, calls FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +-- test reference table +CREATE TABLE stat_test_reference(LIKE stat_test_bigint); +SELECT create_reference_table('stat_test_reference'); + +INSERT INTO stat_test_reference SELECT user_id, count(*) FROM stat_test_bigint GROUP BY user_id; + +SELECT citus_stat_statements_reset(); + +SELECT count(*) FROM stat_test_reference; +SELECT count(*) FROM stat_test_reference WHERE user_id = 2; +SELECT count(*) FROM stat_test_reference WHERE user_id = 2; + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id); +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id); + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id) +WHERE b.user_id = 1; + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id) +WHERE b.user_id = 1 and r.value > 0; + +SELECT count(*) FROM stat_test_bigint b JOIN stat_test_reference r USING (user_id) +WHERE r.user_id = 1; + +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +-- non-stats role should only see its own entries, even when calling citus_query_stats directly +CREATE USER nostats; +GRANT SELECT ON TABLE lineitem_hash_part TO nostats; + +SET ROLE nostats; +SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = 2; +SELECT partition_key FROM citus_query_stats(); +RESET ROLE; + +-- stats-role/superuser should be able to see entries belonging to other users +SELECT partition_key FROM citus_query_stats() WHERE partition_key = '2'; + +-- drop pg_stat_statements and verify citus_stat_statement does not work anymore +DROP extension pg_stat_statements; +SELECT normalize_query_string(query), executor, partition_key, calls +FROM citus_stat_statements +ORDER BY 1, 2, 3, 4; + +-- drop created tables +DROP TABLE stat_test_text, stat_test_bigint, stat_test_bigint_other, stat_test_reference; + +DROP FUNCTION normalize_query_string(text); + + +\if :server_version_above_thirteen +SET compute_query_id = 'off'; +\endif diff --git a/src/test/regress/sql/text_search.sql b/src/test/regress/sql/text_search.sql index b5b1f300e..d0d4b5a6f 100644 --- a/src/test/regress/sql/text_search.sql +++ b/src/test/regress/sql/text_search.sql @@ -114,7 +114,6 @@ DROP TEXT SEARCH 
CONFIGURATION text_search.french_noaccent CASCADE; SELECT * FROM run_command_on_workers($$ SELECT 'text_search.french_noaccent'::regconfig; $$) ORDER BY 1,2; SET client_min_messages TO 'warning'; -SELECT * FROM run_command_on_workers($$CREATE ROLE text_search_owner;$$) ORDER BY 1,2; CREATE ROLE text_search_owner; RESET client_min_messages;
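A note on the pattern that recurs throughout the new and updated test files above: a shard move or rebalance is always issued with an explicit shard_transfer_mode, and the orphaned source placements are then dropped explicitly. The following is a minimal sketch of that sequence, not an excerpt from any single file; the shard id and port variables reuse values that appear in the tests above.

-- move one shard placement; 'block_writes' avoids logical replication and blocks
-- writes to the shard for the duration of the copy
SELECT citus_move_shard_placement(8970000, 'localhost', :worker_1_port,
                                  'localhost', :worker_2_port,
                                  shard_transfer_mode := 'block_writes');

-- the source placement is left behind as an orphan until it is cleaned up explicitly
CALL citus_cleanup_orphaned_shards();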