diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 1c1a2f083..c2cab0272 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -68,7 +68,7 @@ USER citus
 
 # build postgres versions separately for effective parallelism and caching of already built versions when changing only certain versions
 FROM base AS pg14
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.9
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 
 FROM base AS pg15
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.4
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 
 FROM base AS pg16
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.0
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -210,7 +210,7 @@ COPY --chown=citus:citus .psqlrc .
 RUN sudo chown --from=root:root citus:citus -R ~
 
 # sets default pg version
-RUN pgenv switch 16.0
+RUN pgenv switch 16.1
 
 # make connecting to the coordinator easy
 ENV PGPORT=9700
diff --git a/.github/packaging/validate_build_output.sh b/.github/packaging/validate_build_output.sh
index 64098811e..dab301aa5 100755
--- a/.github/packaging/validate_build_output.sh
+++ b/.github/packaging/validate_build_output.sh
@@ -32,7 +32,10 @@ python3 -m pip install -r tools/packaging_automation/requirements.txt
 	echo "Package type: ${package_type}"
 	echo "OS version: $(get_rpm_os_version)"
 
-	# if os version is centos 7 or oracle linux 7, then remove urllib3 with pip uninstall and install urllib3<2.0.0 with pip install
+	# For RHEL 7, we need to install urllib3<2 due to the execution error below:
+	# ImportError: urllib3 v2.0 only supports OpenSSL 1.1.1+, currently the 'ssl'
+	# module is compiled with 'OpenSSL 1.0.2k-fips 26 Jan 2017'.
+	# See: https://github.com/urllib3/urllib3/issues/2168
 	if [[ ${package_type} == "rpm" && $(get_rpm_os_version) == 7* ]]; then
 		python3 -m pip uninstall -y urllib3
 		python3 -m pip install 'urllib3<2'
diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index e938e3904..6b33c658f 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -31,11 +31,11 @@ jobs:
       pgupgrade_image_name: "citus/pgupgradetester"
       style_checker_image_name: "citus/stylechecker"
       style_checker_tools_version: "0.8.18"
-      image_suffix: "-v9d71045"
-      pg14_version: '{ "major": "14", "full": "14.9" }'
-      pg15_version: '{ "major": "15", "full": "15.4" }'
-      pg16_version: '{ "major": "16", "full": "16.0" }'
-      upgrade_pg_versions: "14.9-15.4-16.0"
+      image_suffix: "-vbd8441d"
+      pg14_version: '{ "major": "14", "full": "14.10" }'
+      pg15_version: '{ "major": "15", "full": "15.5" }'
+      pg16_version: '{ "major": "16", "full": "16.1" }'
+      upgrade_pg_versions: "14.10-15.5-16.1"
    steps:
    # Since GHA jobs need at least one step, we use a no-op step here.
      - name: Set up parameters
diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml
index 51bd82503..4ae741a91 100644
--- a/.github/workflows/packaging-test-pipelines.yml
+++ b/.github/workflows/packaging-test-pipelines.yml
@@ -112,11 +112,6 @@ jobs:
           PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }}
         run: |
           echo "Postgres version: ${POSTGRES_VERSION}"
-
-          ## Install required packages to execute packaging tools for rpm based distros
-          yum install python3-pip python3-devel postgresql-devel -y
-          python3 -m pip install wheel
-
           ./.github/packaging/validate_build_output.sh "rpm"
 
   deb_build_tests:
@@ -192,9 +187,4 @@ jobs:
           PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }}
         run: |
           echo "Postgres version: ${POSTGRES_VERSION}"
-
-          apt-get update -y
-          ## Install required packages to execute packaging tools for deb based distros
-          apt-get install python3-dev python3-pip -y
-          apt-get purge -y python3-yaml
           ./.github/packaging/validate_build_output.sh "deb"
diff --git a/.gitignore b/.gitignore
index df447746a..e636392ac 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,3 +55,6 @@ lib*.pc
 # style related temporary outputs
 *.uncrustify
 .venv
+
+# added output when modifying check_gucs_are_alphabetically_sorted.sh
+guc.out
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 686e78dd1..8d979c104 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,10 +1,10 @@
 ### citus v12.1.1 (November 9, 2023) ###
 
 * Fixes leaking of memory and memory contexts in Citus foreign key cache
-  (#7219)
+  (#7236)
 
 * Makes sure to disallow creating a replicated distributed table concurrently
-  (#7236)
+  (#7219)
 
 ### citus v12.1.0 (September 12, 2023) ###
diff --git a/ci/check_gucs_are_alphabetically_sorted.sh b/ci/check_gucs_are_alphabetically_sorted.sh
index a769ae4fb..763b5305f 100755
--- a/ci/check_gucs_are_alphabetically_sorted.sh
+++ b/ci/check_gucs_are_alphabetically_sorted.sh
@@ -5,6 +5,6 @@ set -euo pipefail
 source ci/ci_helpers.sh
 
 # extract citus gucs in the form of "citus.X"
-grep -o -E "(\.*\"citus.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
+grep -o -E "(\.*\"citus\.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
 sort -c gucs.out
 rm gucs.out
diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c
index c617edfa4..901762ebe 100644
--- a/src/backend/distributed/commands/distribute_object_ops.c
+++ b/src/backend/distributed/commands/distribute_object_ops.c
@@ -385,6 +385,15 @@ static DistributeObjectOps Any_Rename = {
 	.address = NULL,
 	.markDistributed = false,
 };
+static DistributeObjectOps Any_SecLabel = {
+	.deparse = DeparseSecLabelStmt,
+	.qualify = NULL,
+	.preprocess = NULL,
+	.postprocess = PostprocessSecLabelStmt,
+	.operationType = DIST_OPS_ALTER,
+	.address = SecLabelStmtObjectAddress,
+	.markDistributed = false,
+};
 static DistributeObjectOps Attribute_Rename = {
 	.deparse = DeparseRenameAttributeStmt,
 	.qualify = QualifyRenameAttributeStmt,
@@ -2036,6 +2045,11 @@ GetDistributeObjectOps(Node *node)
 			return &Vacuum_Analyze;
 		}
 
+		case T_SecLabelStmt:
+		{
+			return &Any_SecLabel;
+		}
+
 		case T_RenameStmt:
 		{
 			RenameStmt *stmt = castNode(RenameStmt, node);
diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c
index a2da3bf81..3177c73a0 100644
--- a/src/backend/distributed/commands/role.c
+++ b/src/backend/distributed/commands/role.c
@@ -23,6 +23,7 @@
 #include "catalog/pg_auth_members.h"
 #include "catalog/pg_authid.h"
 #include "catalog/pg_db_role_setting.h"
+#include "catalog/pg_shseclabel.h"
 #include "catalog/pg_type.h"
 #include "catalog/objectaddress.h"
 #include "commands/dbcommands.h"
@@ -65,6 +66,7 @@ static DefElem * makeDefElemBool(char *name, bool value);
 static List * GenerateRoleOptionsList(HeapTuple tuple);
 static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options);
 static List * GenerateGrantRoleStmtsOfRole(Oid roleid);
+static List * GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename);
 static void EnsureSequentialModeForRoleDDL(void);
 
 static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple,
@@ -515,13 +517,14 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
 {
 	HeapTuple roleTuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleOid));
 	Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple));
+	char *rolename = pstrdup(NameStr(role->rolname));
 
 	CreateRoleStmt *createRoleStmt = NULL;
 	if (EnableCreateRolePropagation)
 	{
 		createRoleStmt = makeNode(CreateRoleStmt);
 		createRoleStmt->stmt_type = ROLESTMT_ROLE;
-		createRoleStmt->role = pstrdup(NameStr(role->rolname));
+		createRoleStmt->role = rolename;
 		createRoleStmt->options = GenerateRoleOptionsList(roleTuple);
 	}
@@ -532,7 +535,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
 		alterRoleStmt->role = makeNode(RoleSpec);
 		alterRoleStmt->role->roletype = ROLESPEC_CSTRING;
 		alterRoleStmt->role->location = -1;
-		alterRoleStmt->role->rolename = pstrdup(NameStr(role->rolname));
+		alterRoleStmt->role->rolename = rolename;
 		alterRoleStmt->action = 1;
 		alterRoleStmt->options = GenerateRoleOptionsList(roleTuple);
 	}
@@ -544,7 +547,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
 	{
 		/* add a worker_create_or_alter_role command if any of them are set */
 		char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand(
-			pstrdup(NameStr(role->rolname)),
+			rolename,
 			createRoleStmt,
 			alterRoleStmt);
 
@@ -566,6 +569,20 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
 		{
 			completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
 		}
+
+		/*
+		 * Append SECURITY LABEL ON ROLE commands for this specific user.
+		 * When we propagate user creation, we also want to make sure that we propagate
+		 * all the security labels it has been given. For this, we check pg_shseclabel
+		 * for the ROLE entry corresponding to roleOid, and generate the relevant
+		 * SecLabel stmts to be run on the new node.
+		 */
+		List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename);
+		stmt = NULL;
+		foreach_ptr(stmt, secLabelOnRoleStmts)
+		{
+			completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
+		}
 	}
 
 	return completeRoleList;
 }
@@ -895,6 +912,54 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
 }
 
 
+/*
+ * GenerateSecLabelOnRoleStmts generates the SecLabelStmts for the role
+ * whose oid is roleid.
+ */
+static List *
+GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename)
+{
+	List *secLabelStmts = NIL;
+
+	/*
+	 * Note that roles are shared database objects, therefore their
+	 * security labels are stored in pg_shseclabel instead of pg_seclabel.
+	 */
+	Relation pg_shseclabel = table_open(SharedSecLabelRelationId, AccessShareLock);
+	ScanKeyData skey[1];
+	ScanKeyInit(&skey[0], Anum_pg_shseclabel_objoid, BTEqualStrategyNumber, F_OIDEQ,
+				ObjectIdGetDatum(roleid));
+	SysScanDesc scan = systable_beginscan(pg_shseclabel, SharedSecLabelObjectIndexId,
+										  true, NULL, 1, &skey[0]);
+
+	HeapTuple tuple = NULL;
+	while (HeapTupleIsValid(tuple = systable_getnext(scan)))
+	{
+		SecLabelStmt *secLabelStmt = makeNode(SecLabelStmt);
+		secLabelStmt->objtype = OBJECT_ROLE;
+		secLabelStmt->object = (Node *) makeString(pstrdup(rolename));
+
+		Datum datumArray[Natts_pg_shseclabel];
+		bool isNullArray[Natts_pg_shseclabel];
+
+		heap_deform_tuple(tuple, RelationGetDescr(pg_shseclabel), datumArray,
+						  isNullArray);
+
+		secLabelStmt->provider = TextDatumGetCString(
+			datumArray[Anum_pg_shseclabel_provider - 1]);
+		secLabelStmt->label = TextDatumGetCString(
+			datumArray[Anum_pg_shseclabel_label - 1]);
+
+		secLabelStmts = lappend(secLabelStmts, secLabelStmt);
+	}
+
+	systable_endscan(scan);
+	table_close(pg_shseclabel, AccessShareLock);
+
+	return secLabelStmts;
+}
+
+
 /*
  * PreprocessCreateRoleStmt creates a worker_create_or_alter_role query for the
  * role that is being created. With that query we can create the role in the
diff --git a/src/backend/distributed/commands/seclabel.c b/src/backend/distributed/commands/seclabel.c
new file mode 100644
index 000000000..208d186c7
--- /dev/null
+++ b/src/backend/distributed/commands/seclabel.c
@@ -0,0 +1,125 @@
+/*-------------------------------------------------------------------------
+ *
+ * seclabel.c
+ *
+ *    This file contains the logic of SECURITY LABEL statement propagation.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "distributed/commands.h"
+#include "distributed/commands/utility_hook.h"
+#include "distributed/coordinator_protocol.h"
+#include "distributed/deparser.h"
+#include "distributed/log_utils.h"
+#include "distributed/metadata_sync.h"
+#include "distributed/metadata/distobject.h"
+
+
+/*
+ * PostprocessSecLabelStmt prepares the commands that need to be run on all workers to assign
+ * security labels on distributed objects, currently supporting just Role objects.
+ * It also ensures that all object dependencies exist on all
+ * nodes for the object in the SecLabelStmt.
+ */
+List *
+PostprocessSecLabelStmt(Node *node, const char *queryString)
+{
+	if (!ShouldPropagate())
+	{
+		return NIL;
+	}
+
+	SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
+
+	List *objectAddresses = GetObjectAddressListFromParseTree(node, false, true);
+	if (!IsAnyObjectDistributed(objectAddresses))
+	{
+		return NIL;
+	}
+
+	if (secLabelStmt->objtype != OBJECT_ROLE)
+	{
+		/*
+		 * If we are not on the coordinator, we don't want to interrupt the security
+		 * label command with notices; from a worker node, the user expects that
+		 * the command will not be propagated.
+		 */
+		if (EnableUnsupportedFeatureMessages && IsCoordinator())
+		{
+			ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands whose "
+									"object type is not role"),
+							 errhint("Connect to worker nodes directly to manually "
+									 "run the same SECURITY LABEL command.")));
+		}
+		return NIL;
+	}
+
+	if (!EnableCreateRolePropagation)
+	{
+		return NIL;
+	}
+
+	EnsureCoordinator();
+	EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);
+
+	const char *sql = DeparseTreeNode((Node *) secLabelStmt);
+
+	List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
+								   (void *) sql,
+								   ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
+}
+
+
+/*
+ * SecLabelStmtObjectAddress returns the object address of the object on
+ * which this statement operates (secLabelStmt->object). Note that it is not
+ * limited to objects of type OBJECT_ROLE. This is intentional, since it is
+ * fairly simple to implement and we might extend SECURITY LABEL propagation
+ * in the future to include more object types.
+ */
+List *
+SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
+{
+	SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
+
+	Relation rel = NULL;
+	ObjectAddress address = get_object_address(secLabelStmt->objtype,
+											   secLabelStmt->object, &rel,
+											   AccessShareLock, missing_ok);
+	if (rel != NULL)
+	{
+		relation_close(rel, AccessShareLock);
+	}
+
+	ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress));
+	*addressPtr = address;
+	return list_make1(addressPtr);
+}
+
+
+/*
+ * citus_test_object_relabel is a dummy function for the check_object_relabel_type hook.
+ * It is meant to be used in tests, combined with citus_test_register_label_provider.
+ */
+void
+citus_test_object_relabel(const ObjectAddress *object, const char *seclabel)
+{
+	if (seclabel == NULL ||
+		strcmp(seclabel, "citus_unclassified") == 0 ||
+		strcmp(seclabel, "citus_classified") == 0 ||
+		strcmp(seclabel, "citus '!unclassified") == 0)
+	{
+		return;
+	}
+
+	ereport(ERROR,
+			(errcode(ERRCODE_INVALID_NAME),
+			 errmsg("'%s' is not a valid security label for Citus tests.", seclabel)));
+}
diff --git a/src/backend/distributed/deparser/deparse_seclabel_stmts.c b/src/backend/distributed/deparser/deparse_seclabel_stmts.c
new file mode 100644
index 000000000..a1aa047cc
--- /dev/null
+++ b/src/backend/distributed/deparser/deparse_seclabel_stmts.c
@@ -0,0 +1,78 @@
+/*-------------------------------------------------------------------------
+ *
+ * deparse_seclabel_stmts.c
+ *    All routines to deparse SECURITY LABEL statements.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "distributed/deparser.h"
+#include "nodes/parsenodes.h"
+#include "utils/builtins.h"
+
+static void AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt);
+
+/*
+ * DeparseSecLabelStmt builds and returns a string representation of the
+ * SecLabelStmt for application on a remote server.
+ */
+char *
+DeparseSecLabelStmt(Node *node)
+{
+	SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
+	StringInfoData buf = { 0 };
+	initStringInfo(&buf);
+
+	AppendSecLabelStmt(&buf, secLabelStmt);
+
+	return buf.data;
+}
+
+
+/*
+ * AppendSecLabelStmt generates the string representation of the
+ * SecLabelStmt and appends it to the buffer.
+ */
+static void
+AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt)
+{
+	appendStringInfoString(buf, "SECURITY LABEL ");
+
+	if (stmt->provider != NULL)
+	{
+		appendStringInfo(buf, "FOR %s ", quote_identifier(stmt->provider));
+	}
+
+	appendStringInfoString(buf, "ON ");
+
+	switch (stmt->objtype)
+	{
+		case OBJECT_ROLE:
+		{
+			appendStringInfo(buf, "ROLE %s ", quote_identifier(strVal(stmt->object)));
+			break;
+		}
+
+		/* normally, we shouldn't reach this */
+		default:
+		{
+			ereport(ERROR, (errmsg("unsupported security label statement for"
+								   " deparsing")));
+		}
+	}
+
+	appendStringInfoString(buf, "IS ");
+
+	if (stmt->label != NULL)
+	{
+		appendStringInfo(buf, "%s", quote_literal_cstr(stmt->label));
+	}
+	else
+	{
+		appendStringInfoString(buf, "NULL");
+	}
+}
diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c
index d339ac56a..213d135e6 100644
--- a/src/backend/distributed/operations/shard_rebalancer.c
+++ b/src/backend/distributed/operations/shard_rebalancer.c
@@ -317,7 +317,7 @@ PG_FUNCTION_INFO_V1(citus_rebalance_start);
 PG_FUNCTION_INFO_V1(citus_rebalance_stop);
 PG_FUNCTION_INFO_V1(citus_rebalance_wait);
 
-bool RunningUnderIsolationTest = false;
+bool RunningUnderCitusTestSuite = false;
 int MaxRebalancerLoggedIgnoredMoves = 5;
 int RebalancerByDiskSizeBaseCost = 100 * 1024 * 1024;
 bool PropagateSessionSettingsForLoopbackConnection = false;
diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c
index 97f6fdb3d..48571d7c4 100644
--- a/src/backend/distributed/replication/multi_logical_replication.c
+++ b/src/backend/distributed/replication/multi_logical_replication.c
@@ -1143,7 +1143,7 @@ ConflictWithIsolationTestingBeforeCopy(void)
 	const bool sessionLock = false;
 	const bool dontWait = false;
 
-	if (RunningUnderIsolationTest)
+	if (RunningUnderCitusTestSuite)
 	{
 		SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
 							 SHARD_MOVE_ADVISORY_LOCK_SECOND_KEY,
@@ -1177,7 +1177,7 @@ ConflictWithIsolationTestingAfterCopy(void)
 	const bool sessionLock = false;
 	const bool dontWait = false;
 
-	if (RunningUnderIsolationTest)
+	if (RunningUnderCitusTestSuite)
 	{
 		SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
 							 SHARD_MOVE_ADVISORY_LOCK_FIRST_KEY,
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index 9b5768ee7..22037c82b 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -29,6 +29,7 @@
 #include "citus_version.h"
 #include "commands/explain.h"
 #include "commands/extension.h"
+#include "commands/seclabel.h"
 #include "common/string.h"
 #include "executor/executor.h"
 #include "distributed/backend_data.h"
@@ -574,6 +575,16 @@ _PG_init(void)
 	INIT_COLUMNAR_SYMBOL(PGFunction, columnar_storage_info);
 	INIT_COLUMNAR_SYMBOL(PGFunction, columnar_store_memory_stats);
 	INIT_COLUMNAR_SYMBOL(PGFunction, test_columnar_storage_write_new_page);
+
+	/*
+	 * This part is only for SECURITY LABEL tests,
+	 * mimicking what an actual security label provider would do.
+	 */
+	if (RunningUnderCitusTestSuite)
+	{
+		register_label_provider("citus '!tests_label_provider",
+								citus_test_object_relabel);
+	}
 }
@@ -2305,13 +2316,14 @@ RegisterCitusConfigVariables(void)
 		WarnIfReplicationModelIsSet, NULL, NULL);
 
 	DefineCustomBoolVariable(
-		"citus.running_under_isolation_test",
+		"citus.running_under_citus_test_suite",
 		gettext_noop(
 			"Only useful for testing purposes, when set to true, Citus does some "
-			"tricks to implement useful isolation tests with rebalancing. Should "
+			"tricks to implement useful isolation tests with rebalancing. It also "
+			"registers a dummy label provider for SECURITY LABEL tests. Should "
 			"never be set to true on production systems "),
 		gettext_noop("for details of the tricks implemented, refer to the source code"),
-		&RunningUnderIsolationTest,
+		&RunningUnderCitusTestSuite,
 		false,
 		PGC_SUSET,
 		GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h
index 0c681c118..02eadacc5 100644
--- a/src/include/distributed/commands.h
+++ b/src/include/distributed/commands.h
@@ -523,6 +523,11 @@ extern List * AlterSchemaOwnerStmtObjectAddress(Node *node, bool missing_ok,
 extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok,
 												 bool isPostprocess);
 
+/* seclabel.c - forward declarations */
+extern List * PostprocessSecLabelStmt(Node *node, const char *queryString);
+extern List * SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess);
+extern void citus_test_object_relabel(const ObjectAddress *object, const char *seclabel);
+
 /* sequence.c - forward declarations */
 extern List * PreprocessAlterSequenceStmt(Node *node, const char *queryString,
 										  ProcessUtilityContext processUtilityContext);
diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h
index baccec78f..e98084e59 100644
--- a/src/include/distributed/deparser.h
+++ b/src/include/distributed/deparser.h
@@ -261,6 +261,9 @@ extern void QualifyRenameTextSearchDictionaryStmt(Node *node);
 extern void QualifyTextSearchConfigurationCommentStmt(Node *node);
 extern void QualifyTextSearchDictionaryCommentStmt(Node *node);
 
+/* forward declarations for deparse_seclabel_stmts.c */
+extern char * DeparseSecLabelStmt(Node *node);
+
 /* forward declarations for deparse_sequence_stmts.c */
 extern char * DeparseDropSequenceStmt(Node *node);
 extern char * DeparseRenameSequenceStmt(Node *node);
diff --git a/src/include/distributed/shard_rebalancer.h b/src/include/distributed/shard_rebalancer.h
index 38ce4f485..345748ced 100644
--- a/src/include/distributed/shard_rebalancer.h
+++ b/src/include/distributed/shard_rebalancer.h
@@ -189,7 +189,7 @@ typedef struct RebalancePlanFunctions
 extern char *VariablesToBePassedToNewConnections;
 extern int MaxRebalancerLoggedIgnoredMoves;
 extern int RebalancerByDiskSizeBaseCost;
-extern bool RunningUnderIsolationTest;
+extern bool RunningUnderCitusTestSuite;
 extern bool PropagateSessionSettingsForLoopbackConnection;
 extern int MaxBackgroundTaskExecutorsPerNode;
diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py
index b902a7998..158a44ef6 100755
--- a/src/test/regress/citus_tests/run_test.py
+++ b/src/test/regress/citus_tests/run_test.py
@@ -135,20 +135,10 @@ DEPS = {
     ),
     "alter_role_propagation": TestDeps("minimal_schedule"),
     "background_rebalance": TestDeps(
-        None,
-        [
-            "multi_test_helpers",
-            "multi_cluster_management",
-        ],
-        worker_count=3,
+        None, ["multi_test_helpers", "multi_cluster_management"], worker_count=3
     ),
     "background_rebalance_parallel": TestDeps(
-        None,
-        [
-            "multi_test_helpers",
-            "multi_cluster_management",
-        ],
-        worker_count=6,
+        None, ["multi_test_helpers", "multi_cluster_management"], worker_count=6
     ),
     "function_propagation": TestDeps("minimal_schedule"),
     "citus_shards": TestDeps("minimal_schedule"),
@@ -165,30 +155,17 @@ DEPS = {
     ),
     "schema_based_sharding": TestDeps("minimal_schedule"),
     "multi_sequence_default": TestDeps(
-        None,
-        [
-            "multi_test_helpers",
-            "multi_cluster_management",
-            "multi_table_ddl",
-        ],
+        None, ["multi_test_helpers", "multi_cluster_management", "multi_table_ddl"]
     ),
     "grant_on_schema_propagation": TestDeps("minimal_schedule"),
     "propagate_extension_commands": TestDeps("minimal_schedule"),
     "multi_size_queries": TestDeps("base_schedule", ["multi_copy"]),
     "multi_mx_node_metadata": TestDeps(
-        None,
-        [
-            "multi_extension",
-            "multi_test_helpers",
-            "multi_test_helpers_superuser",
-        ],
+        None, ["multi_extension", "multi_test_helpers", "multi_test_helpers_superuser"]
    ),
     "multi_mx_function_table_reference": TestDeps(
         None,
-        [
-            "multi_cluster_management",
-            "remove_coordinator_from_metadata",
-        ],
+        ["multi_cluster_management", "remove_coordinator_from_metadata"],
         # because it queries node group id and it changes as we add / remove nodes
         repeatable=False,
     ),
@@ -201,16 +178,27 @@ DEPS = {
         ],
     ),
     "metadata_sync_helpers": TestDeps(
-        None,
-        [
-            "multi_mx_node_metadata",
-            "multi_cluster_management",
-        ],
+        None, ["multi_mx_node_metadata", "multi_cluster_management"]
     ),
-    "multi_utilities": TestDeps(
+    "multi_utilities": TestDeps("minimal_schedule", ["multi_data_types"]),
+    "multi_tenant_isolation_nonblocking": TestDeps(
+        "minimal_schedule", ["multi_data_types", "remove_coordinator_from_metadata"]
+    ),
+    "remove_non_default_nodes": TestDeps(
+        None, ["multi_mx_node_metadata", "multi_cluster_management"], repeatable=False
+    ),
+    "citus_split_shard_columnar_partitioned": TestDeps(
+        "minimal_schedule", ["remove_coordinator_from_metadata"]
+    ),
+    "add_coordinator": TestDeps(
+        "minimal_schedule", ["remove_coordinator_from_metadata"], repeatable=False
+    ),
+    "multi_multiuser_auth": TestDeps(
         "minimal_schedule",
-        ["multi_data_types"],
+        ["multi_create_table", "multi_create_users", "multi_multiuser_load_data"],
+        repeatable=False,
     ),
+    "multi_prepare_plsql": TestDeps("base_schedule"),
 }
@@ -303,9 +291,13 @@ def run_schedule_with_multiregress(test_name, schedule, dependencies, args):
     worker_count = needed_worker_count(test_name, dependencies)
 
     # find suitable make recipe
-    if dependencies.schedule == "base_isolation_schedule" or "isolation" in test_name:
+    if dependencies.schedule == "base_isolation_schedule" or test_name.startswith(
+        "isolation"
+    ):
         make_recipe = "check-isolation-custom-schedule"
-    elif dependencies.schedule == "failure_base_schedule" or "failure" in test_name:
+    elif dependencies.schedule == "failure_base_schedule" or test_name.startswith(
+        "failure"
+    ):
         make_recipe = "check-failure-custom-schedule"
     else:
         make_recipe = "check-custom-schedule"
@@ -418,10 +410,7 @@ def test_dependencies(test_name, test_schedule, schedule_line, args):
         if "upgrade_columnar_before" not in before_tests:
             before_tests.append("upgrade_columnar_before")
 
-        return TestDeps(
-            default_base_schedule(test_schedule, args),
-            before_tests,
-        )
+        return TestDeps(default_base_schedule(test_schedule, args), before_tests)
 
     # before_ tests leave stuff around on purpose for the after tests. So they
     # are not repeatable by definition.
diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out
index 499669385..01f3a682d 100644
--- a/src/test/regress/expected/add_coordinator.out
+++ b/src/test/regress/expected/add_coordinator.out
@@ -2,13 +2,6 @@
 -- ADD_COORDINATOR
 --
 -- node trying to add itself without specifying groupid => 0 should error out
--- first remove the coordinator to for testing master_add_node for coordinator
-SELECT master_remove_node('localhost', :master_port);
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
 SELECT master_add_node('localhost', :master_port);
 ERROR: Node cannot add itself as a worker.
 HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636);
diff --git a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out
index 85b1fc3ee..6a4265f81 100644
--- a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out
+++ b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out
@@ -135,4 +135,10 @@ NOTICE: drop cascades to 3 other objects
 DETAIL: drop cascades to table citus_split_shard_by_split_points_negative.range_paritioned_table_to_split
 drop cascades to table citus_split_shard_by_split_points_negative.table_to_split
 drop cascades to table citus_split_shard_by_split_points_negative.table_to_split_replication_factor_2
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
 --END : Cleanup
diff --git a/src/test/regress/expected/multi_multiuser_auth.out b/src/test/regress/expected/multi_multiuser_auth.out
index 8dd9b8ba7..7a72eeba1 100644
--- a/src/test/regress/expected/multi_multiuser_auth.out
+++ b/src/test/regress/expected/multi_multiuser_auth.out
@@ -12,19 +12,9 @@
 \set bob_worker_1_pw triplex-royalty-warranty-stand-cheek
 \set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
 \set bob_fallback_pw :bob_worker_1_pw
-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
- worker_1_id
----------------------------------------------------------------------
-          17
-(1 row)
-
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
 \gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
- worker_2_id
----------------------------------------------------------------------
-          35
-(1 row)
-
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
 \gset
 -- alice is a superuser so she can update own password
 CREATE USER alice PASSWORD :'alice_master_pw' SUPERUSER;
diff --git a/src/test/regress/expected/multi_poolinfo_usage.out b/src/test/regress/expected/multi_poolinfo_usage.out
index ee98f0df7..53dfca24e 100644
--- a/src/test/regress/expected/multi_poolinfo_usage.out
+++ b/src/test/regress/expected/multi_poolinfo_usage.out
@@ -6,19 +6,9 @@
 -- Test of ability to override host/port for a node
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 20000000;
-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
- worker_1_id
----------------------------------------------------------------------
-          17
-(1 row)
-
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
 \gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
- worker_2_id
----------------------------------------------------------------------
-          35
-(1 row)
-
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
 \gset
 CREATE TABLE lotsa_connections (id integer, name text);
 SELECT create_distributed_table('lotsa_connections', 'id');
diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out
index 74c9835ff..a87b47a34 100644
--- a/src/test/regress/expected/multi_prepare_plsql.out
+++ b/src/test/regress/expected/multi_prepare_plsql.out
@@ -1317,11 +1317,11 @@ SELECT type_ddl_plpgsql();
 
 (1 row)
 
 -- find all renamed types to verify the schema name didn't leak, nor a crash happened
-SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup';
+SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1;
   nspname    |         typname
---------------------------------------------------------------------
- public      | prepare_ddl_type_backup
 otherschema | prepare_ddl_type_backup
+ public      | prepare_ddl_type_backup
 (2 rows)
 
 DROP TYPE prepare_ddl_type_backup;
@@ -1332,6 +1332,7 @@ DROP FUNCTION ddl_in_plpgsql();
 DROP FUNCTION copy_in_plpgsql();
 DROP TABLE prepare_ddl;
 DROP TABLE local_ddl;
+DROP TABLE plpgsql_table;
 DROP SCHEMA otherschema;
 -- clean-up functions
 DROP FUNCTION plpgsql_test_1();
diff --git a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out
index 3ec16e6ee..3daac7dac 100644
--- a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out
+++ b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out
@@ -1275,8 +1275,9 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0;
 TRUNCATE TABLE pg_catalog.pg_dist_colocation;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
 ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
-SELECT citus_set_coordinator_host('localhost');
- citus_set_coordinator_host
+-- make sure we don't have any replication objects leftover on the nodes
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
 ---------------------------------------------------------------------
 
 (1 row)
diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out
index b8758e561..4b621b968 100644
--- a/src/test/regress/expected/multi_test_helpers.out
+++ b/src/test/regress/expected/multi_test_helpers.out
@@ -526,3 +526,33 @@ BEGIN
 	RETURN result;
 END;
 $func$ LANGUAGE plpgsql;
+-- Returns pg_seclabels entries from all nodes in the cluster for which
+-- the object name is the input.
+CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text,
+                                                                 master_port INTEGER DEFAULT 57636,
+                                                                 worker_1_port INTEGER DEFAULT 57637,
+                                                                 worker_2_port INTEGER DEFAULT 57638)
+RETURNS TABLE (
+    node_type text,
+    result text
+)
+AS $func$
+DECLARE
+    pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' ||
+                             'SELECT provider, objtype, label FROM pg_seclabels ' ||
+                             'WHERE objname = ''' || object_name || ''') q';
+BEGIN
+    RETURN QUERY
+        SELECT
+            CASE
+                WHEN nodeport = master_port THEN 'coordinator'
+                WHEN nodeport = worker_1_port THEN 'worker_1'
+                WHEN nodeport = worker_2_port THEN 'worker_2'
+                ELSE 'unexpected_node'
+            END AS node_type,
+            a.result
+        FROM run_command_on_all_nodes(pg_seclabels_cmd) a
+        JOIN pg_dist_node USING (nodeid)
+        ORDER BY node_type;
+END;
+$func$ LANGUAGE plpgsql;
diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out
index d2b0940ed..5faab87d7 100644
--- a/src/test/regress/expected/multi_utilities.out
+++ b/src/test/regress/expected/multi_utilities.out
@@ -424,6 +424,34 @@ FROM pg_total_relation_size('local_vacuum_table') s ;
 35000000
 (1 row)
 
+-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
+select reltoastrelid from pg_class where relname='local_vacuum_table'
+\gset
+SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
+\gset
+insert into local_vacuum_table select i from generate_series(1,10000) i;
+VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
+SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
+WHERE oid=:reltoastrelid::regclass;
+ frozen_performed
+---------------------------------------------------------------------
+ t
+(1 row)
+
+delete from local_vacuum_table;
+-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
+SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
+\gset
+insert into local_vacuum_table select i from generate_series(1,10000) i;
+VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
+SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
+WHERE oid=:reltoastrelid::regclass;
+ frozen_not_performed
+---------------------------------------------------------------------
+ t
+(1 row)
+
+delete from local_vacuum_table;
 -- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true)
 insert into local_vacuum_table select i from generate_series(1,1000000) i;
 delete from local_vacuum_table;
diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out
index 1b1d80df2..badd23240 100644
--- a/src/test/regress/expected/pg14.out
+++ b/src/test/regress/expected/pg14.out
@@ -71,32 +71,6 @@ NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
--- vacuum (process_toast true) should be vacuuming toast tables (default is true)
-CREATE TABLE local_vacuum_table(name text);
-select reltoastrelid from pg_class where relname='local_vacuum_table'
-\gset
-SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
-\gset
-VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
-SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
-WHERE oid=:reltoastrelid::regclass;
- frozen_performed
----------------------------------------------------------------------
- t
-(1 row)
-
--- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
-SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
-\gset
-VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
-SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
-WHERE oid=:reltoastrelid::regclass;
- frozen_not_performed
----------------------------------------------------------------------
- t
-(1 row)
-
-DROP TABLE local_vacuum_table;
 SET citus.log_remote_commands TO OFF;
 create table dist(a int, b int);
 select create_distributed_table('dist','a');
@@ -1492,4 +1466,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col;
 set client_min_messages to error;
 drop extension postgres_fdw cascade;
 drop schema pg14 cascade;
+DROP ROLE role_1, r1;
 reset client_min_messages;
diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index c761efb3e..2df4e59d3 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -267,6 +267,7 @@ SET client_min_messages TO ERROR;
 DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
+SELECT public.wait_for_resource_cleanup();
 \q
 \endif
 -- recreate a mixed publication
@@ -544,3 +545,9 @@ DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
 DROP SCHEMA publication2 CASCADE;
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
diff --git a/src/test/regress/expected/publication_0.out b/src/test/regress/expected/publication_0.out
index 14fa94d17..e768a1d41 100644
--- a/src/test/regress/expected/publication_0.out
+++ b/src/test/regress/expected/publication_0.out
@@ -267,4 +267,10 @@ SET client_min_messages TO ERROR;
 DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
 \q
diff --git a/src/test/regress/expected/remove_non_default_nodes.out b/src/test/regress/expected/remove_non_default_nodes.out
new file mode 100644
index 000000000..7645af708
--- /dev/null
+++ b/src/test/regress/expected/remove_non_default_nodes.out
@@ -0,0 +1,13 @@
+-- The default nodes for the Citus test suite are the coordinator and 2 worker nodes,
+-- which we identify with master_port, worker_1_port, worker_2_port.
+-- When extra nodes are added, GetLocalNodeId() does not behave correctly in some tests,
+-- so we remove the non-default nodes. This test expects the non-default nodes
+-- to not have any active placements.
+SELECT any_value(citus_remove_node('localhost', nodeport))
+FROM pg_dist_node
+WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
+ any_value
+---------------------------------------------------------------------
+
+(1 row)
+
diff --git a/src/test/regress/expected/seclabel.out b/src/test/regress/expected/seclabel.out
new file mode 100644
index 000000000..f826de44b
--- /dev/null
+++ b/src/test/regress/expected/seclabel.out
@@ -0,0 +1,173 @@
+--
+-- SECLABEL
+--
+-- Test suite for SECURITY LABEL ON ROLE statements
+--
+-- first we remove one of the worker nodes to be able to test
+-- citus_add_node later
+SELECT citus_remove_node('localhost', :worker_2_port);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+-- create two roles, one with characters that need escaping
+CREATE ROLE user1;
+CREATE ROLE "user 2";
+-- check an invalid label for our current dummy hook citus_test_object_relabel
+SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label';
+ERROR: 'invalid_label' is not a valid security label for Citus tests.
+-- if we disable metadata_sync, the command will not be propagated
+SET citus.enable_metadata_sync TO off;
+SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified';
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+  node_type  |                                             result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1    |
+(2 rows)
+
+RESET citus.enable_metadata_sync;
+-- check that we only support propagating for roles
+SET citus.shard_replication_factor to 1;
+-- distributed table
+CREATE TABLE a (a int);
+SELECT create_distributed_table('a', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- distributed view
+CREATE VIEW v_dist AS SELECT * FROM a;
+-- distributed function
+CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$
+    BEGIN RAISE NOTICE '%', $1; END; $$;
+SECURITY LABEL ON TABLE a IS 'citus_classified';
+NOTICE: not propagating SECURITY LABEL commands whose object type is not role
+HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command.
+SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
+NOTICE: not propagating SECURITY LABEL commands whose object type is not role
+HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command.
+SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
+NOTICE: not propagating SECURITY LABEL commands whose object type is not role
+HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command.
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
+  node_type  |                                            result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"}
+ worker_1    |
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
+  node_type  |                                             result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"}
+ worker_1    |
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
+  node_type  |                                            result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"}
+ worker_1    |
+(2 rows)
+
+\c - - - :worker_1_port
+SECURITY LABEL ON TABLE a IS 'citus_classified';
+SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
+SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
+\c - - - :master_port
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
+  node_type  |                                            result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"}
+ worker_1    | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
+  node_type  |                                             result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"}
+ worker_1    | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
+  node_type  |                                            result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"}
+ worker_1    | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+DROP TABLE a CASCADE;
+NOTICE: drop cascades to view v_dist
+DROP FUNCTION notice;
+-- test that SECURITY LABEL statement is actually propagated for ROLES
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+-- we have exactly one provider loaded, so we may omit the provider in the command
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
+NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+SECURITY LABEL ON ROLE user1 IS NULL;
+NOTICE: issuing SECURITY LABEL ON ROLE user1 IS NULL
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
+NOTICE: issuing SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
+NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+\c - - - :worker_1_port
+-- command not allowed from worker node
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified';
+ERROR: operation is not allowed on this node
+HINT: Connect to the coordinator and run it again.
+\c - - - :master_port
+RESET citus.log_remote_commands;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+  node_type  |                                             result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1    | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+  node_type  |                                              result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1    | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+-- add a new node and check that it also propagates the SECURITY LABEL statement to the new node
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
+NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT worker_create_or_alter_role('user 2', 'CREATE ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+  node_type  |                                             result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1    | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_2    | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+(3 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+  node_type  |                                              result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1    | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_2    | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+(3 rows)
+
+-- cleanup
+RESET citus.log_remote_commands;
+DROP ROLE user1, "user 2";
diff --git a/src/test/regress/expected/worker_split_binary_copy_test.out b/src/test/regress/expected/worker_split_binary_copy_test.out
index f23dc2043..e161b7f67 100644
--- a/src/test/regress/expected/worker_split_binary_copy_test.out
+++ b/src/test/regress/expected/worker_split_binary_copy_test.out
@@ -3,43 +3,6 @@ SET search_path TO worker_split_binary_copy_test;
 SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 81060000;
--- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
-SELECT citus_remove_node('localhost', 8887);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9995);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9992);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9998);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9997);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 8888);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
 -- BEGIN: Create distributed table and insert data.
 CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
 	l_orderkey bigint not null,
diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule
index 7ee1ffa80..0bb482856 100644
--- a/src/test/regress/multi_1_schedule
+++ b/src/test/regress/multi_1_schedule
@@ -32,6 +32,7 @@ test: propagate_extension_commands
 test: escape_extension_name
 test: ref_citus_local_fkeys
 test: alter_database_owner
+test: seclabel
 test: distributed_triggers
 test: create_single_shard_table
 # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential
@@ -296,6 +297,7 @@ test: multi_foreign_key_relation_graph
 # Replicating reference tables to coordinator. Add coordinator to pg_dist_node
 # and rerun some of the tests.
 # --------
+test: remove_coordinator_from_metadata
 test: add_coordinator
 test: replicate_reference_tables_to_coordinator
 test: citus_local_tables
diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule
index 65a272566..2c8f7b085 100644
--- a/src/test/regress/multi_schedule
+++ b/src/test/regress/multi_schedule
@@ -83,7 +83,8 @@ test: forcedelegation_functions
 # this should be run alone as it gets too many clients
 test: join_pushdown
 test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message
-test: multi_agg_distinct multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction
+test: multi_agg_distinct
+test: multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction
 test: multi_reference_table multi_select_for_update relation_access_tracking pg13_with_ties
 test: custom_aggregate_support aggregate_support tdigest_aggregate_support
 test: multi_average_expression multi_working_columns multi_having_pushdown having_subquery
diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl
index 4cc022198..3acde4c3c 100755
--- a/src/test/regress/pg_regress_multi.pl
+++ b/src/test/regress/pg_regress_multi.pl
@@ -90,7 +90,6 @@ my $workerCount = 2;
 my $serversAreShutdown = "TRUE";
 my $usingWindows = 0;
 my $mitmPid = 0;
-my $workerCount = 2;
 
 if ($Config{osname} eq "MSWin32")
 {
@@ -510,6 +509,12 @@ if($vanillatest)
 	# we disable some restrictions for local objects like local views to not break postgres vanilla test behaviour.
 	push(@pgOptions, "citus.enforce_object_restrictions_for_local_objects=false");
 }
+else
+{
+	# We currently need this config for isolation tests and security label tests;
+	# this option loads a security label provider, which we don't want in vanilla tests.
+	push(@pgOptions, "citus.running_under_citus_test_suite=true");
+}
 
 if ($useMitmproxy)
 {
@@ -560,7 +565,6 @@ if($isolationtester)
 	push(@pgOptions, "citus.metadata_sync_interval=1000");
 	push(@pgOptions, "citus.metadata_sync_retry_interval=100");
 	push(@pgOptions, "client_min_messages='warning'"); # pg12 introduced notice showing during isolation tests
-	push(@pgOptions, "citus.running_under_isolation_test=true");
 
 	# Disable all features of the maintenance daemon. Otherwise queries might
 	# randomly show temporarily as "waiting..." because they are waiting for the
diff --git a/src/test/regress/split_schedule b/src/test/regress/split_schedule
index b47acd828..53c422eab 100644
--- a/src/test/regress/split_schedule
+++ b/src/test/regress/split_schedule
@@ -10,6 +10,7 @@ test: foreign_key_to_reference_table
 # Split tests go here.
test: split_shard test: worker_split_copy_test +test: remove_non_default_nodes test: worker_split_binary_copy_test test: worker_split_text_copy_test test: citus_split_shard_by_split_points_negative diff --git a/src/test/regress/sql/add_coordinator.sql b/src/test/regress/sql/add_coordinator.sql index 81b77bfcd..2dba78064 100644 --- a/src/test/regress/sql/add_coordinator.sql +++ b/src/test/regress/sql/add_coordinator.sql @@ -3,8 +3,6 @@ -- -- node trying to add itself without specifying groupid => 0 should error out --- first remove the coordinator to for testing master_add_node for coordinator -SELECT master_remove_node('localhost', :master_port); SELECT master_add_node('localhost', :master_port); SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset diff --git a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql index fe37777c7..4c180052f 100644 --- a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql +++ b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql @@ -113,4 +113,5 @@ SELECT citus_split_shard_by_split_points( --BEGIN : Cleanup \c - postgres - :master_port DROP SCHEMA "citus_split_shard_by_split_points_negative" CASCADE; +SELECT public.wait_for_resource_cleanup(); --END : Cleanup diff --git a/src/test/regress/sql/multi_multiuser_auth.sql b/src/test/regress/sql/multi_multiuser_auth.sql index 43cb3c11f..1cd566b50 100644 --- a/src/test/regress/sql/multi_multiuser_auth.sql +++ b/src/test/regress/sql/multi_multiuser_auth.sql @@ -16,9 +16,9 @@ \set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile \set bob_fallback_pw :bob_worker_1_pw -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset -- alice is a superuser so she can update own password diff --git a/src/test/regress/sql/multi_poolinfo_usage.sql b/src/test/regress/sql/multi_poolinfo_usage.sql index da039cfca..2fbaed2ed 100644 --- a/src/test/regress/sql/multi_poolinfo_usage.sql +++ b/src/test/regress/sql/multi_poolinfo_usage.sql @@ -7,9 +7,9 @@ SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 20000000; -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset CREATE TABLE lotsa_connections (id integer, name text); diff --git a/src/test/regress/sql/multi_prepare_plsql.sql b/src/test/regress/sql/multi_prepare_plsql.sql index 8589e5b5a..e71e2818e 100644 --- a/src/test/regress/sql/multi_prepare_plsql.sql +++ b/src/test/regress/sql/multi_prepare_plsql.sql @@ -624,7 +624,7 @@ CREATE TYPE prepare_ddl_type AS (x int, y int); SELECT type_ddl_plpgsql(); -- find all renamed types to verify the schema name didn't leak, nor a crash happened -SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = 
diff --git a/src/test/regress/sql/multi_prepare_plsql.sql b/src/test/regress/sql/multi_prepare_plsql.sql
index 8589e5b5a..e71e2818e 100644
--- a/src/test/regress/sql/multi_prepare_plsql.sql
+++ b/src/test/regress/sql/multi_prepare_plsql.sql
@@ -624,7 +624,7 @@ CREATE TYPE prepare_ddl_type AS (x int, y int);
 SELECT type_ddl_plpgsql();

 -- find all renamed types to verify the schema name didn't leak, nor a crash happened
-SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup';
+SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1;

 DROP TYPE prepare_ddl_type_backup;
 RESET search_path;
@@ -635,6 +635,7 @@ DROP FUNCTION ddl_in_plpgsql();
 DROP FUNCTION copy_in_plpgsql();
 DROP TABLE prepare_ddl;
 DROP TABLE local_ddl;
+DROP TABLE plpgsql_table;
 DROP SCHEMA otherschema;

 -- clean-up functions
diff --git a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql
index 1299c9282..994f29f0a 100644
--- a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql
+++ b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql
@@ -608,5 +608,5 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
 ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;

-SELECT citus_set_coordinator_host('localhost');
-
+-- make sure we don't have any replication objects leftover on the nodes
+SELECT public.wait_for_resource_cleanup();
diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql
index f7c97f1b2..7f0346d14 100644
--- a/src/test/regress/sql/multi_test_helpers.sql
+++ b/src/test/regress/sql/multi_test_helpers.sql
@@ -550,3 +550,34 @@ BEGIN
 RETURN result;
 END;
 $func$ LANGUAGE plpgsql;
+
+-- Returns pg_seclabels entries from all nodes in the cluster for which
+-- the object name is the input.
+CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text,
+                                                                 master_port INTEGER DEFAULT 57636,
+                                                                 worker_1_port INTEGER DEFAULT 57637,
+                                                                 worker_2_port INTEGER DEFAULT 57638)
+RETURNS TABLE (
+    node_type text,
+    result text
+)
+AS $func$
+DECLARE
+    pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' ||
+                             'SELECT provider, objtype, label FROM pg_seclabels ' ||
+                             'WHERE objname = ''' || object_name || ''') q';
+BEGIN
+    RETURN QUERY
+        SELECT
+            CASE
+                WHEN nodeport = master_port THEN 'coordinator'
+                WHEN nodeport = worker_1_port THEN 'worker_1'
+                WHEN nodeport = worker_2_port THEN 'worker_2'
+                ELSE 'unexpected_node'
+            END AS node_type,
+            a.result
+        FROM run_command_on_all_nodes(pg_seclabels_cmd) a
+        JOIN pg_dist_node USING (nodeid)
+        ORDER BY node_type;
+END;
+$func$ LANGUAGE plpgsql;
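
The helper above is exercised by the new seclabel.sql test further down; a typical call, relying on the default regression ports baked into the signature, looks like this (shown for illustration, not part of the patch):

SELECT node_type, result
FROM get_citus_tests_label_provider_labels('user1')
ORDER BY node_type;

One caveat worth noting: object_name is concatenated into pg_seclabels_cmd without escaping, which is acceptable in a trusted test helper but would be an injection hazard elsewhere; format('%L', object_name) would be the defensive alternative.
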
diff --git a/src/test/regress/sql/multi_utilities.sql b/src/test/regress/sql/multi_utilities.sql
index 1124b9890..668e1f32f 100644
--- a/src/test/regress/sql/multi_utilities.sql
+++ b/src/test/regress/sql/multi_utilities.sql
@@ -272,6 +272,27 @@ VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table;
 SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size
 FROM pg_total_relation_size('local_vacuum_table') s ;

+-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
+select reltoastrelid from pg_class where relname='local_vacuum_table'
+\gset
+
+SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
+\gset
+insert into local_vacuum_table select i from generate_series(1,10000) i;
+VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
+SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
+WHERE oid=:reltoastrelid::regclass;
+delete from local_vacuum_table;
+
+-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
+SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
+\gset
+insert into local_vacuum_table select i from generate_series(1,10000) i;
+VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
+SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
+WHERE oid=:reltoastrelid::regclass;
+delete from local_vacuum_table;
+
 -- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true)
 insert into local_vacuum_table select i from generate_series(1,1000000) i;
 delete from local_vacuum_table;
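
The multi_utilities.sql hunk above hosts the PROCESS_TOAST checks that the pg14.sql hunk below removes. The probe relies on VACUUM (FREEZE) advancing relfrozenxid only on relations it actually processes: snapshot the toast table's relfrozenxid, vacuum with PROCESS_TOAST on or off, then check whether the horizon moved; the inserts consume transaction IDs so the horizon has room to advance. A condensed standalone sketch of the same probe, using a hypothetical table t:

CREATE TABLE t (payload text);
SELECT reltoastrelid AS toast_oid FROM pg_class WHERE relname = 't'
\gset
SELECT relfrozenxid AS before_xid FROM pg_class WHERE oid = :toast_oid
\gset
INSERT INTO t SELECT repeat('x', 10) FROM generate_series(1, 1000);
VACUUM (FREEZE, PROCESS_TOAST true) t;
SELECT relfrozenxid::text::integer > :before_xid AS toast_was_frozen
FROM pg_class WHERE oid = :toast_oid;
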
diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql
index 8d3f430ce..47eb67930 100644
--- a/src/test/regress/sql/pg14.sql
+++ b/src/test/regress/sql/pg14.sql
@@ -22,25 +22,6 @@ VACUUM (INDEX_CLEANUP "AUTOX") t1;
 VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1;
 VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1;

--- vacuum (process_toast true) should be vacuuming toast tables (default is true)
-CREATE TABLE local_vacuum_table(name text);
-select reltoastrelid from pg_class where relname='local_vacuum_table'
-\gset
-
-SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
-\gset
-VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
-SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
-WHERE oid=:reltoastrelid::regclass;
-
--- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
-SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
-\gset
-VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
-SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
-WHERE oid=:reltoastrelid::regclass;
-
-DROP TABLE local_vacuum_table;
 SET citus.log_remote_commands TO OFF;

 create table dist(a int, b int);
@@ -777,4 +758,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col;
 set client_min_messages to error;
 drop extension postgres_fdw cascade;
 drop schema pg14 cascade;
+DROP ROLE role_1, r1;
 reset client_min_messages;
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index 06bdc39fe..70baf6726 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -195,6 +195,7 @@ SET client_min_messages TO ERROR;
 DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
+SELECT public.wait_for_resource_cleanup();
 \q
 \endif
@@ -391,3 +392,5 @@ DROP SCHEMA publication CASCADE;
 DROP SCHEMA "publication-1" CASCADE;
 DROP SCHEMA citus_schema_1 CASCADE;
 DROP SCHEMA publication2 CASCADE;
+
+SELECT public.wait_for_resource_cleanup();
diff --git a/src/test/regress/sql/remove_non_default_nodes.sql b/src/test/regress/sql/remove_non_default_nodes.sql
new file mode 100644
index 000000000..4175e87dc
--- /dev/null
+++ b/src/test/regress/sql/remove_non_default_nodes.sql
@@ -0,0 +1,8 @@
+-- The default nodes for the Citus test suite are the coordinator and 2 worker nodes,
+-- which we identify with master_port, worker_1_port and worker_2_port.
+-- When extra nodes are added in some tests, GetLocalNodeId() does not behave correctly,
+-- so we remove the non-default nodes here. This test expects the non-default nodes
+-- to not have any active placements.
+SELECT any_value(citus_remove_node('localhost', nodeport))
+FROM pg_dist_node
+WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
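
This new test replaces the hard-coded cleanup that the final hunk of this patch deletes from worker_split_binary_copy_test.sql, which had to name every extra port explicitly:

SELECT citus_remove_node('localhost', 8887);
SELECT citus_remove_node('localhost', 9995);
-- ...and so on, one statement per known extra port

Aggregating citus_remove_node under any_value instead yields a single-row result regardless of how many non-default nodes exist (an aggregate over zero rows still returns one row, with NULL), which presumably keeps the expected test output stable across schedules.
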
diff --git a/src/test/regress/sql/seclabel.sql b/src/test/regress/sql/seclabel.sql
new file mode 100644
index 000000000..e523fc1da
--- /dev/null
+++ b/src/test/regress/sql/seclabel.sql
@@ -0,0 +1,87 @@
+--
+-- SECLABEL
+--
+-- Test suite for SECURITY LABEL ON ROLE statements
+--
+
+-- first we remove one of the worker nodes to be able to test
+-- citus_add_node later
+SELECT citus_remove_node('localhost', :worker_2_port);
+
+-- create two roles, one with characters that need escaping
+CREATE ROLE user1;
+CREATE ROLE "user 2";
+
+-- check an invalid label for our current dummy hook citus_test_object_relabel
+SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label';
+
+-- if we disable metadata_sync, the command will not be propagated
+SET citus.enable_metadata_sync TO off;
+SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified';
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+
+RESET citus.enable_metadata_sync;
+
+-- check that we only support propagation for roles
+SET citus.shard_replication_factor to 1;
+-- distributed table
+CREATE TABLE a (a int);
+SELECT create_distributed_table('a', 'a');
+-- distributed view
+CREATE VIEW v_dist AS SELECT * FROM a;
+-- distributed function
+CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$
+    BEGIN RAISE NOTICE '%', $1; END; $$;
+
+SECURITY LABEL ON TABLE a IS 'citus_classified';
+SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
+SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
+
+\c - - - :worker_1_port
+SECURITY LABEL ON TABLE a IS 'citus_classified';
+SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
+SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
+
+\c - - - :master_port
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
+
+DROP TABLE a CASCADE;
+DROP FUNCTION notice;
+
+-- test that the SECURITY LABEL statement is actually propagated for roles
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+
+-- we have exactly one label provider loaded, so we can omit the provider from the command
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
+SECURITY LABEL ON ROLE user1 IS NULL;
+SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
+
+\c - - - :worker_1_port
+-- the command is not allowed from a worker node
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified';
+
+\c - - - :master_port
+RESET citus.log_remote_commands;
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+
+-- add a new node and check that the SECURITY LABEL statement is also propagated to it
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+
+-- cleanup
+RESET citus.log_remote_commands;
+DROP ROLE user1, "user 2";
diff --git a/src/test/regress/sql/worker_split_binary_copy_test.sql b/src/test/regress/sql/worker_split_binary_copy_test.sql
index 489ff9dc4..d6ca3c9df 100644
--- a/src/test/regress/sql/worker_split_binary_copy_test.sql
+++ b/src/test/regress/sql/worker_split_binary_copy_test.sql
@@ -4,14 +4,6 @@ SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 81060000;

--- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
-SELECT citus_remove_node('localhost', 8887);
-SELECT citus_remove_node('localhost', 9995);
-SELECT citus_remove_node('localhost', 9992);
-SELECT citus_remove_node('localhost', 9998);
-SELECT citus_remove_node('localhost', 9997);
-SELECT citus_remove_node('localhost', 8888);
-
 -- BEGIN: Create distributed table and insert data.
 CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
     l_orderkey bigint not null,