mirror of https://github.com/citusdata/citus.git
Merge branch 'main' into reassign_owned_prop
commit 557dd71133
@@ -68,7 +68,7 @@ USER citus

# build postgres versions separately for effective parallelism and caching of already built versions when changing only certain versions
FROM base AS pg14
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.9
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf

FROM base AS pg15
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.4
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf

FROM base AS pg16
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.0
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -210,7 +210,7 @@ COPY --chown=citus:citus .psqlrc .
RUN sudo chown --from=root:root citus:citus -R ~

# sets default pg version
-RUN pgenv switch 16.0
+RUN pgenv switch 16.1

# make connecting to the coordinator easy
ENV PGPORT=9700
@@ -32,7 +32,10 @@ python3 -m pip install -r tools/packaging_automation/requirements.txt
echo "Package type: ${package_type}"
echo "OS version: $(get_rpm_os_version)"

-# if os version is centos 7 or oracle linux 7, then remove urllib3 with pip uninstall and install urllib3<2.0.0 with pip install
+# For RHEL 7, we need to install urllib3<2 due to below execution error
+# ImportError: urllib3 v2.0 only supports OpenSSL 1.1.1+, currently the 'ssl'
+# module is compiled with 'OpenSSL 1.0.2k-fips 26 Jan 2017'.
+# See: https://github.com/urllib3/urllib3/issues/2168
if [[ ${package_type} == "rpm" && $(get_rpm_os_version) == 7* ]]; then
    python3 -m pip uninstall -y urllib3
    python3 -m pip install 'urllib3<2'
@@ -31,11 +31,11 @@ jobs:
      pgupgrade_image_name: "citus/pgupgradetester"
      style_checker_image_name: "citus/stylechecker"
      style_checker_tools_version: "0.8.18"
-     image_suffix: "-v9d71045"
-     pg14_version: '{ "major": "14", "full": "14.9" }'
-     pg15_version: '{ "major": "15", "full": "15.4" }'
-     pg16_version: '{ "major": "16", "full": "16.0" }'
-     upgrade_pg_versions: "14.9-15.4-16.0"
+     image_suffix: "-vbd8441d"
+     pg14_version: '{ "major": "14", "full": "14.10" }'
+     pg15_version: '{ "major": "15", "full": "15.5" }'
+     pg16_version: '{ "major": "16", "full": "16.1" }'
+     upgrade_pg_versions: "14.10-15.5-16.1"
    steps:
    # Since GHA jobs need at least one step, we use a noop step here.
    - name: Set up parameters
@@ -112,11 +112,6 @@ jobs:
        PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }}
      run: |
        echo "Postgres version: ${POSTGRES_VERSION}"

-       ## Install required packages to execute packaging tools for rpm based distros
-       yum install python3-pip python3-devel postgresql-devel -y
-       python3 -m pip install wheel
-
        ./.github/packaging/validate_build_output.sh "rpm"

  deb_build_tests:
@@ -192,9 +187,4 @@ jobs:
        PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }}
      run: |
        echo "Postgres version: ${POSTGRES_VERSION}"

-       apt-get update -y
-       ## Install required packages to execute packaging tools for deb based distros
-       apt-get install python3-dev python3-pip -y
-       apt-get purge -y python3-yaml
        ./.github/packaging/validate_build_output.sh "deb"
@@ -55,3 +55,6 @@ lib*.pc
# style related temporary outputs
*.uncrustify
.venv
+
+# added output when modifying check_gucs_are_alphabetically_sorted.sh
+guc.out
@@ -1,10 +1,10 @@
### citus v12.1.1 (November 9, 2023) ###

* Fixes leaking of memory and memory contexts in Citus foreign key cache
-  (#7219)
+  (#7236)

* Makes sure to disallow creating a replicated distributed table concurrently
-  (#7236)
+  (#7219)

### citus v12.1.0 (September 12, 2023) ###
@@ -5,6 +5,6 @@ set -euo pipefail
source ci/ci_helpers.sh

# extract citus gucs in the form of "citus.X"
-grep -o -E "(\.*\"citus.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
+grep -o -E "(\.*\"citus\.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
sort -c gucs.out
rm gucs.out
@@ -385,6 +385,15 @@ static DistributeObjectOps Any_Rename = {
    .address = NULL,
    .markDistributed = false,
};
+static DistributeObjectOps Any_SecLabel = {
+    .deparse = DeparseSecLabelStmt,
+    .qualify = NULL,
+    .preprocess = NULL,
+    .postprocess = PostprocessSecLabelStmt,
+    .operationType = DIST_OPS_ALTER,
+    .address = SecLabelStmtObjectAddress,
+    .markDistributed = false,
+};
static DistributeObjectOps Attribute_Rename = {
    .deparse = DeparseRenameAttributeStmt,
    .qualify = QualifyRenameAttributeStmt,

@@ -2036,6 +2045,11 @@ GetDistributeObjectOps(Node *node)
            return &Vacuum_Analyze;
        }

+        case T_SecLabelStmt:
+        {
+            return &Any_SecLabel;
+        }
+
        case T_RenameStmt:
        {
            RenameStmt *stmt = castNode(RenameStmt, node);
@@ -23,6 +23,7 @@
#include "catalog/pg_auth_members.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_db_role_setting.h"
+#include "catalog/pg_shseclabel.h"
#include "catalog/pg_type.h"
#include "catalog/objectaddress.h"
#include "commands/dbcommands.h"

@@ -65,6 +66,7 @@ static DefElem * makeDefElemBool(char *name, bool value);
static List * GenerateRoleOptionsList(HeapTuple tuple);
static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options);
static List * GenerateGrantRoleStmtsOfRole(Oid roleid);
+static List * GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename);
static void EnsureSequentialModeForRoleDDL(void);

static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple,

@@ -515,13 +517,14 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{
    HeapTuple roleTuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleOid));
    Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple));
+    char *rolename = pstrdup(NameStr(role->rolname));

    CreateRoleStmt *createRoleStmt = NULL;
    if (EnableCreateRolePropagation)
    {
        createRoleStmt = makeNode(CreateRoleStmt);
        createRoleStmt->stmt_type = ROLESTMT_ROLE;
-        createRoleStmt->role = pstrdup(NameStr(role->rolname));
+        createRoleStmt->role = rolename;
        createRoleStmt->options = GenerateRoleOptionsList(roleTuple);
    }

@@ -532,7 +535,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
        alterRoleStmt->role = makeNode(RoleSpec);
        alterRoleStmt->role->roletype = ROLESPEC_CSTRING;
        alterRoleStmt->role->location = -1;
-        alterRoleStmt->role->rolename = pstrdup(NameStr(role->rolname));
+        alterRoleStmt->role->rolename = rolename;
        alterRoleStmt->action = 1;
        alterRoleStmt->options = GenerateRoleOptionsList(roleTuple);
    }

@@ -544,7 +547,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
    {
        /* add a worker_create_or_alter_role command if any of them are set */
        char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand(
-            pstrdup(NameStr(role->rolname)),
+            rolename,
            createRoleStmt,
            alterRoleStmt);

@@ -566,6 +569,20 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
        {
            completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
        }

+        /*
+         * append SECURITY LABEL ON ROLE commands for this specific user
+         * When we propagate user creation, we also want to make sure that we propagate
+         * all the security labels it has been given. For this, we check pg_shseclabel
+         * for the ROLE entry corresponding to roleOid, and generate the relevant
+         * SecLabel stmts to be run in the new node.
+         */
+        List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename);
+        stmt = NULL;
+        foreach_ptr(stmt, secLabelOnRoleStmts)
+        {
+            completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
+        }
    }

    return completeRoleList;

@@ -895,6 +912,54 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
}


+/*
+ * GenerateSecLabelOnRoleStmts generates the SecLabelStmts for the role
+ * whose oid is roleid.
+ */
+static List *
+GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename)
+{
+    List *secLabelStmts = NIL;
+
+    /*
+     * Note that roles are shared database objects, therefore their
+     * security labels are stored in pg_shseclabel instead of pg_seclabel.
+     */
+    Relation pg_shseclabel = table_open(SharedSecLabelRelationId, AccessShareLock);
+    ScanKeyData skey[1];
+    ScanKeyInit(&skey[0], Anum_pg_shseclabel_objoid, BTEqualStrategyNumber, F_OIDEQ,
+                ObjectIdGetDatum(roleid));
+    SysScanDesc scan = systable_beginscan(pg_shseclabel, SharedSecLabelObjectIndexId,
+                                          true, NULL, 1, &skey[0]);
+
+    HeapTuple tuple = NULL;
+    while (HeapTupleIsValid(tuple = systable_getnext(scan)))
+    {
+        SecLabelStmt *secLabelStmt = makeNode(SecLabelStmt);
+        secLabelStmt->objtype = OBJECT_ROLE;
+        secLabelStmt->object = (Node *) makeString(pstrdup(rolename));
+
+        Datum datumArray[Natts_pg_shseclabel];
+        bool isNullArray[Natts_pg_shseclabel];
+
+        heap_deform_tuple(tuple, RelationGetDescr(pg_shseclabel), datumArray,
+                          isNullArray);
+
+        secLabelStmt->provider = TextDatumGetCString(
+            datumArray[Anum_pg_shseclabel_provider - 1]);
+        secLabelStmt->label = TextDatumGetCString(
+            datumArray[Anum_pg_shseclabel_label - 1]);
+
+        secLabelStmts = lappend(secLabelStmts, secLabelStmt);
+    }
+
+    systable_endscan(scan);
+    table_close(pg_shseclabel, AccessShareLock);
+
+    return secLabelStmts;
+}
+
+
/*
 * PreprocessCreateRoleStmt creates a worker_create_or_alter_role query for the
 * role that is being created. With that query we can create the role in the
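A quick way to inspect the catalog entries that GenerateSecLabelOnRoleStmts scans is to query pg_shseclabel directly. A minimal sketch, assuming a role named user1 that already carries a label (the role name is only illustrative):

    -- roles are shared objects, so their labels live in pg_shseclabel, not pg_seclabel
    SELECT provider, label
    FROM pg_shseclabel
    WHERE objoid = 'user1'::regrole;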
@@ -0,0 +1,125 @@
+/*-------------------------------------------------------------------------
+ *
+ * seclabel.c
+ *
+ * This file contains the logic of SECURITY LABEL statement propagation.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "distributed/commands.h"
+#include "distributed/commands/utility_hook.h"
+#include "distributed/coordinator_protocol.h"
+#include "distributed/deparser.h"
+#include "distributed/log_utils.h"
+#include "distributed/metadata_sync.h"
+#include "distributed/metadata/distobject.h"
+
+
+/*
+ * PostprocessSecLabelStmt prepares the commands that need to be run on all workers to assign
+ * security labels on distributed objects, currently supporting just Role objects.
+ * It also ensures that all object dependencies exist on all
+ * nodes for the object in the SecLabelStmt.
+ */
+List *
+PostprocessSecLabelStmt(Node *node, const char *queryString)
+{
+    if (!ShouldPropagate())
+    {
+        return NIL;
+    }
+
+    SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
+
+    List *objectAddresses = GetObjectAddressListFromParseTree(node, false, true);
+    if (!IsAnyObjectDistributed(objectAddresses))
+    {
+        return NIL;
+    }
+
+    if (secLabelStmt->objtype != OBJECT_ROLE)
+    {
+        /*
+         * If we are not in the coordinator, we don't want to interrupt the security
+         * label command with notices; the user expects that the command will not be
+         * propagated from the worker node.
+         */
+        if (EnableUnsupportedFeatureMessages && IsCoordinator())
+        {
+            ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands whose "
+                                    "object type is not role"),
+                             errhint("Connect to worker nodes directly to manually "
+                                     "run the same SECURITY LABEL command.")));
+        }
+        return NIL;
+    }
+
+    if (!EnableCreateRolePropagation)
+    {
+        return NIL;
+    }
+
+    EnsureCoordinator();
+    EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);
+
+    const char *sql = DeparseTreeNode((Node *) secLabelStmt);
+
+    List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
+                                   (void *) sql,
+                                   ENABLE_DDL_PROPAGATION);
+
+    return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
+}
+
+
+/*
+ * SecLabelStmtObjectAddress returns the object address of the object on
+ * which this statement operates (secLabelStmt->object). Note that it has no limitation
+ * on the object type being OBJECT_ROLE. This is intentionally implemented like this
+ * since it is fairly simple to implement and we might extend SECURITY LABEL propagation
+ * in the future to include more object types.
+ */
+List *
+SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
+{
+    SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
+
+    Relation rel = NULL;
+    ObjectAddress address = get_object_address(secLabelStmt->objtype,
+                                               secLabelStmt->object, &rel,
+                                               AccessShareLock, missing_ok);
+    if (rel != NULL)
+    {
+        relation_close(rel, AccessShareLock);
+    }
+
+    ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress));
+    *addressPtr = address;
+    return list_make1(addressPtr);
+}
+
+
+/*
+ * citus_test_object_relabel is a dummy function for check_object_relabel_type hook.
+ * It is meant to be used in tests combined with citus_test_register_label_provider
+ */
+void
+citus_test_object_relabel(const ObjectAddress *object, const char *seclabel)
+{
+    if (seclabel == NULL ||
+        strcmp(seclabel, "citus_unclassified") == 0 ||
+        strcmp(seclabel, "citus_classified") == 0 ||
+        strcmp(seclabel, "citus '!unclassified") == 0)
+    {
+        return;
+    }
+
+    ereport(ERROR,
+            (errcode(ERRCODE_INVALID_NAME),
+             errmsg("'%s' is not a valid security label for Citus tests.", seclabel)));
+}
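The seclabel regression test output later in this diff exercises this postprocess hook end to end; a minimal sketch of the observable behavior, assuming the dummy provider "citus '!tests_label_provider" is registered and a distributed role user1 exists:

    -- role labels are deparsed and shipped to all worker nodes
    SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
    -- non-role objects are labeled locally only; the coordinator emits a NOTICE
    SECURITY LABEL ON TABLE a IS 'citus_classified';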
@@ -0,0 +1,78 @@
+/*-------------------------------------------------------------------------
+ *
+ * deparse_seclabel_stmts.c
+ *    All routines to deparse SECURITY LABEL statements.
+ *
+ * Copyright (c), Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "distributed/deparser.h"
+#include "nodes/parsenodes.h"
+#include "utils/builtins.h"
+
+static void AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt);
+
+/*
+ * DeparseSecLabelStmt builds and returns a string representation of the
+ * SecLabelStmt for application on a remote server.
+ */
+char *
+DeparseSecLabelStmt(Node *node)
+{
+    SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
+    StringInfoData buf = { 0 };
+    initStringInfo(&buf);
+
+    AppendSecLabelStmt(&buf, secLabelStmt);
+
+    return buf.data;
+}
+
+
+/*
+ * AppendSecLabelStmt generates the string representation of the
+ * SecLabelStmt and appends it to the buffer.
+ */
+static void
+AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt)
+{
+    appendStringInfoString(buf, "SECURITY LABEL ");
+
+    if (stmt->provider != NULL)
+    {
+        appendStringInfo(buf, "FOR %s ", quote_identifier(stmt->provider));
+    }
+
+    appendStringInfoString(buf, "ON ");
+
+    switch (stmt->objtype)
+    {
+        case OBJECT_ROLE:
+        {
+            appendStringInfo(buf, "ROLE %s ", quote_identifier(strVal(stmt->object)));
+            break;
+        }
+
+        /* normally, we shouldn't reach this */
+        default:
+        {
+            ereport(ERROR, (errmsg("unsupported security label statement for"
+                                   " deparsing")));
+        }
+    }
+
+    appendStringInfoString(buf, "IS ");
+
+    if (stmt->label != NULL)
+    {
+        appendStringInfo(buf, "%s", quote_literal_cstr(stmt->label));
+    }
+    else
+    {
+        appendStringInfoString(buf, "NULL");
+    }
+}
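Because AppendSecLabelStmt applies identifier quoting to the provider and role name and literal quoting to the label, the deparsed command round-trips names that contain quotes; one input/output pair, taken from the NOTICE lines in the seclabel test output further below:

    -- statement executed on the coordinator
    SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
    -- deparsed string sent to the workers:
    --   SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'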
@@ -317,7 +317,7 @@ PG_FUNCTION_INFO_V1(citus_rebalance_start);
PG_FUNCTION_INFO_V1(citus_rebalance_stop);
PG_FUNCTION_INFO_V1(citus_rebalance_wait);

-bool RunningUnderIsolationTest = false;
+bool RunningUnderCitusTestSuite = false;
int MaxRebalancerLoggedIgnoredMoves = 5;
int RebalancerByDiskSizeBaseCost = 100 * 1024 * 1024;
bool PropagateSessionSettingsForLoopbackConnection = false;
@@ -1143,7 +1143,7 @@ ConflictWithIsolationTestingBeforeCopy(void)
    const bool sessionLock = false;
    const bool dontWait = false;

-    if (RunningUnderIsolationTest)
+    if (RunningUnderCitusTestSuite)
    {
        SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
                             SHARD_MOVE_ADVISORY_LOCK_SECOND_KEY,

@@ -1177,7 +1177,7 @@ ConflictWithIsolationTestingAfterCopy(void)
    const bool sessionLock = false;
    const bool dontWait = false;

-    if (RunningUnderIsolationTest)
+    if (RunningUnderCitusTestSuite)
    {
        SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
                             SHARD_MOVE_ADVISORY_LOCK_FIRST_KEY,
@@ -29,6 +29,7 @@
#include "citus_version.h"
#include "commands/explain.h"
#include "commands/extension.h"
+#include "commands/seclabel.h"
#include "common/string.h"
#include "executor/executor.h"
#include "distributed/backend_data.h"

@@ -574,6 +575,16 @@ _PG_init(void)
    INIT_COLUMNAR_SYMBOL(PGFunction, columnar_storage_info);
    INIT_COLUMNAR_SYMBOL(PGFunction, columnar_store_memory_stats);
    INIT_COLUMNAR_SYMBOL(PGFunction, test_columnar_storage_write_new_page);
+
+    /*
+     * This part is only for SECURITY LABEL tests
+     * mimicking what an actual security label provider would do
+     */
+    if (RunningUnderCitusTestSuite)
+    {
+        register_label_provider("citus '!tests_label_provider",
+                                citus_test_object_relabel);
+    }
}
@@ -2305,13 +2316,14 @@ RegisterCitusConfigVariables(void)
        WarnIfReplicationModelIsSet, NULL, NULL);

    DefineCustomBoolVariable(
-        "citus.running_under_isolation_test",
+        "citus.running_under_citus_test_suite",
        gettext_noop(
            "Only useful for testing purposes, when set to true, Citus does some "
-            "tricks to implement useful isolation tests with rebalancing. Should "
+            "tricks to implement useful isolation tests with rebalancing. It also "
+            "registers a dummy label provider for SECURITY LABEL tests. Should "
            "never be set to true on production systems "),
        gettext_noop("for details of the tricks implemented, refer to the source code"),
-        &RunningUnderIsolationTest,
+        &RunningUnderCitusTestSuite,
        false,
        PGC_SUSET,
        GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,
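The renamed GUC stays superuser-only and is hidden from SHOW ALL by GUC_NO_SHOW_ALL; since the label provider registration happens in _PG_init, it is normally set on the server command line by the test harness (see pg_regress_multi.pl further below) rather than per session. A minimal sketch of checking it from SQL, assuming a test cluster where it was enabled at startup:

    -- an explicitly named SHOW still works for a GUC_NO_SHOW_ALL setting
    SHOW citus.running_under_citus_test_suite;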
@@ -523,6 +523,11 @@ extern List * AlterSchemaOwnerStmtObjectAddress(Node *node, bool missing_ok,
extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok, bool
                                                 isPostprocess);

+/* seclabel.c - forward declarations */
+extern List * PostprocessSecLabelStmt(Node *node, const char *queryString);
+extern List * SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess);
+extern void citus_test_object_relabel(const ObjectAddress *object, const char *seclabel);
+
/* sequence.c - forward declarations */
extern List * PreprocessAlterSequenceStmt(Node *node, const char *queryString,
                                          ProcessUtilityContext processUtilityContext);
@@ -261,6 +261,9 @@ extern void QualifyRenameTextSearchDictionaryStmt(Node *node);
extern void QualifyTextSearchConfigurationCommentStmt(Node *node);
extern void QualifyTextSearchDictionaryCommentStmt(Node *node);

+/* forward declarations for deparse_seclabel_stmts.c */
+extern char * DeparseSecLabelStmt(Node *node);
+
/* forward declarations for deparse_sequence_stmts.c */
extern char * DeparseDropSequenceStmt(Node *node);
extern char * DeparseRenameSequenceStmt(Node *node);
@@ -189,7 +189,7 @@ typedef struct RebalancePlanFunctions
extern char *VariablesToBePassedToNewConnections;
extern int MaxRebalancerLoggedIgnoredMoves;
extern int RebalancerByDiskSizeBaseCost;
-extern bool RunningUnderIsolationTest;
+extern bool RunningUnderCitusTestSuite;
extern bool PropagateSessionSettingsForLoopbackConnection;
extern int MaxBackgroundTaskExecutorsPerNode;
@@ -135,20 +135,10 @@ DEPS = {
    ),
    "alter_role_propagation": TestDeps("minimal_schedule"),
    "background_rebalance": TestDeps(
-        None,
-        [
-            "multi_test_helpers",
-            "multi_cluster_management",
-        ],
-        worker_count=3,
+        None, ["multi_test_helpers", "multi_cluster_management"], worker_count=3
    ),
    "background_rebalance_parallel": TestDeps(
-        None,
-        [
-            "multi_test_helpers",
-            "multi_cluster_management",
-        ],
-        worker_count=6,
+        None, ["multi_test_helpers", "multi_cluster_management"], worker_count=6
    ),
    "function_propagation": TestDeps("minimal_schedule"),
    "citus_shards": TestDeps("minimal_schedule"),
@@ -165,30 +155,17 @@ DEPS = {
    ),
    "schema_based_sharding": TestDeps("minimal_schedule"),
    "multi_sequence_default": TestDeps(
-        None,
-        [
-            "multi_test_helpers",
-            "multi_cluster_management",
-            "multi_table_ddl",
-        ],
+        None, ["multi_test_helpers", "multi_cluster_management", "multi_table_ddl"]
    ),
    "grant_on_schema_propagation": TestDeps("minimal_schedule"),
    "propagate_extension_commands": TestDeps("minimal_schedule"),
    "multi_size_queries": TestDeps("base_schedule", ["multi_copy"]),
    "multi_mx_node_metadata": TestDeps(
-        None,
-        [
-            "multi_extension",
-            "multi_test_helpers",
-            "multi_test_helpers_superuser",
-        ],
+        None, ["multi_extension", "multi_test_helpers", "multi_test_helpers_superuser"]
    ),
    "multi_mx_function_table_reference": TestDeps(
        None,
-        [
-            "multi_cluster_management",
-            "remove_coordinator_from_metadata",
-        ],
+        ["multi_cluster_management", "remove_coordinator_from_metadata"],
        # because it queries node group id and it changes as we add / remove nodes
        repeatable=False,
    ),
@@ -201,16 +178,27 @@ DEPS = {
        ],
    ),
    "metadata_sync_helpers": TestDeps(
-        None,
-        [
-            "multi_mx_node_metadata",
-            "multi_cluster_management",
-        ],
+        None, ["multi_mx_node_metadata", "multi_cluster_management"]
    ),
-    "multi_utilities": TestDeps(
+    "multi_utilities": TestDeps("minimal_schedule", ["multi_data_types"]),
+    "multi_tenant_isolation_nonblocking": TestDeps(
+        "minimal_schedule", ["multi_data_types", "remove_coordinator_from_metadata"]
+    ),
+    "remove_non_default_nodes": TestDeps(
+        None, ["multi_mx_node_metadata", "multi_cluster_management"], repeatable=False
+    ),
+    "citus_split_shard_columnar_partitioned": TestDeps(
+        "minimal_schedule", ["remove_coordinator_from_metadata"]
+    ),
+    "add_coordinator": TestDeps(
+        "minimal_schedule", ["remove_coordinator_from_metadata"], repeatable=False
+    ),
    "multi_multiuser_auth": TestDeps(
        "minimal_schedule",
-        ["multi_data_types"],
+        ["multi_create_table", "multi_create_users", "multi_multiuser_load_data"],
        repeatable=False,
    ),
    "multi_prepare_plsql": TestDeps("base_schedule"),
}
@@ -303,9 +291,13 @@ def run_schedule_with_multiregress(test_name, schedule, dependencies, args):
    worker_count = needed_worker_count(test_name, dependencies)

    # find suitable make recipe
-    if dependencies.schedule == "base_isolation_schedule" or "isolation" in test_name:
+    if dependencies.schedule == "base_isolation_schedule" or test_name.startswith(
+        "isolation"
+    ):
        make_recipe = "check-isolation-custom-schedule"
-    elif dependencies.schedule == "failure_base_schedule" or "failure" in test_name:
+    elif dependencies.schedule == "failure_base_schedule" or test_name.startswith(
+        "failure"
+    ):
        make_recipe = "check-failure-custom-schedule"
    else:
        make_recipe = "check-custom-schedule"
@@ -418,10 +410,7 @@ def test_dependencies(test_name, test_schedule, schedule_line, args):
        if "upgrade_columnar_before" not in before_tests:
            before_tests.append("upgrade_columnar_before")

-        return TestDeps(
-            default_base_schedule(test_schedule, args),
-            before_tests,
-        )
+        return TestDeps(default_base_schedule(test_schedule, args), before_tests)

    # before_ tests leave stuff around on purpose for the after tests. So they
    # are not repeatable by definition.
@@ -2,13 +2,6 @@
-- ADD_COORDINATOR
--
-- node trying to add itself without specifying groupid => 0 should error out
--- first remove the coordinator to for testing master_add_node for coordinator
-SELECT master_remove_node('localhost', :master_port);
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
SELECT master_add_node('localhost', :master_port);
ERROR: Node cannot add itself as a worker.
HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636);
@@ -135,4 +135,10 @@ NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table citus_split_shard_by_split_points_negative.range_paritioned_table_to_split
drop cascades to table citus_split_shard_by_split_points_negative.table_to_split
drop cascades to table citus_split_shard_by_split_points_negative.table_to_split_replication_factor_2
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
--END : Cleanup
@@ -12,19 +12,9 @@
\set bob_worker_1_pw triplex-royalty-warranty-stand-cheek
\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
\set bob_fallback_pw :bob_worker_1_pw
-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
- worker_1_id
----------------------------------------------------------------------
-          17
-(1 row)
-
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
+\gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
- worker_2_id
----------------------------------------------------------------------
-          35
-(1 row)
-
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
+\gset
-- alice is a superuser so she can update own password
CREATE USER alice PASSWORD :'alice_master_pw' SUPERUSER;
@@ -6,19 +6,9 @@
-- Test of ability to override host/port for a node
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 20000000;
-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
- worker_1_id
----------------------------------------------------------------------
-          17
-(1 row)
-
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
+\gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
- worker_2_id
----------------------------------------------------------------------
-          35
-(1 row)
-
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
+\gset
CREATE TABLE lotsa_connections (id integer, name text);
SELECT create_distributed_table('lotsa_connections', 'id');
@@ -1317,11 +1317,11 @@ SELECT type_ddl_plpgsql();
(1 row)

-- find all renamed types to verify the schema name didn't leak, nor a crash happened
-SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup';
+SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1;
 nspname | typname
---------------------------------------------------------------------
- public | prepare_ddl_type_backup
 otherschema | prepare_ddl_type_backup
+ public | prepare_ddl_type_backup
(2 rows)

DROP TYPE prepare_ddl_type_backup;
@@ -1332,6 +1332,7 @@ DROP FUNCTION ddl_in_plpgsql();
DROP FUNCTION copy_in_plpgsql();
DROP TABLE prepare_ddl;
+DROP TABLE local_ddl;
DROP TABLE plpgsql_table;
DROP SCHEMA otherschema;
-- clean-up functions
DROP FUNCTION plpgsql_test_1();
@@ -1275,8 +1275,9 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0;
TRUNCATE TABLE pg_catalog.pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
-SELECT citus_set_coordinator_host('localhost');
- citus_set_coordinator_host
+-- make sure we don't have any replication objects leftover on the nodes
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
---------------------------------------------------------------------

(1 row)
@@ -526,3 +526,33 @@ BEGIN
    RETURN result;
END;
$func$ LANGUAGE plpgsql;
+-- Returns pg_seclabels entries from all nodes in the cluster for which
+-- the object name is the input.
+CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text,
+                                                                 master_port INTEGER DEFAULT 57636,
+                                                                 worker_1_port INTEGER DEFAULT 57637,
+                                                                 worker_2_port INTEGER DEFAULT 57638)
+RETURNS TABLE (
+    node_type text,
+    result text
+)
+AS $func$
+DECLARE
+    pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' ||
+                             'SELECT provider, objtype, label FROM pg_seclabels ' ||
+                             'WHERE objname = ''' || object_name || ''') q';
+BEGIN
+    RETURN QUERY
+    SELECT
+        CASE
+            WHEN nodeport = master_port THEN 'coordinator'
+            WHEN nodeport = worker_1_port THEN 'worker_1'
+            WHEN nodeport = worker_2_port THEN 'worker_2'
+            ELSE 'unexpected_node'
+        END AS node_type,
+        a.result
+    FROM run_command_on_all_nodes(pg_seclabels_cmd) a
+    JOIN pg_dist_node USING (nodeid)
+    ORDER BY node_type;
+END;
+$func$ LANGUAGE plpgsql;
@@ -424,6 +424,34 @@ FROM pg_total_relation_size('local_vacuum_table') s ;
 35000000
(1 row)

+-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
+select reltoastrelid from pg_class where relname='local_vacuum_table'
+\gset
+SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
+\gset
+insert into local_vacuum_table select i from generate_series(1,10000) i;
+VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
+SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
+WHERE oid=:reltoastrelid::regclass;
+ frozen_performed
+---------------------------------------------------------------------
+ t
+(1 row)
+
+delete from local_vacuum_table;
+-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
+SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
+\gset
+insert into local_vacuum_table select i from generate_series(1,10000) i;
+VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
+SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
+WHERE oid=:reltoastrelid::regclass;
+ frozen_not_performed
+---------------------------------------------------------------------
+ t
+(1 row)
+
+delete from local_vacuum_table;
-- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true)
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
@@ -71,32 +71,6 @@ NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
--- vacuum (process_toast true) should be vacuuming toast tables (default is true)
-CREATE TABLE local_vacuum_table(name text);
-select reltoastrelid from pg_class where relname='local_vacuum_table'
-\gset
-SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
-\gset
-VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
-SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
-WHERE oid=:reltoastrelid::regclass;
- frozen_performed
----------------------------------------------------------------------
- t
-(1 row)
-
--- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
-SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
-\gset
-VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
-SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
-WHERE oid=:reltoastrelid::regclass;
- frozen_not_performed
----------------------------------------------------------------------
- t
-(1 row)
-
-DROP TABLE local_vacuum_table;
SET citus.log_remote_commands TO OFF;
create table dist(a int, b int);
select create_distributed_table('dist','a');
@@ -1492,4 +1466,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col;
set client_min_messages to error;
drop extension postgres_fdw cascade;
drop schema pg14 cascade;
+DROP ROLE role_1, r1;
reset client_min_messages;
@@ -267,6 +267,7 @@ SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
+SELECT public.wait_for_resource_cleanup();
\q
\endif
-- recreate a mixed publication
@@ -544,3 +545,9 @@ DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
DROP SCHEMA publication2 CASCADE;
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
@@ -267,4 +267,10 @@ SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
+SELECT public.wait_for_resource_cleanup();
+ wait_for_resource_cleanup
+---------------------------------------------------------------------
+
+(1 row)
+
\q
@@ -0,0 +1,13 @@
+-- The default nodes for the citus test suite are the coordinator and 2 worker nodes,
+-- which we identify with master_port, worker_1_port, worker_2_port.
+-- When needed in some tests, GetLocalNodeId() does not behave correctly,
+-- so we remove the non-default nodes. This test expects the non-default nodes
+-- to not have any active placements.
+SELECT any_value(citus_remove_node('localhost', nodeport))
+FROM pg_dist_node
+WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
+ any_value
+---------------------------------------------------------------------
+
+(1 row)
+
@@ -0,0 +1,173 @@
+--
+-- SECLABEL
+--
+-- Test suite for SECURITY LABEL ON ROLE statements
+--
+-- first we remove one of the worker nodes to be able to test
+-- citus_add_node later
+SELECT citus_remove_node('localhost', :worker_2_port);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+-- create two roles, one with characters that need escaping
+CREATE ROLE user1;
+CREATE ROLE "user 2";
+-- check an invalid label for our current dummy hook citus_test_object_relabel
+SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label';
+ERROR: 'invalid_label' is not a valid security label for Citus tests.
+-- if we disable metadata_sync, the command will not be propagated
+SET citus.enable_metadata_sync TO off;
+SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified';
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1 |
+(2 rows)
+
+RESET citus.enable_metadata_sync;
+-- check that we only support propagating for roles
+SET citus.shard_replication_factor to 1;
+-- distributed table
+CREATE TABLE a (a int);
+SELECT create_distributed_table('a', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- distributed view
+CREATE VIEW v_dist AS SELECT * FROM a;
+-- distributed function
+CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$
+    BEGIN RAISE NOTICE '%', $1; END; $$;
+SECURITY LABEL ON TABLE a IS 'citus_classified';
+NOTICE: not propagating SECURITY LABEL commands whose object type is not role
+HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command.
+SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
+NOTICE: not propagating SECURITY LABEL commands whose object type is not role
+HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command.
+SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
+NOTICE: not propagating SECURITY LABEL commands whose object type is not role
+HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command.
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"}
+ worker_1 |
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"}
+ worker_1 |
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"}
+ worker_1 |
+(2 rows)
+
+\c - - - :worker_1_port
+SECURITY LABEL ON TABLE a IS 'citus_classified';
+SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
+SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
+\c - - - :master_port
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"}
+ worker_1 | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"}
+ worker_1 | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"}
+ worker_1 | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+DROP TABLE a CASCADE;
+NOTICE: drop cascades to view v_dist
+DROP FUNCTION notice;
+-- test that SECURITY LABEL statement is actually propagated for ROLES
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+-- we have exactly one provider loaded, so we may not include the provider in the command
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
+NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+SECURITY LABEL ON ROLE user1 IS NULL;
+NOTICE: issuing SECURITY LABEL ON ROLE user1 IS NULL
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
+NOTICE: issuing SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
+NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+\c - - - :worker_1_port
+-- command not allowed from worker node
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified';
+ERROR: operation is not allowed on this node
+HINT: Connect to the coordinator and run it again.
+\c - - - :master_port
+RESET citus.log_remote_commands;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+(2 rows)
+
+-- add a new node and check that it also propagates the SECURITY LABEL statement to the new node
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
+NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SELECT worker_create_or_alter_role('user 2', 'CREATE ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+(3 rows)
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+ worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
+(3 rows)
+
+-- cleanup
+RESET citus.log_remote_commands;
+DROP ROLE user1, "user 2";
@@ -3,43 +3,6 @@ SET search_path TO worker_split_binary_copy_test;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81060000;
--- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
-SELECT citus_remove_node('localhost', 8887);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9995);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9992);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9998);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 9997);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_remove_node('localhost', 8888);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
    l_orderkey bigint not null,
@@ -32,6 +32,7 @@ test: propagate_extension_commands
test: escape_extension_name
test: ref_citus_local_fkeys
test: alter_database_owner
+test: seclabel
test: distributed_triggers
test: create_single_shard_table
# don't parallelize single_shard_table_udfs to make sure colocation ids are sequential
@@ -296,6 +297,7 @@ test: multi_foreign_key_relation_graph
# Replicating reference tables to coordinator. Add coordinator to pg_dist_node
# and rerun some of the tests.
# --------
+test: remove_coordinator_from_metadata
test: add_coordinator
test: replicate_reference_tables_to_coordinator
test: citus_local_tables
@@ -83,7 +83,8 @@ test: forcedelegation_functions
# this should be run alone as it gets too many clients
test: join_pushdown
test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message
-test: multi_agg_distinct multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction
+test: multi_agg_distinct
+test: multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction
test: multi_reference_table multi_select_for_update relation_access_tracking pg13_with_ties
test: custom_aggregate_support aggregate_support tdigest_aggregate_support
test: multi_average_expression multi_working_columns multi_having_pushdown having_subquery
@@ -90,7 +90,6 @@ my $workerCount = 2;
my $serversAreShutdown = "TRUE";
my $usingWindows = 0;
my $mitmPid = 0;
-my $workerCount = 2;

if ($Config{osname} eq "MSWin32")
{
@@ -510,6 +509,12 @@ if($vanillatest)
    # we disable some restrictions for local objects like local views to not break postgres vanilla test behaviour.
    push(@pgOptions, "citus.enforce_object_restrictions_for_local_objects=false");
}
+else
+{
+    # We currently need this config for isolation tests and security label tests
+    # this option loads a security label provider, which we don't want in vanilla tests
+    push(@pgOptions, "citus.running_under_citus_test_suite=true");
+}

if ($useMitmproxy)
{
@@ -560,7 +565,6 @@ if($isolationtester)
    push(@pgOptions, "citus.metadata_sync_interval=1000");
    push(@pgOptions, "citus.metadata_sync_retry_interval=100");
    push(@pgOptions, "client_min_messages='warning'"); # pg12 introduced notice showing during isolation tests
-    push(@pgOptions, "citus.running_under_isolation_test=true");

    # Disable all features of the maintenance daemon. Otherwise queries might
    # randomly show temporarily as "waiting..." because they are waiting for the
@@ -10,6 +10,7 @@ test: foreign_key_to_reference_table
# Split tests go here.
test: split_shard
test: worker_split_copy_test
+test: remove_non_default_nodes
test: worker_split_binary_copy_test
test: worker_split_text_copy_test
test: citus_split_shard_by_split_points_negative
@@ -3,8 +3,6 @@
--

-- node trying to add itself without specifying groupid => 0 should error out
--- first remove the coordinator to for testing master_add_node for coordinator
-SELECT master_remove_node('localhost', :master_port);
SELECT master_add_node('localhost', :master_port);

SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset
@@ -113,4 +113,5 @@ SELECT citus_split_shard_by_split_points(
--BEGIN : Cleanup
\c - postgres - :master_port
DROP SCHEMA "citus_split_shard_by_split_points_negative" CASCADE;
+SELECT public.wait_for_resource_cleanup();
--END : Cleanup
@@ -16,9 +16,9 @@
\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
\set bob_fallback_pw :bob_worker_1_pw

-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset

-- alice is a superuser so she can update own password
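The trailing semicolons are dropped above because psql's \gset itself sends the buffered query, storing each output column into a psql variable instead of printing a result set, which is why the node-id result blocks disappear from the expected output. A minimal sketch of the pattern, with a hypothetical variable name:

    -- runs the query and sets :my_nodeid without printing rows
    SELECT nodeid AS my_nodeid FROM pg_dist_node LIMIT 1
    \gset
    SELECT :my_nodeid AS captured_nodeid;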
@@ -7,9 +7,9 @@
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 20000000;

-SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
+SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
-SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
+SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset

CREATE TABLE lotsa_connections (id integer, name text);
@@ -624,7 +624,7 @@ CREATE TYPE prepare_ddl_type AS (x int, y int);
SELECT type_ddl_plpgsql();

-- find all renamed types to verify the schema name didn't leak, nor a crash happened
-SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup';
+SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1;

DROP TYPE prepare_ddl_type_backup;
RESET search_path;
@ -635,6 +635,7 @@ DROP FUNCTION ddl_in_plpgsql();
|
|||
DROP FUNCTION copy_in_plpgsql();
|
||||
DROP TABLE prepare_ddl;
|
||||
DROP TABLE local_ddl;
|
||||
DROP TABLE plpgsql_table;
|
||||
DROP SCHEMA otherschema;
|
||||
|
||||
-- clean-up functions
|
||||
|
|
|
@@ -608,5 +608,5 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;

ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;

-SELECT citus_set_coordinator_host('localhost');
-
+-- make sure we don't have any replication objects leftover on the nodes
+SELECT public.wait_for_resource_cleanup();
@@ -550,3 +550,34 @@ BEGIN
    RETURN result;
END;
$func$ LANGUAGE plpgsql;
+
+-- Returns pg_seclabels entries from all nodes in the cluster for which
+-- the object name is the input.
+CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text,
+                                                                 master_port INTEGER DEFAULT 57636,
+                                                                 worker_1_port INTEGER DEFAULT 57637,
+                                                                 worker_2_port INTEGER DEFAULT 57638)
+RETURNS TABLE (
+    node_type text,
+    result text
+)
+AS $func$
+DECLARE
+    pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' ||
+                             'SELECT provider, objtype, label FROM pg_seclabels ' ||
+                             'WHERE objname = ''' || object_name || ''') q';
+BEGIN
+    RETURN QUERY
+        SELECT
+            CASE
+                WHEN nodeport = master_port THEN 'coordinator'
+                WHEN nodeport = worker_1_port THEN 'worker_1'
+                WHEN nodeport = worker_2_port THEN 'worker_2'
+                ELSE 'unexpected_node'
+            END AS node_type,
+            a.result
+        FROM run_command_on_all_nodes(pg_seclabels_cmd) a
+        JOIN pg_dist_node USING (nodeid)
+        ORDER BY node_type;
+END;
+$func$ LANGUAGE plpgsql;
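
For context on the helper just added: run_command_on_all_nodes() returns one row per node with nodeid, success, and result columns, and joining back to pg_dist_node on nodeid exposes each node's port so it can be mapped to a stable label. A minimal usage sketch, assuming the dummy label provider the test suite registers and an existing role named user1:

-- sketch: apply a label, then read it back from every node
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
SELECT node_type, result
FROM get_citus_tests_label_provider_labels('user1')
ORDER BY node_type;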
@@ -272,6 +272,27 @@ VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table;
SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size
FROM pg_total_relation_size('local_vacuum_table') s ;

+-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
+select reltoastrelid from pg_class where relname='local_vacuum_table'
+\gset
+
+SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
+\gset
+insert into local_vacuum_table select i from generate_series(1,10000) i;
+VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
+SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
+WHERE oid=:reltoastrelid::regclass;
+delete from local_vacuum_table;
+
+-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
+SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
+\gset
+insert into local_vacuum_table select i from generate_series(1,10000) i;
+VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
+SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
+WHERE oid=:reltoastrelid::regclass;
+delete from local_vacuum_table;
+
-- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true)
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
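
The PROCESS_TOAST checks added above work by snapshotting the toast table's relfrozenxid, forcing a VACUUM (FREEZE, ...), and testing whether relfrozenxid advanced; FREEZE makes vacuum freeze everything it processes, so the toast relation's relfrozenxid moves only if the toast table was actually vacuumed. A self-contained sketch of the same check (toast_demo is a made-up table name):

-- minimal sketch, assuming a fresh session
CREATE TABLE toast_demo(payload text);
SELECT reltoastrelid AS toast_oid FROM pg_class WHERE relname = 'toast_demo'
\gset
SELECT relfrozenxid AS before_xid FROM pg_class WHERE oid = :toast_oid
\gset
INSERT INTO toast_demo SELECT repeat('x', 5000) FROM generate_series(1, 100);
VACUUM (FREEZE, PROCESS_TOAST true) toast_demo;
-- true when the toast table was frozen, i.e. its relfrozenxid advanced
SELECT relfrozenxid::text::integer > :before_xid AS toast_frozen
FROM pg_class WHERE oid = :toast_oid;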
@@ -22,25 +22,6 @@ VACUUM (INDEX_CLEANUP "AUTOX") t1;
VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1;
VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1;

--- vacuum (process_toast true) should be vacuuming toast tables (default is true)
-CREATE TABLE local_vacuum_table(name text);
-select reltoastrelid from pg_class where relname='local_vacuum_table'
-\gset
-
-SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
-\gset
-VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
-SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
-WHERE oid=:reltoastrelid::regclass;
-
--- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
-SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
-\gset
-VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
-SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
-WHERE oid=:reltoastrelid::regclass;
-
-DROP TABLE local_vacuum_table;
SET citus.log_remote_commands TO OFF;

create table dist(a int, b int);
@@ -777,4 +758,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col;
set client_min_messages to error;
drop extension postgres_fdw cascade;
drop schema pg14 cascade;
+DROP ROLE role_1, r1;
reset client_min_messages;
@@ -195,6 +195,7 @@ SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
+SELECT public.wait_for_resource_cleanup();
\q
\endif

@@ -391,3 +392,5 @@ DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
DROP SCHEMA publication2 CASCADE;
+
+SELECT public.wait_for_resource_cleanup();
@@ -0,0 +1,8 @@
+-- The default nodes for the citus test suite are the coordinator and 2 worker nodes,
+-- which we identify with master_port, worker_1_port, worker_2_port.
+-- When extra nodes are present, GetLocalNodeId() does not behave correctly in some tests,
+-- so we remove the non-default nodes. This test expects the non-default nodes
+-- to not have any active placements.
+SELECT any_value(citus_remove_node('localhost', nodeport))
+FROM pg_dist_node
+WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
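
The any_value() aggregate here simply collapses the citus_remove_node() results into a single row, so the test output stays stable no matter how many extra nodes were registered. A longer-hand equivalent, sketched with the suite's default ports hard-coded (57636/57637/57638, matching the helper defaults above) because psql variables are not interpolated inside dollar-quoted strings:

-- hedged sketch: remove each non-default node explicitly
DO $$
DECLARE
    extra_port int;
BEGIN
    FOR extra_port IN
        SELECT nodeport FROM pg_dist_node
        WHERE nodeport NOT IN (57636, 57637, 57638)
    LOOP
        PERFORM citus_remove_node('localhost', extra_port);
    END LOOP;
END;
$$;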
@@ -0,0 +1,87 @@
+--
+-- SECLABEL
+--
+-- Test suite for SECURITY LABEL ON ROLE statements
+--
+
+-- first we remove one of the worker nodes to be able to test
+-- citus_add_node later
+SELECT citus_remove_node('localhost', :worker_2_port);
+
+-- create two roles, one with characters that need escaping
+CREATE ROLE user1;
+CREATE ROLE "user 2";
+
+-- check an invalid label for our current dummy hook citus_test_object_relabel
+SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label';
+
+-- if we disable metadata_sync, the command will not be propagated
+SET citus.enable_metadata_sync TO off;
+SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified';
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+
+RESET citus.enable_metadata_sync;
+
+-- check that we only support propagating for roles
+SET citus.shard_replication_factor to 1;
+-- distributed table
+CREATE TABLE a (a int);
+SELECT create_distributed_table('a', 'a');
+-- distributed view
+CREATE VIEW v_dist AS SELECT * FROM a;
+-- distributed function
+CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$
+    BEGIN RAISE NOTICE '%', $1; END; $$;
+
+SECURITY LABEL ON TABLE a IS 'citus_classified';
+SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
+SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
+
+\c - - - :worker_1_port
+SECURITY LABEL ON TABLE a IS 'citus_classified';
+SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
+SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
+
+\c - - - :master_port
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
+
+DROP TABLE a CASCADE;
+DROP FUNCTION notice;
+
+-- test that SECURITY LABEL statement is actually propagated for ROLES
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+
+-- we have exactly one provider loaded, so we may not include the provider in the command
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
+SECURITY LABEL ON ROLE user1 IS NULL;
+SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
+
+\c - - - :worker_1_port
+-- command not allowed from worker node
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified';
+
+\c - - - :master_port
+RESET citus.log_remote_commands;
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+
+-- add a new node and check that it also propagates the SECURITY LABEL statement to the new node
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
+
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
+
+-- cleanup
+RESET citus.log_remote_commands;
+DROP ROLE user1, "user 2";
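
Both the provider name and the second role in this test are chosen to exercise escaping: "citus '!tests_label_provider" contains a single quote, and the label 'citus ''!unclassified' embeds one via SQL quote doubling, so a deparsing bug in the propagated command would surface in the grepped remote-command log. A quick way to eyeball the stored labels on a single node is pg_seclabels; note that role names needing quoting appear quoted in objname, which is why the helper above is called with '"user 2"':

-- sketch: inspect labels directly on one node
SELECT provider, objtype, objname, label
FROM pg_seclabels
WHERE objname IN ('user1', '"user 2"');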
@@ -4,14 +4,6 @@ SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81060000;

--- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
-SELECT citus_remove_node('localhost', 8887);
-SELECT citus_remove_node('localhost', 9995);
-SELECT citus_remove_node('localhost', 9992);
-SELECT citus_remove_node('localhost', 9998);
-SELECT citus_remove_node('localhost', 9997);
-SELECT citus_remove_node('localhost', 8888);
-
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
    l_orderkey bigint not null,