Merge branch 'create_alter_database' into alter_database_additional_options

pull/7253/head
Gürkan İndibay 2023-11-20 11:16:21 +03:00 committed by GitHub
commit 7794aab38c
64 changed files with 2725 additions and 1168 deletions

View File

@ -68,7 +68,7 @@ USER citus
# build postgres versions separately for effective parallelism and caching of already built versions when changing only certain versions
FROM base AS pg14
RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.9
RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg15
RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.4
RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg16
RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.0
RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@ -210,7 +210,7 @@ COPY --chown=citus:citus .psqlrc .
RUN sudo chown --from=root:root citus:citus -R ~
# sets default pg version
RUN pgenv switch 16.0
RUN pgenv switch 16.1
# make connecting to the coordinator easy
ENV PGPORT=9700

View File

@ -32,7 +32,10 @@ python3 -m pip install -r tools/packaging_automation/requirements.txt
echo "Package type: ${package_type}"
echo "OS version: $(get_rpm_os_version)"
# if os version is centos 7 or oracle linux 7, then remove urllib3 with pip uninstall and install urllib3<2.0.0 with pip install
# For RHEL 7, we need to install urllib3<2 due to the execution error below:
# ImportError: urllib3 v2.0 only supports OpenSSL 1.1.1+, currently the 'ssl'
# module is compiled with 'OpenSSL 1.0.2k-fips 26 Jan 2017'.
# See: https://github.com/urllib3/urllib3/issues/2168
if [[ ${package_type} == "rpm" && $(get_rpm_os_version) == 7* ]]; then
python3 -m pip uninstall -y urllib3
python3 -m pip install 'urllib3<2'

View File

@ -31,11 +31,11 @@ jobs:
pgupgrade_image_name: "citus/pgupgradetester"
style_checker_image_name: "citus/stylechecker"
style_checker_tools_version: "0.8.18"
image_suffix: "-v9d71045"
pg14_version: '{ "major": "14", "full": "14.9" }'
pg15_version: '{ "major": "15", "full": "15.4" }'
pg16_version: '{ "major": "16", "full": "16.0" }'
upgrade_pg_versions: "14.9-15.4-16.0"
image_suffix: "-vbd8441d"
pg14_version: '{ "major": "14", "full": "14.10" }'
pg15_version: '{ "major": "15", "full": "15.5" }'
pg16_version: '{ "major": "16", "full": "16.1" }'
upgrade_pg_versions: "14.10-15.5-16.1"
steps:
# Since GHA jobs need at least one step, we use a noop step here.
- name: Set up parameters

View File

@ -112,11 +112,6 @@ jobs:
PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }}
run: |
echo "Postgres version: ${POSTGRES_VERSION}"
## Install required packages to execute packaging tools for rpm based distros
yum install python3-pip python3-devel postgresql-devel -y
python3 -m pip install wheel
./.github/packaging/validate_build_output.sh "rpm"
deb_build_tests:
@ -192,9 +187,4 @@ jobs:
PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }}
run: |
echo "Postgres version: ${POSTGRES_VERSION}"
apt-get update -y
## Install required packages to execute packaging tools for deb based distros
apt-get install python3-dev python3-pip -y
apt-get purge -y python3-yaml
./.github/packaging/validate_build_output.sh "deb"

.gitignore
View File

@ -55,3 +55,6 @@ lib*.pc
# style related temporary outputs
*.uncrustify
.venv
# added output when modifying check_gucs_are_alphabetically_sorted.sh
guc.out

View File

@ -1,10 +1,10 @@
### citus v12.1.1 (November 9, 2023) ###
* Fixes leaking of memory and memory contexts in Citus foreign key cache
(#7219)
(#7236)
* Makes sure to disallow creating a replicated distributed table concurrently
(#7236)
(#7219)
### citus v12.1.0 (September 12, 2023) ###

View File

@ -5,6 +5,6 @@ set -euo pipefail
source ci/ci_helpers.sh
# extract citus gucs in the form of "citus.X"
grep -o -E "(\.*\"citus.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
grep -o -E "(\.*\"citus\.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out
sort -c gucs.out
rm gucs.out

View File

@ -14,10 +14,8 @@
#include "postgres.h"
#include "catalog/objectaddress.h"
#include "catalog/pg_database.h"
#include "catalog/pg_ts_config.h"
#include "catalog/pg_ts_dict.h"
#include "commands/dbcommands.h"
#include "nodes/parsenodes.h"
#include "tcop/utility.h"

View File

@ -21,6 +21,7 @@
#include "catalog/pg_database_d.h"
#include "catalog/pg_tablespace.h"
#include "commands/dbcommands.h"
#include "commands/defrem.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
@ -48,18 +49,33 @@
*/
typedef struct DatabaseCollationInfo
{
char *collation;
char *ctype;
char *datcollate;
char *datctype;
#if PG_VERSION_NUM >= PG_VERSION_15
char *icu_locale;
char *collversion;
char *daticulocale;
char *datcollversion;
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
char *daticurules;
#endif
} DatabaseCollationInfo;
static void EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt);
static char * GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database
databaseForm);
static DatabaseCollationInfo GetDatabaseCollation(Oid dbOid);
static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid);
#if PG_VERSION_NUM >= PG_VERSION_15
static char * GetLocaleProviderString(char datlocprovider);
#endif
static char * GetTablespaceName(Oid tablespaceOid);
static ObjectAddress * GetDatabaseAddressFromDatabaseName(char *databaseName, bool
missingOk);
static Oid get_database_owner(Oid db_oid);
List * PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
/* controlled via GUC */
bool EnableCreateDatabasePropagation = false;
@ -326,12 +342,61 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
}
/*
* This function validates the options provided for the CREATE DATABASE command.
* It iterates over each option in the stmt->options list and checks if it's supported.
* If an unsupported option is found, or if a supported option has an invalid value,
* it raises an error.
*
* Parameters:
* stmt: A CreatedbStmt struct representing a CREATE DATABASE command.
* The options field is a list of DefElem structs, each representing an option.
*
* Currently, this function checks for the following:
* - The "oid" option is not supported.
* - The "template" option is only supported with the value "template1".
* - The "strategy" option is only supported with the value "wal_log".
*
* If any of these checks fail, the function calls ereport to raise an error.
*/
static void
EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt)
{
DefElem *option = NULL;
foreach_ptr(option, stmt->options)
{
if (strcmp(option->defname, "oid") == 0)
{
ereport(ERROR,
errmsg("CREATE DATABASE option \"%s\" is not supported",
option->defname));
}
char *optionValue = defGetString(option);
if (strcmp(option->defname, "template") == 0 && strcmp(optionValue,
"template1") != 0)
{
ereport(ERROR, errmsg("Only template1 is supported as template "
"parameter for CREATE DATABASE"));
}
if (strcmp(option->defname, "strategy") == 0 && strcmp(optionValue, "wal_log") !=
0)
{
ereport(ERROR, errmsg("Only wal_log is supported as strategy "
"parameter for CREATE DATABASE"));
}
}
}
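For illustration, a sketch (database names hypothetical) of commands that the checks above reject:

-- hypothetical database names; specifying a custom OID is rejected
CREATE DATABASE db_with_oid WITH OID = 966345;
-- only template1 is supported as the template
CREATE DATABASE db_from_template WITH TEMPLATE = 'template0';
-- only the wal_log strategy is supported
CREATE DATABASE db_file_copy WITH STRATEGY = 'file_copy';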
/*
* PreprocessCreateDatabaseStmt is executed before the statement is applied to the local
* postgres instance.
*
* In this stage, we can perform validations and prepare the commands that need to
* be run on all workers to grant.
* be run on all workers to create the database.
*/
List *
PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
@ -344,17 +409,20 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
EnsureCoordinator();
/*Validate the statement */
DeparseTreeNode(node);
/*validate the statement*/
CreatedbStmt *stmt = castNode(CreatedbStmt, node);
EnsureSupportedCreateDatabaseCommand(stmt);
return NIL;
}
/*
* PostprocessCreatedbStmt is executed after the statement is applied to the local
* PostprocessCreateDatabaseStmt is executed after the statement is applied to the local
* postgres instance. In this stage we can prepare the commands that need to be run on
* all workers to create the database.
* all workers to create the database. Since CREATE DATABASE cannot be run inside a
* transaction block, we need to use NontransactionalNodeDDLTaskList to send the
* CREATE DATABASE statement to the workers.
*
*/
List *
@ -378,7 +446,7 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString)
/*
* PostprocessAlterDatabaseStmt is executed after the statement is applied to the local
* PreprocessDropDatabaseStmt is executed before the statement is applied to the local
* postgres instance. In this stage we can prepare the commands that need to be run on
* all workers to drop the database. Since DROP DATABASE cannot be run inside a
* transaction block, we need to use NontransactionalNodeDDLTaskList to send the
@ -442,11 +510,11 @@ GetDatabaseAddressFromDatabaseName(char *databaseName, bool missingOk)
* object of the DropdbStmt.
*/
List *
DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
DropDatabaseStmtObjectAddress(Node *node, bool missingOk, bool isPostprocess)
{
DropdbStmt *stmt = castNode(DropdbStmt, node);
ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname,
missing_ok);
missingOk);
return list_make1(dbAddress);
}
@ -456,11 +524,11 @@ DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
* object of the CreatedbStmt.
*/
List *
CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
CreateDatabaseStmtObjectAddress(Node *node, bool missingOk, bool isPostprocess)
{
CreatedbStmt *stmt = castNode(CreatedbStmt, node);
ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname,
missing_ok);
missingOk);
return list_make1(dbAddress);
}
@ -478,7 +546,7 @@ GetTablespaceName(Oid tablespaceOid)
}
Form_pg_tablespace tablespaceForm = (Form_pg_tablespace) GETSTRUCT(tuple);
char *tablespaceName = NameStr(tablespaceForm->spcname);
char *tablespaceName = pstrdup(NameStr(tablespaceForm->spcname));
ReleaseSysCache(tuple);
@ -488,100 +556,65 @@ GetTablespaceName(Oid tablespaceOid)
/*
* GetDatabaseCollation gets the oid of a database and returns all the collation-related information.
* We need this method since collation related info in Form_pg_database is not accessible
* We need this method since collation related info in Form_pg_database is not accessible.
*/
static DatabaseCollationInfo
GetDatabaseCollation(Oid db_oid)
GetDatabaseCollation(Oid dbOid)
{
DatabaseCollationInfo info;
bool isNull;
memset(&info, 0, sizeof(DatabaseCollationInfo));
Snapshot snapshot = RegisterSnapshot(GetLatestSnapshot());
Relation rel = table_open(DatabaseRelationId, AccessShareLock);
HeapTuple tup = get_catalog_object_by_oid(rel, Anum_pg_database_oid, db_oid);
HeapTuple tup = get_catalog_object_by_oid(rel, Anum_pg_database_oid, dbOid);
if (!HeapTupleIsValid(tup))
{
elog(ERROR, "cache lookup failed for database %u", db_oid);
elog(ERROR, "cache lookup failed for database %u", dbOid);
}
bool isNull = false;
TupleDesc tupdesc = RelationGetDescr(rel);
Datum collationDatum = heap_getattr(tup, Anum_pg_database_datcollate, tupdesc,
&isNull);
if (isNull)
{
info.collation = NULL;
}
else
{
info.collation = TextDatumGetCString(collationDatum);
}
info.datcollate = TextDatumGetCString(collationDatum);
Datum ctypeDatum = heap_getattr(tup, Anum_pg_database_datctype, tupdesc, &isNull);
if (isNull)
{
info.ctype = NULL;
}
else
{
info.ctype = TextDatumGetCString(ctypeDatum);
}
info.datctype = TextDatumGetCString(ctypeDatum);
#if PG_VERSION_NUM >= PG_VERSION_15
Datum icuLocaleDatum = heap_getattr(tup, Anum_pg_database_daticulocale, tupdesc,
&isNull);
if (isNull)
if (!isNull)
{
info.icu_locale = NULL;
}
else
{
info.icu_locale = TextDatumGetCString(icuLocaleDatum);
info.daticulocale = TextDatumGetCString(icuLocaleDatum);
}
Datum collverDatum = heap_getattr(tup, Anum_pg_database_datcollversion, tupdesc,
&isNull);
if (isNull)
if (!isNull)
{
info.collversion = NULL;
info.datcollversion = TextDatumGetCString(collverDatum);
}
else
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
Datum icurulesDatum = heap_getattr(tup, Anum_pg_database_daticurules, tupdesc,
&isNull);
if (!isNull)
{
info.collversion = TextDatumGetCString(collverDatum);
info.daticurules = TextDatumGetCString(icurulesDatum);
}
#endif
table_close(rel, AccessShareLock);
UnregisterSnapshot(snapshot);
heap_freetuple(tup);
return info;
}
/*
* FreeDatabaseCollationInfo frees the memory allocated for DatabaseCollationInfo
*/
static void
FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo)
{
if (collInfo.collation != NULL)
{
pfree(collInfo.collation);
}
if (collInfo.ctype != NULL)
{
pfree(collInfo.ctype);
}
#if PG_VERSION_NUM >= PG_VERSION_15
if (collInfo.icu_locale != NULL)
{
pfree(collInfo.icu_locale);
}
#endif
}
#if PG_VERSION_NUM >= PG_VERSION_15
/*
@ -603,13 +636,11 @@ GetLocaleProviderString(char datlocprovider)
return "icu";
}
case 'l':
{
return "locale";
}
default:
return "";
{
ereport(ERROR, (errmsg("unexpected datlocprovider value: %c",
datlocprovider)));
}
}
}
@ -620,6 +651,10 @@ GetLocaleProviderString(char datlocprovider)
/*
* GenerateCreateDatabaseStatementFromPgDatabase gets the pg_database tuple and returns the
* CREATE DATABASE statement that can be used to create the given database.
*
* Note that this doesn't deparse the OID of the database; this is not a
* problem because we don't allow specifying custom OIDs for databases
* when creating them anyway.
*/
static char *
GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
@ -632,78 +667,100 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
appendStringInfo(&str, "CREATE DATABASE %s",
quote_identifier(NameStr(databaseForm->datname)));
if (databaseForm->datdba != InvalidOid)
{
appendStringInfo(&str, " OWNER = %s",
quote_literal_cstr(GetUserNameFromId(databaseForm->datdba,
false)));
}
if (databaseForm->encoding != -1)
{
appendStringInfo(&str, " ENCODING = %s",
quote_literal_cstr(pg_encoding_to_char(databaseForm->encoding)));
}
if (collInfo.collation != NULL)
{
appendStringInfo(&str, " LC_COLLATE = %s", quote_literal_cstr(
collInfo.collation));
}
if (collInfo.ctype != NULL)
{
appendStringInfo(&str, " LC_CTYPE = %s", quote_literal_cstr(collInfo.ctype));
}
#if PG_VERSION_NUM >= PG_VERSION_15
if (collInfo.icu_locale != NULL)
{
appendStringInfo(&str, " ICU_LOCALE = %s", quote_literal_cstr(
collInfo.icu_locale));
}
if (databaseForm->datlocprovider != 0)
{
appendStringInfo(&str, " LOCALE_PROVIDER = %s",
quote_literal_cstr(GetLocaleProviderString(
databaseForm->datlocprovider)));
}
if (collInfo.collversion != NULL)
{
appendStringInfo(&str, " COLLATION_VERSION = %s", quote_literal_cstr(
collInfo.collversion));
}
#endif
if (databaseForm->dattablespace != InvalidOid)
{
appendStringInfo(&str, " TABLESPACE = %s",
quote_identifier(GetTablespaceName(
databaseForm->dattablespace)));
}
appendStringInfo(&str, " CONNECTION LIMIT %d", databaseForm->datconnlimit);
appendStringInfo(&str, " ALLOW_CONNECTIONS = %s",
quote_literal_cstr(databaseForm->datallowconn ? "true" : "false"));
if (databaseForm->datconnlimit >= 0)
{
appendStringInfo(&str, " CONNECTION LIMIT %d", databaseForm->datconnlimit);
}
appendStringInfo(&str, " IS_TEMPLATE = %s",
quote_literal_cstr(databaseForm->datistemplate ? "true" : "false"));
FreeDatabaseCollationInfo(collInfo);
appendStringInfo(&str, " LC_COLLATE = %s",
quote_literal_cstr(collInfo.datcollate));
appendStringInfo(&str, " LC_CTYPE = %s", quote_literal_cstr(collInfo.datctype));
appendStringInfo(&str, " OWNER = %s",
quote_identifier(GetUserNameFromId(databaseForm->datdba, false)));
appendStringInfo(&str, " TABLESPACE = %s",
quote_identifier(GetTablespaceName(databaseForm->dattablespace)));
appendStringInfo(&str, " ENCODING = %s",
quote_literal_cstr(pg_encoding_to_char(databaseForm->encoding)));
#if PG_VERSION_NUM >= PG_VERSION_15
if (collInfo.datcollversion != NULL)
{
appendStringInfo(&str, " COLLATION_VERSION = %s",
quote_identifier(collInfo.datcollversion));
}
if (collInfo.daticulocale != NULL)
{
appendStringInfo(&str, " ICU_LOCALE = %s", quote_identifier(
collInfo.daticulocale));
}
appendStringInfo(&str, " LOCALE_PROVIDER = %s",
quote_identifier(GetLocaleProviderString(
databaseForm->datlocprovider)));
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
if (collInfo.daticurules != NULL)
{
appendStringInfo(&str, " ICU_RULES = %s", quote_identifier(
collInfo.daticurules));
}
#endif
return str.data;
}
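A rough sketch of the statement this function produces for an ordinary database (identifiers hypothetical; on PG 15+ the collation version, ICU locale and locale provider options are appended as well, and on PG 16+ the ICU rules):

-- mydb, postgres and pg_default are hypothetical identifiers
CREATE DATABASE mydb ALLOW_CONNECTIONS = 'true' IS_TEMPLATE = 'false' LC_COLLATE = 'C' LC_CTYPE = 'C' OWNER = postgres TABLESPACE = pg_default ENCODING = 'UTF8'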
/*
* GenerateCreateDatabaseCommandList gets a list of pg_database tuples and returns
* a list of CREATE DATABASE statements for all the databases.
* GenerateGrantDatabaseCommandList returns a list of SQL statements that idempotently
* apply the existing GRANTs on all distributed databases.
*/
List *
GenerateGrantDatabaseCommandList(void)
{
List *grantCommands = NIL;
Relation pgDatabaseRel = table_open(DatabaseRelationId, AccessShareLock);
TableScanDesc scan = table_beginscan_catalog(pgDatabaseRel, 0, NULL);
HeapTuple tuple = NULL;
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Form_pg_database databaseForm = (Form_pg_database) GETSTRUCT(tuple);
ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(
NameStr(databaseForm->datname), false);
/* skip databases that are not distributed */
if (!IsAnyObjectDistributed(list_make1(dbAddress)))
{
continue;
}
List *dbGrants = GrantOnDatabaseDDLCommands(databaseForm->oid);
/* append dbGrants into grantCommands */
grantCommands = list_concat(grantCommands, dbGrants);
}
heap_endscan(scan);
table_close(pgDatabaseRel, AccessShareLock);
return grantCommands;
}
/*
* GenerateCreateDatabaseCommandList returns a list of CREATE DATABASE statements
* for all the databases.
*
* Commands in the list are wrapped by citus_internal_database_command() UDF
* to avoid the transaction block restrictions that apply to database commands
@ -721,8 +778,16 @@ GenerateCreateDatabaseCommandList(void)
{
Form_pg_database databaseForm = (Form_pg_database) GETSTRUCT(tuple);
char *createStmt = GenerateCreateDatabaseStatementFromPgDatabase(databaseForm);
ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(
NameStr(databaseForm->datname), false);
/* skip databases that are not distributed */
if (!IsAnyObjectDistributed(list_make1(dbAddress)))
{
continue;
}
char *createStmt = GenerateCreateDatabaseStatementFromPgDatabase(databaseForm);
StringInfo outerDbStmt = makeStringInfo();
@ -732,8 +797,6 @@ GenerateCreateDatabaseCommandList(void)
quote_literal_cstr(
createStmt));
elog(LOG, "outerDbStmt: %s", outerDbStmt->data);
/* Add the statement to the list of commands */
commands = lappend(commands, outerDbStmt->data);
}

View File

@ -374,6 +374,15 @@ static DistributeObjectOps Any_Rename = {
.address = NULL,
.markDistributed = false,
};
static DistributeObjectOps Any_SecLabel = {
.deparse = DeparseSecLabelStmt,
.qualify = NULL,
.preprocess = NULL,
.postprocess = PostprocessSecLabelStmt,
.operationType = DIST_OPS_ALTER,
.address = SecLabelStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps Attribute_Rename = {
.deparse = DeparseRenameAttributeStmt,
.qualify = QualifyRenameAttributeStmt,
@ -2062,6 +2071,11 @@ GetDistributeObjectOps(Node *node)
return &Vacuum_Analyze;
}
case T_SecLabelStmt:
{
return &Any_SecLabel;
}
case T_RenameStmt:
{
RenameStmt *stmt = castNode(RenameStmt, node);

View File

@ -23,6 +23,7 @@
#include "catalog/pg_auth_members.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_db_role_setting.h"
#include "catalog/pg_shseclabel.h"
#include "catalog/pg_type.h"
#include "catalog/objectaddress.h"
#include "commands/dbcommands.h"
@ -65,6 +66,7 @@ static DefElem * makeDefElemBool(char *name, bool value);
static List * GenerateRoleOptionsList(HeapTuple tuple);
static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options);
static List * GenerateGrantRoleStmtsOfRole(Oid roleid);
static List * GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename);
static void EnsureSequentialModeForRoleDDL(void);
static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple,
@ -515,13 +517,14 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{
HeapTuple roleTuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleOid));
Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple));
char *rolename = pstrdup(NameStr(role->rolname));
CreateRoleStmt *createRoleStmt = NULL;
if (EnableCreateRolePropagation)
{
createRoleStmt = makeNode(CreateRoleStmt);
createRoleStmt->stmt_type = ROLESTMT_ROLE;
createRoleStmt->role = pstrdup(NameStr(role->rolname));
createRoleStmt->role = rolename;
createRoleStmt->options = GenerateRoleOptionsList(roleTuple);
}
@ -532,7 +535,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
alterRoleStmt->role = makeNode(RoleSpec);
alterRoleStmt->role->roletype = ROLESPEC_CSTRING;
alterRoleStmt->role->location = -1;
alterRoleStmt->role->rolename = pstrdup(NameStr(role->rolname));
alterRoleStmt->role->rolename = rolename;
alterRoleStmt->action = 1;
alterRoleStmt->options = GenerateRoleOptionsList(roleTuple);
}
@ -544,7 +547,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{
/* add a worker_create_or_alter_role command if any of them are set */
char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand(
pstrdup(NameStr(role->rolname)),
rolename,
createRoleStmt,
alterRoleStmt);
@ -566,6 +569,20 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{
completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
}
/*
* append SECURITY LABEL ON ROLE commands for this specific user
* When we propagate user creation, we also want to make sure that we propagate
* all the security labels it has been given. For this, we check pg_shseclabel
* for the ROLE entry corresponding to roleOid, and generate the relevant
* SecLabel stmts to be run in the new node.
*/
List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename);
stmt = NULL;
foreach_ptr(stmt, secLabelOnRoleStmts)
{
completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
}
}
return completeRoleList;
@ -895,6 +912,54 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
}
/*
* GenerateSecLabelOnRoleStmts generates the SecLabelStmts for the role
* whose oid is roleid.
*/
static List *
GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename)
{
List *secLabelStmts = NIL;
/*
* Note that roles are shared database objects, therefore their
* security labels are stored in pg_shseclabel instead of pg_seclabel.
*/
Relation pg_shseclabel = table_open(SharedSecLabelRelationId, AccessShareLock);
ScanKeyData skey[1];
ScanKeyInit(&skey[0], Anum_pg_shseclabel_objoid, BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(roleid));
SysScanDesc scan = systable_beginscan(pg_shseclabel, SharedSecLabelObjectIndexId,
true, NULL, 1, &skey[0]);
HeapTuple tuple = NULL;
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
SecLabelStmt *secLabelStmt = makeNode(SecLabelStmt);
secLabelStmt->objtype = OBJECT_ROLE;
secLabelStmt->object = (Node *) makeString(pstrdup(rolename));
Datum datumArray[Natts_pg_shseclabel];
bool isNullArray[Natts_pg_shseclabel];
heap_deform_tuple(tuple, RelationGetDescr(pg_shseclabel), datumArray,
isNullArray);
secLabelStmt->provider = TextDatumGetCString(
datumArray[Anum_pg_shseclabel_provider - 1]);
secLabelStmt->label = TextDatumGetCString(
datumArray[Anum_pg_shseclabel_label - 1]);
secLabelStmts = lappend(secLabelStmts, secLabelStmt);
}
systable_endscan(scan);
table_close(pg_shseclabel, AccessShareLock);
return secLabelStmts;
}
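Once deparsed, each generated statement looks roughly like the following (provider, role and label hypothetical):

-- provider, role and label are hypothetical
SECURITY LABEL FOR selinux ON ROLE alice IS 'user_u:user_r:user_t:s0';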
/*
* PreprocessCreateRoleStmt creates a worker_create_or_alter_role query for the
* role that is being created. With that query we can create the role in the

View File

@ -0,0 +1,125 @@
/*-------------------------------------------------------------------------
*
* seclabel.c
*
* This file contains the logic of SECURITY LABEL statement propagation.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/deparser.h"
#include "distributed/log_utils.h"
#include "distributed/metadata_sync.h"
#include "distributed/metadata/distobject.h"
/*
* PostprocessSecLabelStmt prepares the commands that need to be run on all workers to assign
* security labels on distributed objects, currently supporting just Role objects.
* It also ensures that all object dependencies exist on all
* nodes for the object in the SecLabelStmt.
*/
List *
PostprocessSecLabelStmt(Node *node, const char *queryString)
{
if (!ShouldPropagate())
{
return NIL;
}
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
List *objectAddresses = GetObjectAddressListFromParseTree(node, false, true);
if (!IsAnyObjectDistributed(objectAddresses))
{
return NIL;
}
if (secLabelStmt->objtype != OBJECT_ROLE)
{
/*
* If we are not on the coordinator, we don't want to interrupt the SECURITY
* LABEL command with notices; from a worker node the user expects that
* the command will not be propagated.
*/
if (EnableUnsupportedFeatureMessages && IsCoordinator())
{
ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands whose "
"object type is not role"),
errhint("Connect to worker nodes directly to manually "
"run the same SECURITY LABEL command.")));
}
return NIL;
}
if (!EnableCreateRolePropagation)
{
return NIL;
}
EnsureCoordinator();
EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);
const char *sql = DeparseTreeNode((Node *) secLabelStmt);
List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
}
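A hedged example of the resulting behavior (object names hypothetical, assuming a label provider is loaded and the role is distributed): on the coordinator, a role label is propagated to the workers, while labels on other object types only emit the notice above.

-- object names are hypothetical
SECURITY LABEL ON ROLE dist_role IS 'classified';   -- propagated to worker nodes
SECURITY LABEL ON TABLE dist_table IS 'classified'; -- NOTICE: not propagating SECURITY LABEL commands whose object type is not role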
/*
* SecLabelStmtObjectAddress returns the object address of the object on
* which this statement operates (secLabelStmt->object). Note that it has no limitation
* on the object type being OBJECT_ROLE. This is intentionally implemented like this
* since it is fairly simple to implement and we might extend SECURITY LABEL propagation
* in the future to include more object types.
*/
List *
SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
{
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
Relation rel = NULL;
ObjectAddress address = get_object_address(secLabelStmt->objtype,
secLabelStmt->object, &rel,
AccessShareLock, missing_ok);
if (rel != NULL)
{
relation_close(rel, AccessShareLock);
}
ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress));
*addressPtr = address;
return list_make1(addressPtr);
}
/*
* citus_test_object_relabel is a dummy function for the check_object_relabel_type hook.
* It is meant to be used in tests, combined with citus_test_register_label_provider.
*/
void
citus_test_object_relabel(const ObjectAddress *object, const char *seclabel)
{
if (seclabel == NULL ||
strcmp(seclabel, "citus_unclassified") == 0 ||
strcmp(seclabel, "citus_classified") == 0 ||
strcmp(seclabel, "citus '!unclassified") == 0)
{
return;
}
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("'%s' is not a valid security label for Citus tests.", seclabel)));
}
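With citus.running_under_citus_test_suite enabled before the library loads, the dummy provider can be exercised roughly like this (role name hypothetical); any label other than the three accepted ones makes citus_test_object_relabel error out:

-- role name is hypothetical
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE test_role IS 'citus_classified';
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE test_role IS 'bogus_label'; -- ERROR: not a valid security label for Citus tests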

View File

@ -25,7 +25,6 @@
*-------------------------------------------------------------------------
*/
#include "pg_version_constants.h"
#include "postgres.h"
@ -64,7 +63,6 @@
#include "distributed/multi_executor.h"
#include "distributed/multi_explain.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/pg_version_constants.h"
#include "distributed/reference_table_utils.h"
#include "distributed/resource_lock.h"
#include "distributed/string_utils.h"

View File

@ -1,16 +1,21 @@
/*
/*-------------------------------------------------------------------------
*
* citus_deparseutils.c
* ---------------------
*
* This file contains common functions used for deparsing PostgreSQL statements
* to their equivalent SQL representation.
* This file contains common functions used for deparsing PostgreSQL
* statements to their equivalent SQL representation.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "pg_version_constants.h"
#include "postgres.h"
#include "commands/defrem.h"
#include "distributed/deparser.h"
#include "distributed/pg_version_constants.h"
#include "utils/builtins.h"
#include "utils/elog.h"
#include "utils/rel.h"
@ -29,9 +34,9 @@
* @param optionFormatsLen The number of option formats in the opt_formats array.
*/
void
DefElemOptionToStatement(StringInfo buf, DefElem *option, const
DefElemOptionFormat *optionFormats, int
optionFormatsLen)
DefElemOptionToStatement(StringInfo buf, DefElem *option,
const DefElemOptionFormat *optionFormats,
int optionFormatsLen)
{
const char *name = option->defname;
int i;

View File

@ -27,7 +27,12 @@
static void AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt);
static void AppendAlterDatabaseSetStmt(StringInfo buf, AlterDatabaseSetStmt *stmt);
static void AppendAlterDatabaseStmt(StringInfo buf, AlterDatabaseStmt *stmt);
static void AppendDefElemConnLimit(StringInfo buf, DefElem *def);
static void AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt);
static void AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt);
static void AppendGrantOnDatabaseStmt(StringInfo buf, GrantStmt *stmt);
const DefElemOptionFormat create_database_option_formats[] = {
{ "owner", " OWNER %s", OPTION_FORMAT_STRING },
@ -281,34 +286,6 @@ DeparseAlterDatabaseSetStmt(Node *node)
return str.data;
}
/*
* Validates for if option is template, lc_type, locale or lc_collate, propagation will
* not be supported since template and strategy options are not stored in the catalog
* and lc_type, locale and lc_collate options depends on template parameter.
*/
static void
ValidateCreateDatabaseOptions(DefElem *option)
{
if (strcmp(option->defname, "strategy") == 0)
{
ereport(ERROR,
errmsg("CREATE DATABASE option \"%s\" is not supported",
option->defname));
}
char *optionValue = defGetString(option);
if (strcmp(option->defname, "template") == 0 && strcmp(optionValue, "template1") != 0)
{
ereport(ERROR, errmsg(
"Only template1 is supported as template parameter for CREATE DATABASE"));
}
}
/*
* Prepares a CREATE DATABASE statement with given empty StringInfo buffer and CreatedbStmt node.
*/
static void
AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt)
{
@ -320,7 +297,7 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt)
foreach_ptr(option, stmt->options)
{
ValidateCreateDatabaseOptions(option);
/*ValidateCreateDatabaseOptions(option); */
DefElemOptionToStatement(buf, option, create_database_option_formats,
lengthof(create_database_option_formats));
@ -345,9 +322,6 @@ DeparseCreateDatabaseStmt(Node *node)
}
/*
* Prepares a DROP DATABASE statement with given empty StringInfo buffer and DropdbStmt node.
*/
static void
AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt)
{
@ -359,8 +333,19 @@ AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt)
DefElem *option = NULL;
foreach_ptr(option, stmt->options)
{
/* if it is the first option then append with "WITH" else append with "," */
if (option == linitial(stmt->options))
{
appendStringInfo(buf, " WITH ( ");
}
else
{
appendStringInfo(buf, ", ");
}
if (strcmp(option->defname, "force") == 0)
{
appendStringInfo(buf, "FORCE");
@ -371,6 +356,12 @@ AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt)
errmsg("unrecognized DROP DATABASE option \"%s\"",
option->defname)));
}
/* if it is the last option then append with ")" */
if (option == llast(stmt->options))
{
appendStringInfo(buf, " )");
}
}
}
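With the WITH ( ... ) handling above, a forced drop deparses roughly as (database name hypothetical):

-- mydb is hypothetical
DROP DATABASE mydb WITH ( FORCE )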

View File

@ -0,0 +1,78 @@
/*-------------------------------------------------------------------------
*
* deparse_seclabel_stmts.c
* All routines to deparse SECURITY LABEL statements.
*
* Copyright (c), Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "distributed/deparser.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"
static void AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt);
/*
* DeparseSecLabelStmt builds and returns a string representation of the
* SecLabelStmt for application on a remote server.
*/
char *
DeparseSecLabelStmt(Node *node)
{
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
StringInfoData buf = { 0 };
initStringInfo(&buf);
AppendSecLabelStmt(&buf, secLabelStmt);
return buf.data;
}
/*
* AppendSecLabelStmt generates the string representation of the
* SecLabelStmt and appends it to the buffer.
*/
static void
AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt)
{
appendStringInfoString(buf, "SECURITY LABEL ");
if (stmt->provider != NULL)
{
appendStringInfo(buf, "FOR %s ", quote_identifier(stmt->provider));
}
appendStringInfoString(buf, "ON ");
switch (stmt->objtype)
{
case OBJECT_ROLE:
{
appendStringInfo(buf, "ROLE %s ", quote_identifier(strVal(stmt->object)));
break;
}
/* normally, we shouldn't reach this */
default:
{
ereport(ERROR, (errmsg("unsupported security label statement for"
" deparsing")));
}
}
appendStringInfoString(buf, "IS ");
if (stmt->label != NULL)
{
appendStringInfo(buf, "%s", quote_literal_cstr(stmt->label));
}
else
{
appendStringInfoString(buf, "NULL");
}
}
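For example, a statement with no explicit provider and a NULL label (which removes the label) deparses as (role name hypothetical):

-- alice is hypothetical
SECURITY LABEL ON ROLE alice IS NULL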

View File

@ -22,11 +22,13 @@
#include "catalog/dependency.h"
#include "catalog/namespace.h"
#include "catalog/objectaddress.h"
#include "catalog/pg_database.h"
#include "catalog/pg_extension_d.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "citus_version.h"
#include "commands/dbcommands.h"
#include "commands/extension.h"
#include "distributed/listutils.h"
#include "distributed/colocation_utils.h"
@ -48,9 +50,6 @@
#include "utils/lsyscache.h"
#include "utils/regproc.h"
#include "utils/rel.h"
#include "catalog/pg_database.h"
#include "commands/dbcommands.h"
static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress);
static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes,
@ -360,8 +359,10 @@ ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes,
/*
* Deletes all pg_dist_object records for distributed roles in `DROP ROLE` statement a
* and for all databases in `DROP DATABASE` statement
* UnmarkNodeWideObjectsDistributed deletes pg_dist_object records
* for all distributed objects in the given Drop stmt node.
*
* Today we only expect DropRoleStmt and DropdbStmt to get here.
*/
void
UnmarkNodeWideObjectsDistributed(Node *node)
@ -385,9 +386,12 @@ UnmarkNodeWideObjectsDistributed(Node *node)
Oid dbOid = get_database_oid(dbName, stmt->missing_ok);
ObjectAddress *dbObjectAddress = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*dbObjectAddress, DatabaseRelationId, dbOid);
if (IsAnyObjectDistributed(list_make1(dbObjectAddress)))
{
UnmarkObjectDistributed(dbObjectAddress);
}
}
}
/*

View File

@ -123,6 +123,7 @@ static List * GetObjectsForGrantStmt(ObjectType objectType, Oid objectId);
static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission);
static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid,
AclItem *aclItem);
static List * GenerateGrantOnDatabaseFromAclItem(Oid databaseOid, AclItem *aclItem);
static List * GenerateGrantOnFunctionQueriesFromAclItem(Oid schemaOid,
AclItem *aclItem);
static List * GrantOnSequenceDDLCommands(Oid sequenceOid);
@ -154,6 +155,7 @@ static char * RemoteSchemaIdExpressionByName(char *schemaName);
static char * RemoteTypeIdExpression(Oid typeId);
static char * RemoteCollationIdExpression(Oid colocationId);
static char * RemoteTableIdExpression(Oid relationId);
static void SendDatabaseGrantSyncCommands(MetadataSyncContext *context);
PG_FUNCTION_INFO_V1(start_metadata_sync_to_all_nodes);
@ -2047,6 +2049,84 @@ GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid, AclItem *aclItem)
}
List *
GrantOnDatabaseDDLCommands(Oid databaseOid)
{
HeapTuple databaseTuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(databaseOid));
bool isNull = true;
Datum aclDatum = SysCacheGetAttr(DATABASEOID, databaseTuple, Anum_pg_database_datacl,
&isNull);
if (isNull)
{
ReleaseSysCache(databaseTuple);
return NIL;
}
Acl *acl = DatumGetAclPCopy(aclDatum);
AclItem *aclDat = ACL_DAT(acl);
int aclNum = ACL_NUM(acl);
List *commands = NIL;
ReleaseSysCache(databaseTuple);
for (int i = 0; i < aclNum; i++)
{
commands = list_concat(commands,
GenerateGrantOnDatabaseFromAclItem(
databaseOid, &aclDat[i]));
}
return commands;
}
List *
GenerateGrantOnDatabaseFromAclItem(Oid databaseOid, AclItem *aclItem)
{
AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_DATABASE;
AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_DATABASE;
/*
* Seems unlikely, but we check if there is a grant option in the list without the actual permission.
*/
Assert(!(grants & ACL_CONNECT) || (permissions & ACL_CONNECT));
Assert(!(grants & ACL_CREATE) || (permissions & ACL_CREATE));
Assert(!(grants & ACL_CREATE_TEMP) || (permissions & ACL_CREATE_TEMP));
Oid granteeOid = aclItem->ai_grantee;
List *queries = NIL;
queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor));
if (permissions & ACL_CONNECT)
{
char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights(
OBJECT_DATABASE, granteeOid, databaseOid,
"CONNECT",
grants & ACL_CONNECT));
queries = lappend(queries, query);
}
if (permissions & ACL_CREATE)
{
char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights(
OBJECT_DATABASE, granteeOid, databaseOid,
"CREATE",
grants & ACL_CREATE));
queries = lappend(queries, query);
}
if (permissions & ACL_CREATE_TEMP)
{
char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights(
OBJECT_DATABASE, granteeOid, databaseOid,
"TEMPORARY",
grants & ACL_CREATE_TEMP));
queries = lappend(queries, query);
}
queries = lappend(queries, "RESET ROLE");
return queries;
}
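For a single ACL item, the generated command sequence looks roughly like this (grantor, grantee and database names hypothetical; WITH GRANT OPTION is added when the corresponding grant bit is set):

-- grantor_role, grantee_role and mydb are hypothetical
SET ROLE grantor_role;
GRANT CONNECT ON DATABASE mydb TO grantee_role;
GRANT CREATE ON DATABASE mydb TO grantee_role;
GRANT TEMPORARY ON DATABASE mydb TO grantee_role;
RESET ROLE;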
/*
* GenerateGrantStmtForRights is the function for creating GrantStmt's for all
* types of objects that are supported. It takes parameters to fill a GrantStmt's
@ -2120,6 +2200,11 @@ GetObjectsForGrantStmt(ObjectType objectType, Oid objectId)
return list_make1(sequence);
}
case OBJECT_DATABASE:
{
return list_make1(makeString(get_database_name(objectId)));
}
default:
{
elog(ERROR, "unsupported object type for GRANT");
@ -3895,18 +3980,20 @@ citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS)
/*
* citus_internal_database_command is an internal UDF to
* create/drop a database in an idempotent maner without
* create a database in an idempotent manner without
* transaction block restrictions.
*/
Datum
citus_internal_database_command(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
if (!ShouldSkipMetadataChecks())
{
EnsureCitusInitiatedOperation();
}
PG_ENSURE_ARGNOTNULL(0, "database command");
PG_ENSURE_ARGNOTNULL(0, "command");
text *commandText = PG_GETARG_TEXT_P(0);
char *command = text_to_cstring(commandText);
@ -3923,7 +4010,7 @@ citus_internal_database_command(PG_FUNCTION_ARGS)
GUC_ACTION_LOCAL, true, 0, false);
/*
* createdb() / DropDatabase() uses ParseState to report the error position for the
* createdb() uses ParseState to report the error position for the
* input command and the position is reported to be 0 when it's provided as NULL.
* We're okay with that because we don't expect this UDF to be called with an incorrect
* DDL command.
@ -3942,25 +4029,13 @@ citus_internal_database_command(PG_FUNCTION_ARGS)
createdb(pstate, (CreatedbStmt *) parseTree);
}
}
else if (IsA(parseTree, DropdbStmt))
{
DropdbStmt *stmt = castNode(DropdbStmt, parseTree);
bool missingOk = false;
Oid databaseOid = get_database_oid(stmt->dbname, missingOk);
if (OidIsValid(databaseOid))
{
DropDatabase(pstate, (DropdbStmt *) parseTree);
}
}
else
{
ereport(ERROR, (errmsg("unsupported command type %d", nodeTag(parseTree))));
ereport(ERROR, (errmsg("citus_internal_database_command() can only be used "
"for CREATE DATABASE command by Citus.")));
}
/* Rollbacks GUCs to the state before this session */
/* rollback GUCs to the state before this session */
AtEOXact_GUC(true, saveNestLevel);
PG_RETURN_VOID();
@ -4573,13 +4648,6 @@ PropagateNodeWideObjectsCommandList(void)
/* collect all commands */
List *ddlCommands = NIL;
if (EnableCreateDatabasePropagation)
{
/* get commands for database creation */
List *createDatabaseCommands = GenerateCreateDatabaseCommandList();
ddlCommands = list_concat(ddlCommands, createDatabaseCommands);
}
if (EnableAlterRoleSetPropagation)
{
/*
@ -4590,6 +4658,13 @@ PropagateNodeWideObjectsCommandList(void)
ddlCommands = list_concat(ddlCommands, alterRoleSetCommands);
}
if (EnableCreateDatabasePropagation)
{
/* get commands for database creation */
List *createDatabaseCommands = GenerateCreateDatabaseCommandList();
ddlCommands = list_concat(ddlCommands, createDatabaseCommands);
}
return ddlCommands;
}
@ -4621,7 +4696,7 @@ SyncDistributedObjects(MetadataSyncContext *context)
Assert(ShouldPropagate());
/* Send systemwide objects, only roles for now */
/* send systemwide objects; i.e. roles and databases for now */
SendNodeWideObjectsSyncCommands(context);
/*
@ -4661,6 +4736,13 @@ SyncDistributedObjects(MetadataSyncContext *context)
* those tables.
*/
SendInterTableRelationshipCommands(context);
/*
* After creation of databases and roles, send the grant database commands
* to the workers.
*/
SendDatabaseGrantSyncCommands(context);
}
@ -4686,6 +4768,34 @@ SendNodeWideObjectsSyncCommands(MetadataSyncContext *context)
}
/*
* SendDatabaseGrantSyncCommands sends the GRANT ... ON DATABASE commands for distributed
* databases to workers, in transactional or nontransactional mode according to the
* transactionMode inside metadataSyncContext, when the EnableCreateDatabasePropagation
* GUC is set. This function is called after SendNodeWideObjectsSyncCommands and
* SendDependencyCreationCommands because both the databases and the roles need to
* exist on the workers first.
*
*/
static void
SendDatabaseGrantSyncCommands(MetadataSyncContext *context)
{
if (EnableCreateDatabasePropagation)
{
/* generate the GRANT ... ON DATABASE commands for all distributed databases */
List *commandList = GenerateGrantDatabaseCommandList();
if (commandList == NIL)
{
return;
}
commandList = lcons(DISABLE_DDL_PROPAGATION, commandList);
commandList = lappend(commandList, ENABLE_DDL_PROPAGATION);
SendOrCollectCommandListToActivatedNodes(context, commandList);
}
}
/*
* SendShellTableDeletionCommands sends sequence, and shell table deletion
* commands to workers with transactional or nontransactional mode according to

View File

@ -317,7 +317,7 @@ PG_FUNCTION_INFO_V1(citus_rebalance_start);
PG_FUNCTION_INFO_V1(citus_rebalance_stop);
PG_FUNCTION_INFO_V1(citus_rebalance_wait);
bool RunningUnderIsolationTest = false;
bool RunningUnderCitusTestSuite = false;
int MaxRebalancerLoggedIgnoredMoves = 5;
int RebalancerByDiskSizeBaseCost = 100 * 1024 * 1024;
bool PropagateSessionSettingsForLoopbackConnection = false;

View File

@ -1143,7 +1143,7 @@ ConflictWithIsolationTestingBeforeCopy(void)
const bool sessionLock = false;
const bool dontWait = false;
if (RunningUnderIsolationTest)
if (RunningUnderCitusTestSuite)
{
SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
SHARD_MOVE_ADVISORY_LOCK_SECOND_KEY,
@ -1177,7 +1177,7 @@ ConflictWithIsolationTestingAfterCopy(void)
const bool sessionLock = false;
const bool dontWait = false;
if (RunningUnderIsolationTest)
if (RunningUnderCitusTestSuite)
{
SET_LOCKTAG_ADVISORY(tag, MyDatabaseId,
SHARD_MOVE_ADVISORY_LOCK_FIRST_KEY,

View File

@ -29,6 +29,7 @@
#include "citus_version.h"
#include "commands/explain.h"
#include "commands/extension.h"
#include "commands/seclabel.h"
#include "common/string.h"
#include "executor/executor.h"
#include "distributed/backend_data.h"
@ -574,6 +575,16 @@ _PG_init(void)
INIT_COLUMNAR_SYMBOL(PGFunction, columnar_storage_info);
INIT_COLUMNAR_SYMBOL(PGFunction, columnar_store_memory_stats);
INIT_COLUMNAR_SYMBOL(PGFunction, test_columnar_storage_write_new_page);
/*
* This part is only for SECURITY LABEL tests
* mimicking what an actual security label provider would do
*/
if (RunningUnderCitusTestSuite)
{
register_label_provider("citus '!tests_label_provider",
citus_test_object_relabel);
}
}
@ -2316,13 +2327,14 @@ RegisterCitusConfigVariables(void)
WarnIfReplicationModelIsSet, NULL, NULL);
DefineCustomBoolVariable(
"citus.running_under_isolation_test",
"citus.running_under_citus_test_suite",
gettext_noop(
"Only useful for testing purposes, when set to true, Citus does some "
"tricks to implement useful isolation tests with rebalancing. Should "
"tricks to implement useful isolation tests with rebalancing. It also "
"registers a dummy label provider for SECURITY LABEL tests. Should "
"never be set to true on production systems "),
gettext_noop("for details of the tricks implemented, refer to the source code"),
&RunningUnderIsolationTest,
&RunningUnderCitusTestSuite,
false,
PGC_SUSET,
GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE,

View File

@ -239,11 +239,12 @@ extern List * PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
extern List * PostprocessCreateDatabaseStmt(Node *node, const char *queryString);
extern List * PreprocessDropDatabaseStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok,
extern List * DropDatabaseStmtObjectAddress(Node *node, bool missingOk,
bool isPostprocess);
extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok,
extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missingOk,
bool isPostprocess);
extern List * GenerateCreateDatabaseCommandList(void);
extern List * GenerateGrantDatabaseCommandList(void);
extern List * PreprocessAlterDatabaseRenameStmt(Node *node, const char *queryString,
@ -537,6 +538,11 @@ extern List * AlterSchemaOwnerStmtObjectAddress(Node *node, bool missing_ok,
extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok, bool
isPostprocess);
/* seclabel.c - forward declarations*/
extern List * PostprocessSecLabelStmt(Node *node, const char *queryString);
extern List * SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess);
extern void citus_test_object_relabel(const ObjectAddress *object, const char *seclabel);
/* sequence.c - forward declarations */
extern List * PreprocessAlterSequenceStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);

View File

@ -140,9 +140,9 @@ typedef enum OptionFormatType
} OptionFormatType;
extern void DefElemOptionToStatement(StringInfo buf, DefElem *option, const
DefElemOptionFormat *opt_formats, int
opt_formats_len);
extern void DefElemOptionToStatement(StringInfo buf, DefElem *option,
const DefElemOptionFormat *opt_formats,
int opt_formats_len);
/* forward declarations for deparse_statistics_stmts.c */
@ -286,6 +286,9 @@ extern void QualifyRenameTextSearchDictionaryStmt(Node *node);
extern void QualifyTextSearchConfigurationCommentStmt(Node *node);
extern void QualifyTextSearchDictionaryCommentStmt(Node *node);
/* forward declarations for deparse_seclabel_stmts.c */
extern char * DeparseSecLabelStmt(Node *node);
/* forward declarations for deparse_sequence_stmts.c */
extern char * DeparseDropSequenceStmt(Node *node);
extern char * DeparseRenameSequenceStmt(Node *node);

View File

@ -107,6 +107,7 @@ extern char * ColocationIdUpdateCommand(Oid relationId, uint32 colocationId);
extern char * CreateSchemaDDLCommand(Oid schemaId);
extern List * GrantOnSchemaDDLCommands(Oid schemaId);
extern List * GrantOnFunctionDDLCommands(Oid functionOid);
extern List * GrantOnDatabaseDDLCommands(Oid databaseOid);
extern List * GrantOnForeignServerDDLCommands(Oid serverId);
extern List * GenerateGrantOnForeignServerQueriesFromAclItem(Oid serverId,
AclItem *aclItem);

View File

@ -189,7 +189,7 @@ typedef struct RebalancePlanFunctions
extern char *VariablesToBePassedToNewConnections;
extern int MaxRebalancerLoggedIgnoredMoves;
extern int RebalancerByDiskSizeBaseCost;
extern bool RunningUnderIsolationTest;
extern bool RunningUnderCitusTestSuite;
extern bool PropagateSessionSettingsForLoopbackConnection;
extern int MaxBackgroundTaskExecutorsPerNode;

View File

@ -135,20 +135,10 @@ DEPS = {
),
"alter_role_propagation": TestDeps("minimal_schedule"),
"background_rebalance": TestDeps(
None,
[
"multi_test_helpers",
"multi_cluster_management",
],
worker_count=3,
None, ["multi_test_helpers", "multi_cluster_management"], worker_count=3
),
"background_rebalance_parallel": TestDeps(
None,
[
"multi_test_helpers",
"multi_cluster_management",
],
worker_count=6,
None, ["multi_test_helpers", "multi_cluster_management"], worker_count=6
),
"function_propagation": TestDeps("minimal_schedule"),
"citus_shards": TestDeps("minimal_schedule"),
@ -165,30 +155,17 @@ DEPS = {
),
"schema_based_sharding": TestDeps("minimal_schedule"),
"multi_sequence_default": TestDeps(
None,
[
"multi_test_helpers",
"multi_cluster_management",
"multi_table_ddl",
],
None, ["multi_test_helpers", "multi_cluster_management", "multi_table_ddl"]
),
"grant_on_schema_propagation": TestDeps("minimal_schedule"),
"propagate_extension_commands": TestDeps("minimal_schedule"),
"multi_size_queries": TestDeps("base_schedule", ["multi_copy"]),
"multi_mx_node_metadata": TestDeps(
None,
[
"multi_extension",
"multi_test_helpers",
"multi_test_helpers_superuser",
],
None, ["multi_extension", "multi_test_helpers", "multi_test_helpers_superuser"]
),
"multi_mx_function_table_reference": TestDeps(
None,
[
"multi_cluster_management",
"remove_coordinator_from_metadata",
],
["multi_cluster_management", "remove_coordinator_from_metadata"],
# because it queries node group id and it changes as we add / remove nodes
repeatable=False,
),
@ -201,16 +178,27 @@ DEPS = {
],
),
"metadata_sync_helpers": TestDeps(
None,
[
"multi_mx_node_metadata",
"multi_cluster_management",
],
None, ["multi_mx_node_metadata", "multi_cluster_management"]
),
"multi_utilities": TestDeps(
"multi_utilities": TestDeps("minimal_schedule", ["multi_data_types"]),
"multi_tenant_isolation_nonblocking": TestDeps(
"minimal_schedule", ["multi_data_types", "remove_coordinator_from_metadata"]
),
"remove_non_default_nodes": TestDeps(
None, ["multi_mx_node_metadata", "multi_cluster_management"], repeatable=False
),
"citus_split_shard_columnar_partitioned": TestDeps(
"minimal_schedule", ["remove_coordinator_from_metadata"]
),
"add_coordinator": TestDeps(
"minimal_schedule", ["remove_coordinator_from_metadata"], repeatable=False
),
"multi_multiuser_auth": TestDeps(
"minimal_schedule",
["multi_data_types"],
["multi_create_table", "multi_create_users", "multi_multiuser_load_data"],
repeatable=False,
),
"multi_prepare_plsql": TestDeps("base_schedule"),
}
@ -303,9 +291,13 @@ def run_schedule_with_multiregress(test_name, schedule, dependencies, args):
worker_count = needed_worker_count(test_name, dependencies)
# find suitable make recipe
if dependencies.schedule == "base_isolation_schedule" or "isolation" in test_name:
if dependencies.schedule == "base_isolation_schedule" or test_name.startswith(
"isolation"
):
make_recipe = "check-isolation-custom-schedule"
elif dependencies.schedule == "failure_base_schedule" or "failure" in test_name:
elif dependencies.schedule == "failure_base_schedule" or test_name.startswith(
"failure"
):
make_recipe = "check-failure-custom-schedule"
else:
make_recipe = "check-custom-schedule"
@ -418,10 +410,7 @@ def test_dependencies(test_name, test_schedule, schedule_line, args):
if "upgrade_columnar_before" not in before_tests:
before_tests.append("upgrade_columnar_before")
return TestDeps(
default_base_schedule(test_schedule, args),
before_tests,
)
return TestDeps(default_base_schedule(test_schedule, args), before_tests)
# before_ tests leave stuff around on purpose for the after tests. So they
# are not repeatable by definition.

View File

@ -2,13 +2,6 @@
-- ADD_COORDINATOR
--
-- node trying to add itself without specifying groupid => 0 should error out
-- first remove the coordinator to for testing master_add_node for coordinator
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
SELECT master_add_node('localhost', :master_port);
ERROR: Node cannot add itself as a worker.
HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636);

View File

@ -135,4 +135,10 @@ NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table citus_split_shard_by_split_points_negative.range_paritioned_table_to_split
drop cascades to table citus_split_shard_by_split_points_negative.table_to_split
drop cascades to table citus_split_shard_by_split_points_negative.table_to_split_replication_factor_2
SELECT public.wait_for_resource_cleanup();
wait_for_resource_cleanup
---------------------------------------------------------------------
(1 row)
--END : Cleanup

View File

@ -8,298 +8,75 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\else
\q
\endif
-- create/drop database for pg > 15
\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3'
CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace';
\c - - - :worker_1_port
\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts4'
CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace';
\c - - - :worker_2_port
\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts5'
CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace';
\c - - - :master_port
create user create_drop_db_test_user;
-- create/drop database for pg >= 15
set citus.enable_create_database_propagation=on;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%CREATE DATABASE%';
CREATE DATABASE mydatabase
WITH
OWNER = create_drop_db_test_user
CONNECTION LIMIT = 10
ENCODING = 'UTF8'
TABLESPACE = create_drop_db_tablespace
ALLOW_CONNECTIONS = true
IS_TEMPLATE = false
OID = 966345;
NOTICE: issuing CREATE DATABASE mydatabase OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE DATABASE mydatabase OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
$$
) ORDER BY result;
result
WITH OID = 966345;
ERROR: CREATE DATABASE option "oid" is not supported
CREATE DATABASE mydatabase
WITH strategy file_copy;
ERROR: Only wal_log is supported as strategy parameter for CREATE DATABASE
CREATE DATABASE st_wal_log
WITH strategy WaL_LoG;
SELECT * FROM public.check_database_on_all_nodes('st_wal_log') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
[{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}]
[{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}]
[{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}]
coordinator (local) | {"database_properties": {"datacl": null, "datname": "st_wal_log", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": null, "datname": "st_wal_log", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": null, "datname": "st_wal_log", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database mydatabase;
NOTICE: issuing DROP DATABASE mydatabase
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing DROP DATABASE mydatabase
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
$$
) ORDER BY result;
result
drop database st_wal_log;
select 1 from citus_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
(3 rows)
select citus_remove_node('localhost', :worker_2_port);
citus_remove_node
---------------------------------------------------------------------
1
(1 row)
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%CREATE DATABASE%';
CREATE DATABASE mydatabase2
WITH OWNER = create_drop_db_test_user
ENCODING = 'UTF8'
TABLESPACE = create_drop_db_tablespace
ALLOW_CONNECTIONS = true
IS_TEMPLATE = false
OID = 966345;
NOTICE: issuing CREATE DATABASE mydatabase2 OWNER create_drop_db_test_user ENCODING 'UTF8' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase2'
) q2
$$
) ORDER BY result;
result
---------------------------------------------------------------------
[{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}]
[{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}]
(2 rows)
-- test COLLATION_VERSION
CREATE DATABASE test_collation_version
WITH ENCODING = 'UTF8'
COLLATION_VERSION = '1.0'
ALLOW_CONNECTIONS = false;
select 1 from citus_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase2'
) q2
$$
) ORDER BY result;
result
SELECT * FROM public.check_database_on_all_nodes('test_collation_version') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
[{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}]
[{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}]
[{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}]
coordinator (local) | {"database_properties": {"datacl": null, "datname": "test_collation_version", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": false, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": "1.0", "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_collation_version", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": false, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": "1.0", "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_collation_version", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": false, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": "1.0", "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database mydatabase2;
NOTICE: issuing DROP DATABASE mydatabase2
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing DROP DATABASE mydatabase2
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
$$
) ORDER BY result;
result
drop database test_collation_version;
SET client_min_messages TO WARNING;
-- test LOCALE_PROVIDER & ICU_LOCALE
CREATE DATABASE test_locale_provider
WITH ENCODING = 'UTF8'
LOCALE_PROVIDER = 'icu'
ICU_LOCALE = 'en_US';
ERROR: new locale provider (icu) does not match locale provider of the template database (libc)
HINT: Use the same locale provider as in the template database, or use template0 as template.
RESET client_min_messages;
CREATE DATABASE test_locale_provider
WITH ENCODING = 'UTF8'
LOCALE_PROVIDER = 'libc'
ICU_LOCALE = 'en_US';
ERROR: ICU locale cannot be specified unless locale provider is ICU
CREATE DATABASE test_locale_provider
WITH ENCODING = 'UTF8'
LOCALE_PROVIDER = 'libc';
SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (local) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%CREATE DATABASE%';
-- create a template database with all options set and allow connections false
CREATE DATABASE my_template_database
WITH OWNER = create_drop_db_test_user
ENCODING = 'UTF8'
COLLATION_VERSION = '1.0'
TABLESPACE = create_drop_db_tablespace
ALLOW_CONNECTIONS = false
IS_TEMPLATE = true;
NOTICE: issuing CREATE DATABASE my_template_database OWNER create_drop_db_test_user ENCODING 'UTF8' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE DATABASE my_template_database OWNER create_drop_db_test_user ENCODING 'UTF8' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'my_template_database'
) q2
$$
) ORDER BY result;
result
---------------------------------------------------------------------
[{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}]
[{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}]
[{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}]
(3 rows)
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'my_template_database'
) q2
$$
) ORDER BY result;
result
---------------------------------------------------------------------
[{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}]
[{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}]
[{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}]
(3 rows)
SET citus.log_remote_commands = true;
--template databases cannot be dropped, so we need to change the template flag
SELECT result from run_command_on_all_nodes(
$$
UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database'
$$
) ORDER BY result;
result
---------------------------------------------------------------------
UPDATE 1
UPDATE 1
UPDATE 1
(3 rows)
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database my_template_database;
NOTICE: issuing DROP DATABASE my_template_database
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing DROP DATABASE my_template_database
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'my_template_database'
) q2
$$
) ORDER BY result;
result
---------------------------------------------------------------------
(3 rows)
--tests for special characters in database name
set citus.enable_create_database_propagation=on;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%CREATE DATABASE%';
create database "mydatabase#1'2";
NOTICE: issuing CREATE DATABASE "mydatabase#1'2"
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE DATABASE "mydatabase#1'2"
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database if exists "mydatabase#1'2";
NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2"
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2"
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
drop database test_locale_provider;
\c - - - :master_port
drop tablespace create_drop_db_tablespace;
\c - - - :worker_1_port
drop tablespace create_drop_db_tablespace;
\c - - - :worker_2_port
drop tablespace create_drop_db_tablespace;
\c - - - :master_port
drop user create_drop_db_test_user;
View File
@ -0,0 +1,23 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q
\endif
-- create/drop database for pg >= 16
set citus.enable_create_database_propagation=on;
-- test icu_rules
--
-- in practice we don't support it, but it's better to test
CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook';
ERROR: ICU rules cannot be specified unless locale provider is ICU
CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook' locale_provider='icu';
ERROR: LOCALE or ICU_LOCALE must be specified
CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook' locale_provider='icu' icu_locale = 'de_DE';
NOTICE: using standard form "de-DE" for ICU locale "de_DE"
ERROR: new locale provider (icu) does not match locale provider of the template database (libc)
HINT: Use the same locale provider as in the template database, or use template0 as template.
View File
@ -0,0 +1,9 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q
View File
@ -12,19 +12,9 @@
\set bob_worker_1_pw triplex-royalty-warranty-stand-cheek
\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
\set bob_fallback_pw :bob_worker_1_pw
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id
---------------------------------------------------------------------
17
(1 row)
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
worker_2_id
---------------------------------------------------------------------
35
(1 row)
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
-- alice is a superuser so she can update own password
CREATE USER alice PASSWORD :'alice_master_pw' SUPERUSER;
View File
@ -6,19 +6,9 @@
-- Test of ability to override host/port for a node
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 20000000;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id
---------------------------------------------------------------------
17
(1 row)
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
worker_2_id
---------------------------------------------------------------------
35
(1 row)
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
CREATE TABLE lotsa_connections (id integer, name text);
SELECT create_distributed_table('lotsa_connections', 'id');
View File
@ -1317,11 +1317,11 @@ SELECT type_ddl_plpgsql();
(1 row)
-- find all renamed types to verify the schema name didn't leak, nor a crash happened
SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup';
SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1;
nspname | typname
---------------------------------------------------------------------
public | prepare_ddl_type_backup
otherschema | prepare_ddl_type_backup
public | prepare_ddl_type_backup
(2 rows)
DROP TYPE prepare_ddl_type_backup;
@ -1332,6 +1332,7 @@ DROP FUNCTION ddl_in_plpgsql();
DROP FUNCTION copy_in_plpgsql();
DROP TABLE prepare_ddl;
DROP TABLE local_ddl;
DROP TABLE plpgsql_table;
DROP SCHEMA otherschema;
-- clean-up functions
DROP FUNCTION plpgsql_test_1();
View File
@ -1275,8 +1275,9 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0;
TRUNCATE TABLE pg_catalog.pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
SELECT citus_set_coordinator_host('localhost');
citus_set_coordinator_host
-- make sure we don't have any replication objects leftover on the nodes
SELECT public.wait_for_resource_cleanup();
wait_for_resource_cleanup
---------------------------------------------------------------------
(1 row)
View File
@ -526,3 +526,103 @@ BEGIN
RETURN result;
END;
$func$ LANGUAGE plpgsql;
-- Returns the pg_seclabels entries from all nodes in the cluster for the
-- given object name.
CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text,
master_port INTEGER DEFAULT 57636,
worker_1_port INTEGER DEFAULT 57637,
worker_2_port INTEGER DEFAULT 57638)
RETURNS TABLE (
node_type text,
result text
)
AS $func$
DECLARE
pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' ||
'SELECT provider, objtype, label FROM pg_seclabels ' ||
'WHERE objname = ''' || object_name || ''') q';
BEGIN
RETURN QUERY
SELECT
CASE
WHEN nodeport = master_port THEN 'coordinator'
WHEN nodeport = worker_1_port THEN 'worker_1'
WHEN nodeport = worker_2_port THEN 'worker_2'
ELSE 'unexpected_node'
END AS node_type,
a.result
FROM run_command_on_all_nodes(pg_seclabels_cmd) a
JOIN pg_dist_node USING (nodeid)
ORDER BY node_type;
END;
$func$ LANGUAGE plpgsql;
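-- Illustrative usage (a hedged sketch, kept as a comment so it is not part of the
-- test flow; 'user1' is just an example object name, matching how the seclabel
-- test calls this helper):
--
--   SELECT node_type, result
--   FROM get_citus_tests_label_provider_labels('user1')
--   ORDER BY node_type;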
-- For all nodes, returns database properties of the given database, except
-- oid, datfrozenxid and datminmxid.
--
-- Also returns whether the node has a pg_dist_object record for the database
-- and whether there are any stale pg_dist_object records for a database.
CREATE OR REPLACE FUNCTION check_database_on_all_nodes(p_database_name text)
RETURNS TABLE (node_type text, result text)
AS $func$
DECLARE
pg_ge_15_options text := '';
pg_ge_16_options text := '';
BEGIN
IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'datlocprovider') THEN
pg_ge_15_options := ', daticulocale, datcollversion, datlocprovider';
ELSE
pg_ge_15_options := $$, null as daticulocale, null as datcollversion, 'c' as datlocprovider$$;
END IF;
IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'daticurules') THEN
pg_ge_16_options := ', daticurules';
ELSE
pg_ge_16_options := ', null as daticurules';
END IF;
RETURN QUERY
SELECT
CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)'
WHEN (groupid = 0) THEN 'coordinator (remote)'
WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)'
ELSE 'worker node (remote)'
END AS node_type,
q2.result
FROM run_command_on_all_nodes(
format(
$$
SELECT to_jsonb(q.*)
FROM (
SELECT
(
SELECT to_jsonb(database_properties.*)
FROM (
SELECT datname, pa.rolname as database_owner,
pg_encoding_to_char(pd.encoding) as encoding,
datistemplate, datallowconn, datconnlimit, datacl,
pt.spcname AS tablespace, datcollate, datctype
%2$s -- >= pg15 options
%3$s -- >= pg16 options
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
JOIN pg_tablespace pt ON pd.dattablespace = pt.oid
WHERE datname = '%1$s'
) database_properties
) AS database_properties,
(
SELECT COUNT(*)=1
FROM pg_dist_object WHERE objid = (SELECT oid FROM pg_database WHERE datname = '%1$s')
) AS pg_dist_object_record_for_db_exists,
(
SELECT COUNT(*) > 0
FROM pg_dist_object
WHERE classid = 1262 AND objid NOT IN (SELECT oid FROM pg_database)
) AS stale_pg_dist_object_record_for_a_db_exists
) q
$$,
p_database_name, pg_ge_15_options, pg_ge_16_options
)
) q2
JOIN pg_dist_node USING (nodeid);
END;
$func$ LANGUAGE plpgsql;
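-- Illustrative usage (a hedged sketch, kept as a comment so it is not part of the
-- test flow; 'mydatabase' is a placeholder for whatever database the calling test
-- created):
--
--   SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;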
View File
@ -424,6 +424,34 @@ FROM pg_total_relation_size('local_vacuum_table') s ;
35000000
(1 row)
-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
select reltoastrelid from pg_class where relname='local_vacuum_table'
\gset
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
insert into local_vacuum_table select i from generate_series(1,10000) i;
VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
frozen_performed
---------------------------------------------------------------------
t
(1 row)
delete from local_vacuum_table;
-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
insert into local_vacuum_table select i from generate_series(1,10000) i;
VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
frozen_not_performed
---------------------------------------------------------------------
t
(1 row)
delete from local_vacuum_table;
-- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true)
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
View File
@ -71,32 +71,6 @@ NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
CREATE TABLE local_vacuum_table(name text);
select reltoastrelid from pg_class where relname='local_vacuum_table'
\gset
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
frozen_performed
---------------------------------------------------------------------
t
(1 row)
-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
frozen_not_performed
---------------------------------------------------------------------
t
(1 row)
DROP TABLE local_vacuum_table;
SET citus.log_remote_commands TO OFF;
create table dist(a int, b int);
select create_distributed_table('dist','a');
@ -1492,4 +1466,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col;
set client_min_messages to error;
drop extension postgres_fdw cascade;
drop schema pg14 cascade;
DROP ROLE role_1, r1;
reset client_min_messages;
View File
@ -267,6 +267,7 @@ SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
SELECT public.wait_for_resource_cleanup();
\q
\endif
-- recreate a mixed publication
@ -544,3 +545,9 @@ DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
DROP SCHEMA publication2 CASCADE;
SELECT public.wait_for_resource_cleanup();
wait_for_resource_cleanup
---------------------------------------------------------------------
(1 row)
View File
@ -267,4 +267,10 @@ SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
SELECT public.wait_for_resource_cleanup();
wait_for_resource_cleanup
---------------------------------------------------------------------
(1 row)
\q
View File
@ -0,0 +1,13 @@
-- The default nodes for the citus test suite are the coordinator and 2 worker nodes,
-- which we identify with master_port, worker_1_port, worker_2_port.
-- When extra nodes are present, GetLocalNodeId() does not behave correctly in some tests,
-- so we remove the non-default nodes. This test expects the non-default nodes
-- to not have any active placements.
SELECT any_value(citus_remove_node('localhost', nodeport))
FROM pg_dist_node
WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);
any_value
---------------------------------------------------------------------
(1 row)
View File
@ -0,0 +1,173 @@
--
-- SECLABEL
--
-- Test suite for SECURITY LABEL ON ROLE statements
--
-- first we remove one of the worker nodes to be able to test
-- citus_add_node later
SELECT citus_remove_node('localhost', :worker_2_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
-- create two roles, one with characters that need escaping
CREATE ROLE user1;
CREATE ROLE "user 2";
-- check an invalid label for our current dummy hook citus_test_object_relabel
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label';
ERROR: 'invalid_label' is not a valid security label for Citus tests.
-- if we disable metadata_sync, the command will not be propagated
SET citus.enable_metadata_sync TO off;
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified';
SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
worker_1 |
(2 rows)
RESET citus.enable_metadata_sync;
-- check that we only support propagating for roles
SET citus.shard_replication_factor to 1;
-- distributed table
CREATE TABLE a (a int);
SELECT create_distributed_table('a', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- distributed view
CREATE VIEW v_dist AS SELECT * FROM a;
-- distributed function
CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$
BEGIN RAISE NOTICE '%', $1; END; $$;
SECURITY LABEL ON TABLE a IS 'citus_classified';
NOTICE: not propagating SECURITY LABEL commands whose object type is not role
HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command.
SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
NOTICE: not propagating SECURITY LABEL commands whose object type is not role
HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command.
SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
NOTICE: not propagating SECURITY LABEL commands whose object type is not role
HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command.
SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"}
worker_1 |
(2 rows)
SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"}
worker_1 |
(2 rows)
SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"}
worker_1 |
(2 rows)
\c - - - :worker_1_port
SECURITY LABEL ON TABLE a IS 'citus_classified';
SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
\c - - - :master_port
SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"}
worker_1 | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"}
(2 rows)
SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"}
worker_1 | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"}
(2 rows)
SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"}
worker_1 | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"}
(2 rows)
DROP TABLE a CASCADE;
NOTICE: drop cascades to view v_dist
DROP FUNCTION notice;
-- test that SECURITY LABEL statement is actually propagated for ROLES
SET citus.log_remote_commands TO on;
SET citus.grep_remote_commands = '%SECURITY LABEL%';
-- we have exactly one provider loaded, so we may omit the provider in the command
SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SECURITY LABEL ON ROLE user1 IS NULL;
NOTICE: issuing SECURITY LABEL ON ROLE user1 IS NULL
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
NOTICE: issuing SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
\c - - - :worker_1_port
-- command not allowed from worker node
SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified';
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
\c - - - :master_port
RESET citus.log_remote_commands;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
(2 rows)
SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
(2 rows)
-- add a new node and check that it also propagates the SECURITY LABEL statement to the new node
SET citus.log_remote_commands TO on;
SET citus.grep_remote_commands = '%SECURITY LABEL%';
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT worker_create_or_alter_role('user 2', 'CREATE ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
(3 rows)
SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"}
(3 rows)
-- cleanup
RESET citus.log_remote_commands;
DROP ROLE user1, "user 2";
View File
@ -3,43 +3,6 @@ SET search_path TO worker_split_binary_copy_test;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81060000;
-- Remove extra nodes added, otherwise GetLocalNodeId() does not behave correctly.
SELECT citus_remove_node('localhost', 8887);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 9995);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 9992);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 9998);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 9997);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT citus_remove_node('localhost', 8888);
citus_remove_node
---------------------------------------------------------------------
(1 row)
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
l_orderkey bigint not null,
View File
@ -32,10 +32,12 @@ test: propagate_extension_commands
test: escape_extension_name
test: ref_citus_local_fkeys
test: alter_database_owner
test: seclabel
test: distributed_triggers
test: create_single_shard_table
test: create_drop_database_propagation
test: create_drop_database_propagation_pg15
test: create_drop_database_propagation_pg16
# don't parallelize single_shard_table_udfs to make sure colocation ids are sequential
test: single_shard_table_udfs
@ -298,6 +300,7 @@ test: multi_foreign_key_relation_graph
# Replicating reference tables to coordinator. Add coordinator to pg_dist_node
# and rerun some of the tests.
# --------
test: remove_coordinator_from_metadata
test: add_coordinator
test: replicate_reference_tables_to_coordinator
test: citus_local_tables
View File
@ -83,7 +83,8 @@ test: forcedelegation_functions
# this should be run alone as it gets too many clients
test: join_pushdown
test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message
test: multi_agg_distinct multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction
test: multi_agg_distinct
test: multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction
test: multi_reference_table multi_select_for_update relation_access_tracking pg13_with_ties
test: custom_aggregate_support aggregate_support tdigest_aggregate_support
test: multi_average_expression multi_working_columns multi_having_pushdown having_subquery
View File
@ -90,7 +90,6 @@ my $workerCount = 2;
my $serversAreShutdown = "TRUE";
my $usingWindows = 0;
my $mitmPid = 0;
my $workerCount = 2;
if ($Config{osname} eq "MSWin32")
{
@ -510,6 +509,12 @@ if($vanillatest)
# we disable some restrictions for local objects like local views to not break postgres vanilla test behaviour.
push(@pgOptions, "citus.enforce_object_restrictions_for_local_objects=false");
}
else
{
# We currently need this config for isolation tests and security label tests.
# This option loads a security label provider, which we don't want in vanilla tests.
push(@pgOptions, "citus.running_under_citus_test_suite=true");
}
if ($useMitmproxy)
{
@ -560,7 +565,6 @@ if($isolationtester)
push(@pgOptions, "citus.metadata_sync_interval=1000");
push(@pgOptions, "citus.metadata_sync_retry_interval=100");
push(@pgOptions, "client_min_messages='warning'"); # pg12 introduced notice showing during isolation tests
push(@pgOptions, "citus.running_under_isolation_test=true");
# Disable all features of the maintenance daemon. Otherwise queries might
# randomly show temporarily as "waiting..." because they are waiting for the
View File
@ -10,6 +10,7 @@ test: foreign_key_to_reference_table
# Split tests go here.
test: split_shard
test: worker_split_copy_test
test: remove_non_default_nodes
test: worker_split_binary_copy_test
test: worker_split_text_copy_test
test: citus_split_shard_by_split_points_negative
View File
@ -3,8 +3,6 @@
--
-- node trying to add itself without specifying groupid => 0 should error out
-- first remove the coordinator for testing master_add_node for the coordinator
SELECT master_remove_node('localhost', :master_port);
SELECT master_add_node('localhost', :master_port);
SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset
View File
@ -113,4 +113,5 @@ SELECT citus_split_shard_by_split_points(
--BEGIN : Cleanup
\c - postgres - :master_port
DROP SCHEMA "citus_split_shard_by_split_points_negative" CASCADE;
SELECT public.wait_for_resource_cleanup();
--END : Cleanup
View File
@ -1,7 +1,40 @@
-- test for create/drop database propagation
-- This test only executes for Postgres 14
-- For postgres 15 tests, pg15_create_drop_database_propagation.sql is used
-- Test for create/drop database propagation.
-- This test only executes for Postgres versions < 15.
-- For versions >= 15, pg15_create_drop_database_propagation.sql is used.
-- For versions >= 16, pg16_create_drop_database_propagation.sql is used.
-- Test the UDF that we use to issue database commands during metadata sync.
SELECT pg_catalog.citus_internal_database_command(null);
CREATE ROLE test_db_commands WITH LOGIN;
ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_db_commands';
SELECT pg_reload_conf();
SELECT pg_sleep(0.1);
SET ROLE test_db_commands;
-- fails on null input
SELECT pg_catalog.citus_internal_database_command(null);
-- fails on non create / drop db command
SELECT pg_catalog.citus_internal_database_command('CREATE TABLE foo_bar(a int)');
SELECT pg_catalog.citus_internal_database_command('SELECT 1');
SELECT pg_catalog.citus_internal_database_command('asfsfdsg');
SELECT pg_catalog.citus_internal_database_command('');
RESET ROLE;
ALTER ROLE test_db_commands nocreatedb;
SET ROLE test_db_commands;
-- make sure that pg_catalog.citus_internal_database_command doesn't cause privilege escalation
SELECT pg_catalog.citus_internal_database_command('CREATE DATABASE no_permissions');
RESET ROLE;
DROP USER test_db_commands;
ALTER SYSTEM RESET citus.enable_manual_metadata_changes_for_user;
SELECT pg_reload_conf();
SELECT pg_sleep(0.1);
\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3'
CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace';
@ -13,6 +46,28 @@ CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace
\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts5'
CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace';
\c - - - :master_port
CREATE DATABASE local_database;
-- check that it's only created on the coordinator
SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type;
DROP DATABASE local_database;
-- and is dropped
SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type;
\c - - - :worker_1_port
CREATE DATABASE local_database;
-- check that it's only created on the local node
SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type;
DROP DATABASE local_database;
-- and is dropped
SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type;
\c - - - :master_port
create user create_drop_db_test_user;
@ -28,7 +83,7 @@ CREATE DATABASE mydatabase
ALLOW_CONNECTIONS = true
IS_TEMPLATE = false;
CREATE DATABASE mydatabase
CREATE DATABASE mydatabase_1
WITH template=template1
OWNER = create_drop_db_test_user
ENCODING = 'UTF8'
@ -37,40 +92,51 @@ CREATE DATABASE mydatabase
ALLOW_CONNECTIONS = true
IS_TEMPLATE = false;
SELECT result from run_command_on_all_nodes(
SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type;
-- Test LC / LOCALE settings that don't match the ones provided in template db.
-- All should throw an error on the coordinator.
CREATE DATABASE lc_collate_test LC_COLLATE = 'C.UTF-8';
CREATE DATABASE lc_ctype_test LC_CTYPE = 'C.UTF-8';
CREATE DATABASE locale_test LOCALE = 'C.UTF-8';
CREATE DATABASE lc_collate_lc_ctype_test LC_COLLATE = 'C.UTF-8' LC_CTYPE = 'C.UTF-8';
-- Test LC / LOCALE settings that match the ones provided in template db.
CREATE DATABASE lc_collate_test LC_COLLATE = 'C';
CREATE DATABASE lc_ctype_test LC_CTYPE = 'C';
CREATE DATABASE locale_test LOCALE = 'C';
CREATE DATABASE lc_collate_lc_ctype_test LC_COLLATE = 'C' LC_CTYPE = 'C';
SELECT * FROM public.check_database_on_all_nodes('lc_collate_test') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('lc_ctype_test') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('locale_test') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('lc_collate_lc_ctype_test') ORDER BY node_type;
DROP DATABASE lc_collate_test;
DROP DATABASE lc_ctype_test;
DROP DATABASE locale_test;
DROP DATABASE lc_collate_lc_ctype_test;
-- Propagating ALTER TABLESPACE .. RENAME TO .. is not supported, so we need to rename it manually on all nodes.
SELECT result FROM run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
ALTER TABLESPACE create_drop_db_tablespace RENAME TO "ts-needs\!escape"
$$
) ORDER BY result;
);
CREATE USER "role-needs\!escape";
drop database mydatabase;
CREATE DATABASE "db-needs\!escape" owner "role-needs\!escape" tablespace "ts-needs\!escape";
SELECT result from run_command_on_all_nodes(
-- Rename it to make check_database_on_all_nodes happy.
-- Today we don't support ALTER DATABASE .. RENAME TO .., so we need to propagate it manually.
SELECT result FROM run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
ALTER DATABASE "db-needs\!escape" RENAME TO db_needs_escape
$$
) ORDER BY result;
);
SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type;
-- test database syncing after node addition
@ -81,42 +147,39 @@ CREATE DATABASE mydatabase
OWNER = create_drop_db_test_user
CONNECTION LIMIT = 10
ENCODING = 'UTF8'
TABLESPACE = create_drop_db_tablespace
TABLESPACE = "ts-needs\!escape"
ALLOW_CONNECTIONS = false
IS_TEMPLATE = false;
SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
$$
) ORDER BY result;
SET citus.metadata_sync_mode to 'transactional';
select 1 from citus_add_node('localhost', :worker_2_port);
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
$$
) ORDER BY result;
SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type;
select 1 from citus_remove_node('localhost', :worker_2_port);
SET citus.metadata_sync_mode to 'nontransactional';
select 1 from citus_add_node('localhost', :worker_2_port);
RESET citus.metadata_sync_mode;
SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type;
SELECT citus_disable_node_and_wait('localhost', :worker_1_port, true);
CREATE DATABASE test_node_activation;
SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('test_node_activation') ORDER BY node_type;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%DROP DATABASE%';
@ -124,45 +187,22 @@ drop database mydatabase;
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
$$
) ORDER BY result;
-- check that we actually drop the database
drop database mydatabase_1;
SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;
-- create a template database with all options set and allow connections false
CREATE DATABASE my_template_database
WITH OWNER = create_drop_db_test_user
ENCODING = 'UTF8'
TABLESPACE = create_drop_db_tablespace
TABLESPACE = "ts-needs\!escape"
ALLOW_CONNECTIONS = false
IS_TEMPLATE = true;
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'my_template_database'
) q2
$$
) ORDER BY result;
SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER BY node_type;
--template databases cannot be dropped, so we need to change the template flag
SELECT result from run_command_on_all_nodes(
@ -177,20 +217,8 @@ set citus.grep_remote_commands = '%DROP DATABASE%';
drop database my_template_database;
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate , pd. datctype , pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'my_template_database'
) q2
$$
) ORDER BY result;
SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER BY node_type;
--tests for special characters in database name
set citus.enable_create_database_propagation=on;
@ -202,19 +230,320 @@ create database "mydatabase#1'2";
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database if exists "mydatabase#1'2";
reset citus.grep_remote_commands;
reset citus.log_remote_commands;
--clean up resources created by this test
-- it doesn't fail thanks to "if exists"
drop database if exists "mydatabase#1'2";
drop tablespace create_drop_db_tablespace;
-- recreate it to verify that it's actually dropped
create database "mydatabase#1'2";
drop database "mydatabase#1'2";
-- the second time we try to drop it, it fails due to the lack of "if exists"
drop database "mydatabase#1'2";
\c - - - :worker_1_port
drop tablespace create_drop_db_tablespace;
SET citus.enable_create_database_propagation TO ON;
\c - - - :worker_2_port
-- show that dropping the database from workers is not allowed when citus.enable_create_database_propagation is on
DROP DATABASE db_needs_escape;
drop tablespace create_drop_db_tablespace;
-- and the same applies to create database too
create database error_test;
\c - - - :master_port
SET citus.enable_create_database_propagation TO ON;
DROP DATABASE test_node_activation;
DROP DATABASE db_needs_escape;
DROP USER "role-needs\!escape";
-- drop database with force options test
create database db_force_test;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database db_force_test with (force);
reset citus.log_remote_commands;
reset citus.grep_remote_commands;
SELECT * FROM public.check_database_on_all_nodes('db_force_test') ORDER BY node_type;
-- test that we won't propagate non-distributed databases in citus_add_node
select 1 from citus_remove_node('localhost', :worker_2_port);
SET citus.enable_create_database_propagation TO off;
CREATE DATABASE non_distributed_db;
SET citus.enable_create_database_propagation TO on;
create database distributed_db;
select 1 from citus_add_node('localhost', :worker_2_port);
--non_distributed_db should not be propagated to worker_2
SELECT * FROM public.check_database_on_all_nodes('non_distributed_db') ORDER BY node_type;
--distributed_db should be propagated to worker_2
SELECT * FROM public.check_database_on_all_nodes('distributed_db') ORDER BY node_type;
--clean up resources created by this test
drop database distributed_db;
set citus.enable_create_database_propagation TO off;
drop database non_distributed_db;
-- test role grants on DATABASE in metadata sync
SELECT result from run_command_on_all_nodes(
$$
create database db_role_grants_test_non_distributed
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
revoke connect,temp,temporary,create on database db_role_grants_test_non_distributed from public
$$
) ORDER BY result;
SET citus.enable_create_database_propagation TO on;
CREATE ROLE db_role_grants_test_role_exists_on_node_2;
select 1 from citus_remove_node('localhost', :worker_2_port);
CREATE DATABASE db_role_grants_test;
revoke connect,temp,temporary,create on database db_role_grants_test from public;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%CREATE ROLE%';
CREATE ROLE db_role_grants_test_role_missing_on_node_2;
RESET citus.log_remote_commands;
RESET citus.grep_remote_commands;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%GRANT%';
grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test to db_role_grants_test_role_exists_on_node_2;
grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test to db_role_grants_test_role_missing_on_node_2;
grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test_non_distributed to db_role_grants_test_role_exists_on_node_2;
grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test_non_distributed to db_role_grants_test_role_missing_on_node_2;
-- check the privileges before add_node for database db_role_grants_test,
-- role db_role_grants_test_role_exists_on_node_2
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CREATE')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'TEMPORARY')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CONNECT')
$$
) ORDER BY result;
-- check the privileges before add_node for database db_role_grants_test,
-- role db_role_grants_test_role_missing_on_node_2
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CREATE')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'TEMPORARY')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CONNECT')
$$
) ORDER BY result;
-- check the privileges before add_node for database db_role_grants_test_non_distributed,
-- role db_role_grants_test_role_exists_on_node_2
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CREATE')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CONNECT')
$$
) ORDER BY result;
-- check the privileges before add_node for database db_role_grants_test_non_distributed,
-- role db_role_grants_test_role_missing_on_node_2
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CREATE')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CONNECT')
$$
) ORDER BY result;
RESET citus.log_remote_commands;
RESET citus.grep_remote_commands;
select 1 from citus_add_node('localhost', :worker_2_port);
-- check the privileges after add_node for database db_role_grants_test,
-- role db_role_grants_test_role_exists_on_node_2
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CREATE')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'TEMPORARY')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CONNECT')
$$
) ORDER BY result;
-- check the privileges after add_node for database db_role_grants_test,
-- role db_role_grants_test_role_missing_on_node_2
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CREATE')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'TEMPORARY')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CONNECT')
$$
) ORDER BY result;
-- check the privileges after add_node for database db_role_grants_test_non_distributed,
-- role db_role_grants_test_role_exists_on_node_2
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CREATE')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CONNECT')
$$
) ORDER BY result;
-- check the privileges after add_node for database db_role_grants_test_non_distributed,
-- role db_role_grants_test_role_missing_on_node_2
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CREATE')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY')
$$
) ORDER BY result;
SELECT result from run_command_on_all_nodes(
$$
select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CONNECT')
$$
) ORDER BY result;
grant connect,temp,temporary,create on database db_role_grants_test to public;
DROP DATABASE db_role_grants_test;
SELECT result from run_command_on_all_nodes(
$$
drop database db_role_grants_test_non_distributed
$$
) ORDER BY result;
DROP ROLE db_role_grants_test_role_exists_on_node_2;
DROP ROLE db_role_grants_test_role_missing_on_node_2;
--clean up resources created by this test
-- DROP TABLESPACE propagation is not supported, so we need to drop it on each node manually.
SELECT result FROM run_command_on_all_nodes(
$$
drop tablespace "ts-needs\!escape"
$$
);
drop user create_drop_db_test_user;
reset citus.enable_create_database_propagation;

View File

@ -9,236 +9,57 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\q
\endif
-- create/drop database for pg > 15
-- create/drop database for pg >= 15
\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3'
CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace';
\c - - - :worker_1_port
\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts4'
CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace';
\c - - - :worker_2_port
\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts5'
CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace';
\c - - - :master_port
create user create_drop_db_test_user;
set citus.enable_create_database_propagation=on;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%CREATE DATABASE%';
CREATE DATABASE mydatabase
WITH
OWNER = create_drop_db_test_user
CONNECTION LIMIT = 10
ENCODING = 'UTF8'
TABLESPACE = create_drop_db_tablespace
ALLOW_CONNECTIONS = true
IS_TEMPLATE = false
OID = 966345;
WITH OID = 966345;
SET citus.log_remote_commands = false;
CREATE DATABASE mydatabase
WITH strategy file_copy;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate, pd.datctype, pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
$$
) ORDER BY result;
CREATE DATABASE st_wal_log
WITH strategy WaL_LoG;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database mydatabase;
SELECT * FROM public.check_database_on_all_nodes('st_wal_log') ORDER BY node_type;
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate, pd.datctype, pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
$$
) ORDER BY result;
drop database st_wal_log;
select citus_remove_node('localhost', :worker_2_port);
select 1 from citus_remove_node('localhost', :worker_2_port);
-- test COLLATION_VERSION
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%CREATE DATABASE%';
CREATE DATABASE mydatabase2
WITH OWNER = create_drop_db_test_user
ENCODING = 'UTF8'
TABLESPACE = create_drop_db_tablespace
ALLOW_CONNECTIONS = true
IS_TEMPLATE = false
OID = 966345;
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate, pd.datctype, pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase2'
) q2
$$
) ORDER BY result;
CREATE DATABASE test_collation_version
WITH ENCODING = 'UTF8'
COLLATION_VERSION = '1.0'
ALLOW_CONNECTIONS = false;
select 1 from citus_add_node('localhost', :worker_2_port);
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate, pd.datctype, pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase2'
) q2
$$
) ORDER BY result;
SELECT * FROM public.check_database_on_all_nodes('test_collation_version') ORDER BY node_type;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database mydatabase2;
drop database test_collation_version;
SET citus.log_remote_commands = false;
SET client_min_messages TO WARNING;
-- test LOCALE_PROVIDER & ICU_LOCALE
CREATE DATABASE test_locale_provider
WITH ENCODING = 'UTF8'
LOCALE_PROVIDER = 'icu'
ICU_LOCALE = 'en_US';
RESET client_min_messages;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate, pd.datctype, pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'mydatabase'
) q2
$$
) ORDER BY result;
CREATE DATABASE test_locale_provider
WITH ENCODING = 'UTF8'
LOCALE_PROVIDER = 'libc'
ICU_LOCALE = 'en_US';
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%CREATE DATABASE%';
CREATE DATABASE test_locale_provider
WITH ENCODING = 'UTF8'
LOCALE_PROVIDER = 'libc';
-- create a template database with all options set and allow connections false
CREATE DATABASE my_template_database
WITH OWNER = create_drop_db_test_user
ENCODING = 'UTF8'
COLLATION_VERSION = '1.0'
TABLESPACE = create_drop_db_tablespace
ALLOW_CONNECTIONS = false
IS_TEMPLATE = true;
SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER BY node_type;
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate, pd.datctype, pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'my_template_database'
) q2
$$
) ORDER BY result;
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate, pd.datctype, pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'my_template_database'
) q2
$$
) ORDER BY result;
SET citus.log_remote_commands = true;
-- template databases cannot be dropped, so we need to clear the template flag first
SELECT result from run_command_on_all_nodes(
$$
UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database'
$$
) ORDER BY result;
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database my_template_database;
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding,
pd.datistemplate, pd.datallowconn, pd.datconnlimit,
pd.datcollate, pd.datctype, pd.datacl,
pa.rolname AS database_owner, pt.spcname AS tablespace
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
join pg_tablespace pt on pd.dattablespace = pt.oid
WHERE datname = 'my_template_database'
) q2
$$
) ORDER BY result;
--tests for special characters in database name
set citus.enable_create_database_propagation=on;
SET citus.log_remote_commands = true;
set citus.grep_remote_commands = '%CREATE DATABASE%';
create database "mydatabase#1'2";
set citus.grep_remote_commands = '%DROP DATABASE%';
drop database if exists "mydatabase#1'2";
drop database test_locale_provider;
\c - - - :master_port
drop tablespace create_drop_db_tablespace;
\c - - - :worker_1_port
drop tablespace create_drop_db_tablespace;
\c - - - :worker_2_port
drop tablespace create_drop_db_tablespace;
\c - - - :master_port
drop user create_drop_db_test_user;

View File

@ -0,0 +1,22 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q
\endif
-- create/drop database for pg >= 16
set citus.enable_create_database_propagation=on;
-- test icu_rules
--
-- in practice we don't support it, but it's better to test it anyway
CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook';
CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook' locale_provider='icu';
CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook' locale_provider='icu' icu_locale = 'de_DE';

View File

@ -16,9 +16,9 @@
\set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile
\set bob_fallback_pw :bob_worker_1_pw
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
-- alice is a superuser so she can update own password

View File

@ -7,9 +7,9 @@
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 20000000;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port
\gset
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port;
SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port
\gset
CREATE TABLE lotsa_connections (id integer, name text);

View File

@ -624,7 +624,7 @@ CREATE TYPE prepare_ddl_type AS (x int, y int);
SELECT type_ddl_plpgsql();
-- find all renamed types to verify the schema name didn't leak, nor a crash happened
SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup';
SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1;
DROP TYPE prepare_ddl_type_backup;
RESET search_path;
@ -635,6 +635,7 @@ DROP FUNCTION ddl_in_plpgsql();
DROP FUNCTION copy_in_plpgsql();
DROP TABLE prepare_ddl;
DROP TABLE local_ddl;
DROP TABLE plpgsql_table;
DROP SCHEMA otherschema;
-- clean-up functions

View File

@ -608,5 +608,5 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
SELECT citus_set_coordinator_host('localhost');
-- make sure we don't have any replication objects leftover on the nodes
SELECT public.wait_for_resource_cleanup();

View File

@ -550,3 +550,105 @@ BEGIN
RETURN result;
END;
$func$ LANGUAGE plpgsql;
-- Returns pg_seclabels entries from all nodes in the cluster for which
-- the object name is the input.
CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text,
master_port INTEGER DEFAULT 57636,
worker_1_port INTEGER DEFAULT 57637,
worker_2_port INTEGER DEFAULT 57638)
RETURNS TABLE (
node_type text,
result text
)
AS $func$
DECLARE
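-- the command below is executed on every node via run_command_on_all_nodes and
-- returns the matching pg_seclabels rows for the given object as jsonb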
pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' ||
'SELECT provider, objtype, label FROM pg_seclabels ' ||
'WHERE objname = ''' || object_name || ''') q';
BEGIN
RETURN QUERY
SELECT
CASE
WHEN nodeport = master_port THEN 'coordinator'
WHEN nodeport = worker_1_port THEN 'worker_1'
WHEN nodeport = worker_2_port THEN 'worker_2'
ELSE 'unexpected_node'
END AS node_type,
a.result
FROM run_command_on_all_nodes(pg_seclabels_cmd) a
JOIN pg_dist_node USING (nodeid)
ORDER BY node_type;
END;
$func$ LANGUAGE plpgsql;
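-- Illustrative usage only (mirrors the calls made later in the test suite; not
-- an additional test step):
--   SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;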
-- For all nodes, returns database properties of given database, except
-- oid, datfrozenxid and datminmxid.
--
-- Also returns whether the node has a pg_dist_object record for the database
-- and whether there are any stale pg_dist_object records for a database.
CREATE OR REPLACE FUNCTION check_database_on_all_nodes(p_database_name text)
RETURNS TABLE (node_type text, result text)
AS $func$
DECLARE
pg_ge_15_options text := '';
pg_ge_16_options text := '';
BEGIN
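-- probe pg_attribute for version-specific pg_database columns so that the same
-- helper works against PG14, PG15 and PG16 servers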
IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'datlocprovider') THEN
pg_ge_15_options := ', daticulocale, datcollversion, datlocprovider';
ELSE
pg_ge_15_options := $$, null as daticulocale, null as datcollversion, 'c' as datlocprovider$$;
END IF;
IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'daticurules') THEN
pg_ge_16_options := ', daticurules';
ELSE
pg_ge_16_options := ', null as daticurules';
END IF;
RETURN QUERY
SELECT
CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)'
WHEN (groupid = 0) THEN 'coordinator (remote)'
WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)'
ELSE 'worker node (remote)'
END AS node_type,
q2.result
FROM run_command_on_all_nodes(
format(
$$
SELECT to_jsonb(q.*)
FROM (
SELECT
(
SELECT to_jsonb(database_properties.*)
FROM (
SELECT datname, pa.rolname as database_owner,
pg_encoding_to_char(pd.encoding) as encoding,
datistemplate, datallowconn, datconnlimit, datacl,
pt.spcname AS tablespace, datcollate, datctype
%2$s -- >= pg15 options
%3$s -- >= pg16 options
FROM pg_database pd
JOIN pg_authid pa ON pd.datdba = pa.oid
JOIN pg_tablespace pt ON pd.dattablespace = pt.oid
WHERE datname = '%1$s'
) database_properties
) AS database_properties,
(
SELECT COUNT(*)=1
FROM pg_dist_object WHERE objid = (SELECT oid FROM pg_database WHERE datname = '%1$s')
) AS pg_dist_object_record_for_db_exists,
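-- classid 1262 is pg_database, so a stale record is a pg_dist_object
-- entry that points at a database which no longer exists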
(
SELECT COUNT(*) > 0
FROM pg_dist_object
WHERE classid = 1262 AND objid NOT IN (SELECT oid FROM pg_database)
) AS stale_pg_dist_object_record_for_a_db_exists
) q
$$,
p_database_name, pg_ge_15_options, pg_ge_16_options
)
) q2
JOIN pg_dist_node USING (nodeid);
END;
$func$ LANGUAGE plpgsql;
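-- Illustrative usage only (this is how the tests above call the helper; not an
-- additional test step):
--   SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;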

View File

@ -272,6 +272,27 @@ VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table;
SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size
FROM pg_total_relation_size('local_vacuum_table') s;
-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
select reltoastrelid from pg_class where relname='local_vacuum_table'
\gset
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
insert into local_vacuum_table select i from generate_series(1,10000) i;
VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
delete from local_vacuum_table;
-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
insert into local_vacuum_table select i from generate_series(1,10000) i;
VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
delete from local_vacuum_table;
-- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true)
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;

View File

@ -22,25 +22,6 @@ VACUUM (INDEX_CLEANUP "AUTOX") t1;
VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1;
VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1;
-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
CREATE TABLE local_vacuum_table(name text);
select reltoastrelid from pg_class where relname='local_vacuum_table'
\gset
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
DROP TABLE local_vacuum_table;
SET citus.log_remote_commands TO OFF;
create table dist(a int, b int);
@ -777,4 +758,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col;
set client_min_messages to error;
drop extension postgres_fdw cascade;
drop schema pg14 cascade;
DROP ROLE role_1, r1;
reset client_min_messages;

View File

@ -195,6 +195,7 @@ SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
SELECT public.wait_for_resource_cleanup();
\q
\endif
@ -391,3 +392,5 @@ DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
DROP SCHEMA publication2 CASCADE;
SELECT public.wait_for_resource_cleanup();

View File

@ -0,0 +1,8 @@
-- The default nodes for the citus test suite are the coordinator and 2 worker nodes,
-- which we identify with master_port, worker_1_port and worker_2_port.
-- When extra nodes are present, GetLocalNodeId() does not behave correctly in some tests,
-- so we remove the non-default nodes. This test expects the non-default nodes
-- to not have any active placements.
SELECT any_value(citus_remove_node('localhost', nodeport))
FROM pg_dist_node
WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port);

View File

@ -0,0 +1,87 @@
--
-- SECLABEL
--
-- Test suite for SECURITY LABEL ON ROLE statements
--
-- first we remove one of the worker nodes to be able to test
-- citus_add_node later
SELECT citus_remove_node('localhost', :worker_2_port);
-- create two roles, one with characters that need escaping
CREATE ROLE user1;
CREATE ROLE "user 2";
-- check an invalid label for our current dummy hook citus_test_object_relabel
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label';
-- if we disable metadata_sync, the command will not be propagated
SET citus.enable_metadata_sync TO off;
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified';
SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
RESET citus.enable_metadata_sync;
-- check that we only support propagating security labels for roles
SET citus.shard_replication_factor to 1;
-- distributed table
CREATE TABLE a (a int);
SELECT create_distributed_table('a', 'a');
-- distributed view
CREATE VIEW v_dist AS SELECT * FROM a;
-- distributed function
CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$
BEGIN RAISE NOTICE '%', $1; END; $$;
SECURITY LABEL ON TABLE a IS 'citus_classified';
SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
\c - - - :worker_1_port
SECURITY LABEL ON TABLE a IS 'citus_classified';
SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified';
SECURITY LABEL ON VIEW v_dist IS 'citus_classified';
\c - - - :master_port
SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type;
DROP TABLE a CASCADE;
DROP FUNCTION notice;
-- test that the SECURITY LABEL statement is actually propagated for ROLES
SET citus.log_remote_commands TO on;
SET citus.grep_remote_commands = '%SECURITY LABEL%';
-- since we have exactly one label provider loaded, the provider can be omitted from the command
SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
SECURITY LABEL ON ROLE user1 IS NULL;
SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
\c - - - :worker_1_port
-- command not allowed from worker node
SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified';
\c - - - :master_port
RESET citus.log_remote_commands;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
-- add a new node and check that it also propagates the SECURITY LABEL statement to the new node
SET citus.log_remote_commands TO on;
SET citus.grep_remote_commands = '%SECURITY LABEL%';
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
-- cleanup
RESET citus.log_remote_commands;
DROP ROLE user1, "user 2";

View File

@ -4,14 +4,6 @@ SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81060000;
-- Remove extra nodes added, otherwise GetLocalNodeId() does not behave correctly.
SELECT citus_remove_node('localhost', 8887);
SELECT citus_remove_node('localhost', 9995);
SELECT citus_remove_node('localhost', 9992);
SELECT citus_remove_node('localhost', 9998);
SELECT citus_remove_node('localhost', 9997);
SELECT citus_remove_node('localhost', 8888);
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
l_orderkey bigint not null,