Merge branch 'main' into grant_role_2pc

grant_database_2pc_onur_1
Gürkan İndibay 2024-01-09 10:58:04 +03:00 committed by GitHub
commit 665c65cf0e
36 changed files with 1450 additions and 107 deletions


@ -41,6 +41,7 @@
#include "distributed/metadata_utility.h"
#include "distributed/multi_executor.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/serialize_distributed_ddls.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_transaction.h"
@ -248,6 +249,9 @@ IsSetTablespaceStatement(AlterDatabaseStmt *stmt)
*
* In this stage we can prepare the commands that need to be run on all workers to
* apply the same ALTER DATABASE command.
*
* We also serialize database commands globally by acquiring a Citus specific advisory
* lock based on OCLASS_DATABASE on the first primary worker node.
*/
List *
PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
@ -264,6 +268,7 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
}
EnsureCoordinator();
SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname);
char *sql = DeparseTreeNode((Node *) stmt);
@ -291,11 +296,14 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
#if PG_VERSION_NUM >= PG_VERSION_15
/*
* PreprocessAlterDatabaseSetStmt is executed before the statement is applied to the local
* postgres instance.
* PreprocessAlterDatabaseRefreshCollStmt is executed before the statement is applied to
* the local postgres instance.
*
* In this stage we can prepare the commands that need to be run on all workers to
* refresh the database collation version.
*
* We also serialize database commands globally by acquiring a Citus specific advisory
* lock based on OCLASS_DATABASE on the first primary worker node.
*/
List *
PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
@ -312,6 +320,7 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
}
EnsureCoordinator();
SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname);
char *sql = DeparseTreeNode((Node *) stmt);
@ -325,8 +334,51 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
#endif
/*
* PreprocessAlterDatabaseRenameStmt is executed before the statement is applied to the local
* PreprocessAlterDatabaseRenameStmt is executed before the statement is applied to
* the local postgres instance.
*
* We also serialize database commands globally by acquiring a Citus specific advisory
* lock based on OCLASS_DATABASE on the first primary worker node.
*
* We acquire this lock here instead of PostprocessAlterDatabaseRenameStmt because the
* command renames the database and SerializeDistributedDDLsOnObjectClass resolves the
* object on workers based on database name. For this reason, we need to acquire the lock
* before the command is applied to the local postgres instance.
*/
List *
PreprocessAlterDatabaseRenameStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
bool missingOk = true;
RenameStmt *stmt = castNode(RenameStmt, node);
ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->subname,
missingOk);
if (!ShouldPropagate() || !IsAnyObjectDistributed(list_make1(dbAddress)))
{
return NIL;
}
EnsureCoordinator();
/*
* Unlike other ALTER DATABASE commands, we first acquire a lock by
* providing InvalidOid because we want ALTER DATABASE .. RENAME TO ..
* commands to block not only against ALTER DATABASE operations but also
* against CREATE DATABASE operations, since those might cause name
* conflicts and could also lead to deadlocks.
*/
SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE);
SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->subname);
return NIL;
}
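As a rough sketch of what the two serialization calls above translate into, the preprocess step ends up sending commands of the following shape to the first primary worker. The object class integer and database name are illustrative: the regression tests later in this commit use 25 for OCLASS_DATABASE on PostgreSQL versions below 16 and 26 on 16+, and 'db_old_name' stands in for stmt->subname.

SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(26, NULL);
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(26, 'db_old_name');

The first call takes the class-level lock (blocking concurrent CREATE DATABASE), the second takes the object-level lock on the database being renamed; both are released when the coordinated transaction commits or aborts.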
/*
* PostprocessAlterDatabaseRenameStmt is executed after the statement is applied to the local
* postgres instance. In this stage we prepare ALTER DATABASE RENAME statement to be run on
* all workers.
*/
@ -361,6 +413,9 @@ PostprocessAlterDatabaseRenameStmt(Node *node, const char *queryString)
*
* In this stage we can prepare the commands that need to be run on all workers to
* apply the same ALTER DATABASE ... SET command.
*
* We also serialize database commands globally by acquiring a Citus specific advisory
* lock based on OCLASS_DATABASE on the first primary worker node.
*/
List *
PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
@ -377,6 +432,7 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
}
EnsureCoordinator();
SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname);
char *sql = DeparseTreeNode((Node *) stmt);
@ -389,12 +445,15 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
/*
* PostprocessAlterDatabaseStmt is executed before the statement is applied to the local
* PreprocessCreateDatabaseStmt is executed before the statement is applied to the local
* Postgres instance.
*
* In this stage, we perform the validations that we want to run before delegating to
* previous utility hooks, because it might not be convenient to throw an error from
* within the implicit transaction that creates the database.
*
* We also serialize database commands globally by acquiring a Citus specific advisory
* lock based on OCLASS_DATABASE on the first primary worker node.
*/
List *
PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
@ -405,11 +464,13 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
return NIL;
}
EnsureCoordinator();
EnsurePropagationToCoordinator();
CreatedbStmt *stmt = castNode(CreatedbStmt, node);
EnsureSupportedCreateDatabaseCommand(stmt);
SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE);
return NIL;
}
@ -430,7 +491,7 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString)
return NIL;
}
EnsureCoordinator();
EnsurePropagationToCoordinator();
/*
* Given that CREATE DATABASE doesn't support "IF NOT EXISTS" and we're
@ -448,16 +509,19 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString)
(void *) createDatabaseCommand,
ENABLE_DDL_PROPAGATION);
return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands);
return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands);
}
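For illustration, the task list built above wraps the deparsed statement between the propagation toggles, so each remote node receives commands roughly like the following. This is a sketch: it assumes DISABLE_DDL_PROPAGATION / ENABLE_DDL_PROPAGATION expand to toggling citus.enable_ddl_propagation, and mydatabase is a placeholder for the deparsed CREATE DATABASE statement.

SET citus.enable_ddl_propagation TO 'off';
CREATE DATABASE mydatabase;
SET citus.enable_ddl_propagation TO 'on';

Because the tasks are nontransactional, the statements are sent outside of a transaction block, which is what allows CREATE DATABASE to run on the remote nodes at all (the DROP DATABASE comment below makes the same point).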
/*
* PreprocessDropDatabaseStmt is executed after the statement is applied to the local
* PreprocessDropDatabaseStmt is executed before the statement is applied to the local
* postgres instance. In this stage we can prepare the commands that need to be run on
* all workers to drop the database. Since the DROP DATABASE statement errors out when
* run inside a transaction block, we need to use NontransactionalNodeDDLTaskList to
* send the DROP DATABASE statement to the workers.
*
* We also serialize database commands globally by acquiring a Citus specific advisory
* lock based on OCLASS_DATABASE on the first primary worker node.
*/
List *
PreprocessDropDatabaseStmt(Node *node, const char *queryString,
@ -468,7 +532,7 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString,
return NIL;
}
EnsureCoordinator();
EnsurePropagationToCoordinator();
DropdbStmt *stmt = (DropdbStmt *) node;
@ -488,13 +552,15 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString,
return NIL;
}
SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname);
char *dropDatabaseCommand = DeparseTreeNode(node);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) dropDatabaseCommand,
ENABLE_DDL_PROPAGATION);
return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands);
return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands);
}


@ -536,7 +536,7 @@ static DistributeObjectOps Database_Set = {
static DistributeObjectOps Database_Rename = {
.deparse = DeparseAlterDatabaseRenameStmt,
.qualify = NULL,
.preprocess = NULL,
.preprocess = PreprocessAlterDatabaseRenameStmt,
.postprocess = PostprocessAlterDatabaseRenameStmt,
.objectType = OBJECT_DATABASE,
.operationType = DIST_OPS_ALTER,


@ -741,9 +741,9 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
ereport(NOTICE, (errmsg("Citus partially supports CREATE DATABASE for "
"distributed databases"),
errdetail("Citus does not propagate CREATE DATABASE "
"command to workers"),
"command to other nodes"),
errhint("You can manually create a database and its "
"extensions on workers.")));
"extensions on other nodes.")));
}
}
else if (IsA(parsetree, CreateRoleStmt) && !EnableCreateRolePropagation)


@ -109,7 +109,7 @@ TupleStoreTupleDestPutTuple(TupleDestination *self, Task *task,
uint64 tupleSize = tupleLibpqSize;
if (tupleSize == 0)
{
tupleSize = HeapTupleHeaderGetDatumLength(heapTuple);
tupleSize = heapTuple->t_len;
}
/*


@ -2771,12 +2771,24 @@ EnsureCoordinatorIsInMetadata(void)
{
bool isCoordinatorInMetadata = false;
PrimaryNodeForGroup(COORDINATOR_GROUP_ID, &isCoordinatorInMetadata);
if (!isCoordinatorInMetadata)
if (isCoordinatorInMetadata)
{
return;
}
/* give a more descriptive hint when we're not on the coordinator */
if (IsCoordinator())
{
ereport(ERROR, (errmsg("coordinator is not added to the metadata"),
errhint("Use SELECT citus_set_coordinator_host('<hostname>') "
"to configure the coordinator hostname")));
}
else
{
ereport(ERROR, (errmsg("coordinator is not added to the metadata"),
errhint("Use SELECT citus_set_coordinator_host('<hostname>') "
"on coordinator to configure the coordinator hostname")));
}
}


@ -0,0 +1,275 @@
/*-------------------------------------------------------------------------
*
* serialize_distributed_ddls.c
*
* This file contains functions for serializing distributed DDLs.
*
* If you're adding support for serializing a new DDL, you should
* extend the following functions to support the new object class:
* AcquireCitusAdvisoryObjectClassLockGetOid()
* AcquireCitusAdvisoryObjectClassLockCheckPrivileges()
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "miscadmin.h"
#include "catalog/dependency.h"
#include "catalog/pg_database_d.h"
#include "commands/dbcommands.h"
#include "storage/lock.h"
#include "utils/builtins.h"
#include "pg_version_compat.h"
#include "distributed/adaptive_executor.h"
#include "distributed/argutils.h"
#include "distributed/deparse_shard_query.h"
#include "distributed/resource_lock.h"
#include "distributed/serialize_distributed_ddls.h"
PG_FUNCTION_INFO_V1(citus_internal_acquire_citus_advisory_object_class_lock);
static void SerializeDistributedDDLsOnObjectClassInternal(ObjectClass objectClass,
char *qualifiedObjectName);
static char * AcquireCitusAdvisoryObjectClassLockCommand(ObjectClass objectClass,
char *qualifiedObjectName);
static void AcquireCitusAdvisoryObjectClassLock(ObjectClass objectClass,
char *qualifiedObjectName);
static Oid AcquireCitusAdvisoryObjectClassLockGetOid(ObjectClass objectClass,
char *qualifiedObjectName);
static void AcquireCitusAdvisoryObjectClassLockCheckPrivileges(ObjectClass objectClass,
Oid oid);
/*
* citus_internal_acquire_citus_advisory_object_class_lock is an internal UDF
* to call AcquireCitusAdvisoryObjectClassLock().
*/
Datum
citus_internal_acquire_citus_advisory_object_class_lock(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
PG_ENSURE_ARGNOTNULL(0, "object_class");
ObjectClass objectClass = PG_GETARG_INT32(0);
char *qualifiedObjectName = PG_ARGISNULL(1) ? NULL : PG_GETARG_CSTRING(1);
AcquireCitusAdvisoryObjectClassLock(objectClass, qualifiedObjectName);
PG_RETURN_VOID();
}
/*
* SerializeDistributedDDLsOnObjectClass is a wrapper around
* SerializeDistributedDDLsOnObjectClassInternal to acquire the lock on given
* object class itself, see the comment in header file for more details about
* the difference between this function and
* SerializeDistributedDDLsOnObjectClassObject().
*/
void
SerializeDistributedDDLsOnObjectClass(ObjectClass objectClass)
{
SerializeDistributedDDLsOnObjectClassInternal(objectClass, NULL);
}
/*
* SerializeDistributedDDLsOnObjectClassObject is a wrapper around
* SerializeDistributedDDLsOnObjectClassInternal to acquire the lock on given
* object that belongs to given object class, see the comment in header file
* for more details about the difference between this function and
* SerializeDistributedDDLsOnObjectClass().
*/
void
SerializeDistributedDDLsOnObjectClassObject(ObjectClass objectClass,
char *qualifiedObjectName)
{
if (qualifiedObjectName == NULL)
{
elog(ERROR, "qualified object name cannot be NULL");
}
SerializeDistributedDDLsOnObjectClassInternal(objectClass, qualifiedObjectName);
}
/*
* SerializeDistributedDDLsOnObjectClassInternal serializes distributed DDLs
* that target given object class by acquiring a Citus specific advisory lock
* on the first primary worker node if there are any workers in the cluster.
*
* The lock is acquired via a coordinated transaction. For this reason,
* it automatically gets released when (maybe implicit) transaction on
* current server commits or rolls back.
*
* If qualifiedObjectName is provided to be non-null, then the oid of the
* object is first resolved on the first primary worker node and then the
* lock is acquired on that oid. If qualifiedObjectName is null, then the
* lock is acquired on the object class itself.
*
* Note that those two lock types don't conflict with each other and are
* acquired for different purposes. The lock on the object class
* (qualifiedObjectName = NULL) is used to serialize DDLs that target the
* object class itself, e.g., when creating a new object of that class, while
* the object lock is used to serialize DDLs that target a specific object of
* that class, e.g., when altering an object.
*
* In some cases, we may want to acquire both locks at the same time. For
* example, when renaming a database, we want to acquire both lock types
* because while the object class lock is used to ensure that another session
* doesn't create a new database with the same name, the object lock is used
* to ensure that another session doesn't alter the same database.
*/
static void
SerializeDistributedDDLsOnObjectClassInternal(ObjectClass objectClass,
char *qualifiedObjectName)
{
WorkerNode *firstWorkerNode = GetFirstPrimaryWorkerNode();
if (firstWorkerNode == NULL)
{
/*
* If there are no worker nodes in the cluster, then we don't need
* to acquire the lock at all; indeed, we cannot.
*/
return;
}
/*
* The remote node would already perform these permission checks
* --via AcquireCitusAdvisoryObjectClassLock()-- but we first do so on
* the local node to avoid reporting confusing error messages.
*/
Oid oid = AcquireCitusAdvisoryObjectClassLockGetOid(objectClass, qualifiedObjectName);
AcquireCitusAdvisoryObjectClassLockCheckPrivileges(objectClass, oid);
Task *task = CitusMakeNode(Task);
task->taskType = DDL_TASK;
char *command = AcquireCitusAdvisoryObjectClassLockCommand(objectClass,
qualifiedObjectName);
SetTaskQueryString(task, command);
ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement);
SetPlacementNodeMetadata(targetPlacement, firstWorkerNode);
task->taskPlacementList = list_make1(targetPlacement);
/* need to be in a transaction to acquire a lock that's bound to transactions */
UseCoordinatedTransaction();
bool localExecutionSupported = true;
ExecuteUtilityTaskList(list_make1(task), localExecutionSupported);
}
/*
* AcquireCitusAdvisoryObjectClassLockCommand returns a command to call
* pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock().
*/
static char *
AcquireCitusAdvisoryObjectClassLockCommand(ObjectClass objectClass,
char *qualifiedObjectName)
{
/* safe to cast to int as it's an enum */
int objectClassInt = (int) objectClass;
char *quotedObjectName =
!qualifiedObjectName ? "NULL" :
quote_literal_cstr(qualifiedObjectName);
StringInfo command = makeStringInfo();
appendStringInfo(command,
"SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(%d, %s)",
objectClassInt, quotedObjectName);
return command->data;
}
/*
* AcquireCitusAdvisoryObjectClassLock acquires a Citus specific advisory
* ExclusiveLock based on given object class.
*/
static void
AcquireCitusAdvisoryObjectClassLock(ObjectClass objectClass, char *qualifiedObjectName)
{
Oid oid = AcquireCitusAdvisoryObjectClassLockGetOid(objectClass, qualifiedObjectName);
AcquireCitusAdvisoryObjectClassLockCheckPrivileges(objectClass, oid);
LOCKTAG locktag;
SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION(locktag, objectClass, oid);
LOCKMODE lockmode = ExclusiveLock;
bool sessionLock = false;
bool dontWait = false;
LockAcquire(&locktag, lockmode, sessionLock, dontWait);
}
/*
* AcquireCitusAdvisoryObjectClassLockGetOid returns the oid of given object
* that belongs to given object class. If qualifiedObjectName is NULL, then
* it returns InvalidOid.
*/
static Oid
AcquireCitusAdvisoryObjectClassLockGetOid(ObjectClass objectClass,
char *qualifiedObjectName)
{
if (qualifiedObjectName == NULL)
{
return InvalidOid;
}
bool missingOk = false;
switch (objectClass)
{
case OCLASS_DATABASE:
{
return get_database_oid(qualifiedObjectName, missingOk);
}
default:
elog(ERROR, "unsupported object class: %d", objectClass);
}
}
/*
* AcquireCitusAdvisoryObjectClassLockCheckPrivileges is used to perform privilege checks
* before acquiring the Citus specific advisory lock on given object class and oid.
*/
static void
AcquireCitusAdvisoryObjectClassLockCheckPrivileges(ObjectClass objectClass, Oid oid)
{
switch (objectClass)
{
case OCLASS_DATABASE:
{
if (OidIsValid(oid) && !object_ownercheck(DatabaseRelationId, oid,
GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE,
get_database_name(oid));
}
else if (!OidIsValid(oid) && !have_createdb_privilege())
{
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to create / rename database")));
}
break;
}
default:
elog(ERROR, "unsupported object class: %d", objectClass);
}
}


@ -10,3 +10,5 @@
#include "udfs/commit_management_command_2pc/12.2-1.sql"
ALTER TABLE pg_catalog.pg_dist_transaction ADD COLUMN outer_xid xid8;
#include "udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql"


@ -1,6 +1,7 @@
-- citus--12.2-1--12.1-1
DROP FUNCTION pg_catalog.citus_internal_database_command(text);
DROP FUNCTION pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(int, cstring);
#include "../udfs/citus_add_rebalance_strategy/10.1-1.sql"


@ -0,0 +1,5 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(objectClass int, qualifiedObjectName cstring)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_acquire_citus_advisory_object_class_lock$$;


@ -0,0 +1,5 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(objectClass int, qualifiedObjectName cstring)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_acquire_citus_advisory_object_class_lock$$;
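Example usage of the new UDF, following the regression tests added later in this commit (25 is the OCLASS_DATABASE value on PostgreSQL versions below 16, 26 on 16 and later):

-- lock the database object class itself
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(25, NULL);
-- lock a specific database by name
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(25, 'regression');

The lock is held until the end of the current transaction, so the call is only meaningful inside an explicit transaction block or as part of a larger distributed DDL.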


@ -245,6 +245,9 @@ extern List * DropDatabaseStmtObjectAddress(Node *node, bool missingOk,
extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missingOk,
bool isPostprocess);
extern List * GenerateGrantDatabaseCommandList(void);
extern List * PreprocessAlterDatabaseRenameStmt(Node *node, const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PostprocessAlterDatabaseRenameStmt(Node *node, const char *queryString);
extern void EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt);
extern char * CreateDatabaseDDLCommand(Oid dbId);


@ -14,6 +14,7 @@
#include "c.h"
#include "catalog/dependency.h"
#include "nodes/pg_list.h"
#include "storage/lock.h"
#include "tcop/utility.h"
@ -45,7 +46,8 @@ typedef enum AdvisoryLocktagClass
ADV_LOCKTAG_CLASS_CITUS_CLEANUP_OPERATION_ID = 10,
ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12,
ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION = 13,
ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK = 14
ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK = 14,
ADV_LOCKTAG_CLASS_CITUS_GLOBAL_DDL_SERIALIZATION = 15
} AdvisoryLocktagClass;
/* CitusOperations has constants for citus operations */
@ -142,6 +144,72 @@ typedef enum CitusOperations
(uint32) (taskId), \
ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK)
/*
* IsNodeWideObjectClass returns true if the given object class is node-wide,
* i.e., not bound to a particular database but to the whole server.
*
* Defined here as an inlined function so that SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION
* macro can use it.
*/
static inline bool
IsNodeWideObjectClass(ObjectClass objectClass)
{
if ((int) objectClass < 0 || objectClass > LAST_OCLASS)
{
elog(ERROR, "invalid object class: %d", objectClass);
}
/*
* We don't expect Postgres to change an object class to a node-wide one in the
* future, but a newly added object class may be node-wide.
*
* So we put a static assert here to make sure that the developer who adds support
* for a new Postgres version is aware of this.
*
* If new object classes are added and none of them are node-wide, then update
* this assertion check based on latest supported major Postgres version.
*/
StaticAssertStmt(PG_MAJORVERSION_NUM <= 16,
"better to check if any of newly added ObjectClass'es are node-wide");
switch (objectClass)
{
case OCLASS_ROLE:
case OCLASS_DATABASE:
case OCLASS_TBLSPACE:
#if PG_VERSION_NUM >= PG_VERSION_15
case OCLASS_PARAMETER_ACL:
#endif
#if PG_VERSION_NUM >= PG_VERSION_16
case OCLASS_ROLE_MEMBERSHIP:
#endif
{
return true;
}
default:
return false;
}
}
/*
* SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION sets databaseId to InvalidOid if the
* object class is node-wide, i.e., not bound to a particular database but
* to the whole server. If the object class is not node-wide, it sets
* databaseId to MyDatabaseId.
*
* That way, the lock is local to each database if the object class is
* not node-wide, and global if it is.
*/
#define SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION(tag, objectClass, oid) \
SET_LOCKTAG_ADVISORY(tag, \
(uint32) (IsNodeWideObjectClass(objectClass) ? InvalidOid : \
MyDatabaseId), \
(uint32) objectClass, \
(uint32) oid, \
ADV_LOCKTAG_CLASS_CITUS_GLOBAL_DDL_SERIALIZATION)
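A minimal way to observe the resulting lock, assuming the advisory lock tag fields map to pg_locks columns in the usual order (field1 = database, field2 = classid, field3 = objid, field4 = objsubid), is to filter on the new lock class value of 15:

SELECT locktype, database, classid, objid, objsubid, mode, granted
FROM pg_locks
WHERE locktype = 'advisory' AND objsubid = 15;

For a node-wide object class such as OCLASS_DATABASE, the database column shows 0 (InvalidOid), so sessions connected to different databases on the same node still conflict; for other classes it shows the current database's OID, keeping the lock local to that database.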
/*
* DistLockConfigs are used to configure the locking behaviour of AcquireDistributedLockOnRelations
*/


@ -0,0 +1,37 @@
/*-------------------------------------------------------------------------
*
* serialize_distributed_ddls.h
*
* Declarations for public functions related to serializing distributed
* DDLs.
*
*-------------------------------------------------------------------------
*/
#ifndef SERIALIZE_DDLS_OVER_CATALOG_H
#define SERIALIZE_DDLS_OVER_CATALOG_H
#include "postgres.h"
#include "catalog/dependency.h"
/*
* Note that those two lock types don't conflict with each other and are
* acquired for different purposes. The lock on the object class
* --SerializeDistributedDDLsOnObjectClass()-- is used to serialize DDLs
* that target the object class itself, e.g., when creating a new object
* of that class, and the latter one --SerializeDistributedDDLsOnObjectClassObject()--
* is used to serialize DDLs that target a specific object of that class,
* e.g., when altering an object.
*
* In some cases, we may want to acquire both locks at the same time. For
* example, when renaming a database, we want to acquire both lock types
* because while the object class lock is used to ensure that another session
* doesn't create a new database with the same name, the object lock is used
* to ensure that another session doesn't alter the same database.
*/
extern void SerializeDistributedDDLsOnObjectClass(ObjectClass objectClass);
extern void SerializeDistributedDDLsOnObjectClassObject(ObjectClass objectClass,
char *qualifiedObjectName);
#endif /* SERIALIZE_DDLS_OVER_CATALOG_H */


@ -48,14 +48,21 @@ get_guc_variables_compat(int *gucCount)
#define pgstat_fetch_stat_local_beentry(a) pgstat_get_local_beentry_by_index(a)
#define have_createdb_privilege() have_createdb_privilege()
#else
#include "miscadmin.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_class_d.h"
#include "catalog/pg_database_d.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_proc_d.h"
#include "storage/relfilenode.h"
#include "utils/guc.h"
#include "utils/guc_tables.h"
#include "utils/syscache.h"
#define pg_clean_ascii_compat(a, b) pg_clean_ascii(a)
@ -105,6 +112,11 @@ object_ownercheck(Oid classid, Oid objectid, Oid roleid)
return pg_proc_ownercheck(objectid, roleid);
}
case DatabaseRelationId:
{
return pg_database_ownercheck(objectid, roleid);
}
default:
{
ereport(ERROR,
@ -140,6 +152,28 @@ object_aclcheck(Oid classid, Oid objectid, Oid roleid, AclMode mode)
}
static inline bool
have_createdb_privilege(void)
{
bool result = false;
HeapTuple utup;
/* Superusers can always do everything */
if (superuser())
{
return true;
}
utup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(GetUserId()));
if (HeapTupleIsValid(utup))
{
result = ((Form_pg_authid) GETSTRUCT(utup))->rolcreatedb;
ReleaseSysCache(utup);
}
return result;
}
typedef bool TU_UpdateIndexes;
/*


@ -203,6 +203,12 @@ DEPS = {
"foreign_key_to_reference_shard_rebalance": TestDeps(
"minimal_schedule", ["remove_coordinator_from_metadata"]
),
"limit_intermediate_size": TestDeps("base_schedule"),
"columnar_drop": TestDeps(
"minimal_schedule",
["columnar_create", "columnar_load"],
repeatable=False,
),
}


@ -182,8 +182,8 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
set citus.enable_create_database_propagation=off;
CREATE database local_regression;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
alter DATABASE local_regression with CONNECTION LIMIT 100;
alter DATABASE local_regression rename to local_regression2;
drop database local_regression2;


@ -254,8 +254,8 @@ SELECT run_command_on_workers('SHOW enable_hashagg');
-- also test case sensitivity
CREATE DATABASE "REGRESSION";
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
ALTER ROLE CURRENT_USER IN DATABASE "REGRESSION" SET public.myguc TO "Hello from coordinator only";
SELECT d.datname, r.setconfig FROM pg_db_role_setting r LEFT JOIN pg_database d ON r.setdatabase=d.oid WHERE r.setconfig::text LIKE '%Hello from coordinator only%';
datname | setconfig


@ -39,8 +39,8 @@ SELECT :columnar_stripes_before_drop - count(distinct storage_id) FROM columnar.
SELECT current_database() datname \gset
CREATE DATABASE db_to_drop;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c db_to_drop
CREATE EXTENSION citus_columnar;
SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset


@ -64,8 +64,8 @@ CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace
\c - - - :master_port
CREATE DATABASE local_database;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
-- check that it's only created for coordinator
SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type;
node_type | result
@ -88,8 +88,8 @@ SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node
\c - - - :worker_1_port
CREATE DATABASE local_database;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
-- check that it's only created for coordinator
SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type;
node_type | result
@ -452,18 +452,14 @@ drop database "mydatabase#1'2";
ERROR: database "mydatabase#1'2" does not exist
\c - - - :worker_1_port
SET citus.enable_create_database_propagation TO ON;
-- show that dropping the database from workers is not allowed when citus.enable_create_database_propagation is on
-- show that dropping the database from workers is allowed when citus.enable_create_database_propagation is on
DROP DATABASE db_needs_escape;
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
-- and the same applies to create database too
create database error_test;
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
drop database error_test;
\c - - - :master_port
SET citus.enable_create_database_propagation TO ON;
DROP DATABASE test_node_activation;
DROP DATABASE db_needs_escape;
DROP USER "role-needs\!escape";
-- drop database with force options test
create database db_force_test;
@ -494,8 +490,8 @@ select 1 from citus_remove_node('localhost', :worker_2_port);
SET citus.enable_create_database_propagation TO off;
CREATE DATABASE non_distributed_db;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
SET citus.enable_create_database_propagation TO on;
create database distributed_db;
select 1 from citus_add_node('localhost', :worker_2_port);
@ -932,8 +928,8 @@ DROP ROLE propagated_role, non_propagated_role;
SET citus.enable_create_database_propagation TO OFF;
CREATE DATABASE local_database_1;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
SET citus.enable_create_database_propagation TO ON;
CREATE ROLE local_role_1;
GRANT CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 TO local_role_1;
@ -941,6 +937,334 @@ ALTER DATABASE local_database_1 SET default_transaction_read_only = 'true';
REVOKE CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 FROM local_role_1;
DROP ROLE local_role_1;
DROP DATABASE local_database_1;
-- test create / drop database commands from workers
-- remove one of the workers to test node activation too
SELECT 1 from citus_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
\c - - - :worker_1_port
CREATE DATABASE local_worker_db;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
SET citus.enable_create_database_propagation TO ON;
CREATE DATABASE db_created_from_worker
WITH template=template1
OWNER = create_drop_db_test_user
ENCODING = 'UTF8'
CONNECTION LIMIT = 42
TABLESPACE = "ts-needs\!escape"
ALLOW_CONNECTIONS = false;
\c - - - :master_port
SET citus.enable_create_database_propagation TO ON;
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
\c - - - :worker_1_port
SET citus.enable_create_database_propagation TO ON;
SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (local) | {"database_properties": {"datacl": null, "datname": "local_worker_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (remote) | {"database_properties": {"datacl": null, "datname": "db_created_from_worker", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 42, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (local) | {"database_properties": {"datacl": null, "datname": "db_created_from_worker", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 42, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_created_from_worker", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 42, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
DROP DATABASE db_created_from_worker;
SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
-- drop the local database while the GUC is on
DROP DATABASE local_worker_db;
SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
SET citus.enable_create_database_propagation TO OFF;
CREATE DATABASE local_worker_db;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
-- drop the local database while the GUC is off
DROP DATABASE local_worker_db;
SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
SET citus.enable_create_database_propagation TO ON;
CREATE DATABASE another_db_created_from_worker;
\c - - - :master_port
SELECT 1 FROM citus_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
\c - - - :worker_1_port
SET citus.enable_create_database_propagation TO ON;
-- fails because coordinator is not added into metadata
DROP DATABASE another_db_created_from_worker;
ERROR: coordinator is not added to the metadata
HINT: Use SELECT citus_set_coordinator_host('<hostname>') on coordinator to configure the coordinator hostname
-- fails because coordinator is not added into metadata
CREATE DATABASE new_db;
ERROR: coordinator is not added to the metadata
HINT: Use SELECT citus_set_coordinator_host('<hostname>') on coordinator to configure the coordinator hostname
\c - - - :master_port
SET client_min_messages TO WARNING;
SELECT 1 FROM citus_add_node('localhost', :master_port, 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
SET citus.enable_create_database_propagation TO ON;
-- dropping a database that was created from a worker via a different node works fine
DROP DATABASE another_db_created_from_worker;
SELECT * FROM public.check_database_on_all_nodes('another_db_created_from_worker') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
-- Show that we automatically propagate the dependencies (only roles atm) when
-- creating a database from workers too.
SELECT 1 from citus_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
\c - - - :worker_1_port
set citus.enable_create_role_propagation TO off;
create role non_propagated_role;
NOTICE: not propagating CREATE ROLE/USER commands to other nodes
HINT: Connect to other nodes directly to manually create all necessary users and roles.
set citus.enable_create_role_propagation TO on;
set citus.enable_create_database_propagation TO on;
create database test_db OWNER non_propagated_role;
create role propagated_role;
\c - - - :master_port
-- not supported from workers, so need to execute this via coordinator
grant connect on database test_db to propagated_role;
SET citus.enable_create_database_propagation TO ON;
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM public.check_database_on_all_nodes('test_db') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (local) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
REVOKE CONNECT ON DATABASE test_db FROM propagated_role;
DROP DATABASE test_db;
DROP ROLE propagated_role, non_propagated_role;
-- test pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock with null input
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(null, 'regression');
ERROR: object_class cannot be NULL
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), null);
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
-- OCLASS_DATABASE
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), NULL);
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'regression');
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), '');
ERROR: database "" does not exist
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'no_such_db');
ERROR: database "no_such_db" does not exist
-- invalid OCLASS
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(-1, NULL);
ERROR: unsupported object class: -1
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(-1, 'regression');
ERROR: unsupported object class: -1
-- invalid OCLASS
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(100, NULL);
ERROR: unsupported object class: 100
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(100, 'regression');
ERROR: unsupported object class: 100
-- another valid OCLASS, but not implemented yet
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(10, NULL);
ERROR: unsupported object class: 10
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(10, 'regression');
ERROR: unsupported object class: 10
SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO ON');
?column?
---------------------------------------------------------------------
1
1
1
(3 rows)
SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()');
?column?
---------------------------------------------------------------------
1
1
1
(3 rows)
SELECT pg_sleep(0.1);
pg_sleep
---------------------------------------------------------------------
(1 row)
-- only one of them succeeds and we don't run into a distributed deadlock
SELECT COUNT(*) FROM run_command_on_all_nodes('CREATE DATABASE concurrent_create_db') WHERE success;
count
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (local) | {"database_properties": {"datacl": null, "datname": "concurrent_create_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": null, "datname": "concurrent_create_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": {"datacl": null, "datname": "concurrent_create_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
SELECT COUNT(*) FROM run_command_on_all_nodes('DROP DATABASE concurrent_create_db') WHERE success;
count
---------------------------------------------------------------------
1
(1 row)
SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type;
node_type | result
---------------------------------------------------------------------
coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
(3 rows)
-- revert the system wide change that enables citus.enable_create_database_propagation on all nodes
SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO OFF');
?column?
---------------------------------------------------------------------
1
1
1
(3 rows)
SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()');
?column?
---------------------------------------------------------------------
1
1
1
(3 rows)
SELECT pg_sleep(0.1);
pg_sleep
---------------------------------------------------------------------
(1 row)
-- but keep it enabled for coordinator for the rest of the tests
SET citus.enable_create_database_propagation TO ON;
CREATE DATABASE distributed_db;
CREATE USER no_createdb;
SET ROLE no_createdb;
SET citus.enable_create_database_propagation TO ON;
CREATE DATABASE no_createdb;
ERROR: permission denied to create / rename database
ALTER DATABASE distributed_db RENAME TO rename_test;
ERROR: permission denied to create / rename database
DROP DATABASE distributed_db;
ERROR: must be owner of database distributed_db
ALTER DATABASE distributed_db SET TABLESPACE pg_default;
ERROR: must be owner of database distributed_db
ALTER DATABASE distributed_db SET timezone TO 'UTC';
ERROR: must be owner of database distributed_db
ALTER DATABASE distributed_db RESET timezone;
ERROR: must be owner of database distributed_db
GRANT ALL ON DATABASE distributed_db TO postgres;
WARNING: no privileges were granted for "distributed_db"
RESET ROLE;
ALTER ROLE no_createdb createdb;
SET ROLE no_createdb;
CREATE DATABASE no_createdb;
ALTER DATABASE distributed_db RENAME TO rename_test;
ERROR: must be owner of database distributed_db
RESET ROLE;
SELECT 1 FROM run_command_on_all_nodes($$GRANT ALL ON TABLESPACE pg_default TO no_createdb$$);
?column?
---------------------------------------------------------------------
1
1
1
(3 rows)
ALTER DATABASE distributed_db OWNER TO no_createdb;
SET ROLE no_createdb;
ALTER DATABASE distributed_db SET TABLESPACE pg_default;
ALTER DATABASE distributed_db SET timezone TO 'UTC';
ALTER DATABASE distributed_db RESET timezone;
GRANT ALL ON DATABASE distributed_db TO postgres;
ALTER DATABASE distributed_db RENAME TO rename_test;
DROP DATABASE rename_test;
RESET ROLE;
SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FROM no_createdb$$);
?column?
---------------------------------------------------------------------
1
1
1
(3 rows)
DROP DATABASE no_createdb;
DROP USER no_createdb;
SET citus.enable_create_database_propagation TO ON;
--clean up resources created by this test
-- DROP TABLESPACE is not supported, so we need to drop it manually.
SELECT result FROM run_command_on_all_nodes(


@ -6,14 +6,14 @@ SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 35137400;
CREATE DATABASE citus_created;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c citus_created
CREATE EXTENSION citus;
CREATE DATABASE citus_not_created;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c citus_not_created
DROP DATABASE citus_created;
\c regression
@ -26,14 +26,14 @@ SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 35137400;
CREATE DATABASE citus_created;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c citus_created
CREATE EXTENSION citus;
CREATE DATABASE citus_not_created;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c citus_not_created
DROP DATABASE citus_created;
\c regression


@ -8,8 +8,8 @@ CREATE SCHEMA failure_non_main_db_2pc;
SET SEARCH_PATH TO 'failure_non_main_db_2pc';
CREATE DATABASE other_db1;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()');
mitmproxy
---------------------------------------------------------------------
@ -101,8 +101,8 @@ SELECT citus_set_coordinator_host('localhost');
\c - - - :worker_1_port
CREATE DATABASE other_db2;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()');
mitmproxy
---------------------------------------------------------------------


@ -542,8 +542,8 @@ create user myuser;
create user myuser_1;
create database test_db;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
SELECT result FROM run_command_on_workers($$create database test_db$$);
result
---------------------------------------------------------------------


@ -0,0 +1,211 @@
Parsed test spec with 2 sessions
starting permutation: s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock s1-commit s2-commit
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-acquire-citus-adv-oclass-lock: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database;
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
step s2-acquire-citus-adv-oclass-lock: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; <waiting ...>
step s1-commit: COMMIT;
step s2-acquire-citus-adv-oclass-lock: <... completed>
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
step s2-commit: COMMIT;
starting permutation: s1-create-testdb1 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb1 s1-commit s2-commit s1-drop-testdb1
step s1-create-testdb1: CREATE DATABASE testdb1;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-acquire-citus-adv-oclass-lock-with-oid-testdb1: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database;
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
step s2-acquire-citus-adv-oclass-lock-with-oid-testdb1: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; <waiting ...>
step s1-commit: COMMIT;
step s2-acquire-citus-adv-oclass-lock-with-oid-testdb1: <... completed>
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
step s2-commit: COMMIT;
step s1-drop-testdb1: DROP DATABASE testdb1;
starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s1-drop-testdb1 s2-drop-testdb2
step s1-create-testdb1: CREATE DATABASE testdb1;
step s2-create-testdb2: CREATE DATABASE testdb2;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-acquire-citus-adv-oclass-lock-with-oid-testdb1: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database;
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
step s2-acquire-citus-adv-oclass-lock-with-oid-testdb2: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb2') FROM oclass_database;
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
step s1-commit: COMMIT;
step s2-commit: COMMIT;
step s1-drop-testdb1: DROP DATABASE testdb1;
step s2-drop-testdb2: DROP DATABASE testdb2;
starting permutation: s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s2-drop-testdb2
step s2-create-testdb2: CREATE DATABASE testdb2;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-acquire-citus-adv-oclass-lock: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database;
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
step s2-acquire-citus-adv-oclass-lock-with-oid-testdb2: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb2') FROM oclass_database;
citus_internal_acquire_citus_advisory_object_class_lock
---------------------------------------------------------------------
(1 row)
step s1-commit: COMMIT;
step s2-commit: COMMIT;
step s2-drop-testdb2: DROP DATABASE testdb2;
starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1
step s2-create-testdb2: CREATE DATABASE testdb2;
step s2-begin: BEGIN;
step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C';
step s1-create-db1: CREATE DATABASE db1;
step s2-rollback: ROLLBACK;
step s2-drop-testdb2: DROP DATABASE testdb2;
step s1-drop-db1: DROP DATABASE db1;
starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-user-dbuser s1-grant-on-testdb2-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-user-dbuser
step s2-create-testdb2: CREATE DATABASE testdb2;
step s2-begin: BEGIN;
step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C';
step s1-create-user-dbuser: CREATE USER dbuser;
step s1-grant-on-testdb2-to-dbuser: GRANT ALL ON DATABASE testdb2 TO dbuser;
step s2-rollback: ROLLBACK;
step s2-drop-testdb2: DROP DATABASE testdb2;
step s1-drop-user-dbuser: DROP USER dbuser;
starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-testdb1 s1-create-user-dbuser s1-grant-on-testdb1-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-testdb1 s1-drop-user-dbuser
step s2-create-testdb2: CREATE DATABASE testdb2;
step s2-begin: BEGIN;
step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C';
step s1-create-testdb1: CREATE DATABASE testdb1;
step s1-create-user-dbuser: CREATE USER dbuser;
step s1-grant-on-testdb1-to-dbuser: GRANT ALL ON DATABASE testdb1 TO dbuser;
step s2-rollback: ROLLBACK;
step s2-drop-testdb2: DROP DATABASE testdb2;
step s1-drop-testdb1: DROP DATABASE testdb1;
step s1-drop-user-dbuser: DROP USER dbuser;
starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-commit s2-rollback s1-drop-db1 s2-drop-testdb2
step s1-create-testdb1: CREATE DATABASE testdb1;
step s2-create-testdb2: CREATE DATABASE testdb2;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1;
step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; <waiting ...>
step s1-commit: COMMIT;
step s2-alter-testdb2-rename-to-db1: <... completed>
ERROR: database "db1" already exists
step s2-rollback: ROLLBACK;
step s1-drop-db1: DROP DATABASE db1;
step s2-drop-testdb2: DROP DATABASE testdb2;
starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-rollback s2-commit s1-drop-testdb1 s2-drop-db1
step s1-create-testdb1: CREATE DATABASE testdb1;
step s2-create-testdb2: CREATE DATABASE testdb2;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1;
step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; <waiting ...>
step s1-rollback: ROLLBACK;
step s2-alter-testdb2-rename-to-db1: <... completed>
step s2-commit: COMMIT;
step s1-drop-testdb1: DROP DATABASE testdb1;
step s2-drop-db1: DROP DATABASE db1;
starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-commit s2-rollback s1-drop-db1
step s1-create-testdb1: CREATE DATABASE testdb1;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1;
step s2-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; <waiting ...>
step s1-commit: COMMIT;
step s2-alter-testdb1-rename-to-db1: <... completed>
ERROR: database "testdb1" does not exist
step s2-rollback: ROLLBACK;
step s1-drop-db1: DROP DATABASE db1;
starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-rollback s2-commit s2-drop-db1
step s1-create-testdb1: CREATE DATABASE testdb1;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1;
step s2-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; <waiting ...>
step s1-rollback: ROLLBACK;
step s2-alter-testdb1-rename-to-db1: <... completed>
step s2-commit: COMMIT;
step s2-drop-db1: DROP DATABASE db1;
starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1
step s2-create-testdb2: CREATE DATABASE testdb2;
step s2-begin: BEGIN;
step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1;
step s1-create-db1: CREATE DATABASE db1; <waiting ...>
step s2-rollback: ROLLBACK;
step s1-create-db1: <... completed>
step s2-drop-testdb2: DROP DATABASE testdb2;
step s1-drop-db1: DROP DATABASE db1;
starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-commit s2-drop-db1
step s2-create-testdb2: CREATE DATABASE testdb2;
step s2-begin: BEGIN;
step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1;
step s1-create-db1: CREATE DATABASE db1; <waiting ...>
step s2-commit: COMMIT;
step s1-create-db1: <... completed>
ERROR: database "db1" already exists
step s2-drop-db1: DROP DATABASE db1;
starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db2 s1-create-db1 s2-commit s2-drop-db2 s1-drop-db1
step s2-create-testdb2: CREATE DATABASE testdb2;
step s2-begin: BEGIN;
step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2;
step s1-create-db1: CREATE DATABASE db1; <waiting ...>
step s2-commit: COMMIT;
step s1-create-db1: <... completed>
step s2-drop-db2: DROP DATABASE db2;
step s1-drop-db1: DROP DATABASE db1;
starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-drop-testdb2 s2-rollback
step s2-create-testdb2: CREATE DATABASE testdb2;
step s2-begin: BEGIN;
step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1;
step s1-drop-testdb2: DROP DATABASE testdb2; <waiting ...>
step s2-rollback: ROLLBACK;
step s1-drop-testdb2: <... completed>
starting permutation: s2-create-testdb2 s1-create-db1 s2-begin s2-alter-testdb2-rename-to-db2 s1-drop-db1 s2-commit s2-drop-db2
step s2-create-testdb2: CREATE DATABASE testdb2;
step s1-create-db1: CREATE DATABASE db1;
step s2-begin: BEGIN;
step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2;
step s1-drop-db1: DROP DATABASE db1;
step s2-commit: COMMIT;
step s2-drop-db2: DROP DATABASE db2;

View File

@ -16,7 +16,8 @@ SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10;
ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 2 kB)
DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place.
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
SET citus.max_intermediate_result_size TO 17;
SET citus.max_intermediate_result_size TO 9;
-- regular adaptive executor CTE should fail
WITH cte AS MATERIALIZED
(
SELECT
@ -38,20 +39,9 @@ FROM
ORDER BY
1,2
LIMIT 10;
user_id | value_2
---------------------------------------------------------------------
1 | 0
1 | 0
1 | 0
1 | 0
1 | 0
1 | 0
1 | 0
1 | 0
1 | 0
1 | 0
(10 rows)
ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 9 kB)
DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place.
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
-- router queries should be able to get limited too
SET citus.max_intermediate_result_size TO 2;
-- this should pass, since we fetch small portions in each subplan
@ -117,11 +107,9 @@ WITH cte AS MATERIALIZED (
AND EXISTS (select * from cte2, cte3)
)
SELECT count(*) FROM cte WHERE EXISTS (select * from cte);
count
---------------------------------------------------------------------
105
(1 row)
ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 4 kB)
DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place.
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
SET citus.max_intermediate_result_size TO 3;
-- this should fail since the cte-subplan exceeds the limit even if the
-- cte2 and cte3 does not

View File

@ -1426,8 +1426,9 @@ SELECT * FROM multi_extension.print_extension_changes();
| function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void
| function citus_internal.mark_object_distributed(oid,text,oid) void
| function citus_internal.start_management_transaction(xid8) void
| function citus_internal_acquire_citus_advisory_object_class_lock(integer,cstring) void
| function citus_internal_database_command(text) void
(5 rows)
(6 rows)
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
@ -1623,8 +1624,8 @@ CREATE EXTENSION citus;
-- Check that maintenance daemon can also be started in another database
CREATE DATABASE another;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c another
CREATE EXTENSION citus;
CREATE SCHEMA test;
@ -1682,13 +1683,13 @@ NOTICE: drop cascades to function test_daemon.maintenance_daemon_died(text)
-- create a test database, configure citus with single node
CREATE DATABASE another;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c - - - :worker_1_port
CREATE DATABASE another;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c - - - :master_port
\c another
CREATE EXTENSION citus;

View File

@ -33,8 +33,8 @@ $definition$ create_function_test_maintenance_worker
\gset
CREATE DATABASE db1;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
SELECT oid AS db1_oid
FROM pg_database
WHERE datname = 'db1'
@ -42,13 +42,13 @@ WHERE datname = 'db1'
\c - - - :worker_1_port
CREATE DATABASE db1;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c - - - :worker_2_port
CREATE DATABASE db1;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c db1 - - :worker_1_port
CREATE EXTENSION citus;
\c db1 - - :worker_2_port
@ -94,8 +94,8 @@ FROM pg_dist_node;
CREATE DATABASE db2;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
SELECT oid AS db2_oid
FROM pg_database
WHERE datname = 'db2'
@ -103,13 +103,13 @@ WHERE datname = 'db2'
\c - - - :worker_1_port
CREATE DATABASE db2;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c - - - :worker_2_port
CREATE DATABASE db2;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c db2 - - :worker_1_port
CREATE EXTENSION citus;
\c db2 - - :worker_2_port

View File

@ -5,5 +5,6 @@
-- databases.
CREATE DATABASE new_database;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
DROP DATABASE new_database;

View File

@ -3,8 +3,8 @@ SET search_path TO other_databases;
SET citus.next_shard_id TO 10231023;
CREATE DATABASE other_db1;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c other_db1
SHOW citus.main_db;
citus.main_db
@ -94,8 +94,8 @@ DROP USER other_db_user9, nonsuperuser;
\c - - - :worker_1_port
CREATE DATABASE other_db2;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
\c other_db2
CREATE USER worker_user1;
BEGIN;

View File

@ -207,8 +207,8 @@ DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VAL
-- https://github.com/postgres/postgres/commit/30a53b7
CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0';
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
HINT: You can manually create a database and its extensions on other nodes.
NOTICE: using standard form "und" for ICU locale ""
SELECT result FROM run_command_on_workers
($$CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0'$$);

View File

@ -255,19 +255,19 @@ SELECT citus_remove_node('localhost', :master_port);
-- they fail because the coordinator is not added to metadata
DROP ROLE test_role_renamed;
ERROR: coordinator is not added to the metadata
HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
HINT: Use SELECT citus_set_coordinator_host('<hostname>') on coordinator to configure the coordinator hostname
ALTER ROLE test_role_renamed RENAME TO test_role;
ERROR: coordinator is not added to the metadata
HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
HINT: Use SELECT citus_set_coordinator_host('<hostname>') on coordinator to configure the coordinator hostname
ALTER ROLE test_role_renamed CREATEDB;
ERROR: coordinator is not added to the metadata
HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
HINT: Use SELECT citus_set_coordinator_host('<hostname>') on coordinator to configure the coordinator hostname
ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF;
ERROR: coordinator is not added to the metadata
HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
HINT: Use SELECT citus_set_coordinator_host('<hostname>') on coordinator to configure the coordinator hostname
GRANT another_user TO test_role_renamed;
ERROR: coordinator is not added to the metadata
HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
HINT: Use SELECT citus_set_coordinator_host('<hostname>') on coordinator to configure the coordinator hostname
\c - - - :master_port
DROP ROLE test_role_renamed, another_user;
SET client_min_messages TO WARNING;

View File

@ -67,6 +67,7 @@ ORDER BY 1;
function citus_internal.replace_isolation_tester_func()
function citus_internal.restore_isolation_tester_func()
function citus_internal.start_management_transaction(xid8)
function citus_internal_acquire_citus_advisory_object_class_lock(integer,cstring)
function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid)
function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean)
function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char")
@ -348,5 +349,5 @@ ORDER BY 1;
view citus_stat_tenants_local
view pg_dist_shard_placement
view time_partitions
(338 rows)
(339 rows)

View File

@ -77,6 +77,7 @@ test: isolation_global_pid
test: isolation_citus_locks
test: isolation_reference_table
test: isolation_schema_based_sharding
test: isolation_database_cmd_from_any_node
test: isolation_citus_pause_node
test: isolation_citus_schema_distribute_undistribute

View File

@ -0,0 +1,102 @@
setup
{
-- OCLASS for database changed in PG 16 from 25 to 26
SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS value INTO oclass_database;
}
teardown
{
DROP TABLE IF EXISTS oclass_database;
}
session "s1"
setup { SET citus.enable_create_database_propagation TO ON; }
step "s1-begin" { BEGIN; }
step "s1-commit" { COMMIT; }
step "s1-rollback" { ROLLBACK; }
step "s1-create-user-dbuser" { CREATE USER dbuser; }
step "s1-drop-user-dbuser" { DROP USER dbuser; }
step "s1-acquire-citus-adv-oclass-lock" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; }
step "s1-acquire-citus-adv-oclass-lock-with-oid-testdb1" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; }
step "s1-create-testdb1" { CREATE DATABASE testdb1; }
step "s1-drop-testdb1" { DROP DATABASE testdb1; }
step "s1-alter-testdb1-rename-to-db1" { ALTER DATABASE testdb1 RENAME TO db1; }
step "s1-grant-on-testdb1-to-dbuser" { GRANT ALL ON DATABASE testdb1 TO dbuser;}
step "s1-drop-testdb2" { DROP DATABASE testdb2; }
step "s1-grant-on-testdb2-to-dbuser" { GRANT ALL ON DATABASE testdb2 TO dbuser;}
step "s1-create-db1" { CREATE DATABASE db1; }
step "s1-drop-db1" { DROP DATABASE db1; }
session "s2"
setup { SET citus.enable_create_database_propagation TO ON; }
step "s2-begin" { BEGIN; }
step "s2-commit" { COMMIT; }
step "s2-rollback" { ROLLBACK; }
step "s2-acquire-citus-adv-oclass-lock" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; }
step "s2-acquire-citus-adv-oclass-lock-with-oid-testdb1" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; }
step "s2-acquire-citus-adv-oclass-lock-with-oid-testdb2" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb2') FROM oclass_database; }
step "s2-alter-testdb1-rename-to-db1" { ALTER DATABASE testdb1 RENAME TO db1; }
step "s2-create-testdb2" { CREATE DATABASE testdb2; }
step "s2-drop-testdb2" { DROP DATABASE testdb2; }
step "s2-alter-testdb2-rename-to-db1" { ALTER DATABASE testdb2 RENAME TO db1; }
step "s2-alter-testdb2-rename-to-db2" { ALTER DATABASE testdb2 RENAME TO db2; }
step "s2-alter-testdb2-set-lc_monetary" { ALTER DATABASE testdb2 SET lc_monetary TO 'C'; }
step "s2-drop-db1" { DROP DATABASE db1; }
step "s2-drop-db2" { DROP DATABASE db2; }
// Given that we cannot execute CREATE / DROP DATABASE commands in a transaction block, we instead acquire the
// underlying advisory lock in some of the tests below.
// e.g., CREATE DATABASE vs CREATE DATABASE
permutation "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock" "s2-acquire-citus-adv-oclass-lock" "s1-commit" "s2-commit"
// e.g., DROP DATABASE vs DROP DATABASE
// dropping the same database
permutation "s1-create-testdb1" "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock-with-oid-testdb1" "s2-acquire-citus-adv-oclass-lock-with-oid-testdb1" "s1-commit" "s2-commit" "s1-drop-testdb1"
// dropping a different database
permutation "s1-create-testdb1" "s2-create-testdb2" "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock-with-oid-testdb1" "s2-acquire-citus-adv-oclass-lock-with-oid-testdb2" "s1-commit" "s2-commit" "s1-drop-testdb1" "s2-drop-testdb2"
// CREATE DATABASE vs DROP DATABASE
permutation "s2-create-testdb2" "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock" "s2-acquire-citus-adv-oclass-lock-with-oid-testdb2" "s1-commit" "s2-commit" "s2-drop-testdb2"
// CREATE DATABASE vs ALTER DATABASE SET <config>
permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-set-lc_monetary" "s1-create-db1" "s2-rollback" "s2-drop-testdb2" "s1-drop-db1"
// GRANT .. ON DATABASE .. TO ... vs ALTER DATABASE SET <config>
// on the same database
permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-set-lc_monetary" "s1-create-user-dbuser" "s1-grant-on-testdb2-to-dbuser" "s2-rollback" "s2-drop-testdb2" "s1-drop-user-dbuser"
// on a different database
permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-set-lc_monetary" "s1-create-testdb1" "s1-create-user-dbuser" "s1-grant-on-testdb1-to-dbuser" "s2-rollback" "s2-drop-testdb2" "s1-drop-testdb1" "s1-drop-user-dbuser"
// ALTER DATABASE .. RENAME TO .. vs ALTER DATABASE .. RENAME TO ..
// try to rename different databases to the same name
permutation "s1-create-testdb1" "s2-create-testdb2" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb2-rename-to-db1" "s1-commit" "s2-rollback" "s1-drop-db1" "s2-drop-testdb2"
permutation "s1-create-testdb1" "s2-create-testdb2" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb2-rename-to-db1" "s1-rollback" "s2-commit" "s1-drop-testdb1" "s2-drop-db1"
// try to rename same database
permutation "s1-create-testdb1" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb1-rename-to-db1" "s1-commit" "s2-rollback" "s1-drop-db1"
permutation "s1-create-testdb1" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb1-rename-to-db1" "s1-rollback" "s2-commit" "s2-drop-db1"
// CREATE DATABASE vs ALTER DATABASE .. RENAME TO ..
permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db1" "s1-create-db1" "s2-rollback" "s2-drop-testdb2" "s1-drop-db1"
permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db1" "s1-create-db1" "s2-commit" "s2-drop-db1"
permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db2" "s1-create-db1" "s2-commit" "s2-drop-db2" "s1-drop-db1"
// DROP DATABASE vs ALTER DATABASE .. RENAME TO ..
// try to rename the same database
permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db1" "s1-drop-testdb2" "s2-rollback"
// try to rename a different database
permutation "s2-create-testdb2" "s1-create-db1" "s2-begin" "s2-alter-testdb2-rename-to-db2" "s1-drop-db1" "s2-commit" "s2-drop-db2"

View File

@ -242,18 +242,18 @@ drop database "mydatabase#1'2";
SET citus.enable_create_database_propagation TO ON;
-- show that dropping the database from workers is not allowed when citus.enable_create_database_propagation is on
-- show that dropping the database from workers is allowed when citus.enable_create_database_propagation is on
DROP DATABASE db_needs_escape;
-- and the same applies to create database too
create database error_test;
drop database error_test;
\c - - - :master_port
SET citus.enable_create_database_propagation TO ON;
DROP DATABASE test_node_activation;
DROP DATABASE db_needs_escape;
DROP USER "role-needs\!escape";
-- drop database with force options test
@ -550,6 +550,204 @@ REVOKE CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 FROM local_role_1
DROP ROLE local_role_1;
DROP DATABASE local_database_1;
-- test create / drop database commands from workers
-- remove one of the workers to test node activation too
SELECT 1 from citus_remove_node('localhost', :worker_2_port);
\c - - - :worker_1_port
CREATE DATABASE local_worker_db;
SET citus.enable_create_database_propagation TO ON;
CREATE DATABASE db_created_from_worker
WITH template=template1
OWNER = create_drop_db_test_user
ENCODING = 'UTF8'
CONNECTION LIMIT = 42
TABLESPACE = "ts-needs\!escape"
ALLOW_CONNECTIONS = false;
\c - - - :master_port
SET citus.enable_create_database_propagation TO ON;
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
\c - - - :worker_1_port
SET citus.enable_create_database_propagation TO ON;
SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type;
SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type;
DROP DATABASE db_created_from_worker;
SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type;
-- drop the local database while the GUC is on
DROP DATABASE local_worker_db;
SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type;
SET citus.enable_create_database_propagation TO OFF;
CREATE DATABASE local_worker_db;
-- drop the local database while the GUC is off
DROP DATABASE local_worker_db;
SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type;
SET citus.enable_create_database_propagation TO ON;
CREATE DATABASE another_db_created_from_worker;
\c - - - :master_port
SELECT 1 FROM citus_remove_node('localhost', :master_port);
\c - - - :worker_1_port
SET citus.enable_create_database_propagation TO ON;
-- fails because coordinator is not added into metadata
DROP DATABASE another_db_created_from_worker;
-- fails because coordinator is not added into metadata
CREATE DATABASE new_db;
\c - - - :master_port
SET client_min_messages TO WARNING;
SELECT 1 FROM citus_add_node('localhost', :master_port, 0);
RESET client_min_messages;
SET citus.enable_create_database_propagation TO ON;
-- dropping a database that was created from a worker via a different node works fine
DROP DATABASE another_db_created_from_worker;
SELECT * FROM public.check_database_on_all_nodes('another_db_created_from_worker') ORDER BY node_type;
-- Show that we automatically propagate the dependencies (only roles atm) when
-- creating a database from workers too.
SELECT 1 from citus_remove_node('localhost', :worker_2_port);
\c - - - :worker_1_port
set citus.enable_create_role_propagation TO off;
create role non_propagated_role;
set citus.enable_create_role_propagation TO on;
set citus.enable_create_database_propagation TO on;
create database test_db OWNER non_propagated_role;
create role propagated_role;
\c - - - :master_port
-- not supported from workers, so need to execute this via coordinator
grant connect on database test_db to propagated_role;
SET citus.enable_create_database_propagation TO ON;
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
SELECT * FROM public.check_database_on_all_nodes('test_db') ORDER BY node_type;
REVOKE CONNECT ON DATABASE test_db FROM propagated_role;
DROP DATABASE test_db;
DROP ROLE propagated_role, non_propagated_role;
-- test pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock with null input
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(null, 'regression');
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), null);
-- OCLASS_DATABASE
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), NULL);
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'regression');
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), '');
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'no_such_db');
-- invalid OCLASS
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(-1, NULL);
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(-1, 'regression');
-- invalid OCLASS
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(100, NULL);
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(100, 'regression');
-- another valid OCLASS, but not implemented yet
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(10, NULL);
SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(10, 'regression');
SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO ON');
SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()');
SELECT pg_sleep(0.1);
-- only one of them succeeds and we don't run into a distributed deadlock
SELECT COUNT(*) FROM run_command_on_all_nodes('CREATE DATABASE concurrent_create_db') WHERE success;
SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type;
SELECT COUNT(*) FROM run_command_on_all_nodes('DROP DATABASE concurrent_create_db') WHERE success;
SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type;
-- revert the system wide change that enables citus.enable_create_database_propagation on all nodes
SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO OFF');
SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()');
SELECT pg_sleep(0.1);
-- but keep it enabled for coordinator for the rest of the tests
SET citus.enable_create_database_propagation TO ON;
CREATE DATABASE distributed_db;
CREATE USER no_createdb;
SET ROLE no_createdb;
SET citus.enable_create_database_propagation TO ON;
CREATE DATABASE no_createdb;
ALTER DATABASE distributed_db RENAME TO rename_test;
DROP DATABASE distributed_db;
ALTER DATABASE distributed_db SET TABLESPACE pg_default;
ALTER DATABASE distributed_db SET timezone TO 'UTC';
ALTER DATABASE distributed_db RESET timezone;
GRANT ALL ON DATABASE distributed_db TO postgres;
RESET ROLE;
ALTER ROLE no_createdb createdb;
SET ROLE no_createdb;
CREATE DATABASE no_createdb;
ALTER DATABASE distributed_db RENAME TO rename_test;
RESET ROLE;
SELECT 1 FROM run_command_on_all_nodes($$GRANT ALL ON TABLESPACE pg_default TO no_createdb$$);
ALTER DATABASE distributed_db OWNER TO no_createdb;
SET ROLE no_createdb;
ALTER DATABASE distributed_db SET TABLESPACE pg_default;
ALTER DATABASE distributed_db SET timezone TO 'UTC';
ALTER DATABASE distributed_db RESET timezone;
GRANT ALL ON DATABASE distributed_db TO postgres;
ALTER DATABASE distributed_db RENAME TO rename_test;
DROP DATABASE rename_test;
RESET ROLE;
SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FROM no_createdb$$);
DROP DATABASE no_createdb;
DROP USER no_createdb;
SET citus.enable_create_database_propagation TO ON;
-- clean up resources created by this test
-- DROP TABLESPACE is not supported, so we need to drop it manually.

View File

@ -17,7 +17,8 @@ cte2 AS MATERIALIZED (
SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10;
SET citus.max_intermediate_result_size TO 17;
SET citus.max_intermediate_result_size TO 9;
-- regular adaptive executor CTE should fail
WITH cte AS MATERIALIZED
(
SELECT

View File

@ -6,3 +6,4 @@
-- databases.
CREATE DATABASE new_database;
DROP DATABASE new_database;