mirror of https://github.com/citusdata/citus.git
Add support for schema-based-sharding via a GUC (#6866)
DESCRIPTION: Adds citus.enable_schema_based_sharding GUC that allows sharding the database based on schemas when enabled.

* Refactor the logic that automatically creates Citus managed tables
* Refactor CreateSingleShardTable() to allow specifying colocation id instead
* Add support for schema-based-sharding via a GUC

### What this PR is about:

Add **citus.enable_schema_based_sharding GUC** to enable schema-based sharding. Each schema created while this GUC is ON will be considered a tenant schema. Later on, regardless of whether the GUC is ON or OFF, any table created in a tenant schema will be converted to a single-shard distributed table (without a shard key). All the tenant tables that belong to a particular schema will be co-located with each other and will have a shard count of 1.

We introduce a new metadata table --pg_dist_tenant_schema-- to do the bookkeeping for tenant schemas:

```sql
psql> \d pg_dist_tenant_schema

       Table "pg_catalog.pg_dist_tenant_schema"
┌──────────────┬─────────┬───────────┬──────────┬─────────┐
│    Column    │  Type   │ Collation │ Nullable │ Default │
├──────────────┼─────────┼───────────┼──────────┼─────────┤
│ schemaid     │ oid     │           │ not null │         │
│ colocationid │ integer │           │ not null │         │
└──────────────┴─────────┴───────────┴──────────┴─────────┘
Indexes:
    "pg_dist_tenant_schema_pkey" PRIMARY KEY, btree (schemaid)
    "pg_dist_tenant_schema_unique_colocationid_index" UNIQUE, btree (colocationid)

psql> table pg_dist_tenant_schema;
┌──────────┬──────────────┐
│ schemaid │ colocationid │
├──────────┼──────────────┤
│    41963 │           91 │
│    41962 │           90 │
└──────────┴──────────────┘
(2 rows)
```

The colocation id column of pg_dist_tenant_schema can never be NULL, even for tenant schemas that don't have a tenant table yet. This is because we assign colocation ids to tenant schemas as soon as they are created. That way, we can keep associating tenant schemas with particular colocation groups even if all the tenant tables of a tenant schema are dropped and recreated later on.

When a tenant schema is dropped, we delete the corresponding row from pg_dist_tenant_schema. In that case, we delete the corresponding colocation group from pg_dist_colocation as well.

### Future work for 12.0 release:

We're building schema-based sharding on top of the infrastructure that adds support for creating distributed tables without a shard key (https://github.com/citusdata/citus/pull/6867). However, not all the operations that can be done on distributed tables without a shard key necessarily make sense (in the same way) in the context of schema-based sharding. For example, we need to think about what happens if a user attempts to alter the schema of a tenant table. We will tackle such scenarios in a future PR.

We will also add a new UDF --citus.schema_tenant_set() or such-- to allow users to use an existing schema as a tenant schema, and another one --citus.schema_tenant_unset() or such-- to stop using a schema as a tenant schema, in future PRs.
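
For illustration, here is a minimal usage sketch of the behavior described above (the schema and table names are made up, and the exact catalog contents will differ on your cluster):

```sql
-- Every schema created while the GUC is ON becomes a tenant schema
-- and is assigned its own colocation group right away.
SET citus.enable_schema_based_sharding TO ON;
CREATE SCHEMA tenant_a;
CREATE SCHEMA tenant_b;

-- Regardless of the GUC's current value, tables created in a tenant
-- schema become single-shard distributed tables without a shard key,
-- co-located with the other tables of the same schema.
SET citus.enable_schema_based_sharding TO OFF;
CREATE TABLE tenant_a.users  (user_id bigint, name text);
CREATE TABLE tenant_a.orders (order_id bigint, user_id bigint);

-- Inspect the bookkeeping table introduced by this PR.
TABLE pg_dist_tenant_schema;

-- Dropping a tenant schema removes its pg_dist_tenant_schema row and
-- deletes its colocation group from pg_dist_colocation.
DROP SCHEMA tenant_a CASCADE;
```

Note that, per the changes in this PR, creating a tenant table inside the CREATE SCHEMA statement itself, or via CREATE TABLE AS / SELECT INTO / CREATE TABLE OF, is rejected with an error.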
parent
2c7beee562
commit
246b054a7d
|
@ -6160,3 +6160,4 @@ if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
|
|||
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
|
||||
$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
|
||||
fi
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
*/
|
||||
|
||||
#include "postgres.h"
|
||||
#include "miscadmin.h"
|
||||
|
||||
#include "access/genam.h"
|
||||
#include "access/htup_details.h"
|
||||
|
@ -1500,3 +1501,38 @@ FinalizeCitusLocalTableCreation(Oid relationId)
|
|||
InvalidateForeignKeyGraph();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* ShouldAddNewTableToMetadata takes a relationId and returns true if we need to add a
|
||||
* newly created table to metadata, false otherwise.
|
||||
* For partitions and temporary tables, ShouldAddNewTableToMetadata returns false.
|
||||
* For other tables created, returns true, if we are on a coordinator that is added
|
||||
* as worker, and of course, if the GUC use_citus_managed_tables is set to on.
|
||||
*/
|
||||
bool
|
||||
ShouldAddNewTableToMetadata(Oid relationId)
|
||||
{
|
||||
if (get_rel_persistence(relationId) == RELPERSISTENCE_TEMP ||
|
||||
PartitionTableNoLock(relationId))
|
||||
{
|
||||
/*
|
||||
* Shouldn't add table to metadata if it's a temp table, or a partition.
|
||||
* Creating partitions of a table that is added to metadata is already handled.
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
|
||||
if (AddAllLocalTablesToMetadata && !IsBinaryUpgrade &&
|
||||
IsCoordinator() && CoordinatorAddedAsWorkerNode())
|
||||
{
|
||||
/*
|
||||
* We have verified that the GUC is set to true, and we are not upgrading,
|
||||
* and we are on the coordinator that is added as worker node.
|
||||
* So return true here, to add this newly created table to metadata.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -111,8 +111,8 @@ typedef struct
|
|||
{
|
||||
int shardCount;
|
||||
bool shardCountIsStrict;
|
||||
char *colocateWithTableName;
|
||||
char *distributionColumnName;
|
||||
ColocationParam colocationParam;
|
||||
} DistributedTableParams;
|
||||
|
||||
|
||||
|
@ -296,7 +296,11 @@ create_distributed_table(PG_FUNCTION_ARGS)
|
|||
"when the distribution column is null ")));
|
||||
}
|
||||
|
||||
CreateSingleShardTable(relationId, colocateWithTableName);
|
||||
ColocationParam colocationParam = {
|
||||
.colocationParamType = COLOCATE_WITH_TABLE_LIKE_OPT,
|
||||
.colocateWithTableName = colocateWithTableName,
|
||||
};
|
||||
CreateSingleShardTable(relationId, colocationParam);
|
||||
}
|
||||
|
||||
PG_RETURN_VOID();
|
||||
|
@ -1006,7 +1010,10 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName,
|
|||
}
|
||||
|
||||
DistributedTableParams distributedTableParams = {
|
||||
.colocateWithTableName = colocateWithTableName,
|
||||
.colocationParam = {
|
||||
.colocateWithTableName = colocateWithTableName,
|
||||
.colocationParamType = COLOCATE_WITH_TABLE_LIKE_OPT
|
||||
},
|
||||
.shardCount = shardCount,
|
||||
.shardCountIsStrict = shardCountIsStrict,
|
||||
.distributionColumnName = distributionColumnName
|
||||
|
@ -1031,10 +1038,10 @@ CreateReferenceTable(Oid relationId)
|
|||
* single shard distributed table that doesn't have a shard key.
|
||||
*/
|
||||
void
|
||||
CreateSingleShardTable(Oid relationId, char *colocateWithTableName)
|
||||
CreateSingleShardTable(Oid relationId, ColocationParam colocationParam)
|
||||
{
|
||||
DistributedTableParams distributedTableParams = {
|
||||
.colocateWithTableName = colocateWithTableName,
|
||||
.colocationParam = colocationParam,
|
||||
.shardCount = 1,
|
||||
.shardCountIsStrict = true,
|
||||
.distributionColumnName = NULL
|
||||
|
@ -1155,9 +1162,23 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
|
|||
* ColocationIdForNewTable assumes caller acquires lock on relationId. In our case,
|
||||
* our caller already acquired lock on relationId.
|
||||
*/
|
||||
uint32 colocationId = ColocationIdForNewTable(relationId, tableType,
|
||||
distributedTableParams,
|
||||
distributionColumn);
|
||||
uint32 colocationId = INVALID_COLOCATION_ID;
|
||||
if (distributedTableParams &&
|
||||
distributedTableParams->colocationParam.colocationParamType ==
|
||||
COLOCATE_WITH_COLOCATION_ID)
|
||||
{
|
||||
colocationId = distributedTableParams->colocationParam.colocationId;
|
||||
}
|
||||
else
|
||||
{
|
||||
/*
|
||||
* ColocationIdForNewTable assumes caller acquires lock on relationId. In our case,
|
||||
* our caller already acquired lock on relationId.
|
||||
*/
|
||||
colocationId = ColocationIdForNewTable(relationId, tableType,
|
||||
distributedTableParams,
|
||||
distributionColumn);
|
||||
}
|
||||
|
||||
EnsureRelationCanBeDistributed(relationId, distributionColumn,
|
||||
citusTableParams.distributionMethod,
|
||||
|
@ -1257,7 +1278,10 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
|
|||
MemoryContextReset(citusPartitionContext);
|
||||
|
||||
DistributedTableParams childDistributedTableParams = {
|
||||
.colocateWithTableName = parentRelationName,
|
||||
.colocationParam = {
|
||||
.colocationParamType = COLOCATE_WITH_TABLE_LIKE_OPT,
|
||||
.colocateWithTableName = parentRelationName,
|
||||
},
|
||||
.shardCount = distributedTableParams->shardCount,
|
||||
.shardCountIsStrict = false,
|
||||
.distributionColumnName = distributedTableParams->distributionColumnName,
|
||||
|
@ -1308,30 +1332,39 @@ DecideCitusTableParams(CitusTableType tableType,
|
|||
{
|
||||
case HASH_DISTRIBUTED:
|
||||
{
|
||||
Assert(distributedTableParams->colocationParam.colocationParamType ==
|
||||
COLOCATE_WITH_TABLE_LIKE_OPT);
|
||||
|
||||
citusTableParams.distributionMethod = DISTRIBUTE_BY_HASH;
|
||||
citusTableParams.replicationModel =
|
||||
DecideDistTableReplicationModel(DISTRIBUTE_BY_HASH,
|
||||
distributedTableParams->
|
||||
distributedTableParams->colocationParam.
|
||||
colocateWithTableName);
|
||||
break;
|
||||
}
|
||||
|
||||
case APPEND_DISTRIBUTED:
|
||||
{
|
||||
Assert(distributedTableParams->colocationParam.colocationParamType ==
|
||||
COLOCATE_WITH_TABLE_LIKE_OPT);
|
||||
|
||||
citusTableParams.distributionMethod = DISTRIBUTE_BY_APPEND;
|
||||
citusTableParams.replicationModel =
|
||||
DecideDistTableReplicationModel(APPEND_DISTRIBUTED,
|
||||
distributedTableParams->
|
||||
distributedTableParams->colocationParam.
|
||||
colocateWithTableName);
|
||||
break;
|
||||
}
|
||||
|
||||
case RANGE_DISTRIBUTED:
|
||||
{
|
||||
Assert(distributedTableParams->colocationParam.colocationParamType ==
|
||||
COLOCATE_WITH_TABLE_LIKE_OPT);
|
||||
|
||||
citusTableParams.distributionMethod = DISTRIBUTE_BY_RANGE;
|
||||
citusTableParams.replicationModel =
|
||||
DecideDistTableReplicationModel(RANGE_DISTRIBUTED,
|
||||
distributedTableParams->
|
||||
distributedTableParams->colocationParam.
|
||||
colocateWithTableName);
|
||||
break;
|
||||
}
|
||||
|
@ -1768,7 +1801,11 @@ ColocationIdForNewTable(Oid relationId, CitusTableType tableType,
|
|||
|
||||
if (tableType == APPEND_DISTRIBUTED || tableType == RANGE_DISTRIBUTED)
|
||||
{
|
||||
if (!IsColocateWithDefault(distributedTableParams->colocateWithTableName))
|
||||
Assert(distributedTableParams->colocationParam.colocationParamType ==
|
||||
COLOCATE_WITH_TABLE_LIKE_OPT);
|
||||
char *colocateWithTableName =
|
||||
distributedTableParams->colocationParam.colocateWithTableName;
|
||||
if (!IsColocateWithDefault(colocateWithTableName))
|
||||
{
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot distribute relation"),
|
||||
|
@ -1795,8 +1832,13 @@ ColocationIdForNewTable(Oid relationId, CitusTableType tableType,
|
|||
Oid distributionColumnCollation =
|
||||
distributionColumn ? get_typcollation(distributionColumnType) : InvalidOid;
|
||||
|
||||
Assert(distributedTableParams->colocationParam.colocationParamType ==
|
||||
COLOCATE_WITH_TABLE_LIKE_OPT);
|
||||
char *colocateWithTableName =
|
||||
distributedTableParams->colocationParam.colocateWithTableName;
|
||||
|
||||
/* get an advisory lock to serialize concurrent default group creations */
|
||||
if (IsColocateWithDefault(distributedTableParams->colocateWithTableName))
|
||||
if (IsColocateWithDefault(colocateWithTableName))
|
||||
{
|
||||
AcquireColocationDefaultLock();
|
||||
}
|
||||
|
@ -1808,10 +1850,9 @@ ColocationIdForNewTable(Oid relationId, CitusTableType tableType,
|
|||
distributedTableParams->shardCount,
|
||||
distributedTableParams->
|
||||
shardCountIsStrict,
|
||||
distributedTableParams->
|
||||
colocateWithTableName);
|
||||
|
||||
if (IsColocateWithDefault(distributedTableParams->colocateWithTableName) &&
|
||||
if (IsColocateWithDefault(colocateWithTableName) &&
|
||||
(colocationId != INVALID_COLOCATION_ID))
|
||||
{
|
||||
/*
|
||||
|
@ -1824,7 +1865,7 @@ ColocationIdForNewTable(Oid relationId, CitusTableType tableType,
|
|||
|
||||
if (colocationId == INVALID_COLOCATION_ID)
|
||||
{
|
||||
if (IsColocateWithDefault(distributedTableParams->colocateWithTableName))
|
||||
if (IsColocateWithDefault(colocateWithTableName))
|
||||
{
|
||||
/*
|
||||
* Generate a new colocation ID and insert a pg_dist_colocation
|
||||
|
@ -1835,7 +1876,7 @@ ColocationIdForNewTable(Oid relationId, CitusTableType tableType,
|
|||
distributionColumnType,
|
||||
distributionColumnCollation);
|
||||
}
|
||||
else if (IsColocateWithNone(distributedTableParams->colocateWithTableName))
|
||||
else if (IsColocateWithNone(colocateWithTableName))
|
||||
{
|
||||
/*
|
||||
* Generate a new colocation ID and insert a pg_dist_colocation
|
||||
|
|
|
@ -294,8 +294,8 @@ static DistributeObjectOps Any_CreateForeignServer = {
|
|||
static DistributeObjectOps Any_CreateSchema = {
|
||||
.deparse = DeparseCreateSchemaStmt,
|
||||
.qualify = NULL,
|
||||
.preprocess = PreprocessCreateSchemaStmt,
|
||||
.postprocess = NULL,
|
||||
.preprocess = NULL,
|
||||
.postprocess = PostprocessCreateSchemaStmt,
|
||||
.operationType = DIST_OPS_CREATE,
|
||||
.address = CreateSchemaStmtObjectAddress,
|
||||
.markDistributed = true,
|
||||
|
|
|
@ -90,7 +90,27 @@ master_remove_partition_metadata(PG_FUNCTION_ARGS)
|
|||
|
||||
DeletePartitionRow(relationId);
|
||||
|
||||
DeleteColocationGroupIfNoTablesBelong(colocationId);
|
||||
/*
|
||||
* We want to keep using the same colocation group for the tenant even if
|
||||
* all the tables that belong to it are dropped and new tables are created
|
||||
* for the tenant etc. For this reason, if a colocation group belongs to a
|
||||
* tenant schema, we don't delete the colocation group even if there are no
|
||||
* tables that belong to it.
|
||||
*
|
||||
* We do the same if system catalog cannot find the schema of the table
|
||||
* because this means that the whole schema is dropped.
|
||||
*
|
||||
* In that case, we want to delete the colocation group regardless of
|
||||
* whether the schema is a tenant schema or not. Even more, calling
|
||||
* IsTenantSchema() with InvalidOid would cause an error, hence we check
|
||||
* whether the schema is valid beforehand.
|
||||
*/
|
||||
bool missingOk = true;
|
||||
Oid schemaId = get_namespace_oid(schemaName, missingOk);
|
||||
if (!OidIsValid(schemaId) || !IsTenantSchema(schemaId))
|
||||
{
|
||||
DeleteColocationGroupIfNoTablesBelong(colocationId);
|
||||
}
|
||||
|
||||
PG_RETURN_VOID();
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include "catalog/namespace.h"
|
||||
#include "catalog/pg_class.h"
|
||||
#include "catalog/pg_namespace.h"
|
||||
#include "distributed/colocation_utils.h"
|
||||
#include "distributed/commands.h"
|
||||
#include <distributed/connection_management.h>
|
||||
#include "distributed/commands/utility_hook.h"
|
||||
|
@ -33,6 +34,7 @@
|
|||
#include "distributed/resource_lock.h"
|
||||
#include <distributed/remote_commands.h>
|
||||
#include <distributed/remote_commands.h>
|
||||
#include "distributed/tenant_schema_metadata.h"
|
||||
#include "distributed/version_compat.h"
|
||||
#include "nodes/parsenodes.h"
|
||||
#include "utils/fmgroids.h"
|
||||
|
@ -45,16 +47,18 @@ static List * FilterDistributedSchemas(List *schemas);
|
|||
static bool SchemaHasDistributedTableWithFKey(char *schemaName);
|
||||
static bool ShouldPropagateCreateSchemaStmt(void);
|
||||
static List * GetGrantCommandsFromCreateSchemaStmt(Node *node);
|
||||
static bool CreateSchemaStmtCreatesTable(CreateSchemaStmt *stmt);
|
||||
|
||||
|
||||
/*
|
||||
* PreprocessCreateSchemaStmt is called during the planning phase for
|
||||
* PostprocessCreateSchemaStmt is called after processing a
|
||||
* CREATE SCHEMA ..
|
||||
*/
|
||||
List *
|
||||
PreprocessCreateSchemaStmt(Node *node, const char *queryString,
|
||||
ProcessUtilityContext processUtilityContext)
|
||||
PostprocessCreateSchemaStmt(Node *node, const char *queryString)
|
||||
{
|
||||
CreateSchemaStmt *createSchemaStmt = castNode(CreateSchemaStmt, node);
|
||||
|
||||
if (!ShouldPropagateCreateSchemaStmt())
|
||||
{
|
||||
return NIL;
|
||||
|
@ -74,6 +78,38 @@ PreprocessCreateSchemaStmt(Node *node, const char *queryString,
|
|||
|
||||
commands = list_concat(commands, GetGrantCommandsFromCreateSchemaStmt(node));
|
||||
|
||||
if (ShouldUseSchemaBasedSharding(createSchemaStmt->schemaname))
|
||||
{
|
||||
/* for now, we don't allow creating tenant tables when creating the schema itself */
|
||||
if (CreateSchemaStmtCreatesTable(createSchemaStmt))
|
||||
{
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot create tenant table in CREATE "
|
||||
"SCHEMA statement"),
|
||||
errhint("Use CREATE TABLE statement to create "
|
||||
"tenant tables.")));
|
||||
}
|
||||
|
||||
bool missingOk = false;
|
||||
Oid schemaId = get_namespace_oid(createSchemaStmt->schemaname, missingOk);
|
||||
|
||||
/*
|
||||
* Register the tenant schema on the coordinator and save the command
|
||||
* to register it on the workers.
|
||||
*/
|
||||
int shardCount = 1;
|
||||
int replicationFactor = 1;
|
||||
Oid distributionColumnType = InvalidOid;
|
||||
Oid distributionColumnCollation = InvalidOid;
|
||||
uint32 colocationId = CreateColocationGroup(
|
||||
shardCount, replicationFactor, distributionColumnType,
|
||||
distributionColumnCollation);
|
||||
|
||||
InsertTenantSchemaLocally(schemaId, colocationId);
|
||||
|
||||
commands = lappend(commands, TenantSchemaInsertCommand(schemaId, colocationId));
|
||||
}
|
||||
|
||||
commands = lappend(commands, ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
|
@ -402,3 +438,27 @@ GetGrantCommandsFromCreateSchemaStmt(Node *node)
|
|||
|
||||
return commands;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* CreateSchemaStmtCreatesTable returns true if given CreateSchemaStmt
|
||||
* creates a table using "schema_element" list.
|
||||
*/
|
||||
static bool
|
||||
CreateSchemaStmtCreatesTable(CreateSchemaStmt *stmt)
|
||||
{
|
||||
Node *element = NULL;
|
||||
foreach_ptr(element, stmt->schemaElts)
|
||||
{
|
||||
/*
|
||||
* CREATE TABLE AS and CREATE FOREIGN TABLE commands cannot be
|
||||
* used as schema_elements anyway, so we don't need to check them.
|
||||
*/
|
||||
if (IsA(element, CreateStmt))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -0,0 +1,244 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
* schema_based_sharding.c
|
||||
*
|
||||
* Routines for schema-based sharding.
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#include "postgres.h"
|
||||
#include "miscadmin.h"
|
||||
#include "catalog/pg_namespace_d.h"
|
||||
#include "commands/extension.h"
|
||||
#include "distributed/argutils.h"
|
||||
#include "distributed/backend_data.h"
|
||||
#include "distributed/colocation_utils.h"
|
||||
#include "distributed/commands.h"
|
||||
#include "distributed/metadata_sync.h"
|
||||
#include "distributed/multi_partitioning_utils.h"
|
||||
#include "distributed/tenant_schema_metadata.h"
|
||||
#include "distributed/metadata/distobject.h"
|
||||
#include "utils/builtins.h"
|
||||
#include "utils/lsyscache.h"
|
||||
#include "utils/syscache.h"
|
||||
|
||||
|
||||
PG_FUNCTION_INFO_V1(citus_internal_unregister_tenant_schema_globally);
|
||||
|
||||
|
||||
/* controlled via citus.enable_schema_based_sharding GUC */
|
||||
bool EnableSchemaBasedSharding = false;
|
||||
|
||||
|
||||
/*
|
||||
* ShouldUseSchemaBasedSharding returns true if the schema with the given name should be
|
||||
* used as a tenant schema.
|
||||
*/
|
||||
bool
|
||||
ShouldUseSchemaBasedSharding(char *schemaName)
|
||||
{
|
||||
if (!EnableSchemaBasedSharding)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (IsBinaryUpgrade)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Citus utility hook skips processing CREATE SCHEMA commands while an
|
||||
* extension is being created. For this reason, we don't expect to get
|
||||
* here while an extension is being created.
|
||||
*/
|
||||
Assert(!creating_extension);
|
||||
|
||||
/*
|
||||
* CREATE SCHEMA commands issued by internal backends are not meant to
|
||||
* create tenant schemas but to sync metadata.
|
||||
*
|
||||
* On workers, Citus utility hook skips processing CREATE SCHEMA commands
|
||||
* because we temporarily disable DDL propagation on workers when sending
|
||||
* CREATE SCHEMA commands. For this reason, right now this check is a bit
|
||||
* redundant but we prefer to keep it here to be on the safe side.
|
||||
*/
|
||||
if (IsCitusInternalBackend() || IsRebalancerInternalBackend())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Don't do an OID comparison based on PG_PUBLIC_NAMESPACE because
|
||||
* we want to treat "public" schema in the same way even if it's
|
||||
* recreated.
|
||||
*/
|
||||
if (strcmp(schemaName, "public") == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* ShouldCreateTenantSchemaTable returns true if we should create a tenant
|
||||
* schema table for given relationId.
|
||||
*/
|
||||
bool
|
||||
ShouldCreateTenantSchemaTable(Oid relationId)
|
||||
{
|
||||
if (IsBinaryUpgrade)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* CREATE TABLE commands issued by internal backends are not meant to
|
||||
* create tenant tables but to sync metadata.
|
||||
*/
|
||||
if (IsCitusInternalBackend() || IsRebalancerInternalBackend())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
Oid schemaId = get_rel_namespace(relationId);
|
||||
return IsTenantSchema(schemaId);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* CreateTenantSchemaTable creates a tenant table with given relationId.
|
||||
*
|
||||
* This means creating a single shard distributed table without a shard
|
||||
* key and colocating it with the other tables in its schema.
|
||||
*/
|
||||
void
|
||||
CreateTenantSchemaTable(Oid relationId)
|
||||
{
|
||||
if (!IsCoordinator())
|
||||
{
|
||||
/*
|
||||
* We don't support creating tenant tables from workers. We could
|
||||
* let ShouldCreateTenantSchemaTable() return false to allow users
|
||||
* to create a local table as usual but that would be confusing because
|
||||
* it might sound like we allow creating tenant tables from workers.
|
||||
* For this reason, we prefer to throw an error instead.
|
||||
*
|
||||
* Indeed, CreateSingleShardTable() would already do so but we
|
||||
* prefer to throw an error with a more meaningful message, rather
|
||||
* than saying "operation is not allowed on this node".
|
||||
*/
|
||||
ereport(ERROR, (errmsg("cannot create a tenant table from a worker node"),
|
||||
errhint("Connect to the coordinator node and try again.")));
|
||||
}
|
||||
|
||||
if (IsForeignTable(relationId))
|
||||
{
|
||||
/* throw an error that is nicer than the one CreateSingleShardTable() would throw */
|
||||
ereport(ERROR, (errmsg("cannot create a tenant table from a foreign table")));
|
||||
}
|
||||
else if (PartitionTable(relationId))
|
||||
{
|
||||
ErrorIfIllegalPartitioningInTenantSchema(PartitionParentOid(relationId),
|
||||
relationId);
|
||||
}
|
||||
|
||||
/*
|
||||
* We don't expect this to happen because ShouldCreateTenantSchemaTable()
|
||||
* should've already verified that; but better to check.
|
||||
*/
|
||||
Oid schemaId = get_rel_namespace(relationId);
|
||||
uint32 colocationId = SchemaIdGetTenantColocationId(schemaId);
|
||||
if (colocationId == INVALID_COLOCATION_ID)
|
||||
{
|
||||
ereport(ERROR, (errmsg("schema \"%s\" is not a tenant schema",
|
||||
get_namespace_name(schemaId))));
|
||||
}
|
||||
|
||||
ColocationParam colocationParam = {
|
||||
.colocationParamType = COLOCATE_WITH_COLOCATION_ID,
|
||||
.colocationId = colocationId,
|
||||
};
|
||||
CreateSingleShardTable(relationId, colocationParam);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* ErrorIfIllegalPartitioningInTenantSchema throws an error if the
|
||||
* partitioning relationship between the parent and the child is illegal
|
||||
* because they are in different schemas while one of them is a tenant table.
|
||||
*/
|
||||
void
|
||||
ErrorIfIllegalPartitioningInTenantSchema(Oid parentRelationId, Oid partitionRelationId)
|
||||
{
|
||||
Oid partitionSchemaId = get_rel_namespace(partitionRelationId);
|
||||
Oid parentSchemaId = get_rel_namespace(parentRelationId);
|
||||
|
||||
bool partitionIsTenantTable = IsTenantSchema(partitionSchemaId);
|
||||
bool parentIsTenantTable = IsTenantSchema(parentSchemaId);
|
||||
|
||||
bool illegalPartitioning = false;
|
||||
if (partitionIsTenantTable != parentIsTenantTable)
|
||||
{
|
||||
illegalPartitioning = true;
|
||||
}
|
||||
else if (partitionIsTenantTable && parentIsTenantTable)
|
||||
{
|
||||
illegalPartitioning = (parentSchemaId != partitionSchemaId);
|
||||
}
|
||||
|
||||
if (illegalPartitioning)
|
||||
{
|
||||
ereport(ERROR, (errmsg("partitioning with tenant tables is not "
|
||||
"supported when the parent and the child "
|
||||
"are in different schemas")));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* citus_internal_unregister_tenant_schema_globally removes given schema from
|
||||
* the tenant schema metadata table, deletes the colocation group of the schema
|
||||
* and sends the command to do the same on the workers.
|
||||
*/
|
||||
Datum
|
||||
citus_internal_unregister_tenant_schema_globally(PG_FUNCTION_ARGS)
|
||||
{
|
||||
PG_ENSURE_ARGNOTNULL(0, "schema_id");
|
||||
Oid schemaId = PG_GETARG_OID(0);
|
||||
|
||||
PG_ENSURE_ARGNOTNULL(1, "schema_name");
|
||||
text *schemaName = PG_GETARG_TEXT_PP(1);
|
||||
char *schemaNameStr = text_to_cstring(schemaName);
|
||||
|
||||
/*
|
||||
* Skip on workers because we expect this to be called from the coordinator
|
||||
* only via drop hook.
|
||||
*/
|
||||
if (!IsCoordinator())
|
||||
{
|
||||
PG_RETURN_VOID();
|
||||
}
|
||||
|
||||
/* make sure that the schema is dropped already */
|
||||
HeapTuple namespaceTuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(schemaId));
|
||||
if (HeapTupleIsValid(namespaceTuple))
|
||||
{
|
||||
ReleaseSysCache(namespaceTuple);
|
||||
|
||||
ereport(ERROR, (errmsg("schema is expected to be already dropped "
|
||||
"because this function is only expected to "
|
||||
"be called from Citus drop hook")));
|
||||
}
|
||||
|
||||
uint32 tenantSchemaColocationId = SchemaIdGetTenantColocationId(schemaId);
|
||||
|
||||
DeleteTenantSchemaLocally(schemaId);
|
||||
SendCommandToWorkersWithMetadata(TenantSchemaDeleteCommand(schemaNameStr));
|
||||
|
||||
DeleteColocationGroup(tenantSchemaColocationId);
|
||||
|
||||
PG_RETURN_VOID();
|
||||
}
|
|
@ -229,6 +229,17 @@ PostprocessCreateTableStmt(CreateStmt *createStatement, const char *queryString)
|
|||
{
|
||||
PostprocessCreateTableStmtForeignKeys(createStatement);
|
||||
|
||||
bool missingOk = false;
|
||||
Oid relationId = RangeVarGetRelid(createStatement->relation, NoLock, missingOk);
|
||||
Oid schemaId = get_rel_namespace(relationId);
|
||||
if (createStatement->ofTypename && IsTenantSchema(schemaId))
|
||||
{
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot create a tenant table by using CREATE TABLE "
|
||||
"OF syntax")));
|
||||
}
|
||||
|
||||
if (createStatement->inhRelations != NIL)
|
||||
{
|
||||
if (createStatement->partbound != NULL)
|
||||
|
@ -239,15 +250,31 @@ PostprocessCreateTableStmt(CreateStmt *createStatement, const char *queryString)
|
|||
else
|
||||
{
|
||||
/* process CREATE TABLE ... INHERITS ... */
|
||||
|
||||
if (IsTenantSchema(schemaId))
|
||||
{
|
||||
ereport(ERROR, (errmsg("tenant tables cannot inherit or "
|
||||
"be inherited")));
|
||||
}
|
||||
|
||||
RangeVar *parentRelation = NULL;
|
||||
foreach_ptr(parentRelation, createStatement->inhRelations)
|
||||
{
|
||||
bool missingOk = false;
|
||||
Oid parentRelationId = RangeVarGetRelid(parentRelation, NoLock,
|
||||
missingOk);
|
||||
Assert(parentRelationId != InvalidOid);
|
||||
|
||||
if (IsCitusTable(parentRelationId))
|
||||
/*
|
||||
* Throw a better error message if the user tries to inherit a
|
||||
* tenant table or if the user tries to inherit from a tenant
|
||||
* table.
|
||||
*/
|
||||
if (IsTenantSchema(get_rel_namespace(parentRelationId)))
|
||||
{
|
||||
ereport(ERROR, (errmsg("tenant tables cannot inherit or "
|
||||
"be inherited")));
|
||||
}
|
||||
else if (IsCitusTable(parentRelationId))
|
||||
{
|
||||
/* here we error out if inheriting a distributed table */
|
||||
ereport(ERROR, (errmsg("non-distributed tables cannot inherit "
|
||||
|
@ -282,6 +309,15 @@ PostprocessCreateTableStmtForeignKeys(CreateStmt *createStatement)
|
|||
bool missingOk = false;
|
||||
Oid relationId = RangeVarGetRelid(createStatement->relation, NoLock, missingOk);
|
||||
|
||||
if (ShouldCreateTenantSchemaTable(relationId))
|
||||
{
|
||||
/*
|
||||
* Avoid unnecessarily adding the table into metadata if we will
|
||||
* distribute it as a tenant table later.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* As we are just creating the table, we cannot have foreign keys that our
|
||||
* relation is referenced. So we use INCLUDE_REFERENCING_CONSTRAINTS here.
|
||||
|
@ -378,6 +414,8 @@ PostprocessCreateTableStmtPartitionOf(CreateStmt *createStatement, const
|
|||
}
|
||||
}
|
||||
|
||||
ErrorIfIllegalPartitioningInTenantSchema(PartitionParentOid(relationId), relationId);
|
||||
|
||||
/*
|
||||
* If a partition is being created and if its parent is a distributed
|
||||
* table, we will distribute this table as well.
|
||||
|
@ -385,9 +423,8 @@ PostprocessCreateTableStmtPartitionOf(CreateStmt *createStatement, const
|
|||
if (IsCitusTable(parentRelationId))
|
||||
{
|
||||
/*
|
||||
* We can create Citus local tables and single-shard distributed tables
|
||||
* right away, without switching to sequential mode, because they are going to
|
||||
* have only one shard.
|
||||
* We can create Citus local tables right away, without switching to
|
||||
* sequential mode, because they are going to have only one shard.
|
||||
*/
|
||||
if (IsCitusTableType(parentRelationId, CITUS_LOCAL_TABLE))
|
||||
{
|
||||
|
@ -396,25 +433,7 @@ PostprocessCreateTableStmtPartitionOf(CreateStmt *createStatement, const
|
|||
return;
|
||||
}
|
||||
|
||||
char *parentRelationName = generate_qualified_relation_name(parentRelationId);
|
||||
|
||||
if (IsCitusTableType(parentRelationId, SINGLE_SHARD_DISTRIBUTED))
|
||||
{
|
||||
CreateSingleShardTable(relationId, parentRelationName);
|
||||
return;
|
||||
}
|
||||
|
||||
Var *parentDistributionColumn = DistPartitionKeyOrError(parentRelationId);
|
||||
char *distributionColumnName =
|
||||
ColumnToColumnName(parentRelationId, (Node *) parentDistributionColumn);
|
||||
char parentDistributionMethod = DISTRIBUTE_BY_HASH;
|
||||
|
||||
SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(parentRelationId,
|
||||
relationId);
|
||||
|
||||
CreateDistributedTable(relationId, distributionColumnName,
|
||||
parentDistributionMethod, ShardCount, false,
|
||||
parentRelationName);
|
||||
DistributePartitionUsingParent(parentRelationId, relationId);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -477,6 +496,9 @@ PreprocessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement,
|
|||
return NIL;
|
||||
}
|
||||
|
||||
ErrorIfIllegalPartitioningInTenantSchema(parentRelationId,
|
||||
partitionRelationId);
|
||||
|
||||
if (!IsCitusTable(parentRelationId))
|
||||
{
|
||||
/*
|
||||
|
@ -612,13 +634,26 @@ DistributePartitionUsingParent(Oid parentCitusRelationId, Oid partitionRelationI
|
|||
{
|
||||
char *parentRelationName = generate_qualified_relation_name(parentCitusRelationId);
|
||||
|
||||
if (!HasDistributionKey(parentCitusRelationId))
|
||||
/*
|
||||
* We can create tenant tables and single shard tables right away, without
|
||||
* switching to sequential mode, because they are going to have only one shard.
|
||||
*/
|
||||
if (ShouldCreateTenantSchemaTable(partitionRelationId))
|
||||
{
|
||||
CreateTenantSchemaTable(partitionRelationId);
|
||||
return;
|
||||
}
|
||||
else if (!HasDistributionKey(parentCitusRelationId))
|
||||
{
|
||||
/*
|
||||
* If the parent is null key distributed, we should distribute the partition
|
||||
* with null distribution key as well.
|
||||
*/
|
||||
CreateSingleShardTable(partitionRelationId, parentRelationName);
|
||||
ColocationParam colocationParam = {
|
||||
.colocationParamType = COLOCATE_WITH_TABLE_LIKE_OPT,
|
||||
.colocateWithTableName = parentRelationName,
|
||||
};
|
||||
CreateSingleShardTable(partitionRelationId, colocationParam);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -4056,3 +4091,84 @@ ErrorIfTableHasIdentityColumn(Oid relationId)
|
|||
|
||||
relation_close(relation, NoLock);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* ConvertNewTableIfNecessary converts the given table to a tenant schema
|
||||
* table or a Citus managed table if necessary.
|
||||
*
|
||||
* Input node is expected to be a CreateStmt or a CreateTableAsStmt.
|
||||
*/
|
||||
void
|
||||
ConvertNewTableIfNecessary(Node *createStmt)
|
||||
{
|
||||
/*
|
||||
* Need to increment command counter so that next command
|
||||
* can see the new table.
|
||||
*/
|
||||
CommandCounterIncrement();
|
||||
|
||||
if (IsA(createStmt, CreateTableAsStmt))
|
||||
{
|
||||
CreateTableAsStmt *createTableAsStmt = (CreateTableAsStmt *) createStmt;
|
||||
|
||||
bool missingOk = false;
|
||||
Oid createdRelationId = RangeVarGetRelid(createTableAsStmt->into->rel,
|
||||
NoLock, missingOk);
|
||||
|
||||
if (ShouldCreateTenantSchemaTable(createdRelationId))
|
||||
{
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot create a tenant table using "
|
||||
"CREATE TABLE AS or SELECT INTO "
|
||||
"statements")));
|
||||
}
|
||||
|
||||
/*
|
||||
* We simply ignore the tables created by using that syntax when using
|
||||
* Citus managed tables.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
CreateStmt *baseCreateTableStmt = (CreateStmt *) createStmt;
|
||||
|
||||
bool missingOk = false;
|
||||
Oid createdRelationId = RangeVarGetRelid(baseCreateTableStmt->relation,
|
||||
NoLock, missingOk);
|
||||
|
||||
/* don't try to convert the table if it already exists and IF NOT EXISTS syntax is used */
|
||||
if (baseCreateTableStmt->if_not_exists && IsCitusTable(createdRelationId))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check ShouldCreateTenantSchemaTable() before ShouldAddNewTableToMetadata()
|
||||
* because we don't want to unnecessarily add the table into metadata
|
||||
* (as a Citus managed table) before distributing it as a tenant table.
|
||||
*/
|
||||
if (ShouldCreateTenantSchemaTable(createdRelationId))
|
||||
{
|
||||
/*
|
||||
* We skip creating tenant schema table if the table is a partition
|
||||
* table because in that case PostprocessCreateTableStmt() should've
|
||||
* already created a tenant schema table from the partition table.
|
||||
*/
|
||||
if (!PartitionTable(createdRelationId))
|
||||
{
|
||||
CreateTenantSchemaTable(createdRelationId);
|
||||
}
|
||||
}
|
||||
else if (ShouldAddNewTableToMetadata(createdRelationId))
|
||||
{
|
||||
/*
|
||||
* Here we set autoConverted to false, since the user explicitly
|
||||
* wants these tables to be added to metadata, by setting the
|
||||
* GUC use_citus_managed_tables to true.
|
||||
*/
|
||||
bool autoConverted = false;
|
||||
bool cascade = true;
|
||||
CreateCitusLocalTable(createdRelationId, cascade, autoConverted);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -116,7 +116,6 @@ static void PostStandardProcessUtility(Node *parsetree);
|
|||
static void DecrementUtilityHookCountersIfNecessary(Node *parsetree);
|
||||
static bool IsDropSchemaOrDB(Node *parsetree);
|
||||
static bool ShouldCheckUndistributeCitusLocalTables(void);
|
||||
static bool ShouldAddNewTableToMetadata(Node *parsetree);
|
||||
|
||||
/*
|
||||
* ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of
|
||||
|
@ -344,26 +343,32 @@ multi_ProcessUtility(PlannedStmt *pstmt,
|
|||
}
|
||||
ResetConstraintDropped();
|
||||
|
||||
/*
|
||||
* We're only interested in top-level CREATE TABLE commands
|
||||
* to create a tenant schema table or a Citus managed table.
|
||||
*/
|
||||
if (context == PROCESS_UTILITY_TOPLEVEL &&
|
||||
ShouldAddNewTableToMetadata(parsetree))
|
||||
(IsA(parsetree, CreateStmt) ||
|
||||
IsA(parsetree, CreateForeignTableStmt) ||
|
||||
IsA(parsetree, CreateTableAsStmt)))
|
||||
{
|
||||
/*
|
||||
* Here we need to increment command counter so that next command
|
||||
* can see the new table.
|
||||
*/
|
||||
CommandCounterIncrement();
|
||||
CreateStmt *createTableStmt = (CreateStmt *) parsetree;
|
||||
Oid relationId = RangeVarGetRelid(createTableStmt->relation,
|
||||
NoLock, false);
|
||||
Node *createStmt = NULL;
|
||||
if (IsA(parsetree, CreateTableAsStmt))
|
||||
{
|
||||
createStmt = parsetree;
|
||||
}
|
||||
else
|
||||
{
|
||||
/*
|
||||
* Not directly cast to CreateStmt to guard against the case where
|
||||
* the definition of CreateForeignTableStmt changes in future.
|
||||
*/
|
||||
createStmt =
|
||||
IsA(parsetree, CreateStmt) ? parsetree :
|
||||
(Node *) &(((CreateForeignTableStmt *) parsetree)->base);
|
||||
}
|
||||
|
||||
/*
|
||||
* Here we set autoConverted to false, since the user explicitly
|
||||
* wants these tables to be added to metadata, by setting the
|
||||
* GUC use_citus_managed_tables to true.
|
||||
*/
|
||||
bool autoConverted = false;
|
||||
bool cascade = true;
|
||||
CreateCitusLocalTable(relationId, cascade, autoConverted);
|
||||
ConvertNewTableIfNecessary(createStmt);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1060,60 +1065,6 @@ ShouldCheckUndistributeCitusLocalTables(void)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* ShouldAddNewTableToMetadata takes a Node* and returns true if we need to add a
|
||||
* newly created table to metadata, false otherwise.
|
||||
* This function checks whether the given Node* is a CREATE TABLE statement.
|
||||
* For partitions and temporary tables, ShouldAddNewTableToMetadata returns false.
|
||||
* For other tables created, returns true, if we are on a coordinator that is added
|
||||
* as worker, and ofcourse, if the GUC use_citus_managed_tables is set to on.
|
||||
*/
|
||||
static bool
|
||||
ShouldAddNewTableToMetadata(Node *parsetree)
|
||||
{
|
||||
CreateStmt *createTableStmt;
|
||||
|
||||
if (IsA(parsetree, CreateStmt))
|
||||
{
|
||||
createTableStmt = (CreateStmt *) parsetree;
|
||||
}
|
||||
else if (IsA(parsetree, CreateForeignTableStmt))
|
||||
{
|
||||
CreateForeignTableStmt *createForeignTableStmt =
|
||||
(CreateForeignTableStmt *) parsetree;
|
||||
createTableStmt = (CreateStmt *) &(createForeignTableStmt->base);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* if the command is not CREATE [FOREIGN] TABLE, we can early return false */
|
||||
return false;
|
||||
}
|
||||
|
||||
if (createTableStmt->relation->relpersistence == RELPERSISTENCE_TEMP ||
|
||||
createTableStmt->partbound != NULL)
|
||||
{
|
||||
/*
|
||||
* Shouldn't add table to metadata if it's a temp table, or a partition.
|
||||
* Creating partitions of a table that is added to metadata is already handled.
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
|
||||
if (AddAllLocalTablesToMetadata && !IsBinaryUpgrade &&
|
||||
IsCoordinator() && CoordinatorAddedAsWorkerNode())
|
||||
{
|
||||
/*
|
||||
* We have verified that the GUC is set to true, and we are not upgrading,
|
||||
* and we are on the coordinator that is added as worker node.
|
||||
* So return true here, to add this newly created table to metadata.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* NotifyUtilityHookConstraintDropped sets ConstraintDropped to true to tell us
|
||||
* last command dropped a table constraint.
|
||||
|
|
|
@ -958,7 +958,7 @@ ResetShardPlacementAssociation(struct MultiConnection *connection)
|
|||
|
||||
|
||||
/*
|
||||
* ResetPlacementConnectionManagement() disassociates connections from
|
||||
* ResetPlacementConnectionManagement() dissociates connections from
|
||||
* placements and shards. This will be called at the end of XACT_EVENT_COMMIT
|
||||
* and XACT_EVENT_ABORT.
|
||||
*/
|
||||
|
|
|
@ -178,6 +178,7 @@ typedef struct MetadataCacheData
|
|||
Oid distColocationRelationId;
|
||||
Oid distColocationConfigurationIndexId;
|
||||
Oid distPartitionRelationId;
|
||||
Oid distTenantSchemaRelationId;
|
||||
Oid distPartitionLogicalRelidIndexId;
|
||||
Oid distPartitionColocationidIndexId;
|
||||
Oid distShardLogicalRelidIndexId;
|
||||
|
@ -188,6 +189,8 @@ typedef struct MetadataCacheData
|
|||
Oid distPlacementGroupidIndexId;
|
||||
Oid distTransactionRelationId;
|
||||
Oid distTransactionGroupIndexId;
|
||||
Oid distTenantSchemaPrimaryKeyIndexId;
|
||||
Oid distTenantSchemaUniqueColocationIdIndexId;
|
||||
Oid citusCatalogNamespaceId;
|
||||
Oid copyFormatTypeId;
|
||||
Oid readIntermediateResultFuncId;
|
||||
|
@ -2843,6 +2846,39 @@ DistColocationConfigurationIndexId(void)
|
|||
}
|
||||
|
||||
|
||||
/* return oid of pg_dist_tenant_schema relation */
|
||||
Oid
|
||||
DistTenantSchemaRelationId(void)
|
||||
{
|
||||
CachedRelationLookup("pg_dist_tenant_schema",
|
||||
&MetadataCache.distTenantSchemaRelationId);
|
||||
|
||||
return MetadataCache.distTenantSchemaRelationId;
|
||||
}
|
||||
|
||||
|
||||
/* return oid of pg_dist_tenant_schema_pkey index */
|
||||
Oid
|
||||
DistTenantSchemaPrimaryKeyIndexId(void)
|
||||
{
|
||||
CachedRelationLookup("pg_dist_tenant_schema_pkey",
|
||||
&MetadataCache.distTenantSchemaPrimaryKeyIndexId);
|
||||
|
||||
return MetadataCache.distTenantSchemaPrimaryKeyIndexId;
|
||||
}
|
||||
|
||||
|
||||
/* return oid of pg_dist_tenant_schema_unique_colocationid_index index */
|
||||
Oid
|
||||
DistTenantSchemaUniqueColocationIdIndexId(void)
|
||||
{
|
||||
CachedRelationLookup("pg_dist_tenant_schema_unique_colocationid_index",
|
||||
&MetadataCache.distTenantSchemaUniqueColocationIdIndexId);
|
||||
|
||||
return MetadataCache.distTenantSchemaUniqueColocationIdIndexId;
|
||||
}
|
||||
|
||||
|
||||
/* return oid of pg_dist_partition relation */
|
||||
Oid
|
||||
DistPartitionRelationId(void)
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#include "distributed/backend_data.h"
|
||||
#include "distributed/citus_ruleutils.h"
|
||||
#include "distributed/colocation_utils.h"
|
||||
#include "distributed/tenant_schema_metadata.h"
|
||||
#include "distributed/commands.h"
|
||||
#include "distributed/deparser.h"
|
||||
#include "distributed/distribution_column.h"
|
||||
|
@ -60,6 +61,7 @@
|
|||
#include "distributed/pg_dist_colocation.h"
|
||||
#include "distributed/pg_dist_node.h"
|
||||
#include "distributed/pg_dist_shard.h"
|
||||
#include "distributed/pg_dist_tenant_schema.h"
|
||||
#include "distributed/relation_access_tracking.h"
|
||||
#include "distributed/remote_commands.h"
|
||||
#include "distributed/resource_lock.h"
|
||||
|
@ -144,6 +146,8 @@ static char * ColocationGroupCreateCommand(uint32 colocationId, int shardCount,
|
|||
Oid distributionColumnType,
|
||||
Oid distributionColumnCollation);
|
||||
static char * ColocationGroupDeleteCommand(uint32 colocationId);
|
||||
static char * RemoteSchemaIdExpressionById(Oid schemaId);
|
||||
static char * RemoteSchemaIdExpressionByName(char *schemaName);
|
||||
static char * RemoteTypeIdExpression(Oid typeId);
|
||||
static char * RemoteCollationIdExpression(Oid colocationId);
|
||||
|
||||
|
@ -170,6 +174,8 @@ PG_FUNCTION_INFO_V1(citus_internal_update_relation_colocation);
|
|||
PG_FUNCTION_INFO_V1(citus_internal_add_object_metadata);
|
||||
PG_FUNCTION_INFO_V1(citus_internal_add_colocation_metadata);
|
||||
PG_FUNCTION_INFO_V1(citus_internal_delete_colocation_metadata);
|
||||
PG_FUNCTION_INFO_V1(citus_internal_add_tenant_schema);
|
||||
PG_FUNCTION_INFO_V1(citus_internal_delete_tenant_schema);
|
||||
|
||||
|
||||
static bool got_SIGTERM = false;
|
||||
|
@ -3788,6 +3794,52 @@ citus_internal_delete_colocation_metadata(PG_FUNCTION_ARGS)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* citus_internal_add_tenant_schema is an internal UDF to
|
||||
* call InsertTenantSchemaLocally on a remote node.
|
||||
*
|
||||
* None of the parameters are allowed to be NULL. To set the colocation
|
||||
* id to NULL in metadata, use INVALID_COLOCATION_ID.
|
||||
*/
|
||||
Datum
|
||||
citus_internal_add_tenant_schema(PG_FUNCTION_ARGS)
|
||||
{
|
||||
CheckCitusVersion(ERROR);
|
||||
|
||||
PG_ENSURE_ARGNOTNULL(0, "schema_id");
|
||||
Oid schemaId = PG_GETARG_OID(0);
|
||||
|
||||
PG_ENSURE_ARGNOTNULL(1, "colocation_id");
|
||||
uint32 colocationId = PG_GETARG_INT32(1);
|
||||
|
||||
InsertTenantSchemaLocally(schemaId, colocationId);
|
||||
|
||||
PG_RETURN_VOID();
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* citus_internal_delete_tenant_schema is an internal UDF to
|
||||
* call DeleteTenantSchemaLocally on a remote node.
|
||||
*
|
||||
* The schemaId parameter is not allowed to be NULL. Moreover, the input schema is
|
||||
* expected to be dropped already because this function is called from Citus
|
||||
* drop hook and only used to clean up metadata after the schema is dropped.
|
||||
*/
|
||||
Datum
|
||||
citus_internal_delete_tenant_schema(PG_FUNCTION_ARGS)
|
||||
{
|
||||
CheckCitusVersion(ERROR);
|
||||
|
||||
PG_ENSURE_ARGNOTNULL(0, "schema_id");
|
||||
Oid schemaId = PG_GETARG_OID(0);
|
||||
|
||||
DeleteTenantSchemaLocally(schemaId);
|
||||
|
||||
PG_RETURN_VOID();
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* SyncNewColocationGroup synchronizes a new pg_dist_colocation entry to a worker.
|
||||
*/
|
||||
|
@ -3937,6 +3989,72 @@ ColocationGroupDeleteCommand(uint32 colocationId)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* TenantSchemaInsertCommand returns a command to call
|
||||
* citus_internal_add_tenant_schema().
|
||||
*/
|
||||
char *
|
||||
TenantSchemaInsertCommand(Oid schemaId, uint32 colocationId)
|
||||
{
|
||||
StringInfo command = makeStringInfo();
|
||||
appendStringInfo(command,
|
||||
"SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)",
|
||||
RemoteSchemaIdExpressionById(schemaId), colocationId);
|
||||
|
||||
return command->data;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* TenantSchemaDeleteCommand returns a command to call
|
||||
* citus_internal_delete_tenant_schema().
|
||||
*/
|
||||
char *
|
||||
TenantSchemaDeleteCommand(char *schemaName)
|
||||
{
|
||||
StringInfo command = makeStringInfo();
|
||||
appendStringInfo(command,
|
||||
"SELECT pg_catalog.citus_internal_delete_tenant_schema(%s)",
|
||||
RemoteSchemaIdExpressionByName(schemaName));
|
||||
|
||||
return command->data;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* RemoteSchemaIdExpressionById returns an expression in text form that
|
||||
* can be used to obtain the OID of the schema with given schema id on a
|
||||
* different node when included in a query string.
|
||||
*/
|
||||
static char *
|
||||
RemoteSchemaIdExpressionById(Oid schemaId)
|
||||
{
|
||||
char *schemaName = get_namespace_name(schemaId);
|
||||
if (schemaName == NULL)
|
||||
{
|
||||
ereport(ERROR, (errmsg("schema with OID %u does not exist", schemaId)));
|
||||
}
|
||||
|
||||
return RemoteSchemaIdExpressionByName(schemaName);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* RemoteSchemaIdExpressionByName returns an expression in text form that
|
||||
* can be used to obtain the OID of the schema with given schema name on a
|
||||
* different node when included in a query string.
|
||||
*/
|
||||
static char *
|
||||
RemoteSchemaIdExpressionByName(char *schemaName)
|
||||
{
|
||||
StringInfo regnamespaceExpr = makeStringInfo();
|
||||
appendStringInfo(regnamespaceExpr, "%s::regnamespace",
|
||||
quote_literal_cstr(quote_identifier(schemaName)));
|
||||
|
||||
return regnamespaceExpr->data;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* SetMetadataSyncNodesFromNodeList sets list of nodes that needs to be metadata
|
||||
* synced among given node list into metadataSyncContext.
|
||||
|
@ -4331,6 +4449,14 @@ SyncDistributedObjects(MetadataSyncContext *context)
|
|||
SendDistTableMetadataCommands(context);
|
||||
SendDistObjectCommands(context);
|
||||
|
||||
/*
|
||||
* Commands to insert pg_dist_tenant_schema entries.
|
||||
*
|
||||
* Need to be done after syncing distributed objects because the schemas
|
||||
* need to exist on the worker.
|
||||
*/
|
||||
SendTenantSchemaMetadataCommands(context);
|
||||
|
||||
/*
|
||||
* After creating each table, handle the inter table relationship between
|
||||
* those tables.
|
||||
|
@ -4403,6 +4529,10 @@ SendMetadataDeletionCommands(MetadataSyncContext *context)
|
|||
|
||||
/* remove pg_dist_colocation entries */
|
||||
SendOrCollectCommandListToActivatedNodes(context, list_make1(DELETE_ALL_COLOCATION));
|
||||
|
||||
/* remove pg_dist_tenant_schema entries */
|
||||
SendOrCollectCommandListToActivatedNodes(context,
|
||||
list_make1(DELETE_ALL_TENANT_SCHEMAS));
|
||||
}
|
||||
|
||||
|
||||
|
@ -4502,6 +4632,53 @@ SendColocationMetadataCommands(MetadataSyncContext *context)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* SendTenantSchemaMetadataCommands sends tenant schema metadata entries with
|
||||
* transactional or nontransactional mode according to transactionMode inside
|
||||
* metadataSyncContext.
|
||||
*/
|
||||
void
|
||||
SendTenantSchemaMetadataCommands(MetadataSyncContext *context)
|
||||
{
|
||||
ScanKeyData scanKey[1];
|
||||
int scanKeyCount = 0;
|
||||
|
||||
Relation pgDistTenantSchema = table_open(DistTenantSchemaRelationId(),
|
||||
AccessShareLock);
|
||||
SysScanDesc scanDesc = systable_beginscan(pgDistTenantSchema, InvalidOid, false, NULL,
|
||||
scanKeyCount, scanKey);
|
||||
|
||||
MemoryContext oldContext = MemoryContextSwitchTo(context->context);
|
||||
HeapTuple heapTuple = NULL;
|
||||
while (true)
|
||||
{
|
||||
ResetMetadataSyncMemoryContext(context);
|
||||
|
||||
heapTuple = systable_getnext(scanDesc);
|
||||
if (!HeapTupleIsValid(heapTuple))
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
Form_pg_dist_tenant_schema tenantSchemaForm =
|
||||
(Form_pg_dist_tenant_schema) GETSTRUCT(heapTuple);
|
||||
|
||||
StringInfo insertTenantSchemaCommand = makeStringInfo();
|
||||
appendStringInfo(insertTenantSchemaCommand,
|
||||
"SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)",
|
||||
RemoteSchemaIdExpressionById(tenantSchemaForm->schemaid),
|
||||
tenantSchemaForm->colocationid);
|
||||
|
||||
List *commandList = list_make1(insertTenantSchemaCommand->data);
|
||||
SendOrCollectCommandListToActivatedNodes(context, commandList);
|
||||
}
|
||||
MemoryContextSwitchTo(oldContext);
|
||||
|
||||
systable_endscan(scanDesc);
|
||||
table_close(pgDistTenantSchema, AccessShareLock);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* SendDependencyCreationCommands sends dependency creation commands to workers
|
||||
* with transactional or nontransactional mode according to transactionMode
|
||||
|
|
|
@ -1381,6 +1381,20 @@ RegisterCitusConfigVariables(void)
|
|||
GUC_NO_SHOW_ALL,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
DefineCustomBoolVariable(
|
||||
"citus.enable_schema_based_sharding",
|
||||
gettext_noop("Enables schema based sharding."),
|
||||
gettext_noop("The schemas created while this is ON will be automatically "
|
||||
"associated with individual colocation groups such that the "
|
||||
"tables created in those schemas will be automatically "
|
||||
"converted to colocated distributed tables without a shard "
|
||||
"key."),
|
||||
&EnableSchemaBasedSharding,
|
||||
false,
|
||||
PGC_USERSET,
|
||||
GUC_STANDARD,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
DefineCustomBoolVariable(
|
||||
"citus.enable_single_hash_repartition_joins",
|
||||
gettext_noop("Enables single hash repartitioning between hash "
|
||||
|
|
|
@ -1,3 +1,25 @@
|
|||
-- citus--11.3-1--12.0-1
|
||||
|
||||
-- bump version to 12.0-1
|
||||
|
||||
CREATE TABLE citus.pg_dist_tenant_schema (
|
||||
schemaid oid NOT NULL,
|
||||
colocationid int NOT NULL,
|
||||
CONSTRAINT pg_dist_tenant_schema_pkey PRIMARY KEY (schemaid),
|
||||
CONSTRAINT pg_dist_tenant_schema_unique_colocationid_index UNIQUE (colocationid)
|
||||
);
|
||||
|
||||
ALTER TABLE citus.pg_dist_tenant_schema SET SCHEMA pg_catalog;
|
||||
|
||||
GRANT SELECT ON pg_catalog.pg_dist_tenant_schema TO public;
|
||||
|
||||
-- udfs used to modify pg_dist_tenant_schema on workers, to sync metadata
|
||||
#include "udfs/citus_internal_add_tenant_schema/12.0-1.sql"
|
||||
#include "udfs/citus_internal_delete_tenant_schema/12.0-1.sql"
|
||||
|
||||
#include "udfs/citus_prepare_pg_upgrade/12.0-1.sql"
|
||||
#include "udfs/citus_finish_pg_upgrade/12.0-1.sql"
|
||||
|
||||
-- udfs used to modify pg_dist_tenant_schema globally via drop trigger
|
||||
#include "udfs/citus_internal_unregister_tenant_schema_globally/12.0-1.sql"
|
||||
#include "udfs/citus_drop_trigger/12.0-1.sql"
|
||||
|
|
|
@ -1,8 +1,18 @@
|
|||
-- citus--12.0-1--11.3-1
|
||||
|
||||
-- Throw an error if user has any distributed tables without a shard key.
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Throw an error if user has created any tenant schemas.
|
||||
IF EXISTS (SELECT 1 FROM pg_catalog.pg_dist_tenant_schema)
|
||||
THEN
|
||||
RAISE EXCEPTION 'cannot downgrade Citus because there are '
|
||||
'tenant schemas created.'
|
||||
USING HINT = 'To downgrade Citus to an older version, you should '
|
||||
'first issue SELECT citus.schema_tenant_unset("%s") '
|
||||
'for each tenant schema.';
|
||||
END IF;
|
||||
|
||||
-- Throw an error if user has any distributed tables without a shard key.
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM pg_dist_partition
|
||||
WHERE repmodel != 't' AND partmethod = 'n' AND colocationid != 0)
|
||||
|
@ -19,3 +29,15 @@ BEGIN
|
|||
END IF;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
DROP FUNCTION pg_catalog.citus_internal_add_tenant_schema(Oid, int);
|
||||
|
||||
#include "../udfs/citus_prepare_pg_upgrade/11.2-1.sql"
|
||||
#include "../udfs/citus_finish_pg_upgrade/11.2-1.sql"
|
||||
|
||||
DROP FUNCTION pg_catalog.citus_internal_delete_tenant_schema(Oid);
|
||||
DROP FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(Oid, text);
|
||||
|
||||
#include "../udfs/citus_drop_trigger/10.2-1.sql"
|
||||
|
||||
DROP TABLE pg_catalog.pg_dist_tenant_schema;
|
||||
|
|
|
@@ -0,0 +1,68 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger()
    RETURNS event_trigger
    LANGUAGE plpgsql
    SET search_path = pg_catalog
    AS $cdbdt$
DECLARE
    constraint_event_count INTEGER;
    v_obj record;
    dropped_table_is_a_partition boolean := false;
BEGIN
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
                 WHERE object_type IN ('table', 'foreign table')
    LOOP
        -- first drop the table and metadata on the workers
        -- then drop all the shards on the workers
        -- finally remove the pg_dist_partition entry on the coordinator
        PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name);

        -- If both original and normal values are false, the dropped table was a partition
        -- that was dropped as a result of its parent being dropped
        -- NOTE: the other way around is not true:
        -- the table being a partition doesn't imply both original and normal values are false
        SELECT (v_obj.original = false AND v_obj.normal = false) INTO dropped_table_is_a_partition;

        -- The partition's shards will be dropped when dropping the parent's shards, so we can skip:
        -- i.e. we call citus_drop_all_shards with drop_shards_metadata_only parameter set to true
        IF dropped_table_is_a_partition
        THEN
            PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := true);
        ELSE
            PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false);
        END IF;

        PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name);
    END LOOP;

    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
    LOOP
        -- Remove entries from pg_catalog.pg_dist_tenant_schema for all dropped tenant schemas.
        -- Also delete the corresponding colocation group from pg_catalog.pg_dist_colocation.
        --
        -- Although normally we automatically delete the colocation groups when they become empty,
        -- we don't do so for the colocation groups that are created for tenant schemas. For this
        -- reason, here we need to delete the colocation group when the tenant schema is dropped.
        IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_tenant_schema WHERE schemaid = v_obj.objid)
        THEN
            PERFORM pg_catalog.citus_internal_unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name);
        END IF;

        -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects
        PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid);
    END LOOP;

    SELECT COUNT(*) INTO constraint_event_count
    FROM pg_event_trigger_dropped_objects()
    WHERE object_type IN ('table constraint');

    IF constraint_event_count > 0
    THEN
        -- Tell utility hook that a table constraint is dropped so we might
        -- need to undistribute some of the citus local tables that are not
        -- connected to any reference tables.
        PERFORM notify_constraint_dropped();
    END IF;
END;
$cdbdt$;
COMMENT ON FUNCTION pg_catalog.citus_drop_trigger()
    IS 'perform checks and actions at the end of DROP actions';
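A minimal sketch of the tenant-schema cleanup path this trigger implements; the schema and table names below are made up for illustration:

```sql
SET citus.enable_schema_based_sharding TO ON;
CREATE SCHEMA tenant_s2;
CREATE TABLE tenant_s2.users (id int);   -- becomes a tenant table in the schema's colocation group
DROP SCHEMA tenant_s2 CASCADE;
-- the drop trigger calls citus_internal_unregister_tenant_schema_globally(), so neither
-- the schema nor its colocation group is left behind in the metadata tables
SELECT count(*) FROM pg_catalog.pg_dist_tenant_schema
WHERE schemaid::regnamespace::text = 'tenant_s2';   -- expected: 0
```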
@@ -34,9 +34,20 @@ BEGIN
        PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name);
    END LOOP;

    -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects
    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
    LOOP
        -- Remove entries from pg_catalog.pg_dist_tenant_schema for all dropped tenant schemas.
        -- Also delete the corresponding colocation group from pg_catalog.pg_dist_colocation.
        --
        -- Although normally we automatically delete the colocation groups when they become empty,
        -- we don't do so for the colocation groups that are created for tenant schemas. For this
        -- reason, here we need to delete the colocation group when the tenant schema is dropped.
        IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_tenant_schema WHERE schemaid = v_obj.objid)
        THEN
            PERFORM pg_catalog.citus_internal_unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name);
        END IF;

        -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects
        PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid);
    END LOOP;

@ -0,0 +1,160 @@
|
|||
CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade()
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql
|
||||
SET search_path = pg_catalog
|
||||
AS $cppu$
|
||||
DECLARE
|
||||
table_name regclass;
|
||||
command text;
|
||||
trigger_name text;
|
||||
BEGIN
|
||||
|
||||
|
||||
IF substring(current_Setting('server_version'), '\d+')::int >= 14 THEN
|
||||
EXECUTE $cmd$
|
||||
-- disable propagation to prevent EnsureCoordinator errors
|
||||
-- the aggregate created here does not depend on Citus extension (yet)
|
||||
-- since we add the dependency with the next command
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE AGGREGATE array_cat_agg(anycompatiblearray) (SFUNC = array_cat, STYPE = anycompatiblearray);
|
||||
COMMENT ON AGGREGATE array_cat_agg(anycompatiblearray)
|
||||
IS 'concatenate input arrays into a single array';
|
||||
RESET citus.enable_ddl_propagation;
|
||||
$cmd$;
|
||||
ELSE
|
||||
EXECUTE $cmd$
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray);
|
||||
COMMENT ON AGGREGATE array_cat_agg(anyarray)
|
||||
IS 'concatenate input arrays into a single array';
|
||||
RESET citus.enable_ddl_propagation;
|
||||
$cmd$;
|
||||
END IF;
|
||||
|
||||
--
|
||||
-- Citus creates the array_cat_agg but because of a compatibility
|
||||
-- issue between pg13-pg14, we drop and create it during upgrade.
|
||||
-- And as Citus creates it, there needs to be a dependency to the
|
||||
-- Citus extension, so we create that dependency here.
|
||||
-- We are not using:
|
||||
-- ALTER EXTENSION citus DROP/CREATE AGGREGATE array_cat_agg
|
||||
-- because we don't have an easy way to check if the aggregate
|
||||
-- exists with anyarray type or anycompatiblearray type.
|
||||
|
||||
INSERT INTO pg_depend
|
||||
SELECT
|
||||
'pg_proc'::regclass::oid as classid,
|
||||
(SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') as objid,
|
||||
0 as objsubid,
|
||||
'pg_extension'::regclass::oid as refclassid,
|
||||
(select oid from pg_extension where extname = 'citus') as refobjid,
|
||||
0 as refobjsubid ,
|
||||
'e' as deptype;
|
||||
|
||||
--
|
||||
-- restore citus catalog tables
|
||||
--
|
||||
INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
|
||||
INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
|
||||
INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
|
||||
INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;
|
||||
INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node;
|
||||
INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group;
|
||||
INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction;
|
||||
INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation;
|
||||
INSERT INTO pg_catalog.pg_dist_cleanup SELECT * FROM public.pg_dist_cleanup;
|
||||
INSERT INTO pg_catalog.pg_dist_tenant_schema SELECT schemaname::regnamespace, colocationid FROM public.pg_dist_tenant_schema;
|
||||
-- enterprise catalog tables
|
||||
INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo;
|
||||
INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo;
|
||||
|
||||
INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT
|
||||
name,
|
||||
default_strategy,
|
||||
shard_cost_function::regprocedure::regproc,
|
||||
node_capacity_function::regprocedure::regproc,
|
||||
shard_allowed_on_node_function::regprocedure::regproc,
|
||||
default_threshold,
|
||||
minimum_threshold,
|
||||
improvement_threshold
|
||||
FROM public.pg_dist_rebalance_strategy;
|
||||
|
||||
--
|
||||
-- drop backup tables
|
||||
--
|
||||
DROP TABLE public.pg_dist_authinfo;
|
||||
DROP TABLE public.pg_dist_colocation;
|
||||
DROP TABLE public.pg_dist_local_group;
|
||||
DROP TABLE public.pg_dist_node;
|
||||
DROP TABLE public.pg_dist_node_metadata;
|
||||
DROP TABLE public.pg_dist_partition;
|
||||
DROP TABLE public.pg_dist_placement;
|
||||
DROP TABLE public.pg_dist_poolinfo;
|
||||
DROP TABLE public.pg_dist_shard;
|
||||
DROP TABLE public.pg_dist_transaction;
|
||||
DROP TABLE public.pg_dist_rebalance_strategy;
|
||||
DROP TABLE public.pg_dist_cleanup;
|
||||
DROP TABLE public.pg_dist_tenant_schema;
|
||||
--
|
||||
-- reset sequences
|
||||
--
|
||||
PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_operationid_seq', (SELECT MAX(operation_id)+1 AS max_operation_id FROM pg_dist_cleanup), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_cleanup_recordid_seq', (SELECT MAX(record_id)+1 AS max_record_id FROM pg_dist_cleanup), false);
|
||||
PERFORM setval('pg_catalog.pg_dist_clock_logical_seq', (SELECT last_value FROM public.pg_dist_clock_logical_seq), false);
|
||||
DROP TABLE public.pg_dist_clock_logical_seq;
|
||||
|
||||
|
||||
|
||||
--
|
||||
-- register triggers
|
||||
--
|
||||
FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition JOIN pg_class ON (logicalrelid = oid) WHERE relkind <> 'f'
|
||||
LOOP
|
||||
trigger_name := 'truncate_trigger_' || table_name::oid;
|
||||
command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()';
|
||||
EXECUTE command;
|
||||
command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name);
|
||||
EXECUTE command;
|
||||
END LOOP;
|
||||
|
||||
--
|
||||
-- set dependencies
|
||||
--
|
||||
INSERT INTO pg_depend
|
||||
SELECT
|
||||
'pg_class'::regclass::oid as classid,
|
||||
p.logicalrelid::regclass::oid as objid,
|
||||
0 as objsubid,
|
||||
'pg_extension'::regclass::oid as refclassid,
|
||||
(select oid from pg_extension where extname = 'citus') as refobjid,
|
||||
0 as refobjsubid ,
|
||||
'n' as deptype
|
||||
FROM pg_catalog.pg_dist_partition p;
|
||||
|
||||
-- set dependencies for columnar table access method
|
||||
PERFORM columnar_internal.columnar_ensure_am_depends_catalog();
|
||||
|
||||
-- restore pg_dist_object from the stable identifiers
|
||||
TRUNCATE pg_catalog.pg_dist_object;
|
||||
INSERT INTO pg_catalog.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid)
|
||||
SELECT
|
||||
address.classid,
|
||||
address.objid,
|
||||
address.objsubid,
|
||||
naming.distribution_argument_index,
|
||||
naming.colocationid
|
||||
FROM
|
||||
public.pg_dist_object naming,
|
||||
pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;
|
||||
|
||||
DROP TABLE public.pg_dist_object;
|
||||
END;
|
||||
$cppu$;
|
||||
|
||||
COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade()
|
||||
IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade';
|
|
@@ -63,6 +63,7 @@ BEGIN
    INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction;
    INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation;
    INSERT INTO pg_catalog.pg_dist_cleanup SELECT * FROM public.pg_dist_cleanup;
    INSERT INTO pg_catalog.pg_dist_tenant_schema SELECT schemaname::regnamespace, colocationid FROM public.pg_dist_tenant_schema;
    -- enterprise catalog tables
    INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo;
    INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo;

@@ -93,6 +94,7 @@ BEGIN
    DROP TABLE public.pg_dist_transaction;
    DROP TABLE public.pg_dist_rebalance_strategy;
    DROP TABLE public.pg_dist_cleanup;
    DROP TABLE public.pg_dist_tenant_schema;
    --
    -- reset sequences
    --
src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.0-1.sql (generated, new file)
@@ -0,0 +1,8 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_add_tenant_schema(Oid, int) IS
    'insert given tenant schema into pg_dist_tenant_schema with given colocation id';
src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.0-1.sql (generated, new file)
@@ -0,0 +1,8 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_delete_tenant_schema(Oid) IS
    'delete given tenant schema from pg_dist_tenant_schema';
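These citus_internal_* functions are called by Citus itself (from metadata sync and from the drop trigger above), not by end users. The statements below only illustrate the argument shapes implied by the signatures; the schema name and colocation id are placeholders:

```sql
-- illustration of the argument shapes only; not meant to be run manually
SELECT pg_catalog.citus_internal_add_tenant_schema('tenant_s1'::regnamespace, 90);
SELECT pg_catalog.citus_internal_delete_tenant_schema('tenant_s1'::regnamespace);
```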
src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.0-1.sql (generated, new file)
@@ -0,0 +1,7 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text)
    RETURNS void
    LANGUAGE C
    VOLATILE
    AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS
    'Delete a tenant schema and the corresponding colocation group from metadata tables.';
@ -0,0 +1,82 @@
|
|||
CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql
|
||||
SET search_path = pg_catalog
|
||||
AS $cppu$
|
||||
BEGIN
|
||||
|
||||
DELETE FROM pg_depend WHERE
|
||||
objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
|
||||
refobjid IN (select oid from pg_extension where extname = 'citus');
|
||||
--
|
||||
-- We are dropping the aggregates because postgres 14 changed
|
||||
-- array_cat type from anyarray to anycompatiblearray. When
|
||||
-- upgrading to pg14, specifically when running pg_restore on
|
||||
-- array_cat_agg we would get an error. So we drop the aggregate
|
||||
-- and create the right one on citus_finish_pg_upgrade.
|
||||
|
||||
DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
|
||||
DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
|
||||
--
|
||||
-- Drop existing backup tables
|
||||
--
|
||||
DROP TABLE IF EXISTS public.pg_dist_partition;
|
||||
DROP TABLE IF EXISTS public.pg_dist_shard;
|
||||
DROP TABLE IF EXISTS public.pg_dist_placement;
|
||||
DROP TABLE IF EXISTS public.pg_dist_node_metadata;
|
||||
DROP TABLE IF EXISTS public.pg_dist_node;
|
||||
DROP TABLE IF EXISTS public.pg_dist_local_group;
|
||||
DROP TABLE IF EXISTS public.pg_dist_transaction;
|
||||
DROP TABLE IF EXISTS public.pg_dist_colocation;
|
||||
DROP TABLE IF EXISTS public.pg_dist_authinfo;
|
||||
DROP TABLE IF EXISTS public.pg_dist_poolinfo;
|
||||
DROP TABLE IF EXISTS public.pg_dist_rebalance_strategy;
|
||||
DROP TABLE IF EXISTS public.pg_dist_object;
|
||||
DROP TABLE IF EXISTS public.pg_dist_cleanup;
|
||||
DROP TABLE IF EXISTS public.pg_dist_tenant_schema;
|
||||
DROP TABLE IF EXISTS public.pg_dist_clock_logical_seq;
|
||||
|
||||
--
|
||||
-- backup citus catalog tables
|
||||
--
|
||||
CREATE TABLE public.pg_dist_partition AS SELECT * FROM pg_catalog.pg_dist_partition;
|
||||
CREATE TABLE public.pg_dist_shard AS SELECT * FROM pg_catalog.pg_dist_shard;
|
||||
CREATE TABLE public.pg_dist_placement AS SELECT * FROM pg_catalog.pg_dist_placement;
|
||||
CREATE TABLE public.pg_dist_node_metadata AS SELECT * FROM pg_catalog.pg_dist_node_metadata;
|
||||
CREATE TABLE public.pg_dist_node AS SELECT * FROM pg_catalog.pg_dist_node;
|
||||
CREATE TABLE public.pg_dist_local_group AS SELECT * FROM pg_catalog.pg_dist_local_group;
|
||||
CREATE TABLE public.pg_dist_transaction AS SELECT * FROM pg_catalog.pg_dist_transaction;
|
||||
CREATE TABLE public.pg_dist_colocation AS SELECT * FROM pg_catalog.pg_dist_colocation;
|
||||
CREATE TABLE public.pg_dist_cleanup AS SELECT * FROM pg_catalog.pg_dist_cleanup;
|
||||
-- save names of the tenant schemas instead of their oids because the oids might change after pg upgrade
|
||||
CREATE TABLE public.pg_dist_tenant_schema AS SELECT schemaid::regnamespace::text AS schemaname, colocationid FROM pg_catalog.pg_dist_tenant_schema;
|
||||
-- enterprise catalog tables
|
||||
CREATE TABLE public.pg_dist_authinfo AS SELECT * FROM pg_catalog.pg_dist_authinfo;
|
||||
CREATE TABLE public.pg_dist_poolinfo AS SELECT * FROM pg_catalog.pg_dist_poolinfo;
|
||||
-- sequences
|
||||
CREATE TABLE public.pg_dist_clock_logical_seq AS SELECT last_value FROM pg_catalog.pg_dist_clock_logical_seq;
|
||||
CREATE TABLE public.pg_dist_rebalance_strategy AS SELECT
|
||||
name,
|
||||
default_strategy,
|
||||
shard_cost_function::regprocedure::text,
|
||||
node_capacity_function::regprocedure::text,
|
||||
shard_allowed_on_node_function::regprocedure::text,
|
||||
default_threshold,
|
||||
minimum_threshold,
|
||||
improvement_threshold
|
||||
FROM pg_catalog.pg_dist_rebalance_strategy;
|
||||
|
||||
-- store upgrade stable identifiers on pg_dist_object catalog
|
||||
CREATE TABLE public.pg_dist_object AS SELECT
|
||||
address.type,
|
||||
address.object_names,
|
||||
address.object_args,
|
||||
objects.distribution_argument_index,
|
||||
objects.colocationid
|
||||
FROM pg_catalog.pg_dist_object objects,
|
||||
pg_catalog.pg_identify_object_as_address(objects.classid, objects.objid, objects.objsubid) address;
|
||||
END;
|
||||
$cppu$;
|
||||
|
||||
COMMENT ON FUNCTION pg_catalog.citus_prepare_pg_upgrade()
|
||||
IS 'perform tasks to copy citus settings to a location that could later be restored after pg_upgrade is done';
|
|
@@ -33,6 +33,7 @@ BEGIN
    DROP TABLE IF EXISTS public.pg_dist_rebalance_strategy;
    DROP TABLE IF EXISTS public.pg_dist_object;
    DROP TABLE IF EXISTS public.pg_dist_cleanup;
    DROP TABLE IF EXISTS public.pg_dist_tenant_schema;
    DROP TABLE IF EXISTS public.pg_dist_clock_logical_seq;

    --
@@ -47,6 +48,8 @@ BEGIN
    CREATE TABLE public.pg_dist_transaction AS SELECT * FROM pg_catalog.pg_dist_transaction;
    CREATE TABLE public.pg_dist_colocation AS SELECT * FROM pg_catalog.pg_dist_colocation;
    CREATE TABLE public.pg_dist_cleanup AS SELECT * FROM pg_catalog.pg_dist_cleanup;
    -- save names of the tenant schemas instead of their oids because the oids might change after pg upgrade
    CREATE TABLE public.pg_dist_tenant_schema AS SELECT schemaid::regnamespace::text AS schemaname, colocationid FROM pg_catalog.pg_dist_tenant_schema;
    -- enterprise catalog tables
    CREATE TABLE public.pg_dist_authinfo AS SELECT * FROM pg_catalog.pg_dist_authinfo;
    CREATE TABLE public.pg_dist_poolinfo AS SELECT * FROM pg_catalog.pg_dist_poolinfo;
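The backup keeps schemaid::regnamespace::text rather than the raw oid because schema oids are not stable across pg_upgrade. The round trip, matching the CREATE TABLE above and the corresponding INSERT in citus_finish_pg_upgrade():

```sql
-- before upgrade: store the schema name
CREATE TABLE public.pg_dist_tenant_schema AS
    SELECT schemaid::regnamespace::text AS schemaname, colocationid
    FROM pg_catalog.pg_dist_tenant_schema;

-- after upgrade: resolve the name back to the (possibly new) oid
INSERT INTO pg_catalog.pg_dist_tenant_schema
    SELECT schemaname::regnamespace, colocationid
    FROM public.pg_dist_tenant_schema;
```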
@@ -20,6 +20,7 @@
#include "catalog/pg_type.h"
#include "commands/sequence.h"
#include "distributed/colocation_utils.h"
#include "distributed/commands.h"
#include "distributed/listutils.h"
#include "distributed/metadata_utility.h"
#include "distributed/coordinator_protocol.h"

@@ -30,6 +31,7 @@
#include "distributed/pg_dist_colocation.h"
#include "distributed/resource_lock.h"
#include "distributed/shardinterval_utils.h"
#include "distributed/tenant_schema_metadata.h"
#include "distributed/version_compat.h"
#include "distributed/utils/array_type.h"
#include "distributed/worker_protocol.h"

@@ -49,7 +51,6 @@ static bool HashPartitionedShardIntervalsEqual(ShardInterval *leftShardInterval,
                                               ShardInterval *rightShardInterval);
static int CompareShardPlacementsByNode(const void *leftElement,
                                        const void *rightElement);
static void DeleteColocationGroup(uint32 colocationId);
static uint32 CreateColocationGroupForRelation(Oid sourceRelationId);
static void BreakColocation(Oid sourceRelationId);

@@ -546,6 +547,13 @@ ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType,
        Form_pg_dist_colocation colocationForm =
            (Form_pg_dist_colocation) GETSTRUCT(colocationTuple);

        /* avoid choosing a colocation group that belongs to a tenant schema */
        if (IsTenantSchemaColocationGroup(colocationForm->colocationid))
        {
            colocationTuple = systable_getnext(scanDescriptor);
            continue;
        }

        if (colocationId == INVALID_COLOCATION_ID || colocationId >
            colocationForm->colocationid)
        {
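The new branch above keeps default colocation from latching onto a colocation group that belongs to a tenant schema. A sketch of the expected behavior, with made-up names and unverified expected output:

```sql
SET citus.enable_schema_based_sharding TO ON;
CREATE SCHEMA tenant_s3;
CREATE TABLE tenant_s3.t (a int);        -- placed in the tenant schema's colocation group
RESET citus.enable_schema_based_sharding;

CREATE TABLE public.regular (a int);
SELECT create_distributed_table('public.regular', NULL, colocate_with => 'default');
-- expected (not verified here): the two tables end up in different colocation groups
SELECT count(DISTINCT colocationid) FROM pg_dist_partition
WHERE logicalrelid IN ('tenant_s3.t'::regclass, 'public.regular'::regclass);   -- expected: 2
```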
@@ -1258,9 +1266,9 @@ DeleteColocationGroupIfNoTablesBelong(uint32 colocationId)

/*
 * DeleteColocationGroup deletes the colocation group from pg_dist_colocation
 * throughout the cluster.
 * throughout the cluster and dissociates the tenant schema if any.
 */
static void
void
DeleteColocationGroup(uint32 colocationId)
{
    DeleteColocationGroupLocally(colocationId);
@@ -1422,4 +1430,25 @@ EnsureTableCanBeColocatedWith(Oid relationId, char replicationModel,
                               "%s and %s.", sourceRelationName,
                               relationName)));
    }

    /* prevent colocating regular tables with tenant tables */
    Oid sourceRelationSchemaId = get_rel_namespace(sourceRelationId);
    Oid targetRelationSchemaId = get_rel_namespace(relationId);
    if (IsTenantSchema(sourceRelationSchemaId) &&
        sourceRelationSchemaId != targetRelationSchemaId)
    {
        char *relationName = get_rel_name(relationId);
        char *sourceRelationName = get_rel_name(sourceRelationId);
        char *sourceRelationSchemaName = get_namespace_name(sourceRelationSchemaId);

        ereport(ERROR, (errmsg("cannot colocate tables %s and %s",
                               sourceRelationName, relationName),
                        errdetail("Cannot colocate tables with tenant tables "
                                  "by using colocate_with option."),
                        errhint("Consider using \"CREATE TABLE\" statement "
                                "to create this table as a tenant table in "
                                "the same schema to automatically colocate "
                                "it with %s.%s",
                                sourceRelationSchemaName, sourceRelationName)));
    }
}
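A sketch of the new error path, with made-up table names; the message text follows the ereport() above:

```sql
SET citus.enable_schema_based_sharding TO ON;
CREATE SCHEMA tenant_s4;
CREATE TABLE tenant_s4.orders (id int);
RESET citus.enable_schema_based_sharding;

CREATE TABLE public.orders_copy (id int);
SELECT create_distributed_table('public.orders_copy', NULL, colocate_with => 'tenant_s4.orders');
-- ERROR:  cannot colocate tables orders and orders_copy
-- DETAIL: Cannot colocate tables with tenant tables by using colocate_with option.
```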
@@ -0,0 +1,239 @@
/*-------------------------------------------------------------------------
 *
 * tenant_schema_metadata.c
 *
 * This file contains functions to query and modify tenant schema metadata,
 * which is used to track the schemas used for schema-based sharding in
 * Citus.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/genam.h"
#include "access/htup.h"
#include "access/table.h"
#include "distributed/colocation_utils.h"
#include "distributed/metadata_cache.h"
#include "distributed/metadata_sync.h"
#include "distributed/pg_dist_tenant_schema.h"
#include "distributed/tenant_schema_metadata.h"
#include "storage/lockdefs.h"
#include "utils/relcache.h"
#include "utils/fmgroids.h"


static Oid ColocationIdGetTenantSchemaId(uint32 colocationId);


/*
 * IsTenantSchema returns true if there is a tenant schema with given schemaId.
 */
bool
IsTenantSchema(Oid schemaId)
{
    /*
     * We don't allow creating tenant schemas when there is a version
     * mismatch. Even more, SchemaIdGetTenantColocationId() would throw an
     * error if the underlying pg_dist_tenant_schema metadata table has not
     * been created yet, which is the case in older versions. For this reason,
     * it's safe to assume that it cannot be a tenant schema when there is a
     * version mismatch.
     *
     * But it's a bit tricky that we do the same when version checks are
     * disabled because then CheckCitusVersion() returns true even if there
     * is a version mismatch. And in that case, the tests that are trying to
     * create tables (in multi_extension.sql) in older versions would
     * fail when deciding whether we should create a tenant table or not.
     *
     * The downside of doing so is that, for example, we will skip deleting
     * the tenant schema entry from pg_dist_tenant_schema when dropping a
     * tenant schema while the version checks are disabled even if there was
     * no version mismatch. But we're okay with that because we don't expect
     * users to disable version checks anyway.
     */
    if (!EnableVersionChecks || !CheckCitusVersion(DEBUG4))
    {
        return false;
    }

    return SchemaIdGetTenantColocationId(schemaId) != INVALID_COLOCATION_ID;
}


/*
 * IsTenantSchemaColocationGroup returns true if there is a tenant schema
 * that is associated with given colocation id.
 */
bool
IsTenantSchemaColocationGroup(uint32 colocationId)
{
    return OidIsValid(ColocationIdGetTenantSchemaId(colocationId));
}


/*
 * SchemaIdGetTenantColocationId returns the colocation id associated with
 * the tenant schema with given id.
 *
 * Returns INVALID_COLOCATION_ID if there is no tenant schema with given id.
 */
uint32
SchemaIdGetTenantColocationId(Oid schemaId)
{
    uint32 colocationId = INVALID_COLOCATION_ID;

    if (!OidIsValid(schemaId))
    {
        ereport(ERROR, (errmsg("schema id is invalid")));
    }

    Relation pgDistTenantSchema = table_open(DistTenantSchemaRelationId(),
                                             AccessShareLock);
    ScanKeyData scanKey[1];
    ScanKeyInit(&scanKey[0], Anum_pg_dist_tenant_schema_schemaid, BTEqualStrategyNumber,
                F_OIDEQ, ObjectIdGetDatum(schemaId));

    bool indexOk = true;
    SysScanDesc scanDescriptor = systable_beginscan(pgDistTenantSchema,
                                                    DistTenantSchemaPrimaryKeyIndexId(),
                                                    indexOk, NULL, 1, scanKey);

    HeapTuple heapTuple = systable_getnext_ordered(scanDescriptor, ForwardScanDirection);
    if (HeapTupleIsValid(heapTuple))
    {
        bool isNull = false;
        colocationId = DatumGetUInt32(
            heap_getattr(heapTuple,
                         Anum_pg_dist_tenant_schema_colocationid,
                         RelationGetDescr(pgDistTenantSchema),
                         &isNull));
        Assert(!isNull);
    }

    systable_endscan(scanDescriptor);
    table_close(pgDistTenantSchema, AccessShareLock);

    return colocationId;
}


/*
 * ColocationIdGetTenantSchemaId returns the oid of the tenant schema that
 * is associated with given colocation id.
 *
 * Returns InvalidOid if there is no such tenant schema.
 */
static Oid
ColocationIdGetTenantSchemaId(uint32 colocationId)
{
    if (colocationId == INVALID_COLOCATION_ID)
    {
        ereport(ERROR, (errmsg("colocation id is invalid")));
    }

    Relation pgDistTenantSchema = table_open(DistTenantSchemaRelationId(),
                                             AccessShareLock);
    ScanKeyData scanKey[1];
    ScanKeyInit(&scanKey[0], Anum_pg_dist_tenant_schema_colocationid,
                BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(colocationId));

    bool indexOk = true;
    SysScanDesc scanDescriptor = systable_beginscan(pgDistTenantSchema,
                                                    DistTenantSchemaUniqueColocationIdIndexId(),
                                                    indexOk, NULL, 1, scanKey);

    HeapTuple heapTuple = systable_getnext_ordered(scanDescriptor, ForwardScanDirection);
    Oid schemaId = InvalidOid;
    if (HeapTupleIsValid(heapTuple))
    {
        bool isNull = false;
        schemaId = heap_getattr(heapTuple, Anum_pg_dist_tenant_schema_schemaid,
                                RelationGetDescr(pgDistTenantSchema), &isNull);
        Assert(!isNull);
    }

    systable_endscan(scanDescriptor);
    table_close(pgDistTenantSchema, AccessShareLock);

    return schemaId;
}


/*
 * InsertTenantSchemaLocally inserts an entry into pg_dist_tenant_schema
 * with given schemaId and colocationId.
 *
 * Throws a constraint violation error if there is already an entry with
 * given schemaId, or if given colocation id is already associated with
 * another tenant schema.
 */
void
InsertTenantSchemaLocally(Oid schemaId, uint32 colocationId)
{
    if (!OidIsValid(schemaId))
    {
        ereport(ERROR, (errmsg("schema id is invalid")));
    }

    if (colocationId == INVALID_COLOCATION_ID)
    {
        ereport(ERROR, (errmsg("colocation id is invalid")));
    }

    Datum values[Natts_pg_dist_tenant_schema] = { 0 };
    bool isNulls[Natts_pg_dist_tenant_schema] = { 0 };

    values[Anum_pg_dist_tenant_schema_schemaid - 1] = ObjectIdGetDatum(schemaId);
    values[Anum_pg_dist_tenant_schema_colocationid - 1] = UInt32GetDatum(colocationId);

    Relation pgDistTenantSchema = table_open(DistTenantSchemaRelationId(),
                                             RowExclusiveLock);
    TupleDesc tupleDescriptor = RelationGetDescr(pgDistTenantSchema);
    HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);

    CatalogTupleInsert(pgDistTenantSchema, heapTuple);
    CommandCounterIncrement();

    table_close(pgDistTenantSchema, NoLock);
}


/*
 * DeleteTenantSchemaLocally deletes the entry for given schemaId from
 * pg_dist_tenant_schema.
 *
 * Throws an error if there is no such tenant schema.
 */
void
DeleteTenantSchemaLocally(Oid schemaId)
{
    if (!OidIsValid(schemaId))
    {
        ereport(ERROR, (errmsg("schema id is invalid")));
    }

    Relation pgDistTenantSchema = table_open(DistTenantSchemaRelationId(),
                                             RowExclusiveLock);
    ScanKeyData scanKey[1];
    ScanKeyInit(&scanKey[0], Anum_pg_dist_tenant_schema_schemaid, BTEqualStrategyNumber,
                F_OIDEQ, ObjectIdGetDatum(schemaId));

    bool indexOk = true;
    SysScanDesc scanDescriptor = systable_beginscan(pgDistTenantSchema,
                                                    DistTenantSchemaPrimaryKeyIndexId(),
                                                    indexOk, NULL, 1, scanKey);

    HeapTuple heapTuple = systable_getnext_ordered(scanDescriptor, ForwardScanDirection);
    if (!HeapTupleIsValid(heapTuple))
    {
        ereport(ERROR, (errmsg("could not find tuple for tenant schema %u", schemaId)));
    }

    CatalogTupleDelete(pgDistTenantSchema, &heapTuple->t_self);
    CommandCounterIncrement();

    systable_endscan(scanDescriptor);
    table_close(pgDistTenantSchema, NoLock);
}
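For reference, the two lookups implemented above correspond to the following catalog queries; the C code additionally goes through the primary-key and unique-colocationid indexes rather than a plain scan. The schema name and colocation id are placeholders:

```sql
-- SchemaIdGetTenantColocationId(schemaId)
SELECT colocationid FROM pg_catalog.pg_dist_tenant_schema
WHERE schemaid = 'tenant_s1'::regnamespace;

-- ColocationIdGetTenantSchemaId(colocationId), with 90 as a made-up colocation id
SELECT schemaid FROM pg_catalog.pg_dist_tenant_schema
WHERE colocationid = 90;
```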
@@ -48,6 +48,7 @@ extern void UpdateRelationColocationGroup(Oid distributedRelationId, uint32 colo
                                          bool localOnly);
extern void DeleteColocationGroupIfNoTablesBelong(uint32 colocationId);
extern List * ColocationGroupTableList(uint32 colocationId, uint32 count);
extern void DeleteColocationGroup(uint32 colocationId);
extern void DeleteColocationGroupLocally(uint32 colocationId);
extern uint32 FindColocateWithColocationId(Oid relationId, char replicationModel,
                                           Oid distributionColumnType,
@@ -23,6 +23,7 @@


extern bool AddAllLocalTablesToMetadata;
extern bool EnableSchemaBasedSharding;

/* controlled via GUC, should be accessed via EnableLocalReferenceForeignKeys() */
extern bool EnableLocalReferenceForeignKeys;

@@ -458,8 +459,7 @@ extern void UnmarkRolesDistributed(List *roles);
extern List * FilterDistributedRoles(List *roles);

/* schema.c - forward declarations */
extern List * PreprocessCreateSchemaStmt(Node *node, const char *queryString,
                                         ProcessUtilityContext processUtilityContext);
extern List * PostprocessCreateSchemaStmt(Node *node, const char *queryString);
extern List * PreprocessDropSchemaStmt(Node *dropSchemaStatement,
                                       const char *queryString,
                                       ProcessUtilityContext processUtilityContext);

@@ -586,6 +586,7 @@ extern char * GetAlterColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationI

extern void ErrorIfTableHasUnsupportedIdentityColumn(Oid relationId);
extern void ErrorIfTableHasIdentityColumn(Oid relationId);
extern void ConvertNewTableIfNecessary(Node *createStmt);

/* text_search.c - forward declarations */
extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address);

@@ -770,6 +771,7 @@ extern void ExecuteForeignKeyCreateCommandList(List *ddlCommandList,
/* create_citus_local_table.c */
extern void CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys,
                                  bool autoConverted);
extern bool ShouldAddNewTableToMetadata(Oid relationId);
extern List * GetExplicitIndexOidList(Oid relationId);

extern bool ShouldPropagateSetCommand(VariableSetStmt *setStmt);

@@ -780,4 +782,12 @@ extern void CreateCitusLocalTablePartitionOf(CreateStmt *createStatement,
extern void UpdateAutoConvertedForConnectedRelations(List *relationId, bool
                                                     autoConverted);

/* schema_based_sharding.c */
extern bool ShouldUseSchemaBasedSharding(char *schemaName);
extern bool ShouldCreateTenantSchemaTable(Oid relationId);
extern bool IsTenantSchema(Oid schemaId);
extern void ErrorIfIllegalPartitioningInTenantSchema(Oid parentRelationId,
                                                     Oid partitionRelationId);
extern void CreateTenantSchemaTable(Oid relationId);

#endif /*CITUS_COMMANDS_H */

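ErrorIfIllegalPartitioningInTenantSchema() is only declared in this hunk, so the sketch below merely illustrates the kind of cross-schema partitioning it is presumably meant to reject; treat the exact behavior and message as assumptions:

```sql
SET citus.enable_schema_based_sharding TO ON;
CREATE SCHEMA tenant_s5;
CREATE TABLE tenant_s5.events (day date) PARTITION BY RANGE (day);
-- a partition living outside the tenant schema of its parent is the sort of layout
-- the check is expected to reject (assumed; the error text is not shown in this diff)
CREATE TABLE public.events_2023 PARTITION OF tenant_s5.events
    FOR VALUES FROM ('2023-01-01') TO ('2024-01-01');
```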
@@ -241,6 +241,7 @@ extern Oid DistRebalanceStrategyRelationId(void);
extern Oid DistLocalGroupIdRelationId(void);
extern Oid DistObjectRelationId(void);
extern Oid DistEnabledCustomAggregatesId(void);
extern Oid DistTenantSchemaRelationId(void);

/* index oids */
extern Oid DistNodeNodeIdIndexId(void);

@@ -263,6 +264,8 @@ extern Oid DistTransactionGroupIndexId(void);
extern Oid DistPlacementGroupidIndexId(void);
extern Oid DistObjectPrimaryKeyIndexId(void);
extern Oid DistCleanupPrimaryKeyIndexId(void);
extern Oid DistTenantSchemaPrimaryKeyIndexId(void);
extern Oid DistTenantSchemaUniqueColocationIdIndexId(void);

/* sequence oids */
extern Oid DistBackgroundJobJobIdSequenceId(void);

@@ -137,6 +137,8 @@ extern void SyncNewColocationGroupToNodes(uint32 colocationId, int shardCount,
                                          Oid distributionColumType,
                                          Oid distributionColumnCollation);
extern void SyncDeleteColocationGroupToNodes(uint32 colocationId);
extern char * TenantSchemaInsertCommand(Oid schemaId, uint32 colocationId);
extern char * TenantSchemaDeleteCommand(char *schemaName);

extern MetadataSyncContext * CreateMetadataSyncContext(List *nodeList,
                                                       bool collectCommands,

@@ -163,6 +165,7 @@ extern void SendNodeWideObjectsSyncCommands(MetadataSyncContext *context);
extern void SendShellTableDeletionCommands(MetadataSyncContext *context);
extern void SendMetadataDeletionCommands(MetadataSyncContext *context);
extern void SendColocationMetadataCommands(MetadataSyncContext *context);
extern void SendTenantSchemaMetadataCommands(MetadataSyncContext *context);
extern void SendDependencyCreationCommands(MetadataSyncContext *context);
extern void SendDistTableMetadataCommands(MetadataSyncContext *context);
extern void SendDistObjectCommands(MetadataSyncContext *context);

@@ -174,6 +177,7 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context);
#define DELETE_ALL_DISTRIBUTED_OBJECTS "DELETE FROM pg_catalog.pg_dist_object"
#define DELETE_ALL_PARTITIONS "DELETE FROM pg_dist_partition"
#define DELETE_ALL_COLOCATION "DELETE FROM pg_catalog.pg_dist_colocation"
#define DELETE_ALL_TENANT_SCHEMAS "DELETE FROM pg_catalog.pg_dist_tenant_schema"
#define WORKER_DROP_ALL_SHELL_TABLES \
    "CALL pg_catalog.worker_drop_all_shell_tables(%s)"
#define CITUS_INTERNAL_MARK_NODE_NOT_SYNCED \

@@ -201,6 +201,37 @@ typedef enum SizeQueryType
    TABLE_SIZE /* pg_table_size() */
} SizeQueryType;


typedef enum
{
    COLOCATE_WITH_TABLE_LIKE_OPT,
    COLOCATE_WITH_COLOCATION_ID
} ColocationParamType;

/*
 * Param used to specify the colocation target of a distributed table. It can
 * be either a table name or a colocation id.
 *
 * When colocationParamType is COLOCATE_WITH_COLOCATION_ID, colocationId is
 * expected to be a valid colocation id. When colocationParamType is set to
 * COLOCATE_WITH_TABLE_LIKE_OPT, colocateWithTableName is expected to
 * be a valid table name, "default" or "none".
 *
 * Among the functions used to create a Citus table, right now only
 * CreateSingleShardTable() accepts a ColocationParam.
 */
typedef struct
{
    union
    {
        char *colocateWithTableName;
        uint32 colocationId;
    };

    ColocationParamType colocationParamType;
} ColocationParam;


typedef enum BackgroundJobStatus
{
    BACKGROUND_JOB_STATUS_SCHEDULED,

@@ -326,7 +357,7 @@ extern void DeletePartitionRow(Oid distributedRelationId);
extern void DeleteShardRow(uint64 shardId);
extern void UpdatePlacementGroupId(uint64 placementId, int groupId);
extern void DeleteShardPlacementRow(uint64 placementId);
extern void CreateSingleShardTable(Oid relationId, char *colocateWithTableName);
extern void CreateSingleShardTable(Oid relationId, ColocationParam colocationParam);
extern void CreateDistributedTable(Oid relationId, char *distributionColumnName,
                                   char distributionMethod, int shardCount,
                                   bool shardCountIsStrict, char *colocateWithTableName);
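CreateSingleShardTable() now takes a ColocationParam rather than a bare colocate_with string: the user-facing path still passes a table-name-like option, while tenant-table creation passes the colocation id that was assigned to the schema. At the SQL level the two entry points look roughly like this (names illustrative):

```sql
-- goes through COLOCATE_WITH_TABLE_LIKE_OPT
CREATE TABLE public.single_shard (a int);
SELECT create_distributed_table('public.single_shard', NULL, colocate_with => 'none');

-- goes through COLOCATE_WITH_COLOCATION_ID: tables created in a tenant schema reuse
-- the colocation id that was assigned to the schema when it was created
SET citus.enable_schema_based_sharding TO ON;
CREATE SCHEMA tenant_s6;
CREATE TABLE tenant_s6.t1 (a int);
CREATE TABLE tenant_s6.t2 (a int);   -- same colocation id as tenant_s6.t1
```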
@@ -0,0 +1,41 @@
/*-------------------------------------------------------------------------
 *
 * pg_dist_tenant_schema.h
 *   definition of the system catalog for the schemas used for schema-based
 *   sharding in Citus.
 *
 *-------------------------------------------------------------------------
 */

#ifndef PG_DIST_TENANT_SCHEMA_H
#define PG_DIST_TENANT_SCHEMA_H

#include "postgres.h"


/* ----------------
 *      pg_dist_tenant_schema definition.
 * ----------------
 */
typedef struct FormData_pg_dist_tenant_schema
{
    Oid schemaid;
    uint32 colocationid;
} FormData_pg_dist_tenant_schema;

/* ----------------
 *      Form_pg_dist_tenant_schema corresponds to a pointer to a tuple with
 *      the format of pg_dist_tenant_schema relation.
 * ----------------
 */
typedef FormData_pg_dist_tenant_schema *Form_pg_dist_tenant_schema;

/* ----------------
 *      compiler constants for pg_dist_tenant_schema
 * ----------------
 */
#define Natts_pg_dist_tenant_schema 2
#define Anum_pg_dist_tenant_schema_schemaid 1
#define Anum_pg_dist_tenant_schema_colocationid 2

#endif /* PG_DIST_TENANT_SCHEMA_H */
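A quick, purely illustrative way to cross-check the compiler constants above against the catalog definition:

```sql
-- the attribute numbers behind Anum_pg_dist_tenant_schema_*
SELECT attname, attnum FROM pg_attribute
WHERE attrelid = 'pg_catalog.pg_dist_tenant_schema'::regclass AND attnum > 0
ORDER BY attnum;
-- expected: schemaid = 1, colocationid = 2
```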
@@ -0,0 +1,32 @@
/*-------------------------------------------------------------------------
 *
 * tenant_schema_metadata.h
 *
 * This file contains functions to query and modify tenant schema metadata,
 * which is used to track the schemas used for schema-based sharding in
 * Citus.
 *
 * -------------------------------------------------------------------------
 */

#ifndef TENANT_SCHEMA_METADATA_H
#define TENANT_SCHEMA_METADATA_H

#include "postgres.h"

/* accessors */
extern uint32 SchemaIdGetTenantColocationId(Oid schemaId);
extern bool IsTenantSchema(Oid schemaId);
extern bool IsTenantSchemaColocationGroup(uint32 colocationId);

/*
 * Local only modifiers.
 *
 * These functions may not make much sense by themselves. They are mainly
 * exported for tenant-schema management (schema_based_sharding.c) and
 * metadata-sync layer (metadata_sync.c).
 */
extern void InsertTenantSchemaLocally(Oid schemaId, uint32 colocationId);
extern void DeleteTenantSchemaLocally(Oid schemaId);

#endif /* TENANT_SCHEMA_METADATA_H */
@@ -1,4 +1,4 @@
test: upgrade_basic_after upgrade_ref2ref_after upgrade_type_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity upgrade_citus_locks upgrade_single_shard_table_after
test: upgrade_basic_after upgrade_ref2ref_after upgrade_type_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity upgrade_citus_locks upgrade_single_shard_table_after upgrade_schema_based_sharding_after

# This test cannot be run with run_test.py currently due to its dependence on
# the specific PG versions that we use to run upgrade tests. For now we leave
@@ -5,7 +5,7 @@ test: upgrade_basic_before
test: upgrade_ref2ref_before
test: upgrade_type_before
test: upgrade_distributed_function_before upgrade_rebalance_strategy_before
test: upgrade_autoconverted_before upgrade_single_shard_table_before
test: upgrade_autoconverted_before upgrade_single_shard_table_before upgrade_schema_based_sharding_before
test: upgrade_citus_stat_activity
test: upgrade_citus_locks
test: upgrade_distributed_triggers_before
@@ -152,6 +152,7 @@ DEPS = {
    "isolation_extension_commands": TestDeps(
        None, ["isolation_setup", "isolation_add_remove_node"]
    ),
    "schema_based_sharding": TestDeps("minimal_schedule"),
}

@ -453,7 +453,9 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
|
|||
DROP TABLE IF EXISTS citus_local_table_1, citus_local_table_2, citus_local_table_3;
|
||||
-- this GUC will add the next three tables to metadata automatically
|
||||
SET citus.use_citus_managed_tables TO ON;
|
||||
CREATE TABLE citus_local_table_1(a INT UNIQUE);
|
||||
-- try to create the table twice by using IF NOT EXISTS syntax
|
||||
CREATE TABLE IF NOT EXISTS citus_local_table_1(a INT UNIQUE);
|
||||
CREATE TABLE IF NOT EXISTS citus_local_table_1(a INT UNIQUE);
|
||||
CREATE TABLE citus_local_table_2(a INT UNIQUE);
|
||||
CREATE TABLE citus_local_table_3(a INT UNIQUE);
|
||||
RESET citus.use_citus_managed_tables;
|
||||
|
@ -1340,3 +1342,4 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
|
|||
(1 row)
|
||||
|
||||
DROP SCHEMA drop_fkey_cascade CASCADE;
|
||||
DROP USER another_user;
|
||||
|
|
|
@ -116,11 +116,11 @@ SELECT create_distributed_table('nullkey_c1_t1', null, colocate_with=>'none');
|
|||
|
||||
(1 row)
|
||||
|
||||
SELECT colocationid AS nullkey_c1_t1_colocation_id FROM pg_dist_partition WHERE logicalrelid = 'create_single_shard_table.nullkey_c1_t1'::regclass \gset
|
||||
SELECT colocationid AS nullkey_c1_t1_colocationid FROM pg_dist_partition WHERE logicalrelid = 'create_single_shard_table.nullkey_c1_t1'::regclass \gset
|
||||
BEGIN;
|
||||
DROP TABLE nullkey_c1_t1;
|
||||
-- make sure that we delete the colocation group after dropping the last table that belongs to it
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = :'nullkey_c1_t1_colocation_id';
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = :'nullkey_c1_t1_colocationid';
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1-begin s2-begin s1-tenant-1-create-table-1 s2-tenant-1-create-table-2 s1-commit s2-tenant-1-verify-colocation s2-commit
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-tenant-1-create-table-1: CREATE TABLE tenant_1.tbl_1 (a int);
|
||||
step s2-tenant-1-create-table-2: CREATE TABLE tenant_1.tbl_2 (a int);
|
||||
step s1-commit: COMMIT;
|
||||
step s2-tenant-1-verify-colocation: SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition WHERE logicalrelid::text LIKE 'tenant_1.%';
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
step s2-commit: COMMIT;
|
||||
|
||||
starting permutation: s1-begin s2-begin s1-tenant-4-create-table-1 s2-tenant-4-create-table-2 s1-commit s2-tenant-4-verify-colocation s2-commit
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-tenant-4-create-table-1: CREATE TABLE tenant_4.tbl_1 (a int);
|
||||
step s2-tenant-4-create-table-2: CREATE TABLE tenant_4.tbl_2 (a int);
|
||||
step s1-commit: COMMIT;
|
||||
step s2-tenant-4-verify-colocation: SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition WHERE logicalrelid::text LIKE 'tenant_4.%';
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
step s2-commit: COMMIT;
|
||||
|
||||
starting permutation: s1-begin s2-begin s1-tenant-2-create-table-1 s2-tenant-3-create-table-1 s1-commit s2-commit
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-tenant-2-create-table-1: CREATE TABLE tenant_2.tbl_1 (a int);
|
||||
step s2-tenant-3-create-table-1: CREATE TABLE tenant_3.tbl_1 (a int);
|
||||
step s1-commit: COMMIT;
|
||||
step s2-commit: COMMIT;
|
|
@ -1357,9 +1357,13 @@ SELECT * FROM multi_extension.print_extension_changes();
|
|||
-- Snapshot of state at 12.0-1
|
||||
ALTER EXTENSION citus UPDATE TO '12.0-1';
|
||||
SELECT * FROM multi_extension.print_extension_changes();
|
||||
previous_object | current_object
|
||||
previous_object | current_object
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
| function citus_internal_add_tenant_schema(oid,integer) void
|
||||
| function citus_internal_delete_tenant_schema(oid) void
|
||||
| function citus_internal_unregister_tenant_schema_globally(oid,text) void
|
||||
| table pg_dist_tenant_schema
|
||||
(4 rows)
|
||||
|
||||
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
|
||||
-- show running version
|
||||
|
@ -1777,6 +1781,53 @@ SELECT citus_remove_node('localhost', :master_port);
|
|||
|
||||
(1 row)
|
||||
|
||||
-- confirm that we can create a tenant schema / table on an empty node
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
CREATE SCHEMA tenant_schema;
|
||||
CREATE TABLE tenant_schema.test(x int, y int);
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_schema.test'::regclass
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_schema';
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- and make sure that we can't remove the coordinator due to "test"
|
||||
SELECT citus_remove_node('localhost', :master_port);
|
||||
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
|
||||
DETAIL: One of the table(s) that prevents the operation complete successfully is tenant_schema.test
|
||||
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
|
||||
BEGIN;
|
||||
SET LOCAL client_min_messages TO WARNING;
|
||||
DROP SCHEMA tenant_schema CASCADE;
|
||||
COMMIT;
|
||||
-- and now we should be able to remove the coordinator
|
||||
SELECT citus_remove_node('localhost', :master_port);
|
||||
citus_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA tenant_schema;
|
||||
-- Make sure that we can sync metadata for empty tenant schemas
|
||||
-- when adding the first node to the cluster.
|
||||
SELECT 1 FROM citus_add_node('localhost', :worker_1_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
DROP SCHEMA tenant_schema;
|
||||
SELECT citus_remove_node('localhost', :worker_1_port);
|
||||
citus_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_schema_based_sharding;
|
||||
-- confirm that we can create a reference table on an empty node
|
||||
CREATE TABLE test (x int, y int);
|
||||
INSERT INTO test VALUES (1,2);
|
||||
|
@ -1802,6 +1853,19 @@ SELECT citus_add_local_table_to_metadata('test');
|
|||
(1 row)
|
||||
|
||||
DROP TABLE test;
|
||||
-- Verify that we don't consider the schemas created by extensions as tenant schemas.
|
||||
-- Easiest way of verifying this is to drop and re-create columnar extension.
|
||||
DROP EXTENSION citus_columnar;
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
CREATE EXTENSION citus_columnar;
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid IN ('columnar'::regnamespace, 'columnar_internal'::regnamespace);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_schema_based_sharding;
|
||||
DROP EXTENSION citus;
|
||||
CREATE EXTENSION citus;
|
||||
DROP TABLE version_mismatch_table;
|
||||
|
|
|
@ -76,6 +76,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@ -103,7 +104,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
(32 rows)
|
||||
(33 rows)
|
||||
|
||||
-- Create a test table with constraints and SERIAL and default from user defined sequence
|
||||
CREATE SEQUENCE user_defined_seq;
|
||||
|
@ -135,64 +136,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
DROP TABLE IF EXISTS public.mx_test_table CASCADE
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO pg_database_owner;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO pg_database_owner;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
|
||||
SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
|
||||
SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
|
||||
SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
|
||||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_create_truncate_trigger('public.mx_test_table')
|
||||
SET ROLE pg_database_owner
|
||||
SET ROLE pg_database_owner
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'on'
|
||||
SET citus.enable_ddl_propagation TO 'on'
|
||||
UPDATE pg_dist_local_group SET groupid = 1
|
||||
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
|
||||
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
|
||||
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
|
||||
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(50 rows)
|
||||
|
||||
-- Show that CREATE INDEX commands are included in the activate node snapshot
|
||||
CREATE INDEX mx_index ON mx_test_table(col_2);
|
||||
SELECT unnest(activate_node_snapshot()) order by 1;
|
||||
unnest
|
||||
---------------------------------------------------------------------
|
||||
ALTER DATABASE regression OWNER TO postgres;
|
||||
ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
|
||||
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
|
||||
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
|
||||
ALTER TABLE public.mx_test_table OWNER TO postgres
|
||||
CALL pg_catalog.worker_drop_all_shell_tables(true)
|
||||
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -235,6 +179,65 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(51 rows)
|
||||
|
||||
-- Show that CREATE INDEX commands are included in the activate node snapshot
|
||||
CREATE INDEX mx_index ON mx_test_table(col_2);
|
||||
SELECT unnest(activate_node_snapshot()) order by 1;
|
||||
unnest
|
||||
---------------------------------------------------------------------
|
||||
ALTER DATABASE regression OWNER TO postgres;
|
||||
ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
|
||||
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
|
||||
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
|
||||
ALTER TABLE public.mx_test_table OWNER TO postgres
|
||||
CALL pg_catalog.worker_drop_all_shell_tables(true)
|
||||
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
|
||||
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
DROP TABLE IF EXISTS public.mx_test_table CASCADE
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO pg_database_owner;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO pg_database_owner;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
|
||||
SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
|
||||
SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
|
||||
SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
|
||||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_create_truncate_trigger('public.mx_test_table')
|
||||
SET ROLE pg_database_owner
|
||||
SET ROLE pg_database_owner
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'on'
|
||||
SET citus.enable_ddl_propagation TO 'on'
|
||||
UPDATE pg_dist_local_group SET groupid = 1
|
||||
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
|
||||
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
|
||||
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
|
||||
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(52 rows)
|
||||
|
||||
-- Show that schema changes are included in the activate node snapshot
|
||||
CREATE SCHEMA mx_testing_schema;
|
||||
ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema;
|
||||
|
@@ -253,6 +256,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -294,7 +298,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(53 rows)
|
||||
(54 rows)
|
||||
|
||||
-- Show that append distributed tables are not included in the activate node snapshot
|
||||
CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
|
||||
|
@@ -320,6 +324,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -361,7 +366,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(53 rows)
|
||||
(54 rows)
|
||||
|
||||
-- Show that range distributed tables are not included in the activate node snapshot
|
||||
UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
|
||||
|
@@ -380,6 +385,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -421,7 +427,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(53 rows)
|
||||
(54 rows)
|
||||
|
||||
-- Test start_metadata_sync_to_node and citus_activate_node UDFs
|
||||
-- Ensure that hasmetadata=false for all nodes
|
||||
|
@@ -1924,6 +1930,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE public.test_table (id integer DEFAULT worker_nextval('public.mx_test_sequence_0'::regclass), id2 integer DEFAULT worker_nextval('public.mx_test_sequence_1'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -2008,7 +2015,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(117 rows)
|
||||
(118 rows)
|
||||
|
||||
-- shouldn't work since test_table is MX
|
||||
ALTER TABLE test_table ADD COLUMN id3 bigserial;
|
||||
|
|
|
@@ -76,6 +76,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -103,7 +104,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
(32 rows)
|
||||
(33 rows)
|
||||
|
||||
-- Create a test table with constraints and SERIAL and default from user defined sequence
|
||||
CREATE SEQUENCE user_defined_seq;
|
||||
|
@@ -135,64 +136,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
DROP TABLE IF EXISTS public.mx_test_table CASCADE
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO postgres;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO postgres;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
|
||||
SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
|
||||
SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
|
||||
SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
|
||||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_create_truncate_trigger('public.mx_test_table')
|
||||
SET ROLE postgres
|
||||
SET ROLE postgres
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'on'
|
||||
SET citus.enable_ddl_propagation TO 'on'
|
||||
UPDATE pg_dist_local_group SET groupid = 1
|
||||
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
|
||||
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
|
||||
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
|
||||
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(50 rows)
|
||||
|
||||
-- Show that CREATE INDEX commands are included in the activate node snapshot
|
||||
CREATE INDEX mx_index ON mx_test_table(col_2);
|
||||
SELECT unnest(activate_node_snapshot()) order by 1;
|
||||
unnest
|
||||
---------------------------------------------------------------------
|
||||
ALTER DATABASE regression OWNER TO postgres;
|
||||
ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
|
||||
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
|
||||
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
|
||||
ALTER TABLE public.mx_test_table OWNER TO postgres
|
||||
CALL pg_catalog.worker_drop_all_shell_tables(true)
|
||||
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -235,6 +179,65 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(51 rows)
|
||||
|
||||
-- Show that CREATE INDEX commands are included in the activate node snapshot
|
||||
CREATE INDEX mx_index ON mx_test_table(col_2);
|
||||
SELECT unnest(activate_node_snapshot()) order by 1;
|
||||
unnest
|
||||
---------------------------------------------------------------------
|
||||
ALTER DATABASE regression OWNER TO postgres;
|
||||
ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
|
||||
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
|
||||
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
|
||||
ALTER TABLE public.mx_test_table OWNER TO postgres
|
||||
CALL pg_catalog.worker_drop_all_shell_tables(true)
|
||||
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
|
||||
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
|
||||
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
DELETE FROM pg_dist_shard
|
||||
DROP TABLE IF EXISTS public.mx_test_table CASCADE
|
||||
GRANT CREATE ON SCHEMA public TO PUBLIC;
|
||||
GRANT CREATE ON SCHEMA public TO postgres;
|
||||
GRANT USAGE ON SCHEMA public TO PUBLIC;
|
||||
GRANT USAGE ON SCHEMA public TO postgres;
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
RESET ROLE
|
||||
RESET ROLE
|
||||
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
|
||||
SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
|
||||
SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
|
||||
SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
|
||||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
|
||||
SELECT worker_create_truncate_trigger('public.mx_test_table')
|
||||
SET ROLE postgres
|
||||
SET ROLE postgres
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'off'
|
||||
SET citus.enable_ddl_propagation TO 'on'
|
||||
SET citus.enable_ddl_propagation TO 'on'
|
||||
UPDATE pg_dist_local_group SET groupid = 1
|
||||
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
|
||||
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
|
||||
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
|
||||
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(52 rows)
|
||||
|
||||
-- Show that schema changes are included in the activate node snapshot
|
||||
CREATE SCHEMA mx_testing_schema;
|
||||
ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema;
|
||||
|
@@ -253,6 +256,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -294,7 +298,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(53 rows)
|
||||
(54 rows)
|
||||
|
||||
-- Show that append distributed tables are not included in the activate node snapshot
|
||||
CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
|
||||
|
@@ -320,6 +324,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -361,7 +366,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(53 rows)
|
||||
(54 rows)
|
||||
|
||||
-- Show that range distributed tables are not included in the activate node snapshot
|
||||
UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
|
||||
|
@@ -380,6 +385,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -421,7 +427,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(53 rows)
|
||||
(54 rows)
|
||||
|
||||
-- Test start_metadata_sync_to_node and citus_activate_node UDFs
|
||||
-- Ensure that hasmetadata=false for all nodes
|
||||
|
@@ -1924,6 +1930,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
CREATE TABLE public.test_table (id integer DEFAULT worker_nextval('public.mx_test_sequence_0'::regclass), id2 integer DEFAULT worker_nextval('public.mx_test_sequence_1'::regclass)) USING heap
|
||||
DELETE FROM pg_catalog.pg_dist_colocation
|
||||
DELETE FROM pg_catalog.pg_dist_object
|
||||
DELETE FROM pg_catalog.pg_dist_tenant_schema
|
||||
DELETE FROM pg_dist_node
|
||||
DELETE FROM pg_dist_partition
|
||||
DELETE FROM pg_dist_placement
|
||||
|
@@ -2008,7 +2015,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(117 rows)
|
||||
(118 rows)
|
||||
|
||||
-- shouldn't work since test_table is MX
|
||||
ALTER TABLE test_table ADD COLUMN id3 bigserial;
|
||||
|
|
File diff suppressed because it is too large
@@ -156,8 +156,30 @@ WHERE shardid = (
SELECT shardid AS round_robin_test_c1_shard_id FROM pg_dist_shard WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass \gset
SELECT create_distributed_table('single_node_nullkey_c1_' || :round_robin_test_c1_shard_id , null, colocate_with=>'none', distribution_type=>null);
ERROR: relation "single_node_nullkey_c1_90630532" is a shard relation
-- create a tenant schema on single node setup
SET citus.enable_schema_based_sharding TO ON;
CREATE SCHEMA tenant_1;
CREATE TABLE tenant_1.tbl_1 (a int);
-- verify that we recorded tenant_1 in pg_dist_tenant_schema
SELECT COUNT(*)=1 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_1';
 ?column?
---------------------------------------------------------------------
 t
(1 row)

-- verify that tenant_1.tbl_1 is recorded in pg_dist_partition, as a single-shard table
SELECT COUNT(*)=1 FROM pg_dist_partition
WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND
      partmethod = 'n' AND repmodel = 's' AND colocationid IS NOT NULL;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

RESET citus.enable_schema_based_sharding;
SET client_min_messages TO WARNING;
DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2;
DROP SCHEMA tenant_1 CASCADE;
RESET client_min_messages;
-- so that we don't have to update rest of the test output
SET citus.next_shard_id TO 90630500;

@@ -156,8 +156,30 @@ WHERE shardid = (
SELECT shardid AS round_robin_test_c1_shard_id FROM pg_dist_shard WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass \gset
SELECT create_distributed_table('single_node_nullkey_c1_' || :round_robin_test_c1_shard_id , null, colocate_with=>'none', distribution_type=>null);
ERROR: relation "single_node_nullkey_c1_90630532" is a shard relation
-- create a tenant schema on single node setup
SET citus.enable_schema_based_sharding TO ON;
CREATE SCHEMA tenant_1;
CREATE TABLE tenant_1.tbl_1 (a int);
-- verify that we recorded tenant_1 in pg_dist_tenant_schema
SELECT COUNT(*)=1 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_1';
 ?column?
---------------------------------------------------------------------
 t
(1 row)

-- verify that tenant_1.tbl_1 is recorded in pg_dist_partition, as a single-shard table
SELECT COUNT(*)=1 FROM pg_dist_partition
WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND
      partmethod = 'n' AND repmodel = 's' AND colocationid IS NOT NULL;
 ?column?
---------------------------------------------------------------------
 t
(1 row)

RESET citus.enable_schema_based_sharding;
SET client_min_messages TO WARNING;
DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2;
DROP SCHEMA tenant_1 CASCADE;
RESET client_min_messages;
-- so that we don't have to update rest of the test output
SET citus.next_shard_id TO 90630500;

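The single-node checks above query pg_dist_tenant_schema and pg_dist_partition separately. As a rough, hypothetical illustration only (it is not part of any regression output, and the tenant_1/tbl_1 names are simply the ones the tests use), the two catalogs can be joined on colocationid to list each tenant schema together with the tenant tables that share its colocation group:

```sql
-- Hypothetical inspection query, assuming a coordinator that has the catalogs
-- shown in these outputs; none of the tests here run it.
SELECT ts.schemaid::regnamespace AS tenant_schema,
       p.logicalrelid            AS tenant_table,
       p.colocationid
FROM pg_dist_tenant_schema ts
JOIN pg_dist_partition p USING (colocationid)
WHERE p.partmethod = 'n' AND p.repmodel = 's'
ORDER BY 1, 2;
```
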
@ -69,16 +69,19 @@ ORDER BY 1;
|
|||
function citus_internal_add_placement_metadata(bigint,bigint,integer,bigint)
|
||||
function citus_internal_add_placement_metadata(bigint,integer,bigint,integer,bigint)
|
||||
function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text)
|
||||
function citus_internal_add_tenant_schema(oid,integer)
|
||||
function citus_internal_adjust_local_clock_to_remote(cluster_clock)
|
||||
function citus_internal_delete_colocation_metadata(integer)
|
||||
function citus_internal_delete_partition_metadata(regclass)
|
||||
function citus_internal_delete_shard_metadata(bigint)
|
||||
function citus_internal_delete_tenant_schema(oid)
|
||||
function citus_internal_global_blocked_processes()
|
||||
function citus_internal_is_replication_origin_tracking_active()
|
||||
function citus_internal_local_blocked_processes()
|
||||
function citus_internal_mark_node_not_synced(integer,integer)
|
||||
function citus_internal_start_replication_origin_tracking()
|
||||
function citus_internal_stop_replication_origin_tracking()
|
||||
function citus_internal_unregister_tenant_schema_globally(oid,text)
|
||||
function citus_internal_update_placement_metadata(bigint,integer,integer)
|
||||
function citus_internal_update_relation_colocation(oid,integer)
|
||||
function citus_is_clock_after(cluster_clock,cluster_clock)
|
||||
|
@@ -306,6 +309,7 @@ ORDER BY 1;
|
|||
table pg_dist_poolinfo
|
||||
table pg_dist_rebalance_strategy
|
||||
table pg_dist_shard
|
||||
table pg_dist_tenant_schema
|
||||
table pg_dist_transaction
|
||||
type citus.distribution_type
|
||||
type citus.shard_transfer_mode
|
||||
|
@@ -330,5 +334,5 @@ ORDER BY 1;
|
|||
view citus_stat_tenants_local
|
||||
view pg_dist_shard_placement
|
||||
view time_partitions
|
||||
(322 rows)
|
||||
(326 rows)
|
||||
|
||||
|
|
|
@@ -0,0 +1,98 @@
|
|||
ALTER SCHEMA "tenant\'_1" RENAME TO tenant_1;
|
||||
ALTER SCHEMA "tenant\'_2" RENAME TO tenant_2;
|
||||
-- verify that colocation id is set even for empty tenant
|
||||
SELECT colocationid > 0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_1';
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- verify the same on workers
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT colocationid > 0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_1';
|
||||
$$);
|
||||
result
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
t
|
||||
(2 rows)
|
||||
|
||||
-- verify that colocation id is set for non-empty tenant
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_2.test_table'::regclass
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_2';
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- verify the same on workers
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_2.test_table'::regclass
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_2';
|
||||
$$);
|
||||
result
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
t
|
||||
(2 rows)
|
||||
|
||||
CREATE TABLE tenant_1.tbl_1(a int, b text);
|
||||
CREATE TABLE tenant_2.tbl_1(a int, b text);
|
||||
-- Show that we can create further tenant tables in the tenant schemas
|
||||
-- after pg upgrade.
|
||||
SELECT COUNT(*)=2 FROM pg_dist_partition
|
||||
WHERE logicalrelid IN ('tenant_1.tbl_1'::regclass, 'tenant_2.tbl_1'::regclass) AND
|
||||
partmethod = 'n' AND repmodel = 's' AND colocationid > 0;
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND
|
||||
partmethod = 'n' AND repmodel = 's'
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_1';
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'tenant_2.tbl_1'::regclass AND
|
||||
partmethod = 'n' AND repmodel = 's'
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_2';
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- roll back the changes made on the following schemas to make this test idempotent
|
||||
DROP TABLE tenant_1.tbl_1, tenant_2.tbl_1;
|
||||
ALTER SCHEMA tenant_1 RENAME TO "tenant\'_1";
|
||||
ALTER SCHEMA tenant_2 RENAME TO "tenant\'_2";
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
CREATE SCHEMA tenant_3;
|
||||
-- Show that we can create further tenant schemas after pg upgrade.
|
||||
SELECT COUNT(*)=1 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_3';
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- drop the schema created in this test to make this test idempotent
|
||||
DROP SCHEMA tenant_3 CASCADE;
|
||||
RESET citus.enable_schema_based_sharding;
|
|
@@ -0,0 +1,10 @@
SET citus.enable_schema_based_sharding TO ON;
-- Create tenant tables with schema names that need escaping
-- to verify that citus_prepare_pg_upgrade() correctly saves
-- them into public schema.
-- empty tenant
CREATE SCHEMA "tenant\'_1";
-- non-empty tenant
CREATE SCHEMA "tenant\'_2";
CREATE TABLE "tenant\'_2".test_table(a int, b text);
RESET citus.enable_schema_based_sharding;
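-- Note: the companion "after" test renames these schemas back to tenant_1 and
-- tenant_2 once the upgrade is done and verifies that their colocation
-- metadata is still in place (see the upgrade_schema_based_sharding_after
-- output earlier in this diff).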
@@ -76,6 +76,7 @@ test: isolation_fix_partition_shard_index_names
test: isolation_global_pid
test: isolation_citus_locks
test: isolation_reference_table
test: isolation_schema_based_sharding

# Rebalancer
test: isolation_blocking_move_single_shard_commands
@@ -33,6 +33,7 @@ test: ref_citus_local_fkeys
test: alter_database_owner
test: distributed_triggers
test: create_single_shard_table
test: schema_based_sharding

test: multi_test_catalog_views
test: multi_table_ddl
@@ -0,0 +1,42 @@
|
|||
setup
|
||||
{
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
CREATE SCHEMA tenant_1;
|
||||
CREATE SCHEMA tenant_2;
|
||||
CREATE SCHEMA tenant_3;
|
||||
|
||||
CREATE SCHEMA tenant_4;
|
||||
CREATE TABLE tenant_4.first_table (a int);
|
||||
}
|
||||
|
||||
teardown
|
||||
{
|
||||
DROP SCHEMA tenant_1, tenant_2, tenant_3, tenant_4 CASCADE;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
|
||||
step "s1-begin" { BEGIN; }
|
||||
step "s1-tenant-1-create-table-1" { CREATE TABLE tenant_1.tbl_1 (a int); }
|
||||
step "s1-tenant-4-create-table-1" { CREATE TABLE tenant_4.tbl_1 (a int); }
|
||||
step "s1-tenant-2-create-table-1" { CREATE TABLE tenant_2.tbl_1 (a int); }
|
||||
step "s1-commit" { COMMIT; }
|
||||
|
||||
session "s2"
|
||||
|
||||
step "s2-begin" { BEGIN; }
|
||||
step "s2-tenant-1-create-table-2" { CREATE TABLE tenant_1.tbl_2 (a int); }
|
||||
step "s2-tenant-4-create-table-2" { CREATE TABLE tenant_4.tbl_2 (a int); }
|
||||
step "s2-tenant-1-verify-colocation" { SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition WHERE logicalrelid::text LIKE 'tenant_1.%'; }
|
||||
step "s2-tenant-4-verify-colocation" { SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition WHERE logicalrelid::text LIKE 'tenant_4.%'; }
|
||||
step "s2-tenant-3-create-table-1" { CREATE TABLE tenant_3.tbl_1 (a int); }
|
||||
step "s2-commit" { COMMIT; }
|
||||
|
||||
// two sessions competing with each other to create the first table in the same schema
|
||||
permutation "s1-begin" "s2-begin" "s1-tenant-1-create-table-1" "s2-tenant-1-create-table-2" "s1-commit" "s2-tenant-1-verify-colocation" "s2-commit"
|
||||
|
||||
// two sessions creating further tenant tables in the same schema
|
||||
permutation "s1-begin" "s2-begin" "s1-tenant-4-create-table-1" "s2-tenant-4-create-table-2" "s1-commit" "s2-tenant-4-verify-colocation" "s2-commit"
|
||||
|
||||
// two sessions creating tenant tables in different schemas
|
||||
permutation "s1-begin" "s2-begin" "s1-tenant-2-create-table-1" "s2-tenant-3-create-table-1" "s1-commit" "s2-commit"
|
|
@@ -214,7 +214,11 @@ DROP TABLE IF EXISTS citus_local_table_1, citus_local_table_2, citus_local_table
|
|||
|
||||
-- this GUC will add the next three tables to metadata automatically
|
||||
SET citus.use_citus_managed_tables TO ON;
|
||||
CREATE TABLE citus_local_table_1(a INT UNIQUE);
|
||||
|
||||
-- try to create the table twice by using IF NOT EXISTS syntax
|
||||
CREATE TABLE IF NOT EXISTS citus_local_table_1(a INT UNIQUE);
|
||||
CREATE TABLE IF NOT EXISTS citus_local_table_1(a INT UNIQUE);
|
||||
|
||||
CREATE TABLE citus_local_table_2(a INT UNIQUE);
|
||||
CREATE TABLE citus_local_table_3(a INT UNIQUE);
|
||||
RESET citus.use_citus_managed_tables;
|
||||
|
@@ -738,3 +742,4 @@ CALL drop_constraint_via_proc_exception();
|
|||
SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid IN ('reference_table_1'::regclass, 'citus_local_table_1'::regclass) ORDER BY logicalrelid;
|
||||
|
||||
DROP SCHEMA drop_fkey_cascade CASCADE;
|
||||
DROP USER another_user;
|
||||
|
|
|
@@ -66,12 +66,12 @@ CREATE TABLE nullkey_c1_t2(a int, b int);
|
|||
CREATE TABLE nullkey_c1_t3(a int, b int);
|
||||
SELECT create_distributed_table('nullkey_c1_t1', null, colocate_with=>'none');
|
||||
|
||||
SELECT colocationid AS nullkey_c1_t1_colocation_id FROM pg_dist_partition WHERE logicalrelid = 'create_single_shard_table.nullkey_c1_t1'::regclass \gset
|
||||
SELECT colocationid AS nullkey_c1_t1_colocationid FROM pg_dist_partition WHERE logicalrelid = 'create_single_shard_table.nullkey_c1_t1'::regclass \gset
|
||||
|
||||
BEGIN;
|
||||
DROP TABLE nullkey_c1_t1;
|
||||
-- make sure that we delete the colocation group after dropping the last table that belongs to it
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = :'nullkey_c1_t1_colocation_id';
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = :'nullkey_c1_t1_colocationid';
|
||||
ROLLBACK;
|
||||
|
||||
SELECT create_distributed_table('nullkey_c1_t2', null, colocate_with=>'nullkey_c1_t1');
|
||||
|
|
|
@@ -928,6 +928,41 @@ DROP TABLE test;
|
|||
-- and now we should be able to remove the coordinator
|
||||
SELECT citus_remove_node('localhost', :master_port);
|
||||
|
||||
-- confirm that we can create a tenant schema / table on an empty node
|
||||
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
|
||||
CREATE SCHEMA tenant_schema;
|
||||
CREATE TABLE tenant_schema.test(x int, y int);
|
||||
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_schema.test'::regclass
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_schema';
|
||||
|
||||
-- and make sure that we can't remove the coordinator due to "test"
|
||||
SELECT citus_remove_node('localhost', :master_port);
|
||||
|
||||
BEGIN;
|
||||
SET LOCAL client_min_messages TO WARNING;
|
||||
DROP SCHEMA tenant_schema CASCADE;
|
||||
COMMIT;
|
||||
|
||||
-- and now we should be able to remove the coordinator
|
||||
SELECT citus_remove_node('localhost', :master_port);
|
||||
|
||||
CREATE SCHEMA tenant_schema;
|
||||
|
||||
-- Make sure that we can sync metadata for empty tenant schemas
|
||||
-- when adding the first node to the cluster.
|
||||
SELECT 1 FROM citus_add_node('localhost', :worker_1_port);
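-- Descriptive note: adding the first worker triggers metadata sync, which is
-- expected to propagate the pg_dist_tenant_schema entry for the still-empty
-- tenant_schema created above.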
|
||||
|
||||
DROP SCHEMA tenant_schema;
|
||||
SELECT citus_remove_node('localhost', :worker_1_port);
|
||||
|
||||
RESET citus.enable_schema_based_sharding;
|
||||
|
||||
-- confirm that we can create a reference table on an empty node
|
||||
CREATE TABLE test (x int, y int);
|
||||
INSERT INTO test VALUES (1,2);
|
||||
|
@@ -940,6 +975,19 @@ CREATE TABLE test (x int, y int);
|
|||
INSERT INTO test VALUES (1,2);
|
||||
SELECT citus_add_local_table_to_metadata('test');
|
||||
DROP TABLE test;
|
||||
|
||||
-- Verify that we don't consider the schemas created by extensions as tenant schemas.
|
||||
-- Easiest way of verifying this is to drop and re-create columnar extension.
|
||||
DROP EXTENSION citus_columnar;
|
||||
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
|
||||
CREATE EXTENSION citus_columnar;
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid IN ('columnar'::regnamespace, 'columnar_internal'::regnamespace);
|
||||
|
||||
RESET citus.enable_schema_based_sharding;
|
||||
|
||||
DROP EXTENSION citus;
|
||||
CREATE EXTENSION citus;
|
||||
|
||||
|
|
|
@@ -0,0 +1,941 @@
|
|||
CREATE SCHEMA regular_schema;
|
||||
SET search_path TO regular_schema;
|
||||
|
||||
SET citus.next_shard_id TO 1920000;
|
||||
SET citus.shard_count TO 32;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
SET client_min_messages TO WARNING;
|
||||
SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
|
||||
|
||||
SET client_min_messages TO NOTICE;
|
||||
|
||||
-- Verify that the UDFs used to sync tenant schema metadata to workers
|
||||
-- fail on NULL input.
|
||||
SELECT citus_internal_add_tenant_schema(NULL, 1);
|
||||
SELECT citus_internal_add_tenant_schema(1, NULL);
|
||||
SELECT citus_internal_delete_tenant_schema(NULL);
|
||||
SELECT citus_internal_unregister_tenant_schema_globally(1, NULL);
|
||||
SELECT citus_internal_unregister_tenant_schema_globally(NULL, 'text');
|
||||
|
||||
-- Verify that citus_internal_unregister_tenant_schema_globally can only
|
||||
-- be called on schemas that are dropped already.
|
||||
SELECT citus_internal_unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema');
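-- The call above is expected to fail, since regular_schema still exists.
-- For illustration only (not executed here), the intended usage is roughly:
--   DROP SCHEMA some_tenant CASCADE;
--   SELECT citus_internal_unregister_tenant_schema_globally(
--            <schemaid saved before the drop>, 'some_tenant');
-- where some_tenant and the saved schemaid are hypothetical placeholders.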
|
||||
|
||||
SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);
|
||||
|
||||
CREATE TABLE regular_schema.test_table(a int, b text);
|
||||
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
|
||||
-- show that regular_schema doesn't show up in pg_dist_tenant_schema
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'regular_schema';
|
||||
|
||||
-- empty tenant
|
||||
CREATE SCHEMA "tenant\'_1";
|
||||
|
||||
-- non-empty tenant
|
||||
CREATE SCHEMA "tenant\'_2";
|
||||
CREATE TABLE "tenant\'_2".test_table(a int, b text);
|
||||
|
||||
-- tenant that ends up empty again (its only table is created and dropped right away)
|
||||
CREATE SCHEMA "tenant\'_3";
|
||||
CREATE TABLE "tenant\'_3".test_table(a int, b text);
|
||||
DROP TABLE "tenant\'_3".test_table;
|
||||
|
||||
-- add a node after creating tenant schemas
|
||||
SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
|
||||
|
||||
ALTER SCHEMA "tenant\'_1" RENAME TO tenant_1;
|
||||
ALTER SCHEMA "tenant\'_2" RENAME TO tenant_2;
|
||||
ALTER SCHEMA "tenant\'_3" RENAME TO tenant_3;
|
||||
|
||||
-- verify that create_distributed_table() and others fail when called on tenant tables
|
||||
SELECT create_distributed_table('tenant_2.test_table', 'a');
|
||||
SELECT create_reference_table('tenant_2.test_table');
|
||||
SELECT citus_add_local_table_to_metadata('tenant_2.test_table');
|
||||
|
||||
-- (on coordinator) verify that colocation id is set for empty tenants too
|
||||
SELECT colocationid > 0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text IN ('tenant_1', 'tenant_3');
|
||||
|
||||
-- (on workers) verify that colocation id is set for empty tenants too
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT array_agg(colocationid > 0) FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text IN ('tenant_1', 'tenant_3');
|
||||
$$);
|
||||
|
||||
-- Verify that tenant_2.test_table is recorded in pg_dist_partition as a
|
||||
-- single-shard table.
|
||||
SELECT COUNT(*)=1 FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'tenant_2.test_table'::regclass AND
|
||||
partmethod = 'n' AND repmodel = 's' AND colocationid > 0;
|
||||
|
||||
-- (on coordinator) verify that colocation id is properly set for non-empty tenant schema
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_2.test_table'::regclass
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_2';
|
||||
|
||||
-- (on workers) verify that colocation id is properly set for non-empty tenant schema
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_2.test_table'::regclass
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_2';
|
||||
$$);
|
||||
|
||||
-- create a tenant table for tenant_1 after add_node
|
||||
CREATE TABLE tenant_1.test_table(a int, b text);
|
||||
|
||||
-- (on coordinator) verify that colocation id is properly set for now-non-empty tenant schema
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_1.test_table'::regclass
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_1';
|
||||
|
||||
-- (on workers) verify that colocation id is properly set for now-non-empty tenant schema
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_1.test_table'::regclass
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_1';
|
||||
$$);
|
||||
|
||||
-- verify that tenant_1 and tenant_2 have different colocation ids
|
||||
SELECT COUNT(DISTINCT(colocationid))=2 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text IN ('tenant_1', 'tenant_2');
|
||||
|
||||
-- verify that we don't allow creating tenant tables via CREATE SCHEMA command
|
||||
CREATE SCHEMA schema_using_schema_elements CREATE TABLE test_table(a int, b text);
|
||||
|
||||
CREATE SCHEMA tenant_4;
|
||||
CREATE TABLE tenant_4.tbl_1(a int, b text);
|
||||
CREATE TABLE tenant_4.tbl_2(a int, b text);
|
||||
|
||||
-- verify that we don't allow creating a foreign table in a tenant schema, with a nice error message
|
||||
CREATE FOREIGN TABLE tenant_4.foreign_table (
|
||||
id bigint not null,
|
||||
full_name text not null default ''
|
||||
) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true', table_name 'foreign_table');
|
||||
|
||||
-- verify that we don't allow creating a temporary table in a tenant schema
|
||||
CREATE TEMPORARY TABLE tenant_4.temp_table (a int, b text);
|
||||
|
||||
CREATE TABLE tenant_4.partitioned_table(a int, b text, PRIMARY KEY (a)) PARTITION BY RANGE (a);
|
||||
CREATE TABLE tenant_4.partitioned_table_child_1 PARTITION OF tenant_4.partitioned_table FOR VALUES FROM (1) TO (2);
|
||||
|
||||
CREATE TABLE tenant_4.another_partitioned_table(a int, b text, FOREIGN KEY (a) REFERENCES tenant_4.partitioned_table(a)) PARTITION BY RANGE (a);
|
||||
CREATE TABLE tenant_4.another_partitioned_table_child PARTITION OF tenant_4.another_partitioned_table FOR VALUES FROM (1) TO (2);
|
||||
|
||||
-- verify that we allow creating partitioned tables in a tenant schema
|
||||
SELECT COUNT(*)=1 FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'tenant_4.partitioned_table_child_1'::regclass AND
|
||||
partmethod = 'n' AND repmodel = 's' AND colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'tenant_4.partitioned_table'::regclass);
|
||||
|
||||
SELECT EXISTS(
|
||||
SELECT 1
|
||||
FROM pg_inherits
|
||||
WHERE inhrelid = 'tenant_4.partitioned_table_child_1'::regclass AND
|
||||
inhparent = 'tenant_4.partitioned_table'::regclass
|
||||
) AS is_partition;
|
||||
|
||||
SELECT COUNT(*)=1 FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'tenant_4.another_partitioned_table_child'::regclass AND
|
||||
partmethod = 'n' AND repmodel = 's' AND colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'tenant_4.another_partitioned_table'::regclass);
|
||||
|
||||
SELECT EXISTS(
|
||||
SELECT 1
|
||||
FROM pg_inherits
|
||||
WHERE inhrelid = 'tenant_4.another_partitioned_table_child'::regclass AND
|
||||
inhparent = 'tenant_4.another_partitioned_table'::regclass
|
||||
) AS is_partition;
|
||||
|
||||
-- verify the foreign key between parents
|
||||
SELECT EXISTS(
|
||||
SELECT 1
|
||||
FROM pg_constraint
|
||||
WHERE conrelid = 'tenant_4.another_partitioned_table'::regclass AND
|
||||
confrelid = 'tenant_4.partitioned_table'::regclass AND
|
||||
contype = 'f'
|
||||
) AS foreign_key_exists;
|
||||
|
||||
INSERT INTO tenant_4.another_partitioned_table VALUES (1, 'a');
|
||||
|
||||
INSERT INTO tenant_4.partitioned_table VALUES (1, 'a');
|
||||
INSERT INTO tenant_4.another_partitioned_table VALUES (1, 'a');
|
||||
|
||||
CREATE SCHEMA tenant_5;
|
||||
CREATE TABLE tenant_5.tbl_1(a int, b text);
|
||||
|
||||
CREATE TABLE tenant_5.partitioned_table(a int, b text) PARTITION BY RANGE (a);
|
||||
|
||||
-- verify that we don't allow creating a partition table that is child of a partitioned table in a different tenant schema
|
||||
CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF tenant_5.partitioned_table FOR VALUES FROM (1) TO (2);
|
||||
|
||||
-- verify that we don't allow creating a local partition table that is child of a tenant partitioned table
|
||||
CREATE TABLE regular_schema.local_child_table PARTITION OF tenant_5.partitioned_table FOR VALUES FROM (1) TO (2);
|
||||
|
||||
SET citus.use_citus_managed_tables TO ON;
|
||||
CREATE TABLE regular_schema.local_child_table PARTITION OF tenant_5.partitioned_table FOR VALUES FROM (1) TO (2);
|
||||
RESET citus.use_citus_managed_tables;
|
||||
|
||||
CREATE TABLE regular_schema.local_partitioned_table(a int, b text) PARTITION BY RANGE (a);
|
||||
|
||||
CREATE TABLE regular_schema.citus_local_partitioned_table(a int, b text) PARTITION BY RANGE (a);
|
||||
SELECT citus_add_local_table_to_metadata('regular_schema.citus_local_partitioned_table');
|
||||
|
||||
CREATE TABLE regular_schema.dist_partitioned_table(a int, b text) PARTITION BY RANGE (a);
|
||||
SELECT create_distributed_table('regular_schema.dist_partitioned_table', 'a');
|
||||
|
||||
-- verify that we don't allow creating a partition table that is child of a non-tenant partitioned table
|
||||
CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF regular_schema.local_partitioned_table FOR VALUES FROM (1) TO (2);
|
||||
CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF regular_schema.citus_local_partitioned_table FOR VALUES FROM (1) TO (2);
|
||||
CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF regular_schema.dist_partitioned_table FOR VALUES FROM (1) TO (2);
|
||||
|
||||
CREATE TABLE tenant_4.parent_attach_test(a int, b text) PARTITION BY RANGE (a);
|
||||
CREATE TABLE tenant_4.child_attach_test(a int, b text);
|
||||
|
||||
CREATE TABLE tenant_5.parent_attach_test(a int, b text) PARTITION BY RANGE (a);
|
||||
CREATE TABLE tenant_5.child_attach_test(a int, b text);
|
||||
|
||||
CREATE TABLE regular_schema.parent_attach_test_local(a int, b text) PARTITION BY RANGE (a);
|
||||
|
||||
CREATE TABLE regular_schema.parent_attach_test_citus_local(a int, b text) PARTITION BY RANGE (a);
|
||||
SELECT citus_add_local_table_to_metadata('regular_schema.parent_attach_test_citus_local');
|
||||
|
||||
CREATE TABLE regular_schema.parent_attach_test_dist(a int, b text) PARTITION BY RANGE (a);
|
||||
SELECT create_distributed_table('regular_schema.parent_attach_test_dist', 'a');
|
||||
|
||||
CREATE TABLE regular_schema.child_attach_test_local(a int, b text);
|
||||
|
||||
CREATE TABLE regular_schema.child_attach_test_citus_local(a int, b text);
|
||||
SELECT citus_add_local_table_to_metadata('regular_schema.child_attach_test_citus_local');
|
||||
|
||||
CREATE TABLE regular_schema.child_attach_test_dist(a int, b text);
|
||||
SELECT create_distributed_table('regular_schema.child_attach_test_dist', 'a');
|
||||
|
||||
-- verify that we don't allow attaching a tenant table into a tenant partitioned table, if they are not in the same schema
|
||||
ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION tenant_5.child_attach_test FOR VALUES FROM (1) TO (2);
|
||||
|
||||
-- verify that we don't allow attaching a non-tenant table into a tenant partitioned table
|
||||
ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION regular_schema.child_attach_test_local FOR VALUES FROM (1) TO (2);
|
||||
ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION regular_schema.child_attach_test_citus_local FOR VALUES FROM (1) TO (2);
|
||||
ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION regular_schema.child_attach_test_dist FOR VALUES FROM (1) TO (2);
|
||||
|
||||
-- verify that we don't allow attaching a tenant table into a non-tenant partitioned table
|
||||
ALTER TABLE regular_schema.parent_attach_test_local ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
|
||||
ALTER TABLE regular_schema.parent_attach_test_citus_local ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
|
||||
ALTER TABLE regular_schema.parent_attach_test_dist ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
|
||||
|
||||
ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
|
||||
|
||||
-- verify that we don't allow multi-level partitioning on tenant tables
|
||||
CREATE TABLE tenant_4.multi_level_test(a int, b text) PARTITION BY RANGE (a);
|
||||
ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION tenant_4.multi_level_test FOR VALUES FROM (1) TO (2);
|
||||
|
||||
-- verify that we allow attaching a tenant table into a tenant partitioned table, if they are in the same schema
|
||||
SELECT COUNT(*)=1 FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'tenant_4.parent_attach_test'::regclass AND
|
||||
partmethod = 'n' AND repmodel = 's' AND colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'tenant_4.child_attach_test'::regclass);
|
||||
|
||||
SELECT EXISTS(
|
||||
SELECT 1
|
||||
FROM pg_inherits
|
||||
WHERE inhrelid = 'tenant_4.child_attach_test'::regclass AND
|
||||
inhparent = 'tenant_4.parent_attach_test'::regclass
|
||||
) AS is_partition;
|
||||
|
||||
-- verify that we don't allow creating tenant tables by using CREATE TABLE AS / SELECT INTO commands
|
||||
CREATE TABLE tenant_4.tbl_3 AS SELECT 1 AS a, 'text' as b;
|
||||
CREATE TABLE IF NOT EXISTS tenant_4.tbl_3 AS SELECT 1 as a, 'text' as b;
|
||||
SELECT 1 as a, 'text' as b INTO tenant_4.tbl_3;
|
||||
|
||||
CREATE TYPE employee_type AS (name text, salary numeric);
|
||||
|
||||
-- verify that we don't allow creating tenant tables by using CREATE TABLE OF commands
|
||||
CREATE TABLE tenant_4.employees OF employee_type (
|
||||
PRIMARY KEY (name),
|
||||
salary WITH OPTIONS DEFAULT 1000
|
||||
);
|
||||
|
||||
-- verify that we act accordingly when IF NOT EXISTS is used
|
||||
CREATE TABLE IF NOT EXISTS tenant_4.tbl_3(a int, b text);
|
||||
CREATE TABLE IF NOT EXISTS tenant_4.tbl_3(a int, b text);
|
||||
|
||||
CREATE TABLE regular_schema.local(a int, b text);
|
||||
|
||||
CREATE TABLE regular_schema.citus_local(a int, b text);
|
||||
SELECT citus_add_local_table_to_metadata('regular_schema.citus_local');
|
||||
|
||||
CREATE TABLE regular_schema.dist(a int, b text);
|
||||
SELECT create_distributed_table('regular_schema.dist', 'a');
|
||||
|
||||
-- verify that we can create a table LIKE another table
|
||||
CREATE TABLE tenant_5.test_table_like_1(LIKE tenant_5.tbl_1); -- using a table from the same schema
|
||||
CREATE TABLE tenant_5.test_table_like_2(LIKE tenant_4.tbl_1); -- using a table from another schema
|
||||
CREATE TABLE tenant_5.test_table_like_3(LIKE regular_schema.local); -- using a local table
|
||||
CREATE TABLE tenant_5.test_table_like_4(LIKE regular_schema.citus_local); -- using a citus local table
|
||||
CREATE TABLE tenant_5.test_table_like_5(LIKE regular_schema.dist); -- using a distributed table
|
||||
|
||||
-- verify that all of them are converted to tenant tables
|
||||
SELECT COUNT(*) = 5
|
||||
FROM pg_dist_partition
|
||||
WHERE logicalrelid::text LIKE 'tenant_5.test_table_like_%' AND
|
||||
partmethod = 'n' AND repmodel = 's' AND colocationid = (
|
||||
SELECT colocationid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_5'
|
||||
);
|
||||
|
||||
CREATE TABLE regular_schema.local_table_using_like(LIKE tenant_5.tbl_1);
|
||||
|
||||
-- verify that regular_schema.local_table_using_like is not a tenant table
|
||||
SELECT COUNT(*) = 0 FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'regular_schema.local_table_using_like'::regclass;
|
||||
|
||||
-- verify that INHERITS syntax is not supported when creating a tenant table
|
||||
CREATE TABLE tenant_5.test_table_inherits_1(x int) INHERITS (tenant_5.tbl_1); -- using a table from the same schema
|
||||
CREATE TABLE tenant_5.test_table_inherits_2(x int) INHERITS (tenant_4.tbl_1); -- using a table from another schema
|
||||
CREATE TABLE tenant_5.test_table_inherits_3(x int) INHERITS (regular_schema.local); -- using a local table
|
||||
CREATE TABLE tenant_5.test_table_inherits_4(x int) INHERITS (regular_schema.citus_local); -- using a citus local table
|
||||
CREATE TABLE tenant_5.test_table_inherits_5(x int) INHERITS (regular_schema.dist); -- using a distributed table
|
||||
|
||||
-- verify that INHERITS syntax is not supported when creating a local table based on a tenant table
|
||||
CREATE TABLE regular_schema.local_table_using_inherits(x int) INHERITS (tenant_5.tbl_1);
|
||||
|
||||
CREATE TABLE tenant_5.tbl_2(a int, b text);
|
||||
|
||||
CREATE SCHEMA "CiTuS.TeeN_108";
|
||||
ALTER SCHEMA "CiTuS.TeeN_108" RENAME TO citus_teen_proper;
|
||||
|
||||
SELECT schemaid AS citus_teen_schemaid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'citus_teen_proper' \gset
|
||||
SELECT colocationid AS citus_teen_colocationid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'citus_teen_proper' \gset
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT schemaid INTO citus_teen_schemaid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'citus_teen_proper'
|
||||
$$);
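-- Descriptive note: SELECT ... INTO stores the schemaid in a plain table on
-- each worker so that it can still be referenced (and later cleaned up with
-- DROP TABLE citus_teen_schemaid) after the tenant schema itself is dropped.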
|
||||
|
||||
-- (on coordinator) verify that colocation id is set for the tenant with a weird name too
|
||||
SELECT :citus_teen_colocationid > 0;
|
||||
|
||||
-- (on workers) verify that the same colocation id is used on workers too
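-- Descriptive note on the pattern below: psql does not interpolate :variables
-- inside dollar-quoted strings, so the worker-side query is first built with
-- format() and \gset into :verify_workers_query and then executed as a psql
-- variable.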
|
||||
SELECT format(
|
||||
'SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=1 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = ''citus_teen_proper'' AND
|
||||
colocationid = %s;
|
||||
$$);',
|
||||
:citus_teen_colocationid) AS verify_workers_query \gset
|
||||
|
||||
:verify_workers_query
|
||||
|
||||
ALTER SCHEMA citus_teen_proper RENAME TO "CiTuS.TeeN_108";
|
||||
|
||||
SET citus.enable_schema_based_sharding TO OFF;
|
||||
|
||||
-- Show that the tables created in tenant schemas are considered to be
|
||||
-- tenant tables even if the GUC was set to off when creating the table.
|
||||
CREATE TABLE tenant_5.tbl_3(a int, b text);
|
||||
SELECT COUNT(*)=1 FROM pg_dist_partition WHERE logicalrelid = 'tenant_5.tbl_3'::regclass;
|
||||
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
|
||||
-- Verify that tables that belong to tenant_4 and tenant_5 are stored on
|
||||
-- different worker nodes due to the order we followed when creating the first tenant
|
||||
-- tables in each of them.
|
||||
SELECT COUNT(DISTINCT(nodename, nodeport))=2 FROM citus_shards
|
||||
WHERE table_name IN ('tenant_4.tbl_1'::regclass, 'tenant_5.tbl_1'::regclass);
|
||||
|
||||
-- show that all the tables in tenant_4 are colocated with each other.
|
||||
SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition
|
||||
WHERE logicalrelid::regclass::text LIKE 'tenant_4.%';
|
||||
|
||||
-- verify the same for tenant_5 too
|
||||
SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition
|
||||
WHERE logicalrelid::regclass::text LIKE 'tenant_5.%';
|
||||
|
||||
SELECT schemaid AS tenant_4_schemaid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_4' \gset
|
||||
SELECT colocationid AS tenant_4_colocationid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_4' \gset
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT schemaid INTO tenant_4_schemaid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_4'
|
||||
$$);
|
||||
|
||||
SET client_min_messages TO WARNING;
|
||||
|
||||
-- Rename it to a name that contains a single quote to verify that we properly
|
||||
-- escape its name when sending the command to delete the pg_dist_tenant_schema
|
||||
-- entry on workers.
|
||||
ALTER SCHEMA tenant_4 RENAME TO "tenant\'_4";
|
||||
|
||||
DROP SCHEMA "tenant\'_4", "CiTuS.TeeN_108" CASCADE;
|
||||
|
||||
SET client_min_messages TO NOTICE;
|
||||
|
||||
-- (on coordinator) Verify that dropping a tenant schema deletes the associated
|
||||
-- pg_dist_tenant_schema entry and pg_dist_colocation too.
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid = :tenant_4_schemaid;
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = :tenant_4_colocationid;
|
||||
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid = :citus_teen_schemaid;
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = :citus_teen_colocationid;
|
||||
|
||||
-- (on workers) Verify that dropping a tenant schema deletes the associated
|
||||
-- pg_dist_tenant_schema entry and pg_dist_colocation too.
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid = (SELECT schemaid FROM tenant_4_schemaid)
|
||||
$$);
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid = (SELECT schemaid FROM citus_teen_schemaid)
|
||||
$$);
|
||||
|
||||
SELECT format(
|
||||
'SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = %s;
|
||||
$$);',
|
||||
:tenant_4_colocationid) AS verify_workers_query \gset
|
||||
|
||||
:verify_workers_query
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
DROP TABLE tenant_4_schemaid
|
||||
$$);
|
||||
|
||||
SELECT format(
|
||||
'SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = %s;
|
||||
$$);',
|
||||
:citus_teen_colocationid) AS verify_workers_query \gset
|
||||
|
||||
:verify_workers_query
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
DROP TABLE citus_teen_schemaid
|
||||
$$);
|
||||
|
||||
-- show that we don't allow colocating a Citus table with a tenant table
|
||||
CREATE TABLE regular_schema.null_shard_key_1(a int, b text);
|
||||
SELECT create_distributed_table('regular_schema.null_shard_key_1', null, colocate_with => 'tenant_5.tbl_2');
|
||||
SELECT create_distributed_table('regular_schema.null_shard_key_1', 'a', colocate_with => 'tenant_5.tbl_2');
|
||||
|
||||
CREATE TABLE regular_schema.null_shard_key_table_2(a int, b text);
|
||||
SELECT create_distributed_table('regular_schema.null_shard_key_table_2', null);
|
||||
|
||||
-- Show that we don't choose to colocate regular single-shard tables with
|
||||
-- tenant tables by default.
|
||||
SELECT * FROM pg_dist_tenant_schema WHERE colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'regular_schema.null_shard_key_table_2'::regclass
|
||||
);
|
||||
|
||||
-- save the colocation id used for tenant_5
|
||||
SELECT colocationid AS tenant_5_old_colocationid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_5' \gset
|
||||
|
||||
-- drop all the tables that belong to tenant_5 and create a new one
|
||||
DROP TABLE tenant_5.tbl_1, tenant_5.tbl_2, tenant_5.tbl_3;
|
||||
CREATE TABLE tenant_5.tbl_4(a int, b text);
|
||||
|
||||
-- (on coordinator) verify that tenant_5 is still associated with the same colocation id
|
||||
SELECT colocationid = :tenant_5_old_colocationid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_5';
|
||||
|
||||
-- (on workers) verify that tenant_5 is still associated with the same colocation id
|
||||
SELECT format(
|
||||
'SELECT result FROM run_command_on_workers($$
|
||||
SELECT colocationid = %s FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = ''tenant_5'';
|
||||
$$);',
|
||||
:tenant_5_old_colocationid) AS verify_workers_query \gset
|
||||
|
||||
:verify_workers_query
|
||||
|
||||
SELECT schemaid AS tenant_1_schemaid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_1' \gset
|
||||
SELECT colocationid AS tenant_1_colocationid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_1' \gset
|
||||
|
||||
SELECT schemaid AS tenant_2_schemaid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_2' \gset
|
||||
SELECT colocationid AS tenant_2_colocationid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_2' \gset
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT schemaid INTO tenant_1_schemaid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_1'
|
||||
$$);
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT schemaid INTO tenant_2_schemaid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_2'
|
||||
$$);
|
||||
|
||||
SET client_min_messages TO WARNING;
|
||||
SET citus.enable_schema_based_sharding TO OFF;
|
||||
|
||||
DROP SCHEMA tenant_1 CASCADE;
|
||||
|
||||
CREATE ROLE test_non_super_user;
|
||||
ALTER ROLE test_non_super_user NOSUPERUSER;
|
||||
|
||||
ALTER SCHEMA tenant_2 OWNER TO test_non_super_user;
|
||||
-- XXX: ALTER SCHEMA .. OWNER TO .. is not propagated to workers,
|
||||
-- see https://github.com/citusdata/citus/issues/4812.
|
||||
SELECT result FROM run_command_on_workers($$ALTER SCHEMA tenant_2 OWNER TO test_non_super_user$$);
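-- Descriptive note: manually propagating the owner change here (as a
-- workaround for the issue referenced above) presumably ensures that the
-- DROP OWNED BY below also drops tenant_2 on the workers.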
|
||||
|
||||
DROP OWNED BY test_non_super_user CASCADE;
|
||||
|
||||
DROP ROLE test_non_super_user;
|
||||
|
||||
SET client_min_messages TO NOTICE;
|
||||
|
||||
-- (on coordinator) Verify that dropping a tenant schema always deletes
|
||||
-- the associated pg_dist_tenant_schema entry even if the schema was
|
||||
-- dropped while the GUC was set to off.
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid IN (:tenant_1_schemaid, :tenant_2_schemaid);
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid IN (:tenant_1_colocationid, :tenant_2_colocationid);
|
||||
|
||||
-- (on workers) Verify that dropping a tenant schema always deletes
|
||||
-- the associated pg_dist_tenant_schema entry even if the schema was
|
||||
-- dropped while the GUC was set to off.
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid IN (SELECT schemaid FROM tenant_1_schemaid UNION SELECT schemaid FROM tenant_2_schemaid)
|
||||
$$);
|
||||
|
||||
SELECT format(
|
||||
'SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid IN (%s, %s);
|
||||
$$);',
|
||||
:tenant_1_colocationid, :tenant_2_colocationid) AS verify_workers_query \gset
|
||||
|
||||
:verify_workers_query
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
DROP TABLE tenant_1_schemaid
|
||||
$$);
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
DROP TABLE tenant_2_schemaid
|
||||
$$);
|
||||
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
SET client_min_messages TO NOTICE;
|
||||
|
||||
-- show that all schemaid values are unique and non-null in pg_dist_tenant_schema
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid IS NULL;
|
||||
SELECT (SELECT COUNT(*) FROM pg_dist_tenant_schema) =
|
||||
(SELECT COUNT(DISTINCT(schemaid)) FROM pg_dist_tenant_schema);
|
||||
|
||||
-- show that all colocationid values are unique and non-null in pg_dist_tenant_schema
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE colocationid IS NULL;
|
||||
SELECT (SELECT COUNT(*) FROM pg_dist_tenant_schema) =
|
||||
(SELECT COUNT(DISTINCT(colocationid)) FROM pg_dist_tenant_schema);
|
||||
|
||||
CREATE TABLE public.cannot_be_a_tenant_table(a int, b text);
|
||||
|
||||
-- show that we don't consider public schema as a tenant schema
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'public';
|
||||
|
||||
DROP TABLE public.cannot_be_a_tenant_table;
|
||||
|
||||
BEGIN;
|
||||
ALTER SCHEMA public RENAME TO public_renamed;
|
||||
CREATE SCHEMA public;
|
||||
|
||||
-- Show that we don't consider public schema as a tenant schema,
|
||||
-- even if it's recreated.
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'public';
|
||||
ROLLBACK;
|
||||
|
||||
CREATE TEMPORARY TABLE temp_table(a int, b text);
|
||||
|
||||
-- show that we don't consider temporary schemas as tenant schemas
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = '%pg_temp%';
|
||||
|
||||
DROP TABLE temp_table;
|
||||
|
||||
-- test creating a tenant schema and a tenant table for it in the same transaction
|
||||
BEGIN;
|
||||
CREATE SCHEMA tenant_7;
|
||||
CREATE TABLE tenant_7.tbl_1(a int, b text);
|
||||
CREATE TABLE tenant_7.tbl_2(a int, b text);
|
||||
|
||||
SELECT colocationid = (
|
||||
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_7.tbl_1'::regclass
|
||||
)
|
||||
FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_7';
|
||||
|
||||
-- make sure that both tables created in tenant_7 are colocated
|
||||
SELECT COUNT(DISTINCT(colocationid)) = 1 FROM pg_dist_partition
|
||||
WHERE logicalrelid IN ('tenant_7.tbl_1'::regclass, 'tenant_7.tbl_2'::regclass);
|
||||
COMMIT;
|
||||
|
||||
-- Test creating a tenant schema and a tenant table for it in the same transaction
|
||||
-- but this time rollback the transaction.
|
||||
BEGIN;
|
||||
CREATE SCHEMA tenant_8;
|
||||
CREATE TABLE tenant_8.tbl_1(a int, b text);
|
||||
CREATE TABLE tenant_8.tbl_2(a int, b text);
|
||||
ROLLBACK;
|
||||
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_8';
|
||||
SELECT COUNT(*)=0 FROM pg_dist_partition WHERE logicalrelid::text LIKE 'tenant_8.%';
|
||||
|
||||
-- Verify that citus.enable_schema_based_sharding and citus.use_citus_managed_tables
|
||||
-- GUCs don't interfere with each other when creating a table in a tenant schema.
|
||||
--
|
||||
-- In the utility hook, we check whether the CREATE TABLE command is issued on a tenant
|
||||
-- schema before checking whether citus.use_citus_managed_tables is set to ON to
|
||||
-- avoid converting the table into a Citus managed table unnecessarily.
|
||||
--
|
||||
-- If the CREATE TABLE command is issued on a tenant schema, we skip the check
|
||||
-- for citus.use_citus_managed_tables.
|
||||
SET citus.use_citus_managed_tables TO ON;
|
||||
CREATE TABLE tenant_7.tbl_3(a int, b text, PRIMARY KEY(a));
|
||||
RESET citus.use_citus_managed_tables;
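-- Descriptive note: tenant_7.tbl_3 is expected to end up as a single-shard
-- tenant table rather than a plain Citus managed local table; this is checked
-- together with tbl_4 in the pg_dist_partition query further below.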
|
||||
|
||||
-- Verify that we don't unnecessarily convert a table into a Citus managed
|
||||
-- table when creating it with a pre-defined foreign key to a reference table.
|
||||
CREATE TABLE reference_table(a int PRIMARY KEY);
|
||||
SELECT create_reference_table('reference_table');
|
||||
|
||||
-- Notice that tenant_7.tbl_4 has foreign keys both to tenant_7.tbl_3 and
|
||||
-- to reference_table.
|
||||
CREATE TABLE tenant_7.tbl_4(a int REFERENCES reference_table, FOREIGN KEY(a) REFERENCES tenant_7.tbl_3(a) ON DELETE CASCADE);
|
||||
|
||||
INSERT INTO tenant_7.tbl_3 VALUES (1, 'a'), (2, 'b'), (3, 'c');
|
||||
INSERT INTO reference_table VALUES (1), (2), (3);
|
||||
INSERT INTO tenant_7.tbl_4 VALUES (1), (2), (3);
|
||||
|
||||
DELETE FROM tenant_7.tbl_3 WHERE a < 3;
|
||||
SELECT * FROM tenant_7.tbl_4 ORDER BY a;
|
||||
|
||||
SELECT COUNT(*)=2 FROM pg_dist_partition
|
||||
WHERE logicalrelid IN ('tenant_7.tbl_3'::regclass, 'tenant_7.tbl_4'::regclass) AND
|
||||
partmethod = 'n' AND repmodel = 's' AND
|
||||
colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_7.tbl_1'::regclass);
|
||||
|
||||
CREATE TABLE local_table(a int PRIMARY KEY);
|
||||
|
||||
-- fails because tenant tables cannot have foreign keys to local tables
|
||||
CREATE TABLE tenant_7.tbl_5(a int REFERENCES local_table(a));
|
||||
|
||||
-- Fails because tenant tables cannot have foreign keys to tenant tables
|
||||
-- that belong to different tenant schemas.
|
||||
CREATE TABLE tenant_5.tbl_5(a int, b text, FOREIGN KEY(a) REFERENCES tenant_7.tbl_3(a));
|
||||
|
||||
CREATE SCHEMA tenant_9;
|
||||
|
||||
SELECT schemaid AS tenant_9_schemaid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_9' \gset
|
||||
SELECT colocationid AS tenant_9_colocationid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_9' \gset
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT schemaid INTO tenant_9_schemaid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_9'
|
||||
$$);
|
||||
|
||||
DROP SCHEMA tenant_9;
|
||||
|
||||
-- (on coordinator) Make sure that dropping an empty tenant schema
|
||||
-- doesn't leave any dangling entries in pg_dist_tenant_schema and
|
||||
-- pg_dist_colocation.
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid = :tenant_9_schemaid;
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = :tenant_9_colocationid;
|
||||
|
||||
-- (on workers) Make sure that dropping an empty tenant schema
|
||||
-- doesn't leave any dangling entries in pg_dist_tenant_schema and
|
||||
-- pg_dist_colocation.
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid = (SELECT schemaid FROM tenant_9_schemaid)
|
||||
$$);
|
||||
|
||||
SELECT format(
|
||||
'SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = %s;
|
||||
$$);',
|
||||
:tenant_9_colocationid) AS verify_workers_query \gset
|
||||
|
||||
:verify_workers_query
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
DROP TABLE tenant_9_schemaid
|
||||
$$);
|
||||
|
||||
CREATE TABLE tenant_3.search_path_test(a int);
|
||||
INSERT INTO tenant_3.search_path_test VALUES (1), (10);
|
||||
|
||||
CREATE TABLE tenant_5.search_path_test(a int);
|
||||
INSERT INTO tenant_5.search_path_test VALUES (2);
|
||||
|
||||
CREATE TABLE tenant_7.search_path_test(a int);
|
||||
INSERT INTO tenant_7.search_path_test VALUES (3);
|
||||
|
||||
CREATE FUNCTION increment_one()
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
UPDATE search_path_test SET a = a + 1;
|
||||
END;
|
||||
$$;
|
||||
|
||||
CREATE FUNCTION decrement_one()
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
UPDATE search_path_test SET a = a - 1;
|
||||
END;
|
||||
$$;
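-- Descriptive note: neither function schema-qualifies search_path_test and no
-- SET search_path is attached to them, so the unqualified name is resolved via
-- the caller's search_path at execution time; that is what the SET search_path
-- blocks below exercise against the different tenant schemas.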
|
||||
|
||||
SET search_path TO tenant_5;
|
||||
|
||||
PREPARE list_tuples AS SELECT * FROM search_path_test ORDER BY a;
|
||||
|
||||
SELECT * FROM search_path_test ORDER BY a;
|
||||
|
||||
SET search_path TO tenant_3;
|
||||
DELETE FROM search_path_test WHERE a = 1;
|
||||
SELECT * FROM search_path_test ORDER BY a;
|
||||
SELECT regular_schema.increment_one();
|
||||
EXECUTE list_tuples;
|
||||
|
||||
SET search_path TO tenant_7;
|
||||
DROP TABLE search_path_test;
|
||||
SELECT * FROM pg_dist_partition WHERE logicalrelid::text = 'search_path_test';
|
||||
|
||||
SET search_path TO tenant_5;
|
||||
SELECT regular_schema.decrement_one();
|
||||
EXECUTE list_tuples;
|
||||
|
||||
SET search_path TO regular_schema;
|
||||
|
||||
CREATE USER test_other_super_user WITH superuser;
|
||||
|
||||
\c - test_other_super_user
|
||||
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
CREATE SCHEMA tenant_9;
|
||||
|
||||
\c - postgres
|
||||
|
||||
SET search_path TO regular_schema;
|
||||
SET citus.next_shard_id TO 1930000;
|
||||
SET citus.shard_count TO 32;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET client_min_messages TO NOTICE;
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
|
||||
SELECT schemaid AS tenant_9_schemaid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_9' \gset
|
||||
SELECT colocationid AS tenant_9_colocationid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_9' \gset
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT schemaid INTO tenant_9_schemaid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_9'
|
||||
$$);
|
||||
|
||||
DROP OWNED BY test_other_super_user;
|
||||
|
||||
-- (on coordinator) Make sure that dropping an empty tenant schema
|
||||
-- (via DROP OWNED BY) doesn't leave any dangling entries in
|
||||
-- pg_dist_tenant_schema and pg_dist_colocation.
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid = :tenant_9_schemaid;
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = :tenant_9_colocationid;
|
||||
|
||||
-- (on workers) Make sure that dropping an empty tenant schema
|
||||
-- (via DROP OWNED BY) doesn't leave any dangling entries in
|
||||
-- pg_dist_tenant_schema and pg_dist_colocation.
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid = (SELECT schemaid FROM tenant_9_schemaid)
|
||||
$$);
|
||||
|
||||
SELECT format(
|
||||
'SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid = %s;
|
||||
$$);',
|
||||
:tenant_9_colocationid) AS verify_workers_query \gset
|
||||
|
||||
:verify_workers_query
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
DROP TABLE tenant_9_schemaid
|
||||
$$);
|
||||
|
||||
DROP USER test_other_super_user;
|
||||
|
||||
CREATE ROLE test_non_super_user WITH LOGIN;
|
||||
ALTER ROLE test_non_super_user NOSUPERUSER;
|
||||
|
||||
GRANT CREATE ON DATABASE regression TO test_non_super_user;
|
||||
SELECT result FROM run_command_on_workers($$GRANT CREATE ON DATABASE regression TO test_non_super_user$$);
|
||||
|
||||
GRANT CREATE ON SCHEMA public TO test_non_super_user ;
|
||||
|
||||
\c - test_non_super_user
|
||||
|
||||
SET search_path TO regular_schema;
|
||||
SET citus.next_shard_id TO 1940000;
|
||||
SET citus.shard_count TO 32;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET client_min_messages TO NOTICE;
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
|
||||
-- test create / drop tenant schema / table
|
||||
|
||||
CREATE SCHEMA tenant_10;
|
||||
CREATE TABLE tenant_10.tbl_1(a int, b text);
|
||||
CREATE TABLE tenant_10.tbl_2(a int, b text);
|
||||
|
||||
DROP TABLE tenant_10.tbl_2;
|
||||
|
||||
CREATE SCHEMA tenant_11;
|
||||
|
||||
SELECT schemaid AS tenant_10_schemaid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_10' \gset
|
||||
SELECT colocationid AS tenant_10_colocationid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_10' \gset
|
||||
|
||||
SELECT schemaid AS tenant_11_schemaid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_11' \gset
|
||||
SELECT colocationid AS tenant_11_colocationid FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_11' \gset
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT schemaid INTO tenant_10_schemaid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_10'
|
||||
$$);
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT schemaid INTO tenant_11_schemaid FROM pg_dist_tenant_schema
|
||||
WHERE schemaid::regnamespace::text = 'tenant_11'
|
||||
$$);
|
||||
|
||||
-- (on coordinator) Verify metadata for tenant schemas that are created via non-super-user.
|
||||
SELECT COUNT(DISTINCT(schemaid))=2 FROM pg_dist_tenant_schema WHERE schemaid IN (:tenant_10_schemaid, :tenant_11_schemaid);
|
||||
SELECT COUNT(DISTINCT(colocationid))=2 FROM pg_dist_colocation WHERE colocationid IN (:tenant_10_colocationid, :tenant_11_colocationid);
|
||||
|
||||
-- (on workers) Verify metadata for tenant schemas that are created via non-super-user.
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(DISTINCT(schemaid))=2 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid IN (SELECT schemaid FROM tenant_10_schemaid UNION SELECT schemaid FROM tenant_11_schemaid)
|
||||
$$);
|
||||
|
||||
SELECT format(
|
||||
'SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(DISTINCT(colocationid))=2 FROM pg_dist_colocation WHERE colocationid IN (%s, %s);
|
||||
$$);',
|
||||
:tenant_10_colocationid, :tenant_11_colocationid) AS verify_workers_query \gset
|
||||
|
||||
:verify_workers_query
|
||||
|
||||
SET client_min_messages TO WARNING;
|
||||
DROP SCHEMA tenant_10, tenant_11 CASCADE;
|
||||
SET client_min_messages TO NOTICE;
|
||||
|
||||
-- (on coordinator) Verify that dropping a tenant schema via non-super-user
|
||||
-- deletes the associated pg_dist_tenant_schema entry.
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema WHERE schemaid IN (:tenant_10_schemaid, :tenant_11_schemaid);
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid IN (:tenant_10_colocationid, :tenant_11_colocationid);
|
||||
|
||||
-- (on workers) Verify that dropping a tenant schema via non-super-user
|
||||
-- deletes the associated pg_dist_tenant_schema entry.
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_tenant_schema
|
||||
WHERE schemaid IN (SELECT schemaid FROM tenant_10_schemaid UNION SELECT schemaid FROM tenant_11_schemaid)
|
||||
$$);
|
||||
|
||||
SELECT format(
|
||||
'SELECT result FROM run_command_on_workers($$
|
||||
SELECT COUNT(*)=0 FROM pg_dist_colocation WHERE colocationid IN (%s, %s);
|
||||
$$);',
|
||||
:tenant_10_colocationid, :tenant_11_colocationid) AS verify_workers_query \gset
|
||||
|
||||
:verify_workers_query
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
DROP TABLE tenant_10_schemaid
|
||||
$$);
|
||||
|
||||
SELECT result FROM run_command_on_workers($$
|
||||
DROP TABLE tenant_11_schemaid
|
||||
$$);
|
||||
|
||||
\c - postgres
|
||||
|
||||
REVOKE CREATE ON DATABASE regression FROM test_non_super_user;
|
||||
SELECT result FROM run_command_on_workers($$REVOKE CREATE ON DATABASE regression FROM test_non_super_user$$);
|
||||
|
||||
REVOKE CREATE ON SCHEMA public FROM test_non_super_user;
|
||||
|
||||
DROP ROLE test_non_super_user;
|
||||
|
||||
\c - - - :worker_1_port
|
||||
|
||||
-- test creating a tenant table from workers
|
||||
CREATE TABLE tenant_3.tbl_1(a int, b text);
|
||||
|
||||
-- test creating a tenant schema from workers
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
CREATE SCHEMA worker_tenant_schema;
|
||||
SET citus.enable_schema_based_sharding TO OFF;
|
||||
|
||||
-- Enable the GUC on workers to make sure that the CREATE SCHEMA / TABLE
|
||||
-- commands that we send to workers don't recursively try creating a
|
||||
-- tenant schema / table.
|
||||
ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON;
|
||||
SELECT pg_reload_conf();
|
||||
|
||||
\c - - - :worker_2_port
|
||||
|
||||
ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON;
|
||||
SELECT pg_reload_conf();
|
||||
|
||||
-- Verify that citus_internal_unregister_tenant_schema_globally is a no-op
|
||||
-- on workers.
|
||||
SELECT citus_internal_unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3');
|
||||
|
||||
\c - - - :master_port
|
||||
|
||||
SET search_path TO regular_schema;
|
||||
SET citus.next_shard_id TO 1950000;
|
||||
SET citus.shard_count TO 32;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET client_min_messages TO NOTICE;
|
||||
|
||||
CREATE TABLE tenant_3.tbl_1(a int, b text);
|
||||
|
||||
SET citus.enable_schema_based_sharding TO ON;
|
||||
CREATE SCHEMA tenant_6;
|
||||
CREATE TABLE tenant_6.tbl_1(a int, b text);
|
||||
|
||||
-- verify pg_dist_partition entries for tenant_3.tbl_1 and tenant_6.tbl_1
|
||||
SELECT COUNT(*)=2 FROM pg_dist_partition
|
||||
WHERE logicalrelid IN ('tenant_3.tbl_1'::regclass, 'tenant_6.tbl_1'::regclass) AND
|
||||
partmethod = 'n' AND repmodel = 's' AND colocationid > 0;
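-- Descriptive note: tenant_3.tbl_1 becomes a tenant table even though the GUC
-- is not enabled at that point in this session, because tenant_3 is already
-- registered in pg_dist_tenant_schema; only the registration of the schema
-- depends on the GUC, not the conversion of later tables (compare the
-- tenant_5.tbl_3 case above).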

\c - - - :worker_1_port

ALTER SYSTEM RESET citus.enable_schema_based_sharding;
SELECT pg_reload_conf();

\c - - - :worker_2_port

ALTER SYSTEM RESET citus.enable_schema_based_sharding;
SELECT pg_reload_conf();

\c - - - :master_port

SET client_min_messages TO WARNING;
DROP SCHEMA regular_schema, tenant_3, tenant_5, tenant_7, tenant_6 CASCADE;

SELECT citus_remove_node('localhost', :master_port);

@@ -98,8 +98,25 @@ WHERE shardid = (
SELECT shardid AS round_robin_test_c1_shard_id FROM pg_dist_shard WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass \gset
SELECT create_distributed_table('single_node_nullkey_c1_' || :round_robin_test_c1_shard_id , null, colocate_with=>'none', distribution_type=>null);

-- create a tenant schema on a single-node setup
SET citus.enable_schema_based_sharding TO ON;

CREATE SCHEMA tenant_1;
CREATE TABLE tenant_1.tbl_1 (a int);

-- verify that we recorded tenant_1 in pg_dist_tenant_schema
SELECT COUNT(*)=1 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_1';

-- verify that tenant_1.tbl_1 is recorded in pg_dist_partition as a single-shard table
SELECT COUNT(*)=1 FROM pg_dist_partition
WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND
      partmethod = 'n' AND repmodel = 's' AND colocationid IS NOT NULL;
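
-- Editorial aside (not part of the original test): a minimal sketch showing that
-- the table's colocation id matches the one recorded for its schema in
-- pg_dist_tenant_schema, mirroring the checks used elsewhere in these tests.
SELECT colocationid = (
    SELECT colocationid FROM pg_dist_partition
    WHERE logicalrelid = 'tenant_1.tbl_1'::regclass
)
FROM pg_dist_tenant_schema
WHERE schemaid::regnamespace::text = 'tenant_1';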

RESET citus.enable_schema_based_sharding;

SET client_min_messages TO WARNING;
DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2;
DROP SCHEMA tenant_1 CASCADE;
RESET client_min_messages;

-- so that we don't have to update the rest of the test output

@@ -0,0 +1,70 @@
ALTER SCHEMA "tenant\'_1" RENAME TO tenant_1;
ALTER SCHEMA "tenant\'_2" RENAME TO tenant_2;

-- verify that colocation id is set even for empty tenant
SELECT colocationid > 0 FROM pg_dist_tenant_schema
WHERE schemaid::regnamespace::text = 'tenant_1';

-- verify the same on workers
SELECT result FROM run_command_on_workers($$
    SELECT colocationid > 0 FROM pg_dist_tenant_schema
    WHERE schemaid::regnamespace::text = 'tenant_1';
$$);

-- verify that colocation id is set for non-empty tenant
SELECT colocationid = (
    SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_2.test_table'::regclass
)
FROM pg_dist_tenant_schema
WHERE schemaid::regnamespace::text = 'tenant_2';

-- verify the same on workers
SELECT result FROM run_command_on_workers($$
    SELECT colocationid = (
        SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_2.test_table'::regclass
    )
    FROM pg_dist_tenant_schema
    WHERE schemaid::regnamespace::text = 'tenant_2';
$$);

CREATE TABLE tenant_1.tbl_1(a int, b text);
CREATE TABLE tenant_2.tbl_1(a int, b text);

-- Show that we can create further tenant tables in the tenant schemas
-- after pg upgrade.
SELECT COUNT(*)=2 FROM pg_dist_partition
WHERE logicalrelid IN ('tenant_1.tbl_1'::regclass, 'tenant_2.tbl_1'::regclass) AND
      partmethod = 'n' AND repmodel = 's' AND colocationid > 0;

SELECT colocationid = (
    SELECT colocationid FROM pg_dist_partition
    WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND
          partmethod = 'n' AND repmodel = 's'
)
FROM pg_dist_tenant_schema
WHERE schemaid::regnamespace::text = 'tenant_1';

SELECT colocationid = (
    SELECT colocationid FROM pg_dist_partition
    WHERE logicalrelid = 'tenant_2.tbl_1'::regclass AND
          partmethod = 'n' AND repmodel = 's'
)
FROM pg_dist_tenant_schema
WHERE schemaid::regnamespace::text = 'tenant_2';

-- roll back the changes made to the following schemas to make this test idempotent
DROP TABLE tenant_1.tbl_1, tenant_2.tbl_1;
ALTER SCHEMA tenant_1 RENAME TO "tenant\'_1";
ALTER SCHEMA tenant_2 RENAME TO "tenant\'_2";

SET citus.enable_schema_based_sharding TO ON;

CREATE SCHEMA tenant_3;

-- Show that we can create further tenant schemas after pg upgrade.
SELECT COUNT(*)=1 FROM pg_dist_tenant_schema WHERE schemaid::regnamespace::text = 'tenant_3';

-- drop the schema created in this test to make it idempotent
DROP SCHEMA tenant_3 CASCADE;

RESET citus.enable_schema_based_sharding;

@@ -0,0 +1,14 @@
SET citus.enable_schema_based_sharding TO ON;

-- Create tenant schemas with names that need escaping to verify
-- that citus_prepare_pg_upgrade() correctly saves them into the
-- public schema.

-- empty tenant
CREATE SCHEMA "tenant\'_1";

-- non-empty tenant
CREATE SCHEMA "tenant\'_2";
CREATE TABLE "tenant\'_2".test_table(a int, b text);

RESET citus.enable_schema_based_sharding;