mirror of https://github.com/citusdata/citus.git
Make enterprise features open source (#6008)
This PR makes all of the features open source that were previously only available in Citus Enterprise. Features that this adds:

1. Non-blocking shard moves/shard rebalancer (`citus.logical_replication_timeout`)
2. Propagation of CREATE/DROP/ALTER ROLE statements
3. Propagation of GRANT statements
4. Propagation of CLUSTER statements
5. Propagation of ALTER DATABASE ... OWNER TO ...
6. Optimization for COPY when loading JSON, to avoid double parsing of the JSON object (`citus.skip_jsonb_validation_in_copy`)
7. Support for row level security
8. Support for `pg_dist_authinfo`, which allows storing different authentication options for different users, e.g. you can store passwords or certificates here.
9. Support for `pg_dist_poolinfo`, which allows using connection poolers between the coordinator and the workers
10. Tracking distributed query execution times using citus_stat_statements (`citus.stat_statements_max`, `citus.stat_statements_purge_interval`, `citus.stat_statements_track`). This is disabled by default.
11. Blocking tenant isolation
12. Support for `sslkey` and `sslcert` in `citus.node_conninfo`
parent e244e9ffb6
commit 184c7c0bce
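As a quick orientation before the diff, here is a hypothetical SQL session exercising a few of the features listed above; all object names, ports, shard ids, and setting values are illustrative only, not taken from this commit:

    -- (2) and (3): role DDL and GRANTs are now propagated to all nodes
    CREATE ROLE app_reader WITH LOGIN;
    GRANT SELECT ON ALL TABLES IN SCHEMA public TO app_reader;

    -- (1): non-blocking shard moves, with the newly exposed GUC
    SET citus.logical_replication_timeout TO '2h';
    SELECT citus_move_shard_placement(102008,
                                      'worker-1', 5432,
                                      'worker-2', 5432,
                                      shard_transfer_mode := 'force_logical');

    -- (10): distributed query statistics, disabled by default
    SET citus.stat_statements_track TO 'all';
    SELECT * FROM citus_stat_statements;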
@@ -520,12 +520,6 @@ workflows:
   version: 2
   build_and_test:
     jobs:
-      - check-merge-to-enterprise:
-          filters:
-            branches:
-              ignore:
-                - /release-[0-9]+\.[0-9]+.*/ # match with releaseX.Y.*
-
       - build:
           name: build-13
          pg_major: 13
@@ -612,6 +606,81 @@ workflows:
           make: check-failure
           requires: [build-13]

+      - test-citus:
+          name: 'test-13_check-enterprise'
+          pg_major: 13
+          image_tag: '<< pipeline.parameters.pg13_version >>'
+          make: check-enterprise
+          requires: [build-13]
+      - test-citus:
+          name: 'test-13_check-enterprise-isolation'
+          pg_major: 13
+          image_tag: '<< pipeline.parameters.pg13_version >>'
+          make: check-enterprise-isolation
+          requires: [build-13]
+      - test-citus:
+          name: 'test-13_check-enterprise-isolation-logicalrep-1'
+          pg_major: 13
+          image_tag: '<< pipeline.parameters.pg13_version >>'
+          make: check-enterprise-isolation-logicalrep-1
+          requires: [build-13]
+      - test-citus:
+          name: 'test-13_check-enterprise-isolation-logicalrep-2'
+          pg_major: 13
+          image_tag: '<< pipeline.parameters.pg13_version >>'
+          make: check-enterprise-isolation-logicalrep-2
+          requires: [build-13]
+      - test-citus:
+          name: 'test-13_check-enterprise-isolation-logicalrep-3'
+          pg_major: 13
+          image_tag: '<< pipeline.parameters.pg13_version >>'
+          make: check-enterprise-isolation-logicalrep-3
+          requires: [build-13]
+      - test-citus:
+          name: 'test-13_check-enterprise-failure'
+          pg_major: 13
+          image: citus/failtester
+          image_tag: '<< pipeline.parameters.pg13_version >>'
+          make: check-enterprise-failure
+          requires: [build-13]
+
+      - test-citus:
+          name: 'test-14_check-enterprise'
+          pg_major: 14
+          image_tag: '<< pipeline.parameters.pg14_version >>'
+          make: check-enterprise
+          requires: [build-14]
+      - test-citus:
+          name: 'test-14_check-enterprise-isolation'
+          pg_major: 14
+          image_tag: '<< pipeline.parameters.pg14_version >>'
+          make: check-enterprise-isolation
+          requires: [build-14]
+      - test-citus:
+          name: 'test-14_check-enterprise-isolation-logicalrep-1'
+          pg_major: 14
+          image_tag: '<< pipeline.parameters.pg14_version >>'
+          make: check-enterprise-isolation-logicalrep-1
+          requires: [build-14]
+      - test-citus:
+          name: 'test-14_check-enterprise-isolation-logicalrep-2'
+          pg_major: 14
+          image_tag: '<< pipeline.parameters.pg14_version >>'
+          make: check-enterprise-isolation-logicalrep-2
+          requires: [build-14]
+      - test-citus:
+          name: 'test-14_check-enterprise-isolation-logicalrep-3'
+          pg_major: 14
+          image_tag: '<< pipeline.parameters.pg14_version >>'
+          make: check-enterprise-isolation-logicalrep-3
+          requires: [build-14]
+      - test-citus:
+          name: 'test-14_check-enterprise-failure'
+          pg_major: 14
+          image: citus/failtester
+          image_tag: '<< pipeline.parameters.pg14_version >>'
+          make: check-enterprise-failure
+          requires: [build-14]
       - test-citus:
           name: 'test-14_check-multi'
           pg_major: 14
Makefile (20 lines changed)
@@ -11,7 +11,7 @@ endif

 include Makefile.global

-all: extension
+all: extension pg_send_cancellation


 # build columnar only

@@ -37,15 +37,25 @@ clean-extension:
 clean-full:
	$(MAKE) -C src/backend/distributed/ clean-full
 .PHONY: extension install-extension clean-extension clean-full
-# Add to generic targets
-install: install-extension install-headers

 install-downgrades:
	$(MAKE) -C src/backend/distributed/ install-downgrades
-install-all: install-headers
+install-all: install-headers install-pg_send_cancellation
	$(MAKE) -C src/backend/columnar/ install-all
	$(MAKE) -C src/backend/distributed/ install-all

-clean: clean-extension
+# build citus_send_cancellation binary
+pg_send_cancellation:
+	$(MAKE) -C src/bin/pg_send_cancellation/ all
+install-pg_send_cancellation: pg_send_cancellation
+	$(MAKE) -C src/bin/pg_send_cancellation/ install
+clean-pg_send_cancellation:
+	$(MAKE) -C src/bin/pg_send_cancellation/ clean
+.PHONY: pg_send_cancellation install-pg_send_cancellation clean-pg_send_cancellation
+
+# Add to generic targets
+install: install-extension install-headers install-pg_send_cancellation
+clean: clean-extension clean-pg_send_cancellation

 # apply or check style
 reindent:
@@ -20,7 +20,7 @@ DATA_built = $(generated_sql_files)

 # directories with source files
 SUBDIRS = . commands connection ddl deparser executor metadata operations planner progress relay safeclib test transaction utils worker
-# enterprise modules
-SUBDIRS +=
+SUBDIRS += replication

 # Symlinks are not copied over to the build directory if a separete build
 # directory is used during configure (such as on CI)
@@ -12,8 +12,8 @@ they are often moved to files that are named after the command.
 | `create_distributed_table.c` | Implementation of UDF's for creating distributed tables |
 | `drop_distributed_table.c` | Implementation for dropping metadata for partitions of distributed tables |
 | `extension.c` | Implementation of `CREATE EXTENSION` commands for citus specific checks |
-| `foreign_constraint.c` | Implementation of helper functions for foreign key constraints |
-| `grant.c` | Placeholder for code granting users access to relations, implemented as enterprise feature |
+| `foreign_constraint.c` | Implementation of and helper functions for foreign key constraints |
+| `grant.c` | Implementation of `GRANT` commands for roles/users on relations |
 | `index.c` | Implementation of commands specific to indices on distributed tables |
 | `multi_copy.c` | Implementation of `COPY` command. There are multiple different copy modes which are described in detail below |
 | `policy.c` | Implementation of `CREATE\ALTER POLICY` commands. |
@@ -10,41 +10,105 @@

 #include "postgres.h"

 #include "distributed/pg_version_constants.h"

+#include "commands/defrem.h"

 #include "catalog/namespace.h"
 #include "distributed/commands.h"
 #include "distributed/commands/utility_hook.h"
+#include "distributed/listutils.h"
 #include "distributed/metadata_cache.h"


-/* placeholder for PreprocessClusterStmt */
+static bool IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt);

+/*
+ * PreprocessClusterStmt first determines whether a given cluster statement involves
+ * a distributed table. If so (and if it is supported, i.e. no verbose), it
+ * creates a DDLJob to encapsulate information needed during the worker node
+ * portion of DDL execution before returning that DDLJob in a List. If no
+ * distributed table is involved, this function returns NIL.
+ */
 List *
 PreprocessClusterStmt(Node *node, const char *clusterCommand,
					  ProcessUtilityContext processUtilityContext)
 {
	ClusterStmt *clusterStmt = castNode(ClusterStmt, node);
-	bool showPropagationWarning = false;
+	bool missingOK = false;
+	DDLJob *ddlJob = NULL;

	/* CLUSTER all */
	if (clusterStmt->relation == NULL)
	{
-		showPropagationWarning = true;
+		ereport(WARNING, (errmsg("not propagating CLUSTER command to worker nodes"),
+						  errhint("Provide a specific table in order to CLUSTER "
+								  "distributed tables.")));
+
+		return NIL;
	}
-	else
+
+	/* PostgreSQL uses access exclusive lock for CLUSTER command */
+	Oid relationId = RangeVarGetRelid(clusterStmt->relation, AccessExclusiveLock,
+									  missingOK);
+
+	/*
+	 * If the table does not exist, don't do anything here to allow PostgreSQL
+	 * to throw the appropriate error or notice message later.
+	 */
+	if (!OidIsValid(relationId))
	{
-		bool missingOK = false;
+		return NIL;
	}

-		Oid relationId = RangeVarGetRelid(clusterStmt->relation, AccessShareLock,
-										  missingOK);
+	/* we have no planning to do unless the table is distributed */
+	bool isCitusRelation = IsCitusTable(relationId);
+	if (!isCitusRelation)
+	{
+		return NIL;
+	}

-		if (OidIsValid(relationId))
+#if PG_VERSION_NUM >= 120000
+	if (IsClusterStmtVerbose_compat(clusterStmt))
+#else
+	if (clusterStmt->verbose)
+#endif
+	{
+		ereport(ERROR, (errmsg("cannot run CLUSTER command"),
+						errdetail("VERBOSE option is currently unsupported "
+								  "for distributed tables.")));
+	}
+
+	ddlJob = palloc0(sizeof(DDLJob));
+	ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId);
+	ddlJob->metadataSyncCommand = clusterCommand;
+	ddlJob->taskList = DDLTaskList(relationId, clusterCommand);
+
+	return list_make1(ddlJob);
+}
+
+
+/*
+ * IsClusterStmtVerbose_compat returns true if the given statement
+ * is a cluster statement with verbose option.
+ */
+static bool
+IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt)
+{
+#if PG_VERSION_NUM < PG_VERSION_14
+	if (clusterStmt->options & CLUOPT_VERBOSE)
+	{
+		return true;
+	}
+	return false;
+#else
+	DefElem *opt = NULL;
+	foreach_ptr(opt, clusterStmt->params)
+	{
+		if (strcmp(opt->defname, "verbose") == 0)
		{
-			showPropagationWarning = IsCitusTable(relationId);
+			return defGetBoolean(opt);
		}
	}

-	if (showPropagationWarning)
-	{
-		ereport(WARNING, (errmsg("not propagating CLUSTER command to worker nodes")));
-	}
-
-	return NIL;
+	return false;
+#endif
 }
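For illustration, the statement shapes the new PreprocessClusterStmt handles; the table and index names are hypothetical:

    CREATE TABLE items (key int, value text);
    SELECT create_distributed_table('items', 'key');
    CREATE INDEX items_key_idx ON items (key);

    CLUSTER items USING items_key_idx;          -- now propagated to the shards
    CLUSTER VERBOSE items USING items_key_idx;  -- errors: VERBOSE is unsupported
    CLUSTER;                                    -- bare CLUSTER only warns, not propagated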
@@ -33,7 +33,7 @@ static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid);
 static Oid get_database_owner(Oid db_oid);

 /* controlled via GUC */
-bool EnableAlterDatabaseOwner = false;
+bool EnableAlterDatabaseOwner = true;


 /*
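Flipping this default means database ownership changes propagate out of the box; the setting behind EnableAlterDatabaseOwner is exposed as citus.enable_alter_database_owner. A hypothetical example:

    ALTER DATABASE app_db OWNER TO app_admin;  -- now also runs on the workers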
@@ -412,7 +412,10 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)

		case OCLASS_PROC:
		{
-			return CreateFunctionDDLCommandsIdempotent(dependency);
+			List *DDLCommands = CreateFunctionDDLCommandsIdempotent(dependency);
+			List *grantDDLCommands = GrantOnFunctionDDLCommands(dependency->objectId);
+			DDLCommands = list_concat(DDLCommands, grantDDLCommands);
+			return DDLCommands;
		}

		case OCLASS_ROLE:

@@ -455,7 +458,13 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)

		case OCLASS_FOREIGN_SERVER:
		{
-			return GetForeignServerCreateDDLCommand(dependency->objectId);
+			Oid serverId = dependency->objectId;
+
+			List *DDLCommands = GetForeignServerCreateDDLCommand(serverId);
+			List *grantDDLCommands = GrantOnForeignServerDDLCommands(serverId);
+			DDLCommands = list_concat(DDLCommands, grantDDLCommands);
+
+			return DDLCommands;
		}

		default:
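The effect of the OCLASS_PROC change, sketched with hypothetical names: grants that already exist on a function are replayed when the function is created on a node as a dependency:

    CREATE FUNCTION add_one(i int) RETURNS int AS $$ SELECT i + 1 $$ LANGUAGE sql;
    GRANT EXECUTE ON FUNCTION add_one(int) TO app_reader;
    SELECT create_distributed_function('add_one(int)');
    -- workers receive the CREATE FUNCTION command followed by the GRANT EXECUTE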
@@ -208,7 +208,23 @@ static DistributeObjectOps Any_View = {
 static DistributeObjectOps Any_CreatePolicy = {
	.deparse = NULL,
	.qualify = NULL,
-	.preprocess = PreprocessCreatePolicyStmt,
+	.preprocess = NULL,
+	.postprocess = PostprocessCreatePolicyStmt,
	.address = NULL,
	.markDistributed = false,
 };
+static DistributeObjectOps Any_CreateRole = {
+	.deparse = DeparseCreateRoleStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessCreateRoleStmt,
+	.postprocess = NULL,
+	.address = CreateRoleStmtObjectAddress,
+	.markDistributed = true,
+};
+static DistributeObjectOps Any_DropRole = {
+	.deparse = DeparseDropRoleStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessDropRoleStmt,
+	.postprocess = NULL,
+	.address = NULL,
+	.markDistributed = false,

@@ -254,6 +270,14 @@ static DistributeObjectOps Any_Grant = {
	.address = NULL,
	.markDistributed = false,
 };
+static DistributeObjectOps Any_GrantRole = {
+	.deparse = DeparseGrantRoleStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessGrantRoleStmt,
+	.postprocess = PostprocessGrantRoleStmt,
+	.address = NULL,
+	.markDistributed = false,
+};
 static DistributeObjectOps Any_Index = {
	.deparse = NULL,
	.qualify = NULL,

@@ -410,6 +434,14 @@ static DistributeObjectOps Extension_Drop = {
	.address = NULL,
	.markDistributed = false,
 };
+static DistributeObjectOps FDW_Grant = {
+	.deparse = DeparseGrantOnFDWStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessGrantOnFDWStmt,
+	.postprocess = NULL,
+	.address = NULL,
+	.markDistributed = false,
+};
 static DistributeObjectOps ForeignServer_Drop = {
	.deparse = DeparseDropForeignServerStmt,
	.qualify = NULL,

@@ -418,6 +450,14 @@ static DistributeObjectOps ForeignServer_Drop = {
	.address = NULL,
	.markDistributed = false,
 };
+static DistributeObjectOps ForeignServer_Grant = {
+	.deparse = DeparseGrantOnForeignServerStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessGrantOnForeignServerStmt,
+	.postprocess = NULL,
+	.address = NULL,
+	.markDistributed = false,
+};
 static DistributeObjectOps ForeignServer_Rename = {
	.deparse = DeparseAlterForeignServerRenameStmt,
	.qualify = NULL,

@@ -478,6 +518,14 @@ static DistributeObjectOps Function_Drop = {
	.address = NULL,
	.markDistributed = false,
 };
+static DistributeObjectOps Function_Grant = {
+	.deparse = DeparseGrantOnFunctionStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessGrantOnFunctionStmt,
+	.postprocess = PostprocessGrantOnFunctionStmt,
+	.address = NULL,
+	.markDistributed = false,
+};
 static DistributeObjectOps View_Drop = {
	.deparse = DeparseDropViewStmt,
	.qualify = QualifyDropViewStmt,

@@ -553,6 +601,14 @@ static DistributeObjectOps Procedure_Drop = {
	.address = NULL,
	.markDistributed = false,
 };
+static DistributeObjectOps Procedure_Grant = {
+	.deparse = DeparseGrantOnFunctionStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessGrantOnFunctionStmt,
+	.postprocess = PostprocessGrantOnFunctionStmt,
+	.address = NULL,
+	.markDistributed = false,
+};
 static DistributeObjectOps Procedure_Rename = {
	.deparse = DeparseRenameFunctionStmt,
	.qualify = QualifyRenameFunctionStmt,

@@ -602,6 +658,14 @@ static DistributeObjectOps Sequence_Drop = {
	.address = NULL,
	.markDistributed = false,
 };
+static DistributeObjectOps Sequence_Grant = {
+	.deparse = DeparseGrantOnSequenceStmt,
+	.qualify = QualifyGrantOnSequenceStmt,
+	.preprocess = PreprocessGrantOnSequenceStmt,
+	.postprocess = PostprocessGrantOnSequenceStmt,
+	.address = NULL,
+	.markDistributed = false,
+};
 static DistributeObjectOps Sequence_Rename = {
	.deparse = DeparseRenameSequenceStmt,
	.qualify = QualifyRenameSequenceStmt,

@@ -768,6 +832,14 @@ static DistributeObjectOps Routine_Drop = {
	.address = NULL,
	.markDistributed = false,
 };
+static DistributeObjectOps Routine_Grant = {
+	.deparse = DeparseGrantOnFunctionStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessGrantOnFunctionStmt,
+	.postprocess = PostprocessGrantOnFunctionStmt,
+	.address = NULL,
+	.markDistributed = false,
+};
 static DistributeObjectOps Routine_Rename = {
	.deparse = DeparseRenameFunctionStmt,
	.qualify = QualifyRenameFunctionStmt,

@@ -1350,6 +1422,11 @@ GetDistributeObjectOps(Node *node)
			return &Any_CreatePolicy;
		}

+		case T_CreateRoleStmt:
+		{
+			return &Any_CreateRole;
+		}
+
		case T_CreateSchemaStmt:
		{
			return &Any_CreateSchema;

@@ -1397,6 +1474,11 @@ GetDistributeObjectOps(Node *node)
			}
		}

+		case T_DropRoleStmt:
+		{
+			return &Any_DropRole;
+		}
+
		case T_DropStmt:
		{
			DropStmt *stmt = castNode(DropStmt, node);

@@ -1504,6 +1586,11 @@ GetDistributeObjectOps(Node *node)
			}
		}

+		case T_GrantRoleStmt:
+		{
+			return &Any_GrantRole;
+		}
+
		case T_GrantStmt:
		{
			GrantStmt *stmt = castNode(GrantStmt, node);

@@ -1514,6 +1601,36 @@ GetDistributeObjectOps(Node *node)
					return &Schema_Grant;
				}

+				case OBJECT_SEQUENCE:
+				{
+					return &Sequence_Grant;
+				}
+
+				case OBJECT_FDW:
+				{
+					return &FDW_Grant;
+				}
+
+				case OBJECT_FOREIGN_SERVER:
+				{
+					return &ForeignServer_Grant;
+				}
+
+				case OBJECT_FUNCTION:
+				{
+					return &Function_Grant;
+				}
+
+				case OBJECT_PROCEDURE:
+				{
+					return &Procedure_Grant;
+				}
+
+				case OBJECT_ROUTINE:
+				{
+					return &Routine_Grant;
+				}
+
				default:
				{
					return &Any_Grant;
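With these ops registered, the utility hook now routes statements like the following (names hypothetical) through deparse-and-propagate instead of ignoring them:

    CREATE ROLE app_writer;                                          -- Any_CreateRole
    GRANT app_writer TO app_reader;                                  -- Any_GrantRole
    GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO app_reader;  -- FDW_Grant
    GRANT USAGE ON SEQUENCE items_id_seq TO app_reader;              -- Sequence_Grant
    DROP ROLE app_writer;                                            -- Any_DropRole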
@@ -12,7 +12,10 @@

+#include "access/genam.h"
 #include "citus_version.h"
 #include "catalog/dependency.h"
+#include "catalog/pg_depend.h"
 #include "catalog/pg_extension_d.h"
+#include "catalog/pg_foreign_data_wrapper.h"
 #include "commands/defrem.h"
 #include "commands/extension.h"
 #include "distributed/citus_ruleutils.h"

@@ -27,9 +30,12 @@
 #include "distributed/multi_executor.h"
 #include "distributed/relation_access_tracking.h"
 #include "distributed/transaction_management.h"
+#include "foreign/foreign.h"
 #include "nodes/makefuncs.h"
 #include "utils/lsyscache.h"
 #include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/syscache.h"


 /* Local functions forward declarations for helper functions */

@@ -42,6 +48,7 @@ static List * GetAllViews(void);
 static bool ShouldPropagateExtensionCommand(Node *parseTree);
 static bool IsAlterExtensionSetSchemaCitus(Node *parseTree);
 static Node * RecreateExtensionStmt(Oid extensionOid);
+static List * GenerateGrantCommandsOnExtesionDependentFDWs(Oid extensionId);


 /*

@@ -820,6 +827,12 @@ CreateExtensionDDLCommand(const ObjectAddress *extensionAddress)

	List *ddlCommands = list_make1((void *) ddlCommand);

+	/* any privilege granted on FDWs that belong to the extension should be included */
+	List *FDWGrants =
+		GenerateGrantCommandsOnExtesionDependentFDWs(extensionAddress->objectId);
+
+	ddlCommands = list_concat(ddlCommands, FDWGrants);
+
	return ddlCommands;
 }

@@ -878,6 +891,88 @@ RecreateExtensionStmt(Oid extensionOid)
 }


+/*
+ * GenerateGrantCommandsOnExtesionDependentFDWs returns a list of commands that GRANTs
+ * the privileges on FDWs that are depending on the given extension.
+ */
+static List *
+GenerateGrantCommandsOnExtesionDependentFDWs(Oid extensionId)
+{
+	List *commands = NIL;
+	List *FDWOids = GetDependentFDWsToExtension(extensionId);
+
+	Oid FDWOid = InvalidOid;
+	foreach_oid(FDWOid, FDWOids)
+	{
+		Acl *aclEntry = GetPrivilegesForFDW(FDWOid);
+
+		if (aclEntry == NULL)
+		{
+			continue;
+		}
+
+		AclItem *privileges = ACL_DAT(aclEntry);
+		int numberOfPrivsGranted = ACL_NUM(aclEntry);
+
+		for (int i = 0; i < numberOfPrivsGranted; i++)
+		{
+			commands = list_concat(commands,
+								   GenerateGrantOnFDWQueriesFromAclItem(FDWOid,
+																		&privileges[i]));
+		}
+	}
+
+	return commands;
+}
+
+
+/*
+ * GetDependentFDWsToExtension gets an extension oid and returns the list of oids of FDWs
+ * that are depending on the given extension.
+ */
+List *
+GetDependentFDWsToExtension(Oid extensionId)
+{
+	List *extensionFDWs = NIL;
+	ScanKeyData key[3];
+	int scanKeyCount = 3;
+	HeapTuple tup;
+
+	Relation pgDepend = table_open(DependRelationId, AccessShareLock);
+
+	ScanKeyInit(&key[0],
+				Anum_pg_depend_refclassid,
+				BTEqualStrategyNumber, F_OIDEQ,
+				ObjectIdGetDatum(ExtensionRelationId));
+	ScanKeyInit(&key[1],
+				Anum_pg_depend_refobjid,
+				BTEqualStrategyNumber, F_OIDEQ,
+				ObjectIdGetDatum(extensionId));
+	ScanKeyInit(&key[2],
+				Anum_pg_depend_classid,
+				BTEqualStrategyNumber, F_OIDEQ,
+				ObjectIdGetDatum(ForeignDataWrapperRelationId));
+
+	SysScanDesc scan = systable_beginscan(pgDepend, InvalidOid, false,
+										  NULL, scanKeyCount, key);
+
+	while (HeapTupleIsValid(tup = systable_getnext(scan)))
+	{
+		Form_pg_depend pgDependEntry = (Form_pg_depend) GETSTRUCT(tup);
+
+		if (pgDependEntry->deptype == DEPENDENCY_EXTENSION)
+		{
+			extensionFDWs = lappend_oid(extensionFDWs, pgDependEntry->objid);
+		}
+	}
+
+	systable_endscan(scan);
+	table_close(pgDepend, AccessShareLock);
+
+	return extensionFDWs;
+}
+
+
 /*
  * AlterExtensionSchemaStmtObjectAddress returns the ObjectAddress of the extension that is
  * the subject of the AlterObjectSchemaStmt. Errors if missing_ok is false.
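GetDependentFDWsToExtension is roughly equivalent to the following catalog query (the extension name is hypothetical); the C version scans pg_depend directly instead of going through SQL:

    SELECT d.objid AS fdw_oid
    FROM pg_depend d
    WHERE d.refclassid = 'pg_extension'::regclass
      AND d.refobjid = (SELECT oid FROM pg_extension WHERE extname = 'postgres_fdw')
      AND d.classid = 'pg_foreign_data_wrapper'::regclass
      AND d.deptype = 'e';  -- DEPENDENCY_EXTENSION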
@@ -0,0 +1,144 @@
+/*-------------------------------------------------------------------------
+ *
+ * foreign_data_wrapper.c
+ *    Commands for FOREIGN DATA WRAPPER statements.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "catalog/pg_foreign_data_wrapper.h"
+#include "distributed/commands/utility_hook.h"
+#include "distributed/commands.h"
+#include "distributed/deparser.h"
+#include "distributed/listutils.h"
+#include "distributed/metadata_sync.h"
+#include "distributed/metadata/distobject.h"
+#include "foreign/foreign.h"
+#include "nodes/makefuncs.h"
+#include "nodes/parsenodes.h"
+#include "utils/syscache.h"
+
+static bool NameListHasFDWOwnedByDistributedExtension(List *FDWNames);
+static ObjectAddress GetObjectAddressByFDWName(char *FDWName, bool missing_ok);
+
+
+/*
+ * PreprocessGrantOnFDWStmt is executed before the statement is applied to the
+ * local postgres instance.
+ *
+ * In this stage we can prepare the commands that need to be run on all workers to grant
+ * on foreign data wrappers.
+ */
+List *
+PreprocessGrantOnFDWStmt(Node *node, const char *queryString,
+						 ProcessUtilityContext processUtilityContext)
+{
+	GrantStmt *stmt = castNode(GrantStmt, node);
+	Assert(stmt->objtype == OBJECT_FDW);
+
+	if (!NameListHasFDWOwnedByDistributedExtension(stmt->objects))
+	{
+		/*
+		 * We propagate granted privileges on a FDW only if it belongs to a distributed
+		 * extension. For now, we skip for custom FDWs, as most of the users prefer
+		 * extension FDWs.
+		 */
+		return NIL;
+	}
+
+	if (list_length(stmt->objects) > 1)
+	{
+		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						errmsg("cannot grant on FDW with other FDWs"),
+						errhint("Try granting on each object in separate commands")));
+	}
+
+	if (!ShouldPropagate())
+	{
+		return NIL;
+	}
+
+	EnsureCoordinator();
+
+	Assert(list_length(stmt->objects) == 1);
+
+	char *sql = DeparseTreeNode((Node *) stmt);
+
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) sql,
+								ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
+
+
+/*
+ * NameListHasFDWOwnedByDistributedExtension takes a namelist of FDWs and returns true
+ * if at least one of them depends on a distributed extension. Returns false otherwise.
+ */
+static bool
+NameListHasFDWOwnedByDistributedExtension(List *FDWNames)
+{
+	Value *FDWValue = NULL;
+	foreach_ptr(FDWValue, FDWNames)
+	{
+		/* captures the extension address during lookup */
+		ObjectAddress extensionAddress = { 0 };
+		ObjectAddress FDWAddress = GetObjectAddressByFDWName(strVal(FDWValue), false);
+
+		if (IsObjectAddressOwnedByExtension(&FDWAddress, &extensionAddress))
+		{
+			if (IsObjectDistributed(&extensionAddress))
+			{
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+
+/*
+ * GetObjectAddressByFDWName takes a FDW name and returns the object address.
+ */
+static ObjectAddress
+GetObjectAddressByFDWName(char *FDWName, bool missing_ok)
+{
+	ForeignDataWrapper *FDW = GetForeignDataWrapperByName(FDWName, missing_ok);
+	Oid FDWId = FDW->fdwid;
+	ObjectAddress address = { 0 };
+	ObjectAddressSet(address, ForeignDataWrapperRelationId, FDWId);
+
+	return address;
+}
+
+
+/*
+ * GetPrivilegesForFDW takes a FDW object id and returns the privileges granted
+ * on that FDW as a Acl object. Returns NULL if there is no privilege granted.
+ */
+Acl *
+GetPrivilegesForFDW(Oid FDWOid)
+{
+	HeapTuple fdwtup = SearchSysCache1(FOREIGNDATAWRAPPEROID, ObjectIdGetDatum(FDWOid));
+
+	bool isNull = true;
+	Datum aclDatum = SysCacheGetAttr(FOREIGNDATAWRAPPEROID, fdwtup,
+									 Anum_pg_foreign_data_wrapper_fdwacl, &isNull);
+	if (isNull)
+	{
+		ReleaseSysCache(fdwtup);
+		return NULL;
+	}
+
+	Acl *aclEntry = DatumGetAclPCopy(aclDatum);
+
+	ReleaseSysCache(fdwtup);
+
+	return aclEntry;
+}
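The statement shape this new file handles, with hypothetical names; note the one-object-per-command restriction enforced above:

    GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO app_reader;  -- propagated
    GRANT USAGE ON FOREIGN DATA WRAPPER fdw_a, fdw_b TO app_reader;  -- errors when one of
                                                                     -- them is extension-owned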
@@ -9,6 +9,7 @@
 */

 #include "postgres.h"
+#include "miscadmin.h"

 #include "catalog/pg_foreign_server.h"
 #include "distributed/commands/utility_hook.h"

@@ -23,8 +24,11 @@
 #include "nodes/makefuncs.h"
 #include "nodes/parsenodes.h"
 #include "nodes/primnodes.h"
+#include "utils/builtins.h"

+static char * GetForeignServerAlterOwnerCommand(Oid serverId);
 static Node * RecreateForeignServerStmt(Oid serverId);
+static bool NameListHasDistributedServer(List *serverNames);
 static ObjectAddress GetObjectAddressByServerName(char *serverName, bool missing_ok);


@@ -62,6 +66,53 @@ AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok)
 }


+/*
+ * PreprocessGrantOnForeignServerStmt is executed before the statement is applied to the
+ * local postgres instance.
+ *
+ * In this stage we can prepare the commands that need to be run on all workers to grant
+ * on servers.
+ */
+List *
+PreprocessGrantOnForeignServerStmt(Node *node, const char *queryString,
+								   ProcessUtilityContext processUtilityContext)
+{
+	GrantStmt *stmt = castNode(GrantStmt, node);
+	Assert(stmt->objtype == OBJECT_FOREIGN_SERVER);
+
+	bool includesDistributedServer = NameListHasDistributedServer(stmt->objects);
+
+	if (!includesDistributedServer)
+	{
+		return NIL;
+	}
+
+	if (list_length(stmt->objects) > 1)
+	{
+		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						errmsg("cannot grant on distributed server with other servers"),
+						errhint("Try granting on each object in separate commands")));
+	}
+
+	if (!ShouldPropagate())
+	{
+		return NIL;
+	}
+
+	EnsureCoordinator();
+
+	Assert(list_length(stmt->objects) == 1);
+
+	char *sql = DeparseTreeNode((Node *) stmt);
+
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) sql,
+								ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
+
+
 /*
  * RenameForeignServerStmtObjectAddress finds the ObjectAddress for the server that is
  * renamed by given RenmaeStmt. If missingOk is false and if the server does not exist,

@@ -109,14 +160,37 @@ GetForeignServerCreateDDLCommand(Oid serverId)
	Node *stmt = RecreateForeignServerStmt(serverId);

	/* capture ddl command for the create statement */
-	const char *ddlCommand = DeparseTreeNode(stmt);
+	const char *createCommand = DeparseTreeNode(stmt);
+	const char *alterOwnerCommand = GetForeignServerAlterOwnerCommand(serverId);

-	List *ddlCommands = list_make1((void *) ddlCommand);
+	List *ddlCommands = list_make2((void *) createCommand,
+								   (void *) alterOwnerCommand);

	return ddlCommands;
 }


+/*
+ * GetForeignServerAlterOwnerCommand returns "ALTER SERVER .. OWNER TO .." statement
+ * for the specified foreign server.
+ */
+static char *
+GetForeignServerAlterOwnerCommand(Oid serverId)
+{
+	ForeignServer *server = GetForeignServer(serverId);
+	Oid ownerId = server->owner;
+	char *ownerName = GetUserNameFromId(ownerId, false);
+
+	StringInfo alterCommand = makeStringInfo();
+
+	appendStringInfo(alterCommand, "ALTER SERVER %s OWNER TO %s;",
+					 quote_identifier(server->servername),
+					 quote_identifier(ownerName));
+
+	return alterCommand->data;
+}
+
+
 /*
  * RecreateForeignServerStmt returns a parsetree for a CREATE SERVER statement
  * that would recreate the given server on a new node.

@@ -161,6 +235,28 @@ RecreateForeignServerStmt(Oid serverId)
 }


+/*
+ * NameListHasDistributedServer takes a namelist of servers and returns true if at least
+ * one of them is distributed. Returns false otherwise.
+ */
+static bool
+NameListHasDistributedServer(List *serverNames)
+{
+	String *serverValue = NULL;
+	foreach_ptr(serverValue, serverNames)
+	{
+		ObjectAddress address = GetObjectAddressByServerName(strVal(serverValue), false);
+
+		if (IsObjectDistributed(&address))
+		{
+			return true;
+		}
+	}
+
+	return false;
+}
+
+
 static ObjectAddress
 GetObjectAddressByServerName(char *serverName, bool missing_ok)
 {
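A hypothetical end-to-end example for the foreign server changes: the create-DDL for a distributed server now also carries an ALTER SERVER ... OWNER TO command, and grants on it are propagated:

    CREATE SERVER remote_pg FOREIGN DATA WRAPPER postgres_fdw
        OPTIONS (host 'remote.example.com', dbname 'app');
    GRANT USAGE ON FOREIGN SERVER remote_pg TO app_reader;  -- sent to the workers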
@@ -6,7 +6,9 @@
 * We currently support replicating function definitions on the
 * coordinator in all the worker nodes in the form of
 *
- * CREATE OR REPLACE FUNCTION ... queries.
+ * CREATE OR REPLACE FUNCTION ... queries and
+ * GRANT ... ON FUNCTION queries
+ *
 *
 * ALTER or DROP operations are not yet propagated.
 *

@@ -104,6 +106,7 @@ static void DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid
													functionAddress);
 static void DistributeFunctionColocatedWithReferenceTable(const
														  ObjectAddress *functionAddress);
+static List * FilterDistributedFunctions(GrantStmt *grantStmt);

 static void EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress,
													const ObjectAddress extensionAddress,

@@ -239,8 +242,17 @@ create_distributed_function(PG_FUNCTION_ARGS)
	const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true);
	const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
	initStringInfo(&ddlCommand);
-	appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC,
-					 createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC);
+	appendStringInfo(&ddlCommand, "%s;%s;%s", DISABLE_METADATA_SYNC,
+					 createFunctionSQL, alterFunctionOwnerSQL);
+	List *grantDDLCommands = GrantOnFunctionDDLCommands(funcOid);
+	char *grantOnFunctionSQL = NULL;
+	foreach_ptr(grantOnFunctionSQL, grantDDLCommands)
+	{
+		appendStringInfo(&ddlCommand, ";%s", grantOnFunctionSQL);
+	}
+
+	appendStringInfo(&ddlCommand, ";%s", ENABLE_METADATA_SYNC);

	SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(),
							   ddlCommand.data);
 }

@@ -1920,3 +1932,162 @@ EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress,

	EnsureDependenciesExistOnAllNodes(&functionAddress);
 }
+
+
+/*
+ * PreprocessGrantOnFunctionStmt is executed before the statement is applied to the local
+ * postgres instance.
+ *
+ * In this stage we can prepare the commands that need to be run on all workers to grant
+ * on distributed functions, procedures, routines.
+ */
+List *
+PreprocessGrantOnFunctionStmt(Node *node, const char *queryString,
+							  ProcessUtilityContext processUtilityContext)
+{
+	GrantStmt *stmt = castNode(GrantStmt, node);
+	Assert(isFunction(stmt->objtype));
+
+	List *distributedFunctions = FilterDistributedFunctions(stmt);
+
+	if (list_length(distributedFunctions) == 0 || !ShouldPropagate())
+	{
+		return NIL;
+	}
+
+	EnsureCoordinator();
+
+	List *grantFunctionList = NIL;
+	ObjectAddress *functionAddress = NULL;
+	foreach_ptr(functionAddress, distributedFunctions)
+	{
+		ObjectWithArgs *distFunction = ObjectWithArgsFromOid(
+			functionAddress->objectId);
+		grantFunctionList = lappend(grantFunctionList, distFunction);
+	}
+
+	List *originalObjects = stmt->objects;
+	GrantTargetType originalTargtype = stmt->targtype;
+
+	stmt->objects = grantFunctionList;
+	stmt->targtype = ACL_TARGET_OBJECT;
+
+	char *sql = DeparseTreeNode((Node *) stmt);
+
+	stmt->objects = originalObjects;
+	stmt->targtype = originalTargtype;
+
+	List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
+								   (void *) sql,
+								   ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
+}
+
+
+/*
+ * PostprocessGrantOnFunctionStmt makes sure dependencies of each
+ * distributed function in the statement exist on all nodes
+ */
+List *
+PostprocessGrantOnFunctionStmt(Node *node, const char *queryString)
+{
+	GrantStmt *stmt = castNode(GrantStmt, node);
+
+	List *distributedFunctions = FilterDistributedFunctions(stmt);
+
+	if (list_length(distributedFunctions) == 0)
+	{
+		return NIL;
+	}
+
+	ObjectAddress *functionAddress = NULL;
+	foreach_ptr(functionAddress, distributedFunctions)
+	{
+		EnsureDependenciesExistOnAllNodes(functionAddress);
+	}
+	return NIL;
+}
+
+
+/*
+ * FilterDistributedFunctions determines and returns a list of distributed functions
+ * ObjectAddress-es from given grant statement.
+ */
+static List *
+FilterDistributedFunctions(GrantStmt *grantStmt)
+{
+	List *grantFunctionList = NIL;
+
+	bool grantOnFunctionCommand = (grantStmt->targtype == ACL_TARGET_OBJECT &&
+								   isFunction(grantStmt->objtype));
+	bool grantAllFunctionsOnSchemaCommand = (grantStmt->targtype ==
+											 ACL_TARGET_ALL_IN_SCHEMA &&
+											 isFunction(grantStmt->objtype));
+
+	/* we are only interested in function/procedure/routine level grants */
+	if (!grantOnFunctionCommand && !grantAllFunctionsOnSchemaCommand)
+	{
+		return NIL;
+	}
+
+	if (grantAllFunctionsOnSchemaCommand)
+	{
+		List *distributedFunctionList = DistributedFunctionList();
+		ObjectAddress *distributedFunction = NULL;
+		List *namespaceOidList = NIL;
+
+		/* iterate over all namespace names provided to get their oid's */
+		Value *namespaceValue = NULL;
+		foreach_ptr(namespaceValue, grantStmt->objects)
+		{
+			char *nspname = strVal(namespaceValue);
+			bool missing_ok = false;
+			Oid namespaceOid = get_namespace_oid(nspname, missing_ok);
+			namespaceOidList = list_append_unique_oid(namespaceOidList, namespaceOid);
+		}
+
+		/*
+		 * iterate over all distributed functions to filter the ones
+		 * that belong to one of the namespaces from above
+		 */
+		foreach_ptr(distributedFunction, distributedFunctionList)
+		{
+			Oid namespaceOid = get_func_namespace(distributedFunction->objectId);
+
+			/*
+			 * if this distributed function's schema is one of the schemas
+			 * specified in the GRANT .. ALL FUNCTIONS IN SCHEMA ..
+			 * add it to the list
+			 */
+			if (list_member_oid(namespaceOidList, namespaceOid))
+			{
+				grantFunctionList = lappend(grantFunctionList, distributedFunction);
+			}
+		}
+	}
+	else
+	{
+		bool missingOk = false;
+		ObjectWithArgs *objectWithArgs = NULL;
+		foreach_ptr(objectWithArgs, grantStmt->objects)
+		{
+			ObjectAddress *functionAddress = palloc0(sizeof(ObjectAddress));
+			functionAddress->classId = ProcedureRelationId;
+			functionAddress->objectId = LookupFuncWithArgs(grantStmt->objtype,
+														   objectWithArgs,
+														   missingOk);
+			functionAddress->objectSubId = 0;
+
+			/*
+			 * if this function from GRANT .. ON FUNCTION .. is a distributed
+			 * function, add it to the list
+			 */
+			if (IsObjectDistributed(functionAddress))
+			{
+				grantFunctionList = lappend(grantFunctionList, functionAddress);
+			}
+		}
+	}
+	return grantFunctionList;
+}
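For the schema-level form, FilterDistributedFunctions is what narrows the target list; a hypothetical example:

    GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA app TO app_reader;
    -- only the distributed functions in schema app are deparsed (as
    -- GRANT ... ON FUNCTION with explicit argument lists) and sent to the workers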
@@ -8,13 +8,244 @@
 *-------------------------------------------------------------------------
 */

 #include "postgres.h"

+#include "distributed/citus_ruleutils.h"
 #include "distributed/commands.h"
 #include "distributed/commands/utility_hook.h"
+#include "distributed/metadata/distobject.h"
 #include "distributed/metadata_cache.h"
+#include "distributed/version_compat.h"
+#include "lib/stringinfo.h"
+#include "nodes/parsenodes.h"
+#include "utils/lsyscache.h"


-/* placeholder for PreprocessGrantStmt */
+/* Local functions forward declarations for helper functions */
+static List * CollectGrantTableIdList(GrantStmt *grantStmt);
+
+
+/*
+ * PreprocessGrantStmt determines whether a given GRANT/REVOKE statement involves
+ * a distributed table. If so, it creates DDLJobs to encapsulate information
+ * needed during the worker node portion of DDL execution before returning the
+ * DDLJobs in a List. If no distributed table is involved, this returns NIL.
+ *
+ * NB: So far column level privileges are not supported.
+ */
 List *
 PreprocessGrantStmt(Node *node, const char *queryString,
					ProcessUtilityContext processUtilityContext)
 {
-	return NIL;
+	GrantStmt *grantStmt = castNode(GrantStmt, node);
+	StringInfoData privsString;
+	StringInfoData granteesString;
+	StringInfoData targetString;
+	StringInfoData ddlString;
+	ListCell *granteeCell = NULL;
+	ListCell *tableListCell = NULL;
+	bool isFirst = true;
+	List *ddlJobs = NIL;
+
+	initStringInfo(&privsString);
+	initStringInfo(&granteesString);
+	initStringInfo(&targetString);
+	initStringInfo(&ddlString);
+
+	/*
+	 * So far only table level grants are supported. Most other types of
+	 * grants aren't interesting anyway.
+	 */
+	if (grantStmt->objtype != OBJECT_TABLE)
+	{
+		return NIL;
+	}
+
+	List *tableIdList = CollectGrantTableIdList(grantStmt);
+
+	/* nothing to do if there is no distributed table in the grant list */
+	if (tableIdList == NIL)
+	{
+		return NIL;
+	}
+
+	/* deparse the privileges */
+	if (grantStmt->privileges == NIL)
+	{
+		appendStringInfo(&privsString, "ALL");
+	}
+	else
+	{
+		ListCell *privilegeCell = NULL;
+
+		isFirst = true;
+		foreach(privilegeCell, grantStmt->privileges)
+		{
+			AccessPriv *priv = lfirst(privilegeCell);
+
+			if (!isFirst)
+			{
+				appendStringInfoString(&privsString, ", ");
+			}
+			isFirst = false;
+
+			if (priv->cols != NIL)
+			{
+				ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+								errmsg("grant/revoke on column list is currently "
+									   "unsupported")));
+			}
+
+			Assert(priv->priv_name != NULL);
+
+			appendStringInfo(&privsString, "%s", priv->priv_name);
+		}
+	}
+
+	/* deparse the grantees */
+	isFirst = true;
+	foreach(granteeCell, grantStmt->grantees)
+	{
+		RoleSpec *spec = lfirst(granteeCell);
+
+		if (!isFirst)
+		{
+			appendStringInfoString(&granteesString, ", ");
+		}
+		isFirst = false;
+
+		appendStringInfoString(&granteesString, RoleSpecString(spec, true));
+	}
+
+	/*
+	 * Deparse the target objects, and issue the deparsed statements to
+	 * workers, if applicable. That's so we easily can replicate statements
+	 * only to distributed relations.
+	 */
+	isFirst = true;
+	foreach(tableListCell, tableIdList)
+	{
+		Oid relationId = lfirst_oid(tableListCell);
+		const char *grantOption = "";
+
+		resetStringInfo(&targetString);
+		appendStringInfo(&targetString, "%s", generate_relation_name(relationId, NIL));
+
+		if (grantStmt->is_grant)
+		{
+			if (grantStmt->grant_option)
+			{
+				grantOption = " WITH GRANT OPTION";
+			}
+
+			appendStringInfo(&ddlString, "GRANT %s ON %s TO %s%s",
+							 privsString.data, targetString.data, granteesString.data,
+							 grantOption);
+		}
+		else
+		{
+			if (grantStmt->grant_option)
+			{
+				grantOption = "GRANT OPTION FOR ";
+			}
+
+			appendStringInfo(&ddlString, "REVOKE %s%s ON %s FROM %s",
+							 grantOption, privsString.data, targetString.data,
+							 granteesString.data);
+		}
+
+		DDLJob *ddlJob = palloc0(sizeof(DDLJob));
+		ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId);
+		ddlJob->metadataSyncCommand = pstrdup(ddlString.data);
+		ddlJob->taskList = NIL;
+		if (IsCitusTable(relationId))
+		{
+			ddlJob->taskList = DDLTaskList(relationId, ddlString.data);
+		}
+		ddlJobs = lappend(ddlJobs, ddlJob);
+
+		resetStringInfo(&ddlString);
+	}
+
+	return ddlJobs;
 }
+
+
+/*
+ * CollectGrantTableIdList determines and returns a list of distributed table
+ * Oids from grant statement.
+ * Grant statement may appear in two forms
+ * 1 - grant on table:
+ *     each distributed table oid in grant object list is added to returned list.
+ * 2 - grant all tables in schema:
+ *     Collect namespace oid list from grant statement
+ *     Add each distributed table oid in the target namespace list to the returned list.
+ */
+static List *
+CollectGrantTableIdList(GrantStmt *grantStmt)
+{
+	List *grantTableList = NIL;
+
+	bool grantOnTableCommand = (grantStmt->targtype == ACL_TARGET_OBJECT &&
+								grantStmt->objtype == OBJECT_TABLE);
+	bool grantAllTablesOnSchemaCommand = (grantStmt->targtype ==
+										  ACL_TARGET_ALL_IN_SCHEMA &&
+										  grantStmt->objtype == OBJECT_TABLE);
+
+	/* we are only interested in table level grants */
+	if (!grantOnTableCommand && !grantAllTablesOnSchemaCommand)
+	{
+		return NIL;
+	}
+
+	if (grantAllTablesOnSchemaCommand)
+	{
+		List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE);
+		ListCell *citusTableIdCell = NULL;
+		List *namespaceOidList = NIL;
+
+		ListCell *objectCell = NULL;
+		foreach(objectCell, grantStmt->objects)
+		{
+			char *nspname = strVal(lfirst(objectCell));
+			bool missing_ok = false;
+			Oid namespaceOid = get_namespace_oid(nspname, missing_ok);
+			Assert(namespaceOid != InvalidOid);
+			namespaceOidList = list_append_unique_oid(namespaceOidList, namespaceOid);
+		}
+
+		foreach(citusTableIdCell, citusTableIdList)
+		{
+			Oid relationId = lfirst_oid(citusTableIdCell);
+			Oid namespaceOid = get_rel_namespace(relationId);
+			if (list_member_oid(namespaceOidList, namespaceOid))
+			{
+				grantTableList = lappend_oid(grantTableList, relationId);
+			}
+		}
+	}
+	else
+	{
+		ListCell *objectCell = NULL;
+		foreach(objectCell, grantStmt->objects)
+		{
+			RangeVar *relvar = (RangeVar *) lfirst(objectCell);
+			Oid relationId = RangeVarGetRelid(relvar, NoLock, false);
+			if (IsCitusTable(relationId))
+			{
+				grantTableList = lappend_oid(grantTableList, relationId);
+				continue;
+			}
+
+			/* check for distributed sequences included in GRANT ON TABLE statement */
+			ObjectAddress sequenceAddress = { 0 };
+			ObjectAddressSet(sequenceAddress, RelationRelationId, relationId);
+			if (IsObjectDistributed(&sequenceAddress))
+			{
+				grantTableList = lappend_oid(grantTableList, relationId);
+			}
+		}
+	}
+
+	return grantTableList;
+}
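The statement shapes PreprocessGrantStmt now deparses, with hypothetical names; the column-level form is the one it still rejects:

    GRANT SELECT, INSERT ON items TO app_reader;             -- per-shard tasks via DDLTaskList
    GRANT SELECT ON ALL TABLES IN SCHEMA app TO app_reader;  -- filtered to distributed tables
    REVOKE GRANT OPTION FOR INSERT ON items FROM app_reader;
    GRANT SELECT (value) ON items TO app_reader;             -- errors: column lists unsupported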
@@ -73,10 +73,12 @@
 #include "distributed/commands/multi_copy.h"
 #include "distributed/commands/utility_hook.h"
 #include "distributed/intermediate_results.h"
+#include "distributed/listutils.h"
 #include "distributed/local_executor.h"
+#include "distributed/log_utils.h"
 #include "distributed/coordinator_protocol.h"
 #include "distributed/metadata_cache.h"
 #include "distributed/multi_executor.h"
 #include "distributed/multi_partitioning_utils.h"
 #include "distributed/multi_physical_planner.h"
 #include "distributed/multi_router_planner.h"

@@ -102,6 +104,7 @@
 #include "libpq/pqformat.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
+#include "parser/parse_func.h"
 #include "parser/parse_type.h"
 #if PG_VERSION_NUM >= PG_VERSION_13
 #include "tcop/cmdtag.h"

@@ -117,6 +120,9 @@
 /* constant used in binary protocol */
 static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0";

+/* if true, skip validation of JSONB columns during COPY */
+bool SkipJsonbValidationInCopy = true;
+
 /* custom Citus option for appending to a shard */
 #define APPEND_TO_SHARD_OPTION "append_to_shard"

@@ -242,6 +248,9 @@ typedef enum LocalCopyStatus
 /* Local functions forward declarations */
 static void CopyToExistingShards(CopyStmt *copyStatement,
								 QueryCompletionCompat *completionTag);
+static bool IsCopyInBinaryFormat(CopyStmt *copyStatement);
+static List * FindJsonbInputColumns(TupleDesc tupleDescriptor,
+									List *inputColumnNameList);
 static List * RemoveOptionFromList(List *optionList, char *optionName);
 static bool BinaryOutputFunctionDefined(Oid typeId);
 static bool BinaryInputFunctionDefined(Oid typeId);

@@ -452,6 +461,7 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
	List *columnNameList = NIL;
	int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX;

+	bool isInputFormatBinary = IsCopyInBinaryFormat(copyStatement);
	uint64 processedRowCount = 0;

	ErrorContextCallback errorCallback;

@@ -543,6 +553,72 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
		copiedDistributedRelationTuple->relkind = RELKIND_RELATION;
	}

+	/*
+	 * We make an optimisation to skip JSON parsing for JSONB columns, because many
+	 * Citus users have large objects in this column and parsing it on the coordinator
+	 * causes significant CPU overhead. We do this by forcing BeginCopyFrom and
+	 * NextCopyFrom to parse the column as text and then encoding it as JSON again
+	 * by using citus_text_send_as_jsonb as the binary output function.
+	 *
+	 * The main downside of enabling this optimisation is that it defers validation
+	 * until the object is parsed by the worker, which is unable to give an accurate
+	 * line number.
+	 */
+	if (SkipJsonbValidationInCopy && !isInputFormatBinary)
+	{
+		CopyOutState copyOutState = copyDest->copyOutState;
+		ListCell *jsonbColumnIndexCell = NULL;
+
+		/* get the column indices for all JSONB columns that appear in the input */
+		List *jsonbColumnIndexList = FindJsonbInputColumns(
+			copiedDistributedRelation->rd_att,
+			copyStatement->attlist);
+
+		foreach(jsonbColumnIndexCell, jsonbColumnIndexList)
+		{
+			int jsonbColumnIndex = lfirst_int(jsonbColumnIndexCell);
+			Form_pg_attribute currentColumn =
+				TupleDescAttr(copiedDistributedRelation->rd_att, jsonbColumnIndex);
+
+			if (jsonbColumnIndex == partitionColumnIndex)
+			{
+				/*
+				 * In the curious case of using a JSONB column as partition column,
+				 * we leave it as is because we want to make sure the hashing works
+				 * correctly.
+				 */
+				continue;
+			}
+
+			ereport(DEBUG1, (errmsg("parsing JSONB column %s as text",
+									NameStr(currentColumn->attname))));
+
+			/* parse the column as text instead of JSONB */
+			currentColumn->atttypid = TEXTOID;
+
+			if (copyOutState->binary)
+			{
+				Oid textSendAsJsonbFunctionId = CitusTextSendAsJsonbFunctionId();
+
+				/*
+				 * If we're using binary encoding between coordinator and workers
+				 * then we should honour the format expected by jsonb_recv, which
+				 * is a version number followed by text. We therefore use an output
+				 * function which sends the text as if it were jsonb, namely by
+				 * prepending a version number.
+				 */
+				fmgr_info(textSendAsJsonbFunctionId,
+						  &copyDest->columnOutputFunctions[jsonbColumnIndex]);
+			}
+			else
+			{
+				Oid textoutFunctionId = TextOutFunctionId();
+				fmgr_info(textoutFunctionId,
+						  &copyDest->columnOutputFunctions[jsonbColumnIndex]);
+			}
+		}
+	}
+
	/* initialize copy state to read from COPY data source */
	CopyFromState copyState = BeginCopyFrom_compat(NULL,
												   copiedDistributedRelation,

@@ -610,6 +686,82 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
 }


+/*
+ * IsCopyInBinaryFormat determines whether the given COPY statement has the
+ * WITH (format binary) option.
+ */
+static bool
+IsCopyInBinaryFormat(CopyStmt *copyStatement)
+{
+	ListCell *optionCell = NULL;
+
+	foreach(optionCell, copyStatement->options)
+	{
+		DefElem *defel = lfirst_node(DefElem, optionCell);
+		if (strcmp(defel->defname, "format") == 0 &&
+			strcmp(defGetString(defel), "binary") == 0)
+		{
+			return true;
+		}
+	}
+
+	return false;
+}
+
+
+/*
+ * FindJsonbInputColumns finds columns in the tuple descriptor that have
+ * the JSONB type and appear in inputColumnNameList. If the list is empty then
+ * all JSONB columns are returned.
+ */
+static List *
+FindJsonbInputColumns(TupleDesc tupleDescriptor, List *inputColumnNameList)
+{
+	List *jsonbColumnIndexList = NIL;
+	int columnCount = tupleDescriptor->natts;
+
+	for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
+	{
+		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
+		if (currentColumn->attisdropped)
+		{
+			continue;
+		}
+
+		if (currentColumn->atttypid != JSONBOID)
+		{
+			continue;
+		}
+
+		if (inputColumnNameList != NIL)
+		{
+			ListCell *inputColumnCell = NULL;
+			bool isInputColumn = false;
+
+			foreach(inputColumnCell, inputColumnNameList)
+			{
+				char *inputColumnName = strVal(lfirst(inputColumnCell));
+
+				if (namestrcmp(&currentColumn->attname, inputColumnName) == 0)
+				{
+					isInputColumn = true;
+					break;
+				}
+			}
+
+			if (!isInputColumn)
+			{
+				continue;
+			}
+		}
+
+		jsonbColumnIndexList = lappend_int(jsonbColumnIndexList, columnIndex);
+	}
+
+	return jsonbColumnIndexList;
+}
+
+
 static void
 CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 processedRowCount)
 {
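A hypothetical COPY session showing the optimisation; the GUC defaults to on, so this is the out-of-the-box behaviour for text/CSV input:

    SET citus.skip_jsonb_validation_in_copy TO on;
    COPY events (device_id, payload) FROM '/tmp/events.csv' WITH (format csv);
    -- the jsonb column 'payload' is read as text on the coordinator; malformed
    -- JSON is only rejected when a worker parses it, without an input line number
    COPY events FROM '/tmp/events.bin' WITH (format binary);  -- optimisation skipped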
@ -12,112 +12,650 @@
|
|||
|
||||
#include "catalog/namespace.h"
|
||||
#include "commands/policy.h"
|
||||
#include "distributed/citus_ruleutils.h"
|
||||
#include "distributed/commands.h"
|
||||
#include "distributed/commands/utility_hook.h"
|
||||
#include "distributed/coordinator_protocol.h"
|
||||
#include "distributed/listutils.h"
|
||||
#include "distributed/metadata_cache.h"
|
||||
#include "miscadmin.h"
|
||||
#include "nodes/makefuncs.h"
|
||||
#include "parser/parse_clause.h"
|
||||
#include "parser/parse_relation.h"
|
||||
#include "rewrite/rewriteManip.h"
|
||||
#include "rewrite/rowsecurity.h"
|
||||
#include "utils/builtins.h"
|
||||
#include "utils/ruleutils.h"
|
||||
|
||||
|
||||
/* placeholder for CreatePolicyCommands */
|
||||
static const char * unparse_policy_command(const char aclchar);
|
||||
static void AddRangeTableEntryToQueryCompat(ParseState *parseState, Relation relation);
|
||||
static RowSecurityPolicy * GetPolicyByName(Oid relationId, const char *policyName);
|
||||
static List * GetPolicyListForRelation(Oid relationId);
|
||||
static char * CreatePolicyCommandForPolicy(Oid relationId, RowSecurityPolicy *policy);
|
||||
|
||||
|
||||
/*
|
||||
* CreatePolicyCommands takes in a relationId, and returns the list of create policy
|
||||
* commands needed to reconstruct the policies of that table.
|
||||
*/
|
||||
List *
|
||||
CreatePolicyCommands(Oid relationId)
|
||||
{
|
||||
/* placeholder for future implementation */
|
||||
return NIL;
|
||||
}
|
||||
List *commands = NIL;
|
||||
|
||||
List *policyList = GetPolicyListForRelation(relationId);
|
||||
|
||||
/* placeholder for PreprocessCreatePolicyStmt */
|
||||
List *
|
||||
PreprocessCreatePolicyStmt(Node *node, const char *queryString,
|
||||
ProcessUtilityContext processUtilityContext)
|
||||
{
|
||||
CreatePolicyStmt *stmt = castNode(CreatePolicyStmt, node);
|
||||
Oid relationId = RangeVarGetRelid(stmt->table,
|
||||
AccessExclusiveLock,
|
||||
false);
|
||||
if (IsCitusTable(relationId))
|
||||
RowSecurityPolicy *policy;
|
||||
foreach_ptr(policy, policyList)
|
||||
{
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("policies on distributed tables are only supported in "
|
||||
"Citus Enterprise")));
|
||||
char *createPolicyCommand = CreatePolicyCommandForPolicy(relationId, policy);
|
||||
commands = lappend(commands, makeTableDDLCommandString(createPolicyCommand));
|
||||
}
|
||||
|
||||
/* placeholder for future implementation */
|
||||
return NIL;
|
||||
return commands;
|
||||
}


-/* placeholder for PreprocessAlterPolicyStmt */
/*
 * GetPolicyListForRelation returns a list of RowSecurityPolicy objects identifying
 * the policies on the relation with relationId. Note that this function acquires
 * AccessShareLock on relation and does not release it in the end to make sure that
 * caller will process valid policies through the transaction.
 */
static List *
GetPolicyListForRelation(Oid relationId)
{
	Relation relation = table_open(relationId, AccessShareLock);

	if (!relation_has_policies(relation))
	{
		table_close(relation, NoLock);

		return NIL;
	}

	if (relation->rd_rsdesc == NULL)
	{
		/*
		 * there are policies, but since RLS is not enabled they are not loaded into
		 * cache, we will do so here for us to access
		 */
		RelationBuildRowSecurity(relation);
	}

	List *policyList = NIL;

	RowSecurityPolicy *policy;
	foreach_ptr(policy, relation->rd_rsdesc->policies)
	{
		policyList = lappend(policyList, policy);
	}

	table_close(relation, NoLock);

	return policyList;
}


/*
 * CreatePolicyCommandForPolicy takes a relationId and a policy, returns
 * the CREATE POLICY command needed to reconstruct the policy identified
 * by the "policy" object on the relation with relationId.
 */
static char *
CreatePolicyCommandForPolicy(Oid relationId, RowSecurityPolicy *policy)
{
	char *relationName = generate_qualified_relation_name(relationId);
	List *relationContext = deparse_context_for(relationName, relationId);

	StringInfo createPolicyCommand = makeStringInfo();

	appendStringInfo(createPolicyCommand, "CREATE POLICY %s ON %s FOR %s",
	                 quote_identifier(policy->policy_name),
	                 relationName,
	                 unparse_policy_command(policy->polcmd));

	appendStringInfoString(createPolicyCommand, " TO ");

	/*
	 * iterate over all roles and append them to the ddl command with commas
	 * separating the role names
	 */
	Oid *roles = (Oid *) ARR_DATA_PTR(policy->roles);
	for (int roleIndex = 0; roleIndex < ARR_DIMS(policy->roles)[0]; roleIndex++)
	{
		const char *roleName;

		if (roleIndex > 0)
		{
			appendStringInfoString(createPolicyCommand, ", ");
		}

		if (roles[roleIndex] == ACL_ID_PUBLIC)
		{
			roleName = "PUBLIC";
		}
		else
		{
			roleName = quote_identifier(GetUserNameFromId(roles[roleIndex], false));
		}

		appendStringInfoString(createPolicyCommand, roleName);
	}

	if (policy->qual)
	{
		char *qualString = deparse_expression((Node *) (policy->qual),
		                                      relationContext, false, false);
		appendStringInfo(createPolicyCommand, " USING (%s)", qualString);
	}

	if (policy->with_check_qual)
	{
		char *withCheckQualString = deparse_expression(
			(Node *) (policy->with_check_qual), relationContext, false, false);
		appendStringInfo(createPolicyCommand, " WITH CHECK (%s)",
		                 withCheckQualString);
	}

	return createPolicyCommand->data;
}
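
/*
 * Illustrative example (hypothetical table, policy and role names): for a policy
 * created as
 *
 *   CREATE POLICY tenant_filter ON events FOR SELECT TO app_user
 *       USING (tenant_id = current_setting('app.tenant')::int);
 *
 * CreatePolicyCommandForPolicy would reconstruct roughly
 *
 *   CREATE POLICY tenant_filter ON public.events FOR SELECT TO app_user
 *       USING ((tenant_id = (current_setting('app.tenant'))::integer))
 *
 * with a schema-qualified relation name and a deparsed qual, so the same policy
 * can be recreated on shards and on other nodes.
 */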


/*
 * unparse_policy_command takes the type of a policy command and converts it to its full
 * command string. This function is the exact inverse of parse_policy_command that is in
 * postgres.
 */
static const char *
unparse_policy_command(const char aclchar)
{
	switch (aclchar)
	{
		case '*':
		{
			return "ALL";
		}

		case ACL_SELECT_CHR:
		{
			return "SELECT";
		}

		case ACL_INSERT_CHR:
		{
			return "INSERT";
		}

		case ACL_UPDATE_CHR:
		{
			return "UPDATE";
		}

		case ACL_DELETE_CHR:
		{
			return "DELETE";
		}

		default:
		{
			elog(ERROR, "unrecognized aclchar: %d", aclchar);
			return NULL;
		}
	}
}


/*
 * PostprocessCreatePolicyStmt determines when a CREATE POLICY statement involves
 * a distributed table. If so, it creates DDLJobs to encapsulate information
 * needed during the worker node portion of DDL execution before returning the
 * DDLJobs in a List. If no distributed table is involved, this returns NIL.
 */
List *
PostprocessCreatePolicyStmt(Node *node, const char *queryString)
{
	CreatePolicyStmt *stmt = castNode(CreatePolicyStmt, node);

	/* load relation information */
	RangeVar *relvar = stmt->table;
	Oid relationId = RangeVarGetRelid(relvar, NoLock, false);
	if (!IsCitusTable(relationId))
	{
		return NIL;
	}

	Relation relation = table_open(relationId, AccessShareLock);

	ParseState *qual_pstate = make_parsestate(NULL);
	AddRangeTableEntryToQueryCompat(qual_pstate, relation);
	Node *qual = transformWhereClause(qual_pstate,
	                                  copyObject(stmt->qual),
	                                  EXPR_KIND_POLICY,
	                                  "POLICY");
	if (qual)
	{
		ErrorIfUnsupportedPolicyExpr(qual);
	}

	ParseState *with_check_pstate = make_parsestate(NULL);
	AddRangeTableEntryToQueryCompat(with_check_pstate, relation);
	Node *with_check_qual = transformWhereClause(with_check_pstate,
	                                             copyObject(stmt->with_check),
	                                             EXPR_KIND_POLICY,
	                                             "POLICY");
	if (with_check_qual)
	{
		ErrorIfUnsupportedPolicyExpr(with_check_qual);
	}

	RowSecurityPolicy *policy = GetPolicyByName(relationId, stmt->policy_name);

	if (policy == NULL)
	{
		/*
		 * As this function is executed after standard process utility created the
		 * policy, we should be able to find & deparse the policy with policy_name.
		 * But to be more safe, error out here.
		 */
		ereport(ERROR, (errmsg("cannot create policy, policy does not exist.")));
	}

	EnsureCoordinator();

	char *ddlCommand = CreatePolicyCommandForPolicy(relationId, policy);

	/*
	 * create the DDLJob that needs to be executed both on the local relation and all its
	 * placements.
	 */
	DDLJob *ddlJob = palloc0(sizeof(DDLJob));
	ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId);
	ddlJob->metadataSyncCommand = pstrdup(ddlCommand);
	ddlJob->taskList = DDLTaskList(relationId, ddlCommand);

	relation_close(relation, NoLock);

	return list_make1(ddlJob);
}


/*
 * AddRangeTableEntryToQueryCompat adds the given relation to query.
 * This method is a compatibility wrapper.
 */
static void
AddRangeTableEntryToQueryCompat(ParseState *parseState, Relation relation)
{
#if PG_VERSION_NUM >= PG_VERSION_13
	ParseNamespaceItem *rte = NULL;
#else
	RangeTblEntry *rte = NULL;
#endif

	rte = addRangeTableEntryForRelation(parseState, relation,
#if PG_VERSION_NUM >= PG_VERSION_12
										AccessShareLock,
#endif
										NULL, false, false);
#if PG_VERSION_NUM >= PG_VERSION_13
	addNSItemToQuery(parseState, rte, false, true, true);
#else
	addRTEtoQuery(parseState, rte, false, true, true);
#endif
}


/*
 * GetPolicyByName takes a relationId and a policyName, returns RowSecurityPolicy
 * object which identifies the policy with name "policyName" on the relation
 * with relationId. If there does not exist such a policy, then this function
 * returns NULL.
 */
static RowSecurityPolicy *
GetPolicyByName(Oid relationId, const char *policyName)
{
	List *policyList = GetPolicyListForRelation(relationId);

	RowSecurityPolicy *policy = NULL;
	foreach_ptr(policy, policyList)
	{
		if (strncmp(policy->policy_name, policyName, NAMEDATALEN) == 0)
		{
			return policy;
		}
	}

	return NULL;
}


/*
 * PreprocessAlterPolicyStmt determines whether a given ALTER POLICY statement involves a
 * distributed table. If so, it creates DDLJobs to encapsulate information needed during
 * the worker node portion of DDL execution before returning the DDLJobs in a list. If no
 * distributed table is involved this returns NIL.
 */
List *
PreprocessAlterPolicyStmt(Node *node, const char *queryString,
                          ProcessUtilityContext processUtilityContext)
{
-	/* placeholder for future implementation */
-	return NIL;
	AlterPolicyStmt *stmt = castNode(AlterPolicyStmt, node);
	StringInfoData ddlString;
	ListCell *roleCell = NULL;

	/* load relation information */
	RangeVar *relvar = stmt->table;
	Oid relOid = RangeVarGetRelid(relvar, NoLock, false);
	if (!IsCitusTable(relOid))
	{
		return NIL;
	}

	initStringInfo(&ddlString);

	Relation relation = relation_open(relOid, AccessShareLock);
	char *relationName = generate_relation_name(relOid, NIL);

	appendStringInfo(&ddlString, "ALTER POLICY %s ON %s",
	                 quote_identifier(stmt->policy_name),
	                 relationName);

	if (stmt->roles)
	{
		appendStringInfoString(&ddlString, " TO ");
		foreach(roleCell, stmt->roles)
		{
			RoleSpec *roleSpec = (RoleSpec *) lfirst(roleCell);

			appendStringInfoString(&ddlString, RoleSpecString(roleSpec, true));

			if (lnext_compat(stmt->roles, roleCell) != NULL)
			{
				appendStringInfoString(&ddlString, ", ");
			}
		}
	}

	List *relationContext = deparse_context_for(relationName, relOid);

	ParseState *qual_pstate = make_parsestate(NULL);
	AddRangeTableEntryToQueryCompat(qual_pstate, relation);
	Node *qual = transformWhereClause(qual_pstate,
	                                  copyObject(stmt->qual),
	                                  EXPR_KIND_POLICY,
	                                  "POLICY");
	if (qual)
	{
		ErrorIfUnsupportedPolicyExpr(qual);

		char *qualString = deparse_expression(qual, relationContext, false, false);
		appendStringInfo(&ddlString, " USING (%s)", qualString);
	}

	ParseState *with_check_pstate = make_parsestate(NULL);
	AddRangeTableEntryToQueryCompat(with_check_pstate, relation);
	Node *with_check_qual = transformWhereClause(with_check_pstate,
	                                             copyObject(stmt->with_check),
	                                             EXPR_KIND_POLICY,
	                                             "POLICY");
	if (with_check_qual)
	{
		ErrorIfUnsupportedPolicyExpr(with_check_qual);

		char *withCheckString = deparse_expression(with_check_qual, relationContext,
		                                           false,
		                                           false);
		appendStringInfo(&ddlString, " WITH CHECK (%s)", withCheckString);
	}

	/*
	 * create the DDLJob that needs to be executed both on the local relation and all its
	 * placements.
	 */
	DDLJob *ddlJob = palloc0(sizeof(DDLJob));
	ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relOid);
	ddlJob->metadataSyncCommand = pstrdup(ddlString.data);
	ddlJob->taskList = DDLTaskList(relOid, ddlString.data);

	relation_close(relation, NoLock);

	return list_make1(ddlJob);
}


-/* placeholder for ErrorIfUnsupportedPolicy */
/*
 * ErrorIfUnsupportedPolicy runs checks related to a Relation and its Policies and errors
 * out if it is not possible to create one of the policies in a distributed environment.
 *
 * To support policies we require that:
 * - Policy expressions do not contain subqueries.
 */
void
ErrorIfUnsupportedPolicy(Relation relation)
{
-	if (relation_has_policies(relation))
-	{
-		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-		                errmsg("policies on distributed tables are only supported in "
-		                       "Citus Enterprise"),
-		                errhint("Remove any policies on a table before distributing")));
-	}
	ListCell *policyCell = NULL;

	if (!relation_has_policies(relation))
	{
		return;
	}

	/*
	 * even if a relation has policies they might not be loaded on the Relation yet. This
	 * happens if policies are on a Relation without Row Level Security enabled. We need
	 * to make sure the policies installed are valid for distribution if RLS gets enabled
	 * after the table has been distributed. Therefore we force a build of the policies on
	 * the cached Relation
	 */
	if (relation->rd_rsdesc == NULL)
	{
		RelationBuildRowSecurity(relation);
	}

	foreach(policyCell, relation->rd_rsdesc->policies)
	{
		RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(policyCell);

		ErrorIfUnsupportedPolicyExpr((Node *) policy->qual);
		ErrorIfUnsupportedPolicyExpr((Node *) policy->with_check_qual);
	}
}


-/* placeholder for PreprocessDropPolicyStmt */
/*
 * ErrorIfUnsupportedPolicyExpr tests if the provided expression for a policy is
 * supported on a distributed table.
 */
void
ErrorIfUnsupportedPolicyExpr(Node *expr)
{
	/*
	 * We do not allow any sublink to prevent expressions with subqueries from being
	 * used as an expression in policies on distributed tables.
	 */
	if (checkExprHasSubLink(expr))
	{
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot create policy"),
				 errdetail("Subqueries are not supported in policies on distributed "
						   "tables")));
	}
}
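
/*
 * Illustrative example (hypothetical names): ErrorIfUnsupportedPolicyExpr would
 * reject the following policy because its USING clause contains a subquery:
 *
 *   CREATE POLICY member_filter ON events
 *       USING (owner IN (SELECT user_name FROM memberships));
 *
 * while a subquery-free expression such as USING (owner = current_user) passes.
 */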


/*
 * PreprocessDropPolicyStmt determines whether a given DROP POLICY statement involves a
 * distributed table. If so it creates DDLJobs to encapsulate information needed during
 * the worker node portion of DDL execution before returning the DDLJobs in a List. If no
 * distributed table is involved this returns NIL.
 */
List *
PreprocessDropPolicyStmt(Node *node, const char *queryString,
                         ProcessUtilityContext processUtilityContext)
{
-	/* placeholder for future implementation */
-	return NIL;
	DropStmt *stmt = castNode(DropStmt, node);
	List *ddlJobs = NIL;
	ListCell *dropObjectCell = NULL;

	Assert(stmt->removeType == OBJECT_POLICY);

	foreach(dropObjectCell, stmt->objects)
	{
		List *names = (List *) lfirst(dropObjectCell);

		/*
		 * the last element in the list of names is the name of the policy. The ones
		 * before are describing the relation. By removing the last item from the list we
		 * can use makeRangeVarFromNameList to get to the relation. As list_truncate
		 * changes the list in place we make a copy before.
		 */
		names = list_copy(names);
		names = list_truncate(names, list_length(names) - 1);
		RangeVar *relation = makeRangeVarFromNameList(names);

		Oid relOid = RangeVarGetRelid(relation, NoLock, false);
		if (!IsCitusTable(relOid))
		{
			continue;
		}

		DDLJob *ddlJob = palloc0(sizeof(DDLJob));
		ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relOid);
		ddlJob->metadataSyncCommand = queryString;
		ddlJob->taskList = DDLTaskList(relOid, queryString);

		ddlJobs = lappend(ddlJobs, ddlJob);
	}

	return ddlJobs;
}


-/* placeholder for IsPolicyRenameStmt */
/*
 * IsPolicyRenameStmt returns whether the passed-in RenameStmt is one of the following
 * forms:
 *
 * - ALTER POLICY ... ON ... RENAME TO ...
 */
bool
IsPolicyRenameStmt(RenameStmt *stmt)
{
-	/* placeholder for future implementation */
-	return false;
	return stmt->renameType == OBJECT_POLICY;
}


-/* placeholder for CreatePolicyEventExtendNames */
/*
 * CreatePolicyEventExtendNames extends relation names in the given CreatePolicyStmt tree.
 * This function has side effects on the tree as the names are replaced inplace.
 */
void
CreatePolicyEventExtendNames(CreatePolicyStmt *stmt, const char *schemaName, uint64
                             shardId)
{
-	/* placeholder for future implementation */
	RangeVar *relation = stmt->table;
	char **relationName = &(relation->relname);
	char **relationSchemaName = &(relation->schemaname);

	/* prefix with schema name if it is not added already */
	SetSchemaNameIfNotExist(relationSchemaName, schemaName);

	AppendShardIdToName(relationName, shardId);
}
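
/*
 * Illustrative example (hypothetical names): for schemaName "public" and
 * shardId 102008, a statement parsed from
 *
 *   CREATE POLICY tenant_filter ON events ...
 *
 * is rewritten in place so that it targets the shard instead:
 *
 *   CREATE POLICY tenant_filter ON public.events_102008 ...
 */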


-/* placeholder for AlterPolicyEventExtendNames */
/*
 * AlterPolicyEventExtendNames extends relation names in the given AlterPolicyStatement
 * tree. This function has side effects on the tree as the names are replaced inplace.
 */
void
AlterPolicyEventExtendNames(AlterPolicyStmt *stmt, const char *schemaName, uint64 shardId)
{
-	/* placeholder for future implementation */
	RangeVar *relation = stmt->table;
	char **relationName = &(relation->relname);
	char **relationSchemaName = &(relation->schemaname);

	/* prefix with schema name if it is not added already */
	SetSchemaNameIfNotExist(relationSchemaName, schemaName);

	AppendShardIdToName(relationName, shardId);
}


-/* placeholder for RenamePolicyEventExtendNames */
/*
 * RenamePolicyEventExtendNames extends relation names in the given RenameStmt tree. This
 * function has side effects on the tree as the names are replaced inplace.
 */
void
RenamePolicyEventExtendNames(RenameStmt *stmt, const char *schemaName, uint64 shardId)
{
-	/* placeholder for future implementation */
	char **relationName = &(stmt->relation->relname);
	char **objectSchemaName = &(stmt->relation->schemaname);

	/* prefix with schema name if it is not added already */
	SetSchemaNameIfNotExist(objectSchemaName, schemaName);

	AppendShardIdToName(relationName, shardId);
}


-/* placeholder for DropPolicyEventExtendNames */
/*
 * DropPolicyEventExtendNames extends relation names in the given DropStmt tree specific
 * to policies. This function has side effects on the tree as the names are replaced
 * inplace.
 */
void
DropPolicyEventExtendNames(DropStmt *dropStmt, const char *schemaName, uint64 shardId)
{
-	/* placeholder for future implementation */
	Value *relationSchemaNameValue = NULL;
	Value *relationNameValue = NULL;

	uint32 dropCount = list_length(dropStmt->objects);
	if (dropCount > 1)
	{
		ereport(ERROR, (errmsg("cannot extend name for multiple drop objects")));
	}

	List *relationNameList = (List *) linitial(dropStmt->objects);
	int relationNameListLength = list_length(relationNameList);

	switch (relationNameListLength)
	{
		case 2:
		{
			relationNameValue = linitial(relationNameList);
			break;
		}

		case 3:
		{
			relationSchemaNameValue = linitial(relationNameList);
			relationNameValue = lsecond(relationNameList);
			break;
		}

		default:
		{
			ereport(ERROR,
					(errcode(ERRCODE_SYNTAX_ERROR),
					 errmsg("improper policy name: \"%s\"",
							NameListToString(relationNameList))));
			break;
		}
	}

	/* prefix with schema name if it is not added already */
	if (relationSchemaNameValue == NULL)
	{
		Value *schemaNameValue = makeString(pstrdup(schemaName));
		relationNameList = lcons(schemaNameValue, relationNameList);
	}

	char **relationName = &(relationNameValue->val.str);
	AppendShardIdToName(relationName, shardId);
}


@@ -14,7 +14,9 @@

#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/genam.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/pg_auth_members.h"
#include "catalog/pg_authid.h"

@@ -31,6 +33,9 @@
#include "distributed/coordinator_protocol.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"
#include "distributed/multi_executor.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/version_compat.h"
#include "distributed/worker_transaction.h"
#include "miscadmin.h"

@@ -40,6 +45,7 @@
#include "parser/scansup.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/guc_tables.h"
#include "utils/guc.h"
#include "utils/rel.h"

@@ -54,6 +60,9 @@ static char * CreateCreateOrAlterRoleCommand(const char *roleName,
                                             AlterRoleStmt *alterRoleStmt);
static DefElem * makeDefElemInt(char *name, int value);
static List * GenerateRoleOptionsList(HeapTuple tuple);
static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options);
static List * GenerateGrantRoleStmtsOfRole(Oid roleid);
static void EnsureSequentialModeForRoleDDL(void);

static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple,
                                           TupleDesc DbRoleSettingDescription);

@@ -68,6 +77,7 @@ static int ConfigGenericNameCompare(const void *lhs, const void *rhs);
static ObjectAddress RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok);

/* controlled via GUC */
bool EnableCreateRolePropagation = true;
bool EnableAlterRolePropagation = true;
bool EnableAlterRoleSetPropagation = true;

@@ -133,11 +143,13 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)
		return NIL;
	}

-	if (!EnableAlterRolePropagation || !IsCoordinator())
	if (!EnableAlterRolePropagation)
	{
		return NIL;
	}

	EnsureCoordinator();

	AlterRoleStmt *stmt = castNode(AlterRoleStmt, node);

	DefElem *option = NULL;

@@ -161,7 +173,9 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)
			break;
		}
	}
-	List *commands = list_make1((void *) CreateAlterRoleIfExistsCommand(stmt));
	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
	                            (void *) CreateAlterRoleIfExistsCommand(stmt),
	                            ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}

@@ -206,14 +220,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString,
		return NIL;
	}

-	/*
-	 * Since roles need to be handled manually on community, we need to support such
-	 * queries by handling them locally on worker nodes
-	 */
-	if (!IsCoordinator())
-	{
-		return NIL;
-	}
	EnsureCoordinator();

	QualifyTreeNode((Node *) stmt);
	const char *sql = DeparseTreeNode((Node *) stmt);

@@ -493,6 +500,14 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
	Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple));

	CreateRoleStmt *createRoleStmt = NULL;
	if (EnableCreateRolePropagation)
	{
		createRoleStmt = makeNode(CreateRoleStmt);
		createRoleStmt->stmt_type = ROLESTMT_ROLE;
		createRoleStmt->role = pstrdup(NameStr(role->rolname));
		createRoleStmt->options = GenerateRoleOptionsList(roleTuple);
	}

	AlterRoleStmt *alterRoleStmt = NULL;
	if (EnableAlterRolePropagation)
	{

@@ -526,6 +541,16 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
		completeRoleList = list_concat(completeRoleList, alterRoleSetCommands);
	}

	if (EnableCreateRolePropagation)
	{
		List *grantRoleStmts = GenerateGrantRoleStmtsOfRole(roleOid);
		Node *stmt = NULL;
		foreach_ptr(stmt, grantRoleStmts)
		{
			completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
		}
	}

	return completeRoleList;
}

@@ -732,6 +757,157 @@ MakeSetStatementArguments(char *configurationName, char *configurationValue)
}


/*
 * GenerateGrantRoleStmtsFromOptions gets a RoleSpec of a role that is being
 * created and a list of options of CreateRoleStmt to generate GrantRoleStmts
 * for the role's memberships.
 */
static List *
GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options)
{
	List *stmts = NIL;

	DefElem *option = NULL;
	foreach_ptr(option, options)
	{
		if (strcmp(option->defname, "adminmembers") != 0 &&
			strcmp(option->defname, "rolemembers") != 0 &&
			strcmp(option->defname, "addroleto") != 0)
		{
			continue;
		}

		GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt);
		grantRoleStmt->is_grant = true;

		if (strcmp(option->defname, "adminmembers") == 0 || strcmp(option->defname,
		                                                           "rolemembers") == 0)
		{
			grantRoleStmt->granted_roles = list_make1(roleSpec);
			grantRoleStmt->grantee_roles = (List *) option->arg;
		}
		else
		{
			grantRoleStmt->granted_roles = (List *) option->arg;
			grantRoleStmt->grantee_roles = list_make1(roleSpec);
		}

		if (strcmp(option->defname, "adminmembers") == 0)
		{
			grantRoleStmt->admin_opt = true;
		}

		stmts = lappend(stmts, grantRoleStmt);
	}

	return stmts;
}
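
/*
 * Illustrative example (hypothetical role names): a statement such as
 *
 *   CREATE ROLE app_user IN ROLE parent_role ROLE member_a ADMIN admin_b;
 *
 * carries its memberships in the "addroleto", "rolemembers" and "adminmembers"
 * options, which this function would turn into roughly
 *
 *   GRANT parent_role TO app_user;
 *   GRANT app_user TO member_a;
 *   GRANT app_user TO admin_b WITH ADMIN OPTION;
 *
 * so the same memberships can be propagated to worker nodes.
 */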


/*
 * GenerateGrantRoleStmtsOfRole generates the GrantRoleStmts for the memberships
 * of the role whose oid is roleid.
 */
static List *
GenerateGrantRoleStmtsOfRole(Oid roleid)
{
	Relation pgAuthMembers = table_open(AuthMemRelationId, AccessShareLock);
	HeapTuple tuple = NULL;
	List *stmts = NIL;

	ScanKeyData skey[1];

	ScanKeyInit(&skey[0], Anum_pg_auth_members_member, BTEqualStrategyNumber, F_OIDEQ,
	            ObjectIdGetDatum(roleid));
	SysScanDesc scan = systable_beginscan(pgAuthMembers, AuthMemMemRoleIndexId, true,
	                                      NULL, 1, &skey[0]);

	while (HeapTupleIsValid(tuple = systable_getnext(scan)))
	{
		Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple);

		GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt);
		grantRoleStmt->is_grant = true;

		RoleSpec *grantedRole = makeNode(RoleSpec);
		grantedRole->roletype = ROLESPEC_CSTRING;
		grantedRole->location = -1;
		grantedRole->rolename = GetUserNameFromId(membership->roleid, true);
		grantRoleStmt->granted_roles = list_make1(grantedRole);

		RoleSpec *granteeRole = makeNode(RoleSpec);
		granteeRole->roletype = ROLESPEC_CSTRING;
		granteeRole->location = -1;
		granteeRole->rolename = GetUserNameFromId(membership->member, true);
		grantRoleStmt->grantee_roles = list_make1(granteeRole);

		grantRoleStmt->grantor = NULL;

		grantRoleStmt->admin_opt = membership->admin_option;

		stmts = lappend(stmts, grantRoleStmt);
	}

	systable_endscan(scan);
	table_close(pgAuthMembers, AccessShareLock);

	return stmts;
}


/*
 * PreprocessCreateRoleStmt creates a worker_create_or_alter_role query for the
 * role that is being created. With that query we can create the role in the
 * workers or if they exist we alter them to the way they are being created
 * right now.
 */
List *
PreprocessCreateRoleStmt(Node *node, const char *queryString,
                         ProcessUtilityContext processUtilityContext)
{
	if (!EnableCreateRolePropagation || !ShouldPropagate())
	{
		return NIL;
	}

	EnsureCoordinator();
	EnsureSequentialModeForRoleDDL();

	LockRelationOid(DistNodeRelationId(), RowShareLock);

	CreateRoleStmt *createRoleStmt = castNode(CreateRoleStmt, node);

	AlterRoleStmt *alterRoleStmt = makeNode(AlterRoleStmt);
	alterRoleStmt->role = makeNode(RoleSpec);
	alterRoleStmt->role->roletype = ROLESPEC_CSTRING;
	alterRoleStmt->role->location = -1;
	alterRoleStmt->role->rolename = pstrdup(createRoleStmt->role);
	alterRoleStmt->action = 1;
	alterRoleStmt->options = createRoleStmt->options;

	List *grantRoleStmts = GenerateGrantRoleStmtsFromOptions(alterRoleStmt->role,
	                                                         createRoleStmt->options);

	char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand(createRoleStmt->role,
	                                                              createRoleStmt,
	                                                              alterRoleStmt);

	List *commands = NIL;
	commands = lappend(commands, DISABLE_DDL_PROPAGATION);
	commands = lappend(commands, createOrAlterRoleQuery);

	/* deparse all grant statements and add them to the commands list */
	Node *stmt = NULL;
	foreach_ptr(stmt, grantRoleStmts)
	{
		commands = lappend(commands, DeparseTreeNode(stmt));
	}

	commands = lappend(commands, ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


/*
 * makeStringConst creates a Const Node that stores a given string
 *
@@ -801,6 +977,178 @@ makeFloatConst(char *str, int location)
}


/*
 * PreprocessDropRoleStmt finds the distributed roles among the ones being
 * dropped, unmarks them as distributed and creates the drop statements for the
 * workers.
 */
List *
PreprocessDropRoleStmt(Node *node, const char *queryString,
                       ProcessUtilityContext processUtilityContext)
{
	DropRoleStmt *stmt = castNode(DropRoleStmt, node);
	List *allDropRoles = stmt->roles;

	List *distributedDropRoles = FilterDistributedRoles(allDropRoles);
	if (list_length(distributedDropRoles) <= 0)
	{
		return NIL;
	}

	if (!EnableCreateRolePropagation || !ShouldPropagate())
	{
		return NIL;
	}

	EnsureCoordinator();
	EnsureSequentialModeForRoleDDL();

	stmt->roles = distributedDropRoles;
	char *sql = DeparseTreeNode((Node *) stmt);
	stmt->roles = allDropRoles;

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
	                            sql,
	                            ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


/*
 * UnmarkRolesDistributed unmarks the roles in the RoleSpec list distributed.
 */
void
UnmarkRolesDistributed(List *roles)
{
	Node *roleNode = NULL;
	foreach_ptr(roleNode, roles)
	{
		RoleSpec *role = castNode(RoleSpec, roleNode);
		ObjectAddress roleAddress = { 0 };
		Oid roleOid = get_rolespec_oid(role, true);

		if (roleOid == InvalidOid)
		{
			/*
			 * If the role is dropped (concurrently), we might get an invalid oid
			 * for the role. If the oid is invalid, skip.
			 */
			continue;
		}

		ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid);
		UnmarkObjectDistributed(&roleAddress);
	}
}


/*
 * FilterDistributedRoles filters the list of RoleSpecs and returns the ones
 * that are distributed.
 */
List *
FilterDistributedRoles(List *roles)
{
	List *distributedRoles = NIL;
	Node *roleNode = NULL;
	foreach_ptr(roleNode, roles)
	{
		RoleSpec *role = castNode(RoleSpec, roleNode);
		ObjectAddress roleAddress = { 0 };
		Oid roleOid = get_rolespec_oid(role, true);
		if (roleOid == InvalidOid)
		{
			/*
			 * Non-existing roles are ignored silently here. Postgres decides
			 * whether to raise an error for these roles.
			 */
			continue;
		}
		ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid);
		if (IsObjectDistributed(&roleAddress))
		{
			distributedRoles = lappend(distributedRoles, role);
		}
	}

	return distributedRoles;
}


/*
 * PreprocessGrantRoleStmt finds the distributed grantee roles and creates the
 * query to run on the workers.
 */
List *
PreprocessGrantRoleStmt(Node *node, const char *queryString,
                        ProcessUtilityContext processUtilityContext)
{
	if (!EnableCreateRolePropagation || !ShouldPropagate())
	{
		return NIL;
	}

	EnsureCoordinator();

	GrantRoleStmt *stmt = castNode(GrantRoleStmt, node);
	List *allGranteeRoles = stmt->grantee_roles;
	RoleSpec *grantor = stmt->grantor;

	List *distributedGranteeRoles = FilterDistributedRoles(allGranteeRoles);
	if (list_length(distributedGranteeRoles) <= 0)
	{
		return NIL;
	}

	/*
	 * Postgres doesn't seem to use the grantor. Even dropping the grantor doesn't
	 * seem to affect the membership. If this changes, we might need to add grantors
	 * to the dependency resolution too. For now we just don't propagate it.
	 */
	stmt->grantor = NULL;
	stmt->grantee_roles = distributedGranteeRoles;
	char *sql = DeparseTreeNode((Node *) stmt);
	stmt->grantee_roles = allGranteeRoles;
	stmt->grantor = grantor;

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
	                            sql,
	                            ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


/*
 * PostprocessGrantRoleStmt actually creates the plan we need to execute for grant
 * role statement.
 */
List *
PostprocessGrantRoleStmt(Node *node, const char *queryString)
{
	if (!EnableCreateRolePropagation || !IsCoordinator() || !ShouldPropagate())
	{
		return NIL;
	}

	GrantRoleStmt *stmt = castNode(GrantRoleStmt, node);

	RoleSpec *role = NULL;
	foreach_ptr(role, stmt->grantee_roles)
	{
		ObjectAddress roleAddress = { 0 };
		Oid roleOid = get_rolespec_oid(role, false);
		ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid);
		if (IsObjectDistributed(&roleAddress))
		{
			EnsureDependenciesExistOnAllNodes(&roleAddress);
		}
	}

	return NIL;
}


/*
 * ConfigGenericNameCompare compares two config_generic structs based on their
 * name fields. If the name fields contain the same strings two structs are

@@ -821,3 +1169,64 @@ ConfigGenericNameCompare(const void *a, const void *b)
	 */
	return pg_strcasecmp(confa->name, confb->name);
}


/*
 * CreateRoleStmtObjectAddress finds the ObjectAddress for the role described
 * by the CreateRoleStmt. If missing_ok is false this function throws an error if the
 * role does not exist.
 *
 * Never returns NULL, but the objid in the address could be invalid if missing_ok was set
 * to true.
 */
ObjectAddress
CreateRoleStmtObjectAddress(Node *node, bool missing_ok)
{
	CreateRoleStmt *stmt = castNode(CreateRoleStmt, node);
	Oid roleOid = get_role_oid(stmt->role, missing_ok);
	ObjectAddress roleAddress = { 0 };
	ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid);

	return roleAddress;
}


/*
 * EnsureSequentialModeForRoleDDL makes sure that the current transaction is already in
 * sequential mode, or can still safely be put in sequential mode; it errors if that is
 * not possible. The error contains information for the user to retry the transaction with
 * sequential mode set from the beginning.
 *
 * As roles are node scoped objects there exists only 1 instance of the role used by
 * potentially multiple shards. To make sure all shards in the transaction can interact
 * with the role the role needs to be visible on all connections used by the transaction,
 * meaning we can only use 1 connection per node.
 */
static void
EnsureSequentialModeForRoleDDL(void)
{
	if (!IsTransactionBlock())
	{
		/* we do not need to switch to sequential mode if we are not in a transaction */
		return;
	}

	if (ParallelQueryExecutedInTransaction())
	{
		ereport(ERROR, (errmsg("cannot create or modify role because there was a "
		                       "parallel operation on a distributed table in the "
		                       "transaction"),
		                errdetail("When creating or altering a role, Citus needs to "
		                          "perform all operations over a single connection per "
		                          "node to ensure consistency."),
		                errhint("Try re-running the transaction with "
		                        "\"SET LOCAL citus.multi_shard_modify_mode TO "
		                        "\'sequential\';\"")));
	}

	ereport(DEBUG1, (errmsg("switching to sequential query execution mode"),
	                 errdetail("Role is created or altered. To make sure subsequent "
	                           "commands see the role correctly we need to make sure to "
	                           "use only one connection for all future commands")));
	SetLocalMultiShardModifyModeToSequential();
}
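
/*
 * Illustrative retry pattern from the hint above (hypothetical role name):
 *
 *   BEGIN;
 *   SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
 *   CREATE ROLE app_user;
 *   -- further distributed commands in the same transaction
 *   COMMIT;
 */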


@@ -161,14 +161,7 @@ PreprocessGrantOnSchemaStmt(Node *node, const char *queryString,
		return NIL;
	}

-	/*
-	 * Since access control needs to be handled manually on community, we need to support
-	 * such queries by handling them locally on worker nodes.
-	 */
-	if (!IsCoordinator())
-	{
-		return NIL;
-	}
	EnsureCoordinator();

	List *originalObjects = stmt->objects;

@@ -178,7 +171,11 @@ PreprocessGrantOnSchemaStmt(Node *node, const char *queryString,

	stmt->objects = originalObjects;

-	return NodeDDLTaskList(NON_COORDINATOR_NODES, list_make1(sql));
	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
	                            (void *) sql,
	                            ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}

@@ -24,6 +24,7 @@
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_cache.h"
#include "distributed/metadata_sync.h"
#include "nodes/makefuncs.h"
#include "distributed/worker_create_or_replace.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"

@@ -32,6 +33,7 @@
/* Local functions forward declarations for helper functions */
static bool OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId);
static Oid SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress);
static List * FilterDistributedSequences(GrantStmt *stmt);


/*

@@ -660,6 +662,97 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString)
}


/*
 * PreprocessGrantOnSequenceStmt is executed before the statement is applied to the local
 * postgres instance.
 *
 * In this stage we can prepare the commands that need to be run on all workers to grant
 * on distributed sequences.
 */
List *
PreprocessGrantOnSequenceStmt(Node *node, const char *queryString,
                              ProcessUtilityContext processUtilityContext)
{
	GrantStmt *stmt = castNode(GrantStmt, node);
	Assert(stmt->objtype == OBJECT_SEQUENCE);

	if (creating_extension)
	{
		/*
		 * extensions should be created separately on the workers, sequences cascading
		 * from an extension should therefore not be propagated here.
		 */
		return NIL;
	}

	if (!EnableMetadataSync)
	{
		/*
		 * we are configured to disable object propagation, should not propagate anything
		 */
		return NIL;
	}

	List *distributedSequences = FilterDistributedSequences(stmt);

	if (list_length(distributedSequences) == 0)
	{
		return NIL;
	}

	EnsureCoordinator();

	GrantStmt *stmtCopy = copyObject(stmt);
	stmtCopy->objects = distributedSequences;

	/*
	 * if the original command was targeting schemas, we have expanded to the distributed
	 * sequences in these schemas through FilterDistributedSequences.
	 */
	stmtCopy->targtype = ACL_TARGET_OBJECT;

	QualifyTreeNode((Node *) stmtCopy);

	char *sql = DeparseTreeNode((Node *) stmtCopy);

	List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) sql,
	                            ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}


/*
 * PostprocessGrantOnSequenceStmt makes sure dependencies of each
 * distributed sequence in the statement exist on all nodes
 */
List *
PostprocessGrantOnSequenceStmt(Node *node, const char *queryString)
{
	GrantStmt *stmt = castNode(GrantStmt, node);
	Assert(stmt->objtype == OBJECT_SEQUENCE);

	List *distributedSequences = FilterDistributedSequences(stmt);

	if (list_length(distributedSequences) == 0)
	{
		return NIL;
	}

	EnsureCoordinator();

	RangeVar *sequence = NULL;
	foreach_ptr(sequence, distributedSequences)
	{
		ObjectAddress sequenceAddress = { 0 };
		Oid sequenceOid = RangeVarGetRelid(sequence, NoLock, false);
		ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid);
		EnsureDependenciesExistOnAllNodes(&sequenceAddress);
	}

	return NIL;
}


/*
 * GenerateBackupNameForSequenceCollision generates a new sequence name for an existing
 * sequence. The name is generated in such a way that the new name doesn't overlap with
@@ -702,6 +795,96 @@ GenerateBackupNameForSequenceCollision(const ObjectAddress *address)
}


/*
 * FilterDistributedSequences determines and returns a list of distributed sequence
 * RangeVar-s from the given grant statement.
 * - If the stmt's targtype is ACL_TARGET_OBJECT, i.e. of the form GRANT ON SEQUENCE ...
 *   it returns the distributed sequences in the list of sequences in the statement
 * - If targtype is ACL_TARGET_ALL_IN_SCHEMA, i.e. GRANT ON ALL SEQUENCES IN SCHEMA ...
 *   it expands the ALL IN SCHEMA to the actual sequences, and returns the distributed
 *   sequences from those.
 */
static List *
FilterDistributedSequences(GrantStmt *stmt)
{
	bool grantOnSequenceCommand = (stmt->targtype == ACL_TARGET_OBJECT &&
	                               stmt->objtype == OBJECT_SEQUENCE);
	bool grantOnAllSequencesInSchemaCommand = (stmt->targtype ==
	                                           ACL_TARGET_ALL_IN_SCHEMA &&
	                                           stmt->objtype == OBJECT_SEQUENCE);

	/* we are only interested in sequence level grants */
	if (!grantOnSequenceCommand && !grantOnAllSequencesInSchemaCommand)
	{
		return NIL;
	}

	List *grantSequenceList = NIL;

	if (grantOnAllSequencesInSchemaCommand)
	{
		/* iterate over all namespace names provided to get their oid's */
		List *namespaceOidList = NIL;
		Value *namespaceValue = NULL;
		foreach_ptr(namespaceValue, stmt->objects)
		{
			char *nspname = strVal(namespaceValue);
			bool missing_ok = false;
			Oid namespaceOid = get_namespace_oid(nspname, missing_ok);
			namespaceOidList = list_append_unique_oid(namespaceOidList, namespaceOid);
		}

		/*
		 * iterate over all distributed sequences to filter the ones
		 * that belong to one of the namespaces from above
		 */
		List *distributedSequenceList = DistributedSequenceList();
		ObjectAddress *sequenceAddress = NULL;
		foreach_ptr(sequenceAddress, distributedSequenceList)
		{
			Oid namespaceOid = get_rel_namespace(sequenceAddress->objectId);

			/*
			 * if this distributed sequence's schema is one of the schemas
			 * specified in the GRANT .. ALL SEQUENCES IN SCHEMA ..
			 * add it to the list
			 */
			if (list_member_oid(namespaceOidList, namespaceOid))
			{
				RangeVar *distributedSequence = makeRangeVar(get_namespace_name(
				                                                 namespaceOid),
				                                             get_rel_name(
				                                                 sequenceAddress->objectId),
				                                             -1);
				grantSequenceList = lappend(grantSequenceList, distributedSequence);
			}
		}
	}
	else
	{
		bool missing_ok = false;
		RangeVar *sequenceRangeVar = NULL;
		foreach_ptr(sequenceRangeVar, stmt->objects)
		{
			ObjectAddress sequenceAddress = { 0 };
			Oid sequenceOid = RangeVarGetRelid(sequenceRangeVar, NoLock, missing_ok);
			ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid);

			/*
			 * if this sequence from GRANT .. ON SEQUENCE .. is a distributed
			 * sequence, add it to the list
			 */
			if (IsObjectDistributed(&sequenceAddress))
			{
				grantSequenceList = lappend(grantSequenceList, sequenceRangeVar);
			}
		}
	}

	return grantSequenceList;
}
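
/*
 * Illustrative example (hypothetical schema, sequence and role names): given
 *
 *   GRANT USAGE ON ALL SEQUENCES IN SCHEMA app TO app_user;
 *
 * where app.seq_orders is distributed and app.seq_scratch is not, this function
 * would return only app.seq_orders, so the deparsed command propagated to the
 * workers becomes roughly
 *
 *   GRANT USAGE ON SEQUENCE app.seq_orders TO app_user;
 */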


/*
 * RenameExistingSequenceWithDifferentTypeIfExists renames the sequence's type if
 * that sequence exists and the desired sequence type is different than its type.

@@ -10,13 +10,129 @@

#include "postgres.h"

#include "safe_lib.h"

#include <string.h>

#include "commands/defrem.h"
#include "distributed/commands.h"
#include "distributed/connection_management.h"
#include "distributed/pg_version_constants.h"
#include "distributed/version_compat.h"
#include "libpq-fe.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"


-/* placeholder for ProcessCreateSubscriptionStmt */
static char * GenerateConninfoWithAuth(char *conninfo);

/*
 * ProcessCreateSubscriptionStmt looks for a special citus_use_authinfo option.
 * If it is set to true, then we'll expand the node's authinfo into the create
 * statement (see GenerateConninfoWithAuth).
 */
Node *
ProcessCreateSubscriptionStmt(CreateSubscriptionStmt *createSubStmt)
{
	ListCell *currCell = NULL;
#if PG_VERSION_NUM < PG_VERSION_13
	ListCell *prevCell = NULL;
#endif
	bool useAuthinfo = false;

	foreach(currCell, createSubStmt->options)
	{
		DefElem *defElem = (DefElem *) lfirst(currCell);

		if (strcmp(defElem->defname, "citus_use_authinfo") == 0)
		{
			useAuthinfo = defGetBoolean(defElem);

			createSubStmt->options = list_delete_cell_compat(createSubStmt->options,
			                                                 currCell,
			                                                 prevCell);

			break;
		}
#if PG_VERSION_NUM < PG_VERSION_13
		prevCell = currCell;
#endif
	}

	if (useAuthinfo)
	{
		createSubStmt->conninfo = GenerateConninfoWithAuth(createSubStmt->conninfo);
	}

	return (Node *) createSubStmt;
}


/*
 * GenerateConninfoWithAuth extracts the host and port from the provided libpq
 * conninfo string, using them to find an appropriate authinfo for the target
 * host. If such an authinfo is found, it is added to the (repalloc'd) string,
 * which is then returned.
 */
static char *
GenerateConninfoWithAuth(char *conninfo)
{
	StringInfo connInfoWithAuth = makeStringInfo();
	char *host = NULL, *user = NULL;
	int32 port = -1;
	PQconninfoOption *option = NULL, *optionArray = NULL;

	optionArray = PQconninfoParse(conninfo, NULL);
	if (optionArray == NULL)
	{
		ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
		                errmsg("not a valid libpq connection info string: %s",
		                       conninfo)));
	}

	for (option = optionArray; option->keyword != NULL; option++)
	{
		if (option->val == NULL || option->val[0] == '\0')
		{
			continue;
		}

		if (strcmp(option->keyword, "host") == 0)
		{
			host = option->val;
		}
		else if (strcmp(option->keyword, "port") == 0)
		{
			port = pg_atoi(option->val, 4, 0);
		}
		else if (strcmp(option->keyword, "user") == 0)
		{
			user = option->val;
		}
	}

	/*
	 * In case of repetition of parameters in connection strings, last value
	 * wins. So first add the provided connection string, then global
	 * connection parameters, then node specific ones.
	 *
	 * Note that currently lists of parameters in pg_dist_authnode and
	 * citus.node_conninfo do not overlap.
	 *
	 * The only overlapping parameter between these three lists is
	 * connect_timeout, which is assigned in conninfo (generated
	 * by CreateShardMoveSubscription) and is also allowed in
	 * citus.node_conninfo. Prioritizing the value in citus.node_conninfo
	 * over conninfo gives user the power to control this value.
	 */
	appendStringInfo(connInfoWithAuth, "%s %s", conninfo, NodeConninfo);
	if (host != NULL && port > 0 && user != NULL)
	{
		char *nodeAuthInfo = GetAuthinfo(host, port, user);
		appendStringInfo(connInfoWithAuth, " %s", nodeAuthInfo);
	}

	PQconninfoFree(optionArray);

	return connInfoWithAuth->data;
}
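
/*
 * Illustrative example (hypothetical values): for
 *
 *   conninfo            = "host=10.0.0.5 port=5432 user=repl_user dbname=citus"
 *   citus.node_conninfo = "sslmode=require"
 *
 * and a pg_dist_authinfo entry for (10.0.0.5, 5432, repl_user) whose authinfo is
 * "password=secret", the returned string would be roughly
 *
 *   host=10.0.0.5 port=5432 user=repl_user dbname=citus sslmode=require password=secret
 *
 * where the later entries take precedence over earlier ones on repetition.
 */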


@@ -2600,6 +2600,7 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
 * ALTER TABLE ADD|DROP CONSTRAINT
 * ALTER TABLE REPLICA IDENTITY
 * ALTER TABLE SET ()
 * ALTER TABLE ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY
 * ALTER TABLE RESET ()
 * ALTER TABLE ENABLE/DISABLE TRIGGER (if enable_unsafe_triggers is not set, we only support triggers for citus local tables)
 */

@@ -2943,6 +2944,10 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
		case AT_SetNotNull:
		case AT_ReplicaIdentity:
		case AT_ChangeOwner:
		case AT_EnableRowSecurity:
		case AT_DisableRowSecurity:
		case AT_ForceRowSecurity:
		case AT_NoForceRowSecurity:
		case AT_ValidateConstraint:
		case AT_DropConstraint: /* we do the check for invalidation in AlterTableDropsForeignKey */
#if PG_VERSION_NUM >= PG_VERSION_14

@@ -2987,6 +2992,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
						errdetail("Only ADD|DROP COLUMN, SET|DROP NOT NULL, "
						          "SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, "
						          "SET (), RESET (), "
						          "ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, "
						          "ATTACH|DETACH PARTITION and TYPE subcommands "
						          "are supported.")));
}

@@ -54,6 +54,7 @@
#include "distributed/listutils.h"
#include "distributed/local_executor.h"
#include "distributed/maintenanced.h"
#include "distributed/multi_logical_replication.h"
#include "distributed/multi_partitioning_utils.h"
#if PG_VERSION_NUM < 140000
#include "distributed/metadata_cache.h"

@@ -65,6 +66,7 @@
#include "distributed/multi_physical_planner.h"
#include "distributed/reference_table_utils.h"
#include "distributed/resource_lock.h"
#include "distributed/string_utils.h"
#include "distributed/version_compat.h"
#include "distributed/worker_shard_visibility.h"
#include "distributed/worker_transaction.h"

@@ -77,6 +79,7 @@
#include "utils/lsyscache.h"
#include "utils/syscache.h"


bool EnableDDLPropagation = true; /* ddl propagation is enabled */
int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE;
PropSetCmdBehavior PropagateSetCommands = PROPSETCMD_NONE; /* SET prop off */

@@ -409,6 +412,31 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
		parsetree = ProcessCreateSubscriptionStmt(createSubStmt);
	}

	if (IsA(parsetree, AlterSubscriptionStmt))
	{
		AlterSubscriptionStmt *alterSubStmt = (AlterSubscriptionStmt *) parsetree;
		if (!superuser() &&
			StringStartsWith(alterSubStmt->subname,
			                 SHARD_MOVE_SUBSCRIPTION_PREFIX))
		{
			ereport(ERROR, (
						errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
						errmsg("Only superusers can alter shard move subscriptions")));
		}
	}

	if (IsA(parsetree, DropSubscriptionStmt))
	{
		DropSubscriptionStmt *dropSubStmt = (DropSubscriptionStmt *) parsetree;
		if (!superuser() &&
			StringStartsWith(dropSubStmt->subname, SHARD_MOVE_SUBSCRIPTION_PREFIX))
		{
			ereport(ERROR, (
						errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
						errmsg("Only superusers can drop shard move subscriptions")));
		}
	}

	/*
	 * Process SET LOCAL and SET TRANSACTION statements in multi-statement
	 * transactions.

@@ -565,7 +593,7 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
						errhint("You can manually create a database and its "
						        "extensions on workers.")));
	}
-	else if (IsA(parsetree, CreateRoleStmt))
	else if (IsA(parsetree, CreateRoleStmt) && !EnableCreateRolePropagation)
	{
		ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to worker"
		                        " nodes"),

@@ -595,6 +623,24 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
		StopMaintenanceDaemon(MyDatabaseId);
	}

	/*
	 * Make sure that dropping the role deletes the pg_dist_object entries. There is
	 * separate logic for roles, since roles are not included as dropped objects in the
	 * drop event trigger. To handle it both on worker and coordinator nodes, it is not
	 * implemented as a part of process functions but here.
	 */
	if (IsA(parsetree, DropRoleStmt))
	{
		DropRoleStmt *stmt = castNode(DropRoleStmt, parsetree);
		List *allDropRoles = stmt->roles;

		List *distributedDropRoles = FilterDistributedRoles(allDropRoles);
		if (list_length(distributedDropRoles) > 0)
		{
			UnmarkRolesDistributed(distributedDropRoles);
		}
	}

	pstmt->utilityStmt = parsetree;

	PG_TRY();

@ -704,6 +750,21 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
    {
        PostprocessAlterTableStmt(castNode(AlterTableStmt, parsetree));
    }
    if (IsA(parsetree, GrantStmt))
    {
        GrantStmt *grantStmt = (GrantStmt *) parsetree;
        if (grantStmt->targtype == ACL_TARGET_ALL_IN_SCHEMA)
        {
            /*
             * GRANT .. IN SCHEMA causes a deadlock if we don't use local execution
             * because standard process utility processes the shard placements as well
             * and the row-level locks in pg_class will not be released until the current
             * transaction commits. We could skip the local shard placements after standard
             * process utility, but for simplicity we just prefer using local execution.
             */
            SetLocalExecutionStatus(LOCAL_EXECUTION_REQUIRED);
        }
    }

    DDLJob *ddlJob = NULL;
    foreach_ptr(ddlJob, ddlJobs)
|
@ -1589,7 +1650,6 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands)
|
|||
ddlJob->targetObjectAddress = InvalidObjectAddress;
|
||||
ddlJob->metadataSyncCommand = NULL;
|
||||
ddlJob->taskList = list_make1(task);
|
||||
|
||||
return list_make1(ddlJob);
|
||||
}
|
||||
|
||||
|
|
|
@ -10,9 +10,12 @@

#include "postgres.h"

#include "access/transam.h"
#include "access/xact.h"
#include "distributed/backend_data.h"
#include "distributed/citus_safe_lib.h"
#include "distributed/connection_management.h"
#include "distributed/intermediate_result_pruning.h"
#include "distributed/metadata_cache.h"
#include "distributed/worker_manager.h"

@ -40,6 +43,7 @@ typedef struct ConnParamsInfo
static ConnParamsInfo ConnParams;

/* helper functions for processing connection info */
static ConnectionHashKey * GetEffectiveConnKey(ConnectionHashKey *key);
static Size CalculateMaxSize(void);
static int uri_prefix_length(const char *connstr);

@ -232,6 +236,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
     * already we can add a pointer to the runtimeValues.
     */
    char nodePortString[12] = "";
    ConnectionHashKey *effectiveKey = GetEffectiveConnKey(key);

    StringInfo applicationName = makeStringInfo();
    appendStringInfo(applicationName, "%s%ld", CITUS_APPLICATION_NAME_PREFIX,
@ -260,10 +265,10 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
        "application_name"
    };
    const char *runtimeValues[] = {
        key->hostname,
        effectiveKey->hostname,
        nodePortString,
        key->database,
        key->user,
        effectiveKey->database,
        effectiveKey->user,
        GetDatabaseEncodingName(),
        applicationName->data
    };
@ -300,7 +305,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
                    errmsg("too many connParams entries")));
    }

    pg_ltoa(key->port, nodePortString); /* populate node port string with port */
    pg_ltoa(effectiveKey->port, nodePortString); /* populate node port string with port */

    /* first step: copy global parameters to beginning of array */
    for (Size paramIndex = 0; paramIndex < ConnParams.size; paramIndex++)
@ -322,6 +327,58 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
            MemoryContextStrdup(context, runtimeValues[runtimeParamIndex]);
    }

    /* we look up authinfo by original key, not effective one */
    char *authinfo = GetAuthinfo(key->hostname, key->port, key->user);
    char *pqerr = NULL;
    PQconninfoOption *optionArray = PQconninfoParse(authinfo, &pqerr);
    if (optionArray == NULL)
    {
        /* PQconninfoParse failed, it's unsafe to continue as this has caused segfaults in production */
        if (pqerr == NULL)
        {
            /* parse failed without an error message, treat as OOM error */
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("out of memory"),
                     errdetail("Failed to parse authentication information via libpq")));
        }
        else
        {
            /*
             * Parse error, should not be possible as the validity is checked upon insert into pg_dist_authinfo,
             * however, better safe than sorry
             */

            /*
             * errmsg is populated by PQconninfoParse which requires us to free the message. Since we want to
             * incorporate the parse error into the detail of our message we need to copy the error message before
             * freeing it. Not freeing the message will leak memory.
             */
            char *pqerrcopy = pstrdup(pqerr);
            PQfreemem(pqerr);

            ereport(ERROR, (errmsg(
                                "failed to parse node authentication information for %s@%s:%d",
                                key->user, key->hostname, key->port),
                            errdetail("%s", pqerrcopy)));
        }
    }

    for (PQconninfoOption *option = optionArray; option->keyword != NULL; option++)
    {
        if (option->val == NULL || option->val[0] == '\0')
        {
            continue;
        }

        connKeywords[authParamsIdx] = MemoryContextStrdup(context, option->keyword);
        connValues[authParamsIdx] = MemoryContextStrdup(context, option->val);

        authParamsIdx++;
    }

    PQconninfoFree(optionArray);

    /* final step: add terminal NULL, required by libpq */
    connKeywords[authParamsIdx] = connValues[authParamsIdx] = NULL;
}
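
For context, the connKeywords/connValues arrays assembled above follow libpq's contract for PQconnectdbParams: two parallel, NULL-terminated arrays of keywords and values. A minimal standalone sketch of that contract follows; the host, port, and dbname values are placeholders for illustration only, not values Citus ships.

/* Minimal sketch of the parallel-array contract GetConnParams builds toward.
 * Compile with something like: cc sketch.c -I$(pg_config --includedir) -lpq
 * All connection values below are hypothetical placeholders. */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    /* parallel arrays; libpq stops reading at the terminating NULL entry */
    const char *keywords[] = { "host", "port", "dbname", NULL };
    const char *values[] = { "localhost", "5432", "postgres", NULL };

    PGconn *conn = PQconnectdbParams(keywords, values, /* expand_dbname */ 0);
    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
    }

    PQfinish(conn);
    return 0;
}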

@ -346,6 +403,116 @@ GetConnParam(const char *keyword)
}


/*
 * GetEffectiveConnKey checks whether there is any pooler configuration for the
 * provided key (host/port combination). The one case where this logic is not
 * applied is for loopback connections originating within the task tracker. If
 * a corresponding row is found in the poolinfo table, a modified (effective)
 * key is returned with the node, port, and dbname overridden, as applicable;
 * otherwise, the original key is returned unmodified.
 */
ConnectionHashKey *
GetEffectiveConnKey(ConnectionHashKey *key)
{
    PQconninfoOption *option = NULL, *optionArray = NULL;

    if (!IsTransactionState())
    {
        /* we're in the task tracker, so should only see loopback */
        Assert(strncmp(LOCAL_HOST_NAME, key->hostname, MAX_NODE_LENGTH) == 0 &&
               PostPortNumber == key->port);
        return key;
    }

    WorkerNode *worker = FindWorkerNode(key->hostname, key->port);
    if (worker == NULL)
    {
        /* this can be hit when the key references an unknown node */
        return key;
    }

    char *poolinfo = GetPoolinfoViaCatalog(worker->nodeId);
    if (poolinfo == NULL)
    {
        return key;
    }

    /* copy the key to provide defaults for all fields */
    ConnectionHashKey *effectiveKey = palloc(sizeof(ConnectionHashKey));
    *effectiveKey = *key;

    optionArray = PQconninfoParse(poolinfo, NULL);
    for (option = optionArray; option->keyword != NULL; option++)
    {
        if (option->val == NULL || option->val[0] == '\0')
        {
            continue;
        }

        if (strcmp(option->keyword, "host") == 0)
        {
            strlcpy(effectiveKey->hostname, option->val, MAX_NODE_LENGTH);
        }
        else if (strcmp(option->keyword, "port") == 0)
        {
            effectiveKey->port = pg_atoi(option->val, 4, 0);
        }
        else if (strcmp(option->keyword, "dbname") == 0)
        {
            /* permit dbname for poolers which can key pools based on dbname */
            strlcpy(effectiveKey->database, option->val, NAMEDATALEN);
        }
        else
        {
            ereport(FATAL, (errmsg("unrecognized poolinfo keyword")));
        }
    }

    PQconninfoFree(optionArray);

    return effectiveKey;
}
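
PQconninfoParse, used here and for authinfo strings above, hands back an array of PQconninfoOption structs terminated by a NULL keyword. A hedged standalone sketch of the same parse-and-inspect pattern; the conninfo string is a made-up example, not a value read from pg_dist_poolinfo.

/* Standalone sketch of the PQconninfoParse pattern GetEffectiveConnKey uses;
 * the conninfo string is a hypothetical example. */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    char *errorMessage = NULL;
    PQconninfoOption *options = PQconninfoParse("host=10.0.0.5 port=6432",
                                                &errorMessage);
    if (options == NULL)
    {
        /* on parse failure libpq returns a malloc'ed message, or NULL on OOM */
        fprintf(stderr, "parse failed: %s\n",
                errorMessage ? errorMessage : "out of memory");
        PQfreemem(errorMessage);
        return 1;
    }

    for (PQconninfoOption *option = options; option->keyword != NULL; option++)
    {
        /* unset options carry a NULL or empty val, just as the loop above skips */
        if (option->val != NULL && option->val[0] != '\0')
        {
            printf("%s => %s\n", option->keyword, option->val);
        }
    }

    PQconninfoFree(options);
    return 0;
}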


/*
 * GetAuthinfo simply returns the string representation of authentication info
 * for a specified hostname/port/user combination. If the current transaction
 * is valid, then we use the catalog, otherwise a shared memory hash is used,
 * a mode that is currently only useful for getting authentication information
 * to the Task Tracker, which lacks a database connection and transaction.
 */
char *
GetAuthinfo(char *hostname, int32 port, char *user)
{
    char *authinfo = NULL;
    bool isLoopback = (strncmp(LOCAL_HOST_NAME, hostname, MAX_NODE_LENGTH) == 0 &&
                       PostPortNumber == port);

    if (IsTransactionState())
    {
        int64 nodeId = WILDCARD_NODE_ID;

        /* -1 is a special value for loopback connections (task tracker) */
        if (isLoopback)
        {
            nodeId = LOCALHOST_NODE_ID;
        }
        else
        {
            WorkerNode *worker = FindWorkerNode(hostname, port);
            if (worker != NULL)
            {
                nodeId = worker->nodeId;
            }
        }

        authinfo = GetAuthinfoViaCatalog(user, nodeId);
    }

    return (authinfo != NULL) ? authinfo : "";
}


/*
 * CalculateMaxSize simply counts the number of elements returned by
 * PQconndefaults, including the final NULL. This helps us know how space would
@ -18,6 +18,7 @@
#include "distributed/listutils.h"
#include "distributed/log_utils.h"
#include "distributed/remote_commands.h"
#include "distributed/errormessage.h"
#include "distributed/cancel_utils.h"
#include "lib/stringinfo.h"
#include "miscadmin.h"

@ -79,6 +79,7 @@ static void deparse_index_columns(StringInfo buffer, List *indexParameterList,
                                  List *deparseContext);
static void AppendStorageParametersToString(StringInfo stringBuffer,
                                            List *optionList);
static const char * convert_aclright_to_string(int aclright);
static void simple_quote_literal(StringInfo buf, const char *val);
static void AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer);

@ -1062,6 +1063,138 @@ pg_get_indexclusterdef_string(Oid indexRelationId)
}


/*
 * pg_get_table_grants returns a list of sql statements which recreate the
 * permissions for a specific table.
 *
 * This function is modeled after aclexplode(), don't change too heavily.
 */
List *
pg_get_table_grants(Oid relationId)
{
    /* *INDENT-OFF* */
    StringInfoData buffer;
    List *defs = NIL;
    bool isNull = false;

    Relation relation = relation_open(relationId, AccessShareLock);
    char *relationName = generate_relation_name(relationId, NIL);

    initStringInfo(&buffer);

    /* lookup all table level grants */
    HeapTuple classTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId));
    if (!HeapTupleIsValid(classTuple))
    {
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_TABLE),
                 errmsg("relation with OID %u does not exist",
                        relationId)));
    }

    Datum aclDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_relacl,
                                     &isNull);

    ReleaseSysCache(classTuple);

    if (!isNull)
    {
        /*
         * First revoke all default permissions, so we can start adding the
         * exact permissions from the master. Note that we only do so if there
         * are any actual grants; an empty grant set signals default
         * permissions.
         *
         * Note: This doesn't work correctly if default permissions have been
         * changed with ALTER DEFAULT PRIVILEGES - but that's hard to fix
         * properly currently.
         */
        appendStringInfo(&buffer, "REVOKE ALL ON %s FROM PUBLIC",
                         relationName);
        defs = lappend(defs, pstrdup(buffer.data));
        resetStringInfo(&buffer);

        /* iterate through the acl datastructure, emit GRANTs */

        Acl *acl = DatumGetAclP(aclDatum);
        AclItem *aidat = ACL_DAT(acl);

        int offtype = -1;
        int i = 0;
        while (i < ACL_NUM(acl))
        {
            AclItem *aidata = NULL;
            AclMode priv_bit = 0;

            offtype++;

            if (offtype == N_ACL_RIGHTS)
            {
                offtype = 0;
                i++;
                if (i >= ACL_NUM(acl)) /* done */
                {
                    break;
                }
            }

            aidata = &aidat[i];
            priv_bit = 1 << offtype;

            if (ACLITEM_GET_PRIVS(*aidata) & priv_bit)
            {
                const char *roleName = NULL;
                const char *withGrant = "";

                if (aidata->ai_grantee != 0)
                {
                    HeapTuple htup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(aidata->ai_grantee));
                    if (HeapTupleIsValid(htup))
                    {
                        Form_pg_authid authForm = ((Form_pg_authid) GETSTRUCT(htup));

                        roleName = quote_identifier(NameStr(authForm->rolname));

                        ReleaseSysCache(htup);
                    }
                    else
                    {
                        elog(ERROR, "cache lookup failed for role %u", aidata->ai_grantee);
                    }
                }
                else
                {
                    roleName = "PUBLIC";
                }

                if ((ACLITEM_GET_GOPTIONS(*aidata) & priv_bit) != 0)
                {
                    withGrant = " WITH GRANT OPTION";
                }

                appendStringInfo(&buffer, "GRANT %s ON %s TO %s%s",
                                 convert_aclright_to_string(priv_bit),
                                 relationName,
                                 roleName,
                                 withGrant);

                defs = lappend(defs, pstrdup(buffer.data));

                resetStringInfo(&buffer);
            }
        }
    }

    resetStringInfo(&buffer);

    relation_close(relation, NoLock);
    return defs;
    /* *INDENT-ON* */
}
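
The loop above visits every (grantee, privilege) pair by sweeping offtype over the privilege bits of each AclItem. A simplified standalone illustration of that bit sweep; the four privilege names follow PostgreSQL's low-order ACL bit order, but the sample bitmask and object names are made up.

/* Simplified sketch of the privilege-bit sweep in pg_get_table_grants;
 * the mask and the table/role names are illustrative stand-ins. */
#include <stdio.h>

#define N_RIGHTS 4

int
main(void)
{
    /* PostgreSQL assigns INSERT=1<<0, SELECT=1<<1, UPDATE=1<<2, DELETE=1<<3 */
    const char *rightNames[N_RIGHTS] = { "INSERT", "SELECT", "UPDATE", "DELETE" };
    unsigned int grantedMask = (1u << 1) | (1u << 3); /* SELECT and DELETE */

    for (int offtype = 0; offtype < N_RIGHTS; offtype++)
    {
        unsigned int privBit = 1u << offtype;
        if (grantedMask & privBit)
        {
            printf("GRANT %s ON some_table TO some_role\n", rightNames[offtype]);
        }
    }

    return 0;
}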


/*
 * generate_qualified_relation_name computes the schema-qualified name to display for a
 * relation specified by OID.

@ -1156,6 +1289,45 @@ AppendStorageParametersToString(StringInfo stringBuffer, List *optionList)
}


/* copy of postgresql's function, which is static as well */
static const char *
convert_aclright_to_string(int aclright)
{
    /* *INDENT-OFF* */
    switch (aclright)
    {
        case ACL_INSERT:
            return "INSERT";
        case ACL_SELECT:
            return "SELECT";
        case ACL_UPDATE:
            return "UPDATE";
        case ACL_DELETE:
            return "DELETE";
        case ACL_TRUNCATE:
            return "TRUNCATE";
        case ACL_REFERENCES:
            return "REFERENCES";
        case ACL_TRIGGER:
            return "TRIGGER";
        case ACL_EXECUTE:
            return "EXECUTE";
        case ACL_USAGE:
            return "USAGE";
        case ACL_CREATE:
            return "CREATE";
        case ACL_CREATE_TEMP:
            return "TEMPORARY";
        case ACL_CONNECT:
            return "CONNECT";
        default:
            elog(ERROR, "unrecognized aclright: %d", aclright);
            return NULL;
    }
    /* *INDENT-ON* */
}


/*
 * contain_nextval_expression_walker walks over expression tree and returns
 * true if it contains call to 'nextval' function.
@ -1224,6 +1396,46 @@ pg_get_replica_identity_command(Oid tableRelationId)
}


/*
 * pg_get_row_level_security_commands function returns the required ALTER .. TABLE
 * commands to define the row level security settings for a relation.
 */
List *
pg_get_row_level_security_commands(Oid relationId)
{
    StringInfoData buffer;
    List *commands = NIL;

    initStringInfo(&buffer);

    Relation relation = table_open(relationId, AccessShareLock);

    if (relation->rd_rel->relrowsecurity)
    {
        char *relationName = generate_qualified_relation_name(relationId);

        appendStringInfo(&buffer, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY",
                         relationName);
        commands = lappend(commands, pstrdup(buffer.data));
        resetStringInfo(&buffer);
    }

    if (relation->rd_rel->relforcerowsecurity)
    {
        char *relationName = generate_qualified_relation_name(relationId);

        appendStringInfo(&buffer, "ALTER TABLE %s FORCE ROW LEVEL SECURITY",
                         relationName);
        commands = lappend(commands, pstrdup(buffer.data));
        resetStringInfo(&buffer);
    }

    table_close(relation, AccessShareLock);

    return commands;
}
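
For a relation with both relrowsecurity and relforcerowsecurity set, the function yields exactly two commands. A small illustration with a hypothetical table name (the real function derives the name via generate_qualified_relation_name):

/* Illustration only: the two commands pg_get_row_level_security_commands
 * would produce for a hypothetical table public.events with both flags set. */
#include <stdio.h>

int
main(void)
{
    const char *relationName = "public.events"; /* hypothetical */

    printf("ALTER TABLE %s ENABLE ROW LEVEL SECURITY\n", relationName);
    printf("ALTER TABLE %s FORCE ROW LEVEL SECURITY\n", relationName);
    return 0;
}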


/*
 * Generate a C string representing a relation's reloptions, or NULL if none.
 *
@ -0,0 +1,93 @@
/*-------------------------------------------------------------------------
 *
 * deparse_foreign_data_wrapper_stmts.c
 *      All routines to deparse foreign data wrapper statements.
 *
 * Copyright (c) Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "commands/defrem.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/relay_utility.h"
#include "lib/stringinfo.h"
#include "nodes/nodes.h"
#include "utils/builtins.h"

static void AppendGrantOnFDWStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnFDWNames(StringInfo buf, GrantStmt *stmt);


char *
DeparseGrantOnFDWStmt(Node *node)
{
    GrantStmt *stmt = castNode(GrantStmt, node);
    Assert(stmt->objtype == OBJECT_FDW);

    StringInfoData str = { 0 };
    initStringInfo(&str);

    AppendGrantOnFDWStmt(&str, stmt);

    return str.data;
}


static void
AppendGrantOnFDWStmt(StringInfo buf, GrantStmt *stmt)
{
    Assert(stmt->objtype == OBJECT_FDW);

    appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE");

    if (!stmt->is_grant && stmt->grant_option)
    {
        appendStringInfo(buf, "GRANT OPTION FOR ");
    }

    AppendGrantPrivileges(buf, stmt);

    AppendGrantOnFDWNames(buf, stmt);

    AppendGrantGrantees(buf, stmt);

    if (stmt->is_grant && stmt->grant_option)
    {
        appendStringInfo(buf, " WITH GRANT OPTION");
    }
    if (!stmt->is_grant)
    {
        if (stmt->behavior == DROP_RESTRICT)
        {
            appendStringInfo(buf, " RESTRICT");
        }
        else if (stmt->behavior == DROP_CASCADE)
        {
            appendStringInfo(buf, " CASCADE");
        }
    }
    appendStringInfo(buf, ";");
}


static void
AppendGrantOnFDWNames(StringInfo buf, GrantStmt *stmt)
{
    ListCell *cell = NULL;
    appendStringInfo(buf, " ON FOREIGN DATA WRAPPER ");

    foreach(cell, stmt->objects)
    {
        char *fdwname = strVal(lfirst(cell));

        appendStringInfoString(buf, quote_identifier(fdwname));
        if (cell != list_tail(stmt->objects))
        {
            appendStringInfo(buf, ", ");
        }
    }
}

@ -27,6 +27,8 @@ static void AppendDropForeignServerStmt(StringInfo buf, DropStmt *stmt);
static void AppendServerNames(StringInfo buf, DropStmt *stmt);
static void AppendBehavior(StringInfo buf, DropStmt *stmt);
static char * GetDefElemActionString(DefElemAction action);
static void AppendGrantOnForeignServerStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnForeignServerServers(StringInfo buf, GrantStmt *stmt);

char *
DeparseCreateForeignServerStmt(Node *node)

@ -104,6 +106,21 @@ DeparseDropForeignServerStmt(Node *node)
}


char *
DeparseGrantOnForeignServerStmt(Node *node)
{
    GrantStmt *stmt = castNode(GrantStmt, node);
    Assert(stmt->objtype == OBJECT_FOREIGN_SERVER);

    StringInfoData str = { 0 };
    initStringInfo(&str);

    AppendGrantOnForeignServerStmt(&str, stmt);

    return str.data;
}


static void
AppendCreateForeignServerStmt(StringInfo buf, CreateForeignServerStmt *stmt)
{

@ -275,3 +292,58 @@ GetDefElemActionString(DefElemAction action)
            return "";
    }
}


static void
AppendGrantOnForeignServerStmt(StringInfo buf, GrantStmt *stmt)
{
    Assert(stmt->objtype == OBJECT_FOREIGN_SERVER);

    appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE");

    if (!stmt->is_grant && stmt->grant_option)
    {
        appendStringInfo(buf, "GRANT OPTION FOR ");
    }

    AppendGrantPrivileges(buf, stmt);

    AppendGrantOnForeignServerServers(buf, stmt);

    AppendGrantGrantees(buf, stmt);

    if (stmt->is_grant && stmt->grant_option)
    {
        appendStringInfo(buf, " WITH GRANT OPTION");
    }
    if (!stmt->is_grant)
    {
        if (stmt->behavior == DROP_RESTRICT)
        {
            appendStringInfo(buf, " RESTRICT");
        }
        else if (stmt->behavior == DROP_CASCADE)
        {
            appendStringInfo(buf, " CASCADE");
        }
    }
    appendStringInfo(buf, ";");
}


static void
AppendGrantOnForeignServerServers(StringInfo buf, GrantStmt *stmt)
{
    ListCell *cell = NULL;
    appendStringInfo(buf, " ON FOREIGN SERVER ");

    foreach(cell, stmt->objects)
    {
        char *servername = strVal(lfirst(cell));
        appendStringInfoString(buf, quote_identifier(servername));
        if (cell != list_tail(stmt->objects))
        {
            appendStringInfo(buf, ", ");
        }
    }
}

@ -67,6 +67,9 @@ static void AppendAlterFunctionSchemaStmt(StringInfo buf, AlterObjectSchemaStmt
static void AppendAlterFunctionOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt);
static void AppendAlterFunctionDependsStmt(StringInfo buf, AlterObjectDependsStmt *stmt);

static void AppendGrantOnFunctionStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnFunctionFunctions(StringInfo buf, GrantStmt *stmt);

static char * CopyAndConvertToUpperCase(const char *str);

/*

@ -711,3 +714,113 @@ CopyAndConvertToUpperCase(const char *str)

    return result;
}


/*
 * DeparseGrantOnFunctionStmt builds and returns a string representing the GrantOnFunctionStmt
 */
char *
DeparseGrantOnFunctionStmt(Node *node)
{
    GrantStmt *stmt = castNode(GrantStmt, node);
    Assert(isFunction(stmt->objtype));

    StringInfoData str = { 0 };
    initStringInfo(&str);

    AppendGrantOnFunctionStmt(&str, stmt);

    return str.data;
}


/*
 * AppendGrantOnFunctionStmt builds an SQL command representing a
 * GRANT .. ON FUNCTION command from the given GrantStmt object and
 * appends it to the buffer.
 */
static void
AppendGrantOnFunctionStmt(StringInfo buf, GrantStmt *stmt)
{
    Assert(isFunction(stmt->objtype));

    if (stmt->targtype == ACL_TARGET_ALL_IN_SCHEMA)
    {
        elog(ERROR,
             "GRANT .. ALL FUNCTIONS/PROCEDURES IN SCHEMA is not supported for formatting.");
    }

    appendStringInfoString(buf, stmt->is_grant ? "GRANT " : "REVOKE ");

    if (!stmt->is_grant && stmt->grant_option)
    {
        appendStringInfoString(buf, "GRANT OPTION FOR ");
    }

    AppendGrantPrivileges(buf, stmt);

    AppendGrantOnFunctionFunctions(buf, stmt);

    AppendGrantGrantees(buf, stmt);

    if (stmt->is_grant && stmt->grant_option)
    {
        appendStringInfoString(buf, " WITH GRANT OPTION");
    }
    if (!stmt->is_grant)
    {
        if (stmt->behavior == DROP_RESTRICT)
        {
            appendStringInfoString(buf, " RESTRICT");
        }
        else if (stmt->behavior == DROP_CASCADE)
        {
            appendStringInfoString(buf, " CASCADE");
        }
    }
    appendStringInfoString(buf, ";");
}


/*
 * AppendGrantOnFunctionFunctions appends the function names along with their
 * arguments to the given StringInfo from the given GrantStmt
 */
static void
AppendGrantOnFunctionFunctions(StringInfo buf, GrantStmt *stmt)
{
    ListCell *cell = NULL;
    appendStringInfo(buf, " ON %s ", ObjectTypeToKeyword(stmt->objtype));

    foreach(cell, stmt->objects)
    {
        /*
         * GrantOnFunction statement keeps its objects (functions) as
         * a list of ObjectWithArgs
         */
        ObjectWithArgs *function = (ObjectWithArgs *) lfirst(cell);

        appendStringInfoString(buf, NameListToString(function->objname));
        if (!function->args_unspecified)
        {
            /* if args are specified, we should append "(arg1, arg2, ...)" to the function name */
            const char *args = TypeNameListToString(function->objargs);
            appendStringInfo(buf, "(%s)", args);
        }
        if (cell != list_tail(stmt->objects))
        {
            appendStringInfoString(buf, ", ");
        }
    }
}


/*
 * isFunction returns true if the given ObjectType is a function, a procedure
 * or a routine; otherwise returns false
 */
bool
isFunction(ObjectType objectType)
{
    return (objectType == OBJECT_FUNCTION || objectType == OBJECT_PROCEDURE ||
            objectType == OBJECT_ROUTINE);
}

@ -21,7 +21,11 @@

static void AppendAlterRoleStmt(StringInfo buf, AlterRoleStmt *stmt);
static void AppendAlterRoleSetStmt(StringInfo buf, AlterRoleSetStmt *stmt);
static void AppendCreateRoleStmt(StringInfo buf, CreateRoleStmt *stmt);
static void AppendRoleOption(StringInfo buf, ListCell *optionCell);
static void AppendRoleList(StringInfo buf, List *roleList);
static void AppendDropRoleStmt(StringInfo buf, DropRoleStmt *stmt);
static void AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt);


/*

@ -173,6 +177,213 @@ AppendRoleOption(StringInfo buf, ListCell *optionCell)
}


/*
 * DeparseCreateRoleStmt builds and returns a string representing the
 * CreateRoleStmt for application on a remote server.
 */
char *
DeparseCreateRoleStmt(Node *node)
{
    CreateRoleStmt *stmt = castNode(CreateRoleStmt, node);

    StringInfoData buf = { 0 };
    initStringInfo(&buf);

    AppendCreateRoleStmt(&buf, stmt);

    return buf.data;
}


/*
 * AppendCreateRoleStmt generates the string representation of the
 * CreateRoleStmt and appends it to the buffer.
 */
static void
AppendCreateRoleStmt(StringInfo buf, CreateRoleStmt *stmt)
{
    ListCell *optionCell = NULL;

    appendStringInfo(buf, "CREATE ");

    switch (stmt->stmt_type)
    {
        case ROLESTMT_ROLE:
        {
            appendStringInfo(buf, "ROLE ");
            break;
        }

        case ROLESTMT_USER:
        {
            appendStringInfo(buf, "USER ");
            break;
        }

        case ROLESTMT_GROUP:
        {
            appendStringInfo(buf, "GROUP ");
            break;
        }
    }

    appendStringInfo(buf, "%s", quote_identifier(stmt->role));

    foreach(optionCell, stmt->options)
    {
        AppendRoleOption(buf, optionCell);

        DefElem *option = (DefElem *) lfirst(optionCell);

        if (strcmp(option->defname, "sysid") == 0)
        {
            appendStringInfo(buf, " SYSID %s", quote_literal_cstr(strVal(option->arg)));
        }
        else if (strcmp(option->defname, "adminmembers") == 0)
        {
            appendStringInfo(buf, " ADMIN ");
            AppendRoleList(buf, (List *) option->arg);
        }
        else if (strcmp(option->defname, "rolemembers") == 0)
        {
            appendStringInfo(buf, " ROLE ");
            AppendRoleList(buf, (List *) option->arg);
        }
        else if (strcmp(option->defname, "addroleto") == 0)
        {
            appendStringInfo(buf, " IN ROLE ");
            AppendRoleList(buf, (List *) option->arg);
        }
    }
}


/*
 * DeparseDropRoleStmt builds and returns a string representing the
 * DropRoleStmt for application on a remote server.
 */
char *
DeparseDropRoleStmt(Node *node)
{
    DropRoleStmt *stmt = castNode(DropRoleStmt, node);

    StringInfoData buf = { 0 };
    initStringInfo(&buf);

    AppendDropRoleStmt(&buf, stmt);

    return buf.data;
}


/*
 * AppendDropRoleStmt generates the string representation of the
 * DropRoleStmt and appends it to the buffer.
 */
static void
AppendDropRoleStmt(StringInfo buf, DropRoleStmt *stmt)
{
    appendStringInfo(buf, "DROP ROLE ");

    if (stmt->missing_ok)
    {
        appendStringInfo(buf, "IF EXISTS ");
    }

    AppendRoleList(buf, stmt->roles);
}


static void
AppendRoleList(StringInfo buf, List *roleList)
{
    ListCell *cell = NULL;
    foreach(cell, roleList)
    {
        Node *roleNode = (Node *) lfirst(cell);
        Assert(IsA(roleNode, RoleSpec) || IsA(roleNode, AccessPriv));
        char const *rolename = NULL;
        if (IsA(roleNode, RoleSpec))
        {
            rolename = RoleSpecString((RoleSpec *) roleNode, true);
        }
        if (IsA(roleNode, AccessPriv))
        {
            rolename = quote_identifier(((AccessPriv *) roleNode)->priv_name);
        }
        appendStringInfoString(buf, rolename);
        if (cell != list_tail(roleList))
        {
            appendStringInfo(buf, ", ");
        }
    }
}


/*
 * DeparseGrantRoleStmt builds and returns a string representing the
 * GrantRoleStmt for application on a remote server.
 */
char *
DeparseGrantRoleStmt(Node *node)
{
    GrantRoleStmt *stmt = castNode(GrantRoleStmt, node);

    StringInfoData buf = { 0 };
    initStringInfo(&buf);

    AppendGrantRoleStmt(&buf, stmt);

    return buf.data;
}


/*
 * AppendGrantRoleStmt generates the string representation of the
 * GrantRoleStmt and appends it to the buffer.
 */
static void
AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt)
{
    appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE");

    if (!stmt->is_grant && stmt->admin_opt)
    {
        appendStringInfo(buf, "ADMIN OPTION FOR ");
    }

    AppendRoleList(buf, stmt->granted_roles);

    appendStringInfo(buf, "%s ", stmt->is_grant ? " TO " : " FROM ");

    AppendRoleList(buf, stmt->grantee_roles);

    if (stmt->is_grant)
    {
        if (stmt->admin_opt)
        {
            appendStringInfo(buf, " WITH ADMIN OPTION");
        }

        if (stmt->grantor)
        {
            appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(stmt->grantor, true));
        }
    }
    else
    {
        if (stmt->behavior == DROP_RESTRICT)
        {
            appendStringInfo(buf, " RESTRICT");
        }
        else if (stmt->behavior == DROP_CASCADE)
        {
            appendStringInfo(buf, " CASCADE");
        }
    }
}
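
Following the append order above, a grant with the admin option and an explicit grantor deparses to a single statement. A standalone sketch reproducing that shape for hypothetical role names; buffer handling is simplified to snprintf instead of StringInfo.

/* Illustration of the output shape AppendGrantRoleStmt produces for a
 * hypothetical GRANT; all role names here are made up. */
#include <stdio.h>

int
main(void)
{
    char buf[256];

    snprintf(buf, sizeof(buf), "%s %s TO %s%s GRANTED BY %s",
             "GRANT", "reporting", "alice", " WITH ADMIN OPTION", "bob");
    puts(buf); /* GRANT reporting TO alice WITH ADMIN OPTION GRANTED BY bob */
    return 0;
}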


/*
 * AppendAlterRoleSetStmt generates the string representation of the
 * AlterRoleSetStmt and appends it to the buffer.

@ -22,9 +22,7 @@

static void AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt);
static void AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt);
static void AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSchemaPrivileges(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSchemaSchemas(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSchemaGrantees(StringInfo buf, GrantStmt *stmt);
static void AppendAlterSchemaRenameStmt(StringInfo buf, RenameStmt *stmt);

char *

@ -161,11 +159,11 @@ AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt)
        appendStringInfo(buf, "GRANT OPTION FOR ");
    }

    AppendGrantOnSchemaPrivileges(buf, stmt);
    AppendGrantPrivileges(buf, stmt);

    AppendGrantOnSchemaSchemas(buf, stmt);

    AppendGrantOnSchemaGrantees(buf, stmt);
    AppendGrantGrantees(buf, stmt);

    if (stmt->is_grant && stmt->grant_option)
    {

@ -186,8 +184,8 @@ AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt)
}


static void
AppendGrantOnSchemaPrivileges(StringInfo buf, GrantStmt *stmt)
void
AppendGrantPrivileges(StringInfo buf, GrantStmt *stmt)
{
    if (list_length(stmt->privileges) == 0)
    {

@ -227,8 +225,8 @@ AppendGrantOnSchemaSchemas(StringInfo buf, GrantStmt *stmt)
}


static void
AppendGrantOnSchemaGrantees(StringInfo buf, GrantStmt *stmt)
void
AppendGrantGrantees(StringInfo buf, GrantStmt *stmt)
{
    ListCell *cell = NULL;
    appendStringInfo(buf, " %s ", stmt->is_grant ? "TO" : "FROM");

@ -27,6 +27,8 @@ static void AppendSequenceNameList(StringInfo buf, List *objects, ObjectType obj
static void AppendRenameSequenceStmt(StringInfo buf, RenameStmt *stmt);
static void AppendAlterSequenceSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt);
static void AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt);
static void AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt);

/*
 * DeparseDropSequenceStmt builds and returns a string representing the DropStmt

@ -254,3 +256,107 @@ AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt)
        }
    }
}


/*
 * DeparseGrantOnSequenceStmt builds and returns a string representing the GrantOnSequenceStmt
 */
char *
DeparseGrantOnSequenceStmt(Node *node)
{
    GrantStmt *stmt = castNode(GrantStmt, node);
    Assert(stmt->objtype == OBJECT_SEQUENCE);

    StringInfoData str = { 0 };
    initStringInfo(&str);

    AppendGrantOnSequenceStmt(&str, stmt);

    return str.data;
}


/*
 * AppendGrantOnSequenceStmt builds an SQL command representing a
 * GRANT .. ON SEQUENCE command from the given GrantStmt object and
 * appends it to the buffer.
 */
static void
AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt)
{
    Assert(stmt->objtype == OBJECT_SEQUENCE);

    if (stmt->targtype == ACL_TARGET_ALL_IN_SCHEMA)
    {
        /*
         * Normally we shouldn't reach this: we deparse a GrantStmt with
         * OBJECT_SEQUENCE only after setting targtype to ACL_TARGET_OBJECT.
         */
        elog(ERROR,
             "GRANT .. ALL SEQUENCES IN SCHEMA is not supported for formatting.");
    }

    appendStringInfoString(buf, stmt->is_grant ? "GRANT " : "REVOKE ");

    if (!stmt->is_grant && stmt->grant_option)
    {
        appendStringInfoString(buf, "GRANT OPTION FOR ");
    }

    AppendGrantPrivileges(buf, stmt);

    AppendGrantOnSequenceSequences(buf, stmt);

    AppendGrantGrantees(buf, stmt);

    if (stmt->is_grant && stmt->grant_option)
    {
        appendStringInfoString(buf, " WITH GRANT OPTION");
    }
    if (!stmt->is_grant)
    {
        if (stmt->behavior == DROP_RESTRICT)
        {
            appendStringInfoString(buf, " RESTRICT");
        }
        else if (stmt->behavior == DROP_CASCADE)
        {
            appendStringInfoString(buf, " CASCADE");
        }
    }
    appendStringInfoString(buf, ";");
}


/*
 * AppendGrantOnSequenceSequences appends the sequence names
 * to the given StringInfo from the given GrantStmt
 */
static void
AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt)
{
    Assert(stmt->objtype == OBJECT_SEQUENCE);

    appendStringInfoString(buf, " ON SEQUENCE ");

    ListCell *cell = NULL;
    foreach(cell, stmt->objects)
    {
        /*
         * GrantOnSequence statement keeps its objects (sequences) as
         * a list of RangeVar-s
         */
        RangeVar *sequence = (RangeVar *) lfirst(cell);

        /*
         * We have qualified the statement beforehand
         */
        appendStringInfoString(buf, quote_qualified_identifier(sequence->schemaname,
                                                               sequence->relname));

        if (cell != list_tail(stmt->objects))
        {
            appendStringInfoString(buf, ", ");
        }
    }
}

@ -137,3 +137,41 @@ QualifyDropSequenceStmt(Node *node)

    stmt->objects = objectNameListWithSchema;
}


/*
 * QualifyGrantOnSequenceStmt transforms a
 * GRANT ON SEQUENCE ...
 * statement in place and makes the sequence names fully qualified.
 */
void
QualifyGrantOnSequenceStmt(Node *node)
{
    GrantStmt *stmt = castNode(GrantStmt, node);
    Assert(stmt->objtype == OBJECT_SEQUENCE);

    /*
     * The other option would be GRANT .. ON ALL SEQUENCES IN SCHEMA ...
     * For that we don't need to qualify.
     */
    if (stmt->targtype != ACL_TARGET_OBJECT)
    {
        return;
    }
    List *qualifiedSequenceRangeVars = NIL;
    RangeVar *sequenceRangeVar = NULL;
    foreach_ptr(sequenceRangeVar, stmt->objects)
    {
        if (sequenceRangeVar->schemaname == NULL)
        {
            Oid seqOid = RangeVarGetRelid(sequenceRangeVar, NoLock, false);
            Oid schemaOid = get_rel_namespace(seqOid);
            sequenceRangeVar->schemaname = get_namespace_name(schemaOid);
        }

        qualifiedSequenceRangeVars = lappend(qualifiedSequenceRangeVars,
                                             sequenceRangeVar);
    }

    stmt->objects = qualifiedSequenceRangeVars;
}

@ -40,6 +40,7 @@
#include "nodes/makefuncs.h"
#include "optimizer/optimizer.h"
#include "optimizer/clauses.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/datum.h"
@ -674,7 +675,10 @@ CitusEndScan(CustomScanState *node)
        partitionKeyConst = workerJob->partitionKeyValue;
    }

    /* queryId is not set if pg_stat_statements is not installed */
    /*
     * queryId is not set if pg_stat_statements is not installed.
     * As of PG14, it can also be enabled with: SET compute_query_id TO on;
     */
    if (queryId != 0)
    {
        if (partitionKeyConst != NULL && executorType == MULTI_EXECUTOR_ADAPTIVE)
File diff suppressed because it is too large
@ -21,9 +21,13 @@
#include "catalog/catalog.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/pg_auth_members.h"
#include "catalog/pg_authid_d.h"
#include "catalog/pg_class.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_depend.h"
#include "catalog/pg_extension_d.h"
#include "catalog/pg_foreign_data_wrapper_d.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_proc_d.h"
#include "catalog/pg_rewrite.h"
@ -45,6 +49,7 @@
#include "utils/fmgroids.h"
#include "utils/hsearch.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"

/*
 * ObjectAddressCollector keeps track of collected ObjectAddresses. This can be used
@ -168,6 +173,8 @@ static void ApplyAddToDependencyList(ObjectAddressCollector *collector,
static List * GetViewRuleReferenceDependencyList(Oid relationId);
static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector,
                                        ObjectAddress target);
static List * GetDependentRoleIdsFDW(Oid FDWOid);
static List * ExpandRolesToGroups(Oid roleid);
static ViewDependencyNode * BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap);

@ -670,16 +677,13 @@ SupportedDependencyByCitus(const ObjectAddress *address)

        case OCLASS_ROLE:
        {
            /*
             * Community only supports the extension owner as a distributed object to
             * propagate alter statements for this user
             */
            if (address->objectId == CitusExtensionOwner())
            /* if it is a reserved role do not propagate */
            if (IsReservedName(GetUserNameFromId(address->objectId, false)))
            {
                return true;
                return false;
            }

            return false;
            return true;
        }

        case OCLASS_EXTENSION:
@ -1275,6 +1279,42 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe

    switch (target.classId)
    {
        case AuthIdRelationId:
        {
            /*
             * Roles are members of other roles. These relations are not recorded directly
             * but can be deduced from pg_auth_members
             */
            return ExpandRolesToGroups(target.objectId);
        }

        case ExtensionRelationId:
        {
            /*
             * FDWs get propagated along with the extensions they belong to.
             * In case there are GRANTed privileges on FDWs to roles, those
             * GRANT statements will be propagated too. In order to make sure
             * that those GRANT statements work, the privileged roles should
             * exist on the worker nodes. Hence, here we find these dependent
             * roles and add them as dependencies.
             */

            Oid extensionId = target.objectId;
            List *FDWOids = GetDependentFDWsToExtension(extensionId);

            Oid FDWOid = InvalidOid;
            foreach_oid(FDWOid, FDWOids)
            {
                List *dependentRoleIds = GetDependentRoleIdsFDW(FDWOid);
                List *dependencies =
                    CreateObjectAddressDependencyDefList(AuthIdRelationId,
                                                         dependentRoleIds);
                result = list_concat(result, dependencies);
            }

            break;
        }

        case TypeRelationId:
        {
            switch (get_typtype(target.objectId))
@ -1407,6 +1447,73 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
}


/*
 * GetDependentRoleIdsFDW returns a list of role oids that have privileges on the
 * FDW with the given object id.
 */
static List *
GetDependentRoleIdsFDW(Oid FDWOid)
{
    List *roleIds = NIL;

    Acl *aclEntry = GetPrivilegesForFDW(FDWOid);

    if (aclEntry == NULL)
    {
        return NIL;
    }

    AclItem *privileges = ACL_DAT(aclEntry);
    int numberOfPrivsGranted = ACL_NUM(aclEntry);

    for (int i = 0; i < numberOfPrivsGranted; i++)
    {
        roleIds = lappend_oid(roleIds, privileges[i].ai_grantee);
    }

    return roleIds;
}


/*
 * ExpandRolesToGroups returns a list of object addresses pointing to roles that roleid
 * depends on.
 */
static List *
ExpandRolesToGroups(Oid roleid)
{
    Relation pgAuthMembers = table_open(AuthMemRelationId, AccessShareLock);
    HeapTuple tuple = NULL;

    ScanKeyData scanKey[1];
    const int scanKeyCount = 1;

    /* scan pg_auth_members for member = $1 via index pg_auth_members_member_role_index */
    ScanKeyInit(&scanKey[0], Anum_pg_auth_members_member, BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(roleid));

    SysScanDesc scanDescriptor = systable_beginscan(pgAuthMembers, AuthMemMemRoleIndexId,
                                                    true, NULL, scanKeyCount, scanKey);

    List *roles = NIL;
    while ((tuple = systable_getnext(scanDescriptor)) != NULL)
    {
        Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple);

        DependencyDefinition *definition = palloc0(sizeof(DependencyDefinition));
        definition->mode = DependencyObjectAddress;
        ObjectAddressSet(definition->data.address, AuthIdRelationId, membership->roleid);

        roles = lappend(roles, definition);
    }

    systable_endscan(scanDescriptor);
    table_close(pgAuthMembers, AccessShareLock);

    return roles;
}


/*
 * GetViewRuleReferenceDependencyList returns the dependencies of the view's
 * internal rule dependencies.
@ -28,6 +28,7 @@
#include "catalog/pg_type.h"
#include "citus_version.h"
#include "commands/extension.h"
#include "distributed/listutils.h"
#include "distributed/colocation_utils.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
@ -44,6 +45,7 @@
#include "parser/parse_type.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/regproc.h"
#include "utils/rel.h"

@ -519,3 +521,82 @@ UpdateDistributedObjectColocationId(uint32 oldColocationId,
    table_close(pgDistObjectRel, NoLock);
    CommandCounterIncrement();
}


/*
 * DistributedFunctionList returns the list of ObjectAddress-es of all the
 * distributed functions found in pg_dist_object
 */
List *
DistributedFunctionList(void)
{
    List *distributedFunctionList = NIL;

    ScanKeyData key[1];
    Relation pgDistObjectRel = table_open(DistObjectRelationId(), AccessShareLock);

    /* scan pg_dist_object for classid = ProcedureRelationId via index */
    ScanKeyInit(&key[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(ProcedureRelationId));
    SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel,
                                                      DistObjectPrimaryKeyIndexId(),
                                                      true, NULL, 1, key);

    HeapTuple pgDistObjectTup = NULL;
    while (HeapTupleIsValid(pgDistObjectTup = systable_getnext(pgDistObjectScan)))
    {
        Form_pg_dist_object pg_dist_object =
            (Form_pg_dist_object) GETSTRUCT(pgDistObjectTup);

        ObjectAddress *functionAddress = palloc0(sizeof(ObjectAddress));
        functionAddress->classId = ProcedureRelationId;
        functionAddress->objectId = pg_dist_object->objid;
        functionAddress->objectSubId = pg_dist_object->objsubid;
        distributedFunctionList = lappend(distributedFunctionList, functionAddress);
    }

    systable_endscan(pgDistObjectScan);
    relation_close(pgDistObjectRel, AccessShareLock);
    return distributedFunctionList;
}


/*
 * DistributedSequenceList returns the list of ObjectAddress-es of all the
 * distributed sequences found in pg_dist_object
 */
List *
DistributedSequenceList(void)
{
    List *distributedSequenceList = NIL;

    ScanKeyData key[1];
    Relation pgDistObjectRel = table_open(DistObjectRelationId(), AccessShareLock);

    /* scan pg_dist_object for classid = RelationRelationId via index */
    ScanKeyInit(&key[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(RelationRelationId));
    SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel,
                                                      DistObjectPrimaryKeyIndexId(),
                                                      true, NULL, 1, key);

    HeapTuple pgDistObjectTup = NULL;
    while (HeapTupleIsValid(pgDistObjectTup = systable_getnext(pgDistObjectScan)))
    {
        Form_pg_dist_object pg_dist_object =
            (Form_pg_dist_object) GETSTRUCT(pgDistObjectTup);

        if (get_rel_relkind(pg_dist_object->objid) == RELKIND_SEQUENCE)
        {
            ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress));
            sequenceAddress->classId = RelationRelationId;
            sequenceAddress->objectId = pg_dist_object->objid;
            sequenceAddress->objectSubId = pg_dist_object->objsubid;
            distributedSequenceList = lappend(distributedSequenceList, sequenceAddress);
        }
    }

    systable_endscan(pgDistObjectScan);
    relation_close(pgDistObjectRel, AccessShareLock);
    return distributedSequenceList;
}

@ -22,6 +22,7 @@
#include "access/nbtree.h"
#include "access/xact.h"
#include "access/sysattr.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/pg_am.h"
#include "catalog/pg_collation.h"
@ -39,6 +40,7 @@
#include "distributed/citus_ruleutils.h"
#include "distributed/multi_executor.h"
#include "distributed/function_utils.h"
#include "distributed/listutils.h"
#include "distributed/foreign_key_relationship.h"
#include "distributed/listutils.h"
#include "distributed/metadata_utility.h"
@ -54,6 +56,7 @@
#include "distributed/pg_dist_placement.h"
#include "distributed/shared_library_init.h"
#include "distributed/shardinterval_utils.h"
#include "distributed/utils/array_type.h"
#include "distributed/utils/function.h"
#include "distributed/version_compat.h"
#include "distributed/worker_manager.h"
@ -65,6 +68,7 @@
#include "parser/parse_func.h"
#include "parser/parse_type.h"
#include "storage/lmgr.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/catcache.h"
#include "utils/datum.h"
@ -165,6 +169,7 @@ typedef struct MetadataCacheData
    Oid workerHashFunctionId;
    Oid anyValueFunctionId;
    Oid textSendAsJsonbFunctionId;
    Oid textoutFunctionId;
    Oid extensionOwner;
    Oid binaryCopyFormatId;
    Oid textCopyFormatId;
@ -172,6 +177,10 @@ typedef struct MetadataCacheData
    Oid secondaryNodeRoleId;
    Oid pgTableIsVisibleFuncId;
    Oid citusTableIsVisibleFuncId;
    Oid distAuthinfoRelationId;
    Oid distAuthinfoIndexId;
    Oid distPoolinfoRelationId;
    Oid distPoolinfoIndexId;
    Oid relationIsAKnownShardFuncId;
    Oid jsonbExtractPathFuncId;
    Oid jsonbExtractPathTextFuncId;
@ -237,6 +246,7 @@ static void InitializeWorkerNodeCache(void);
static void RegisterForeignKeyGraphCacheCallbacks(void);
static void RegisterWorkerNodeCacheCallbacks(void);
static void RegisterLocalGroupIdCacheCallbacks(void);
static void RegisterAuthinfoCacheCallbacks(void);
static void RegisterCitusTableCacheEntryReleaseCallbacks(void);
static uint32 WorkerNodeHashCode(const void *key, Size keySize);
static void ResetCitusTableCacheEntry(CitusTableCacheEntry *cacheEntry);
@ -248,6 +258,7 @@ static void InvalidateForeignRelationGraphCacheCallback(Datum argument, Oid rela
static void InvalidateDistRelationCacheCallback(Datum argument, Oid relationId);
static void InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId);
static void InvalidateLocalGroupIdRelationCacheCallback(Datum argument, Oid relationId);
static void InvalidateConnParamsCacheCallback(Datum argument, Oid relationId);
static void CitusTableCacheEntryReleaseCallback(ResourceReleasePhase phase, bool isCommit,
                                                bool isTopLevel, void *arg);
static HeapTuple LookupDistPartitionTuple(Relation pgDistPartition, Oid relationId);
@ -275,6 +286,10 @@ static bool IsCitusTableTypeInternal(char partitionMethod, char replicationModel
                                     CitusTableType tableType);
static bool RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry);

static Oid DistAuthinfoRelationId(void);
static Oid DistAuthinfoIndexId(void);
static Oid DistPoolinfoRelationId(void);
static Oid DistPoolinfoIndexId(void);

/* exports for SQL callable functions */
PG_FUNCTION_INFO_V1(citus_dist_partition_cache_invalidate);
@ -2594,6 +2609,50 @@ DistPlacementGroupidIndexId(void)
}


/* return oid of pg_dist_authinfo relation */
static Oid
DistAuthinfoRelationId(void)
{
    CachedRelationLookup("pg_dist_authinfo",
                         &MetadataCache.distAuthinfoRelationId);

    return MetadataCache.distAuthinfoRelationId;
}


/* return oid of pg_dist_authinfo identification index */
static Oid
DistAuthinfoIndexId(void)
{
    CachedRelationLookup("pg_dist_authinfo_identification_index",
                         &MetadataCache.distAuthinfoIndexId);

    return MetadataCache.distAuthinfoIndexId;
}


/* return oid of pg_dist_poolinfo relation */
static Oid
DistPoolinfoRelationId(void)
{
    CachedRelationLookup("pg_dist_poolinfo",
                         &MetadataCache.distPoolinfoRelationId);

    return MetadataCache.distPoolinfoRelationId;
}


/* return oid of pg_dist_poolinfo primary key index */
static Oid
DistPoolinfoIndexId(void)
{
    CachedRelationLookup("pg_dist_poolinfo_pkey",
                         &MetadataCache.distPoolinfoIndexId);

    return MetadataCache.distPoolinfoIndexId;
}


/* return oid of the read_intermediate_result(text,citus_copy_format) function */
Oid
CitusReadIntermediateResultFuncId(void)
@ -2713,6 +2772,42 @@ CitusAnyValueFunctionId(void)
}


/* return oid of the citus_text_send_as_jsonb(text) function */
Oid
CitusTextSendAsJsonbFunctionId(void)
{
    if (MetadataCache.textSendAsJsonbFunctionId == InvalidOid)
    {
        List *nameList = list_make2(makeString("pg_catalog"),
                                    makeString("citus_text_send_as_jsonb"));
        Oid paramOids[1] = { TEXTOID };

        MetadataCache.textSendAsJsonbFunctionId =
            LookupFuncName(nameList, 1, paramOids, false);
    }

    return MetadataCache.textSendAsJsonbFunctionId;
}


/* return oid of the textout(text) function */
Oid
TextOutFunctionId(void)
{
    if (MetadataCache.textoutFunctionId == InvalidOid)
    {
        List *nameList = list_make2(makeString("pg_catalog"),
                                    makeString("textout"));
        Oid paramOids[1] = { TEXTOID };

        MetadataCache.textoutFunctionId =
            LookupFuncName(nameList, 1, paramOids, false);
    }

    return MetadataCache.textoutFunctionId;
}


/*
 * PgTableVisibleFuncId returns oid of the pg_table_is_visible function.
 */
@ -3301,7 +3396,7 @@ citus_conninfo_cache_invalidate(PG_FUNCTION_ARGS)
                        errmsg("must be called as trigger")));
    }

    /* no-op in community edition */
    CitusInvalidateRelcacheByRelid(DistAuthinfoRelationId());

    PG_RETURN_DATUM(PointerGetDatum(NULL));
}
@ -3429,6 +3524,7 @@ InitializeCaches(void)
        RegisterForeignKeyGraphCacheCallbacks();
        RegisterWorkerNodeCacheCallbacks();
        RegisterLocalGroupIdCacheCallbacks();
        RegisterAuthinfoCacheCallbacks();
        RegisterCitusTableCacheEntryReleaseCallbacks();
    }
    PG_CATCH();
@ -3834,6 +3930,18 @@ RegisterLocalGroupIdCacheCallbacks(void)
}


/*
 * RegisterAuthinfoCacheCallbacks registers the callbacks required to
 * keep cached connection parameters fresh.
 */
static void
RegisterAuthinfoCacheCallbacks(void)
{
    /* Watch for invalidation events. */
    CacheRegisterRelcacheCallback(InvalidateConnParamsCacheCallback, (Datum) 0);
}


/*
 * WorkerNodeHashCode computes the hash code for a worker node from the node's
 * host name and port number. Nodes that only differ by their rack locations
@ -4332,6 +4440,30 @@ InvalidateLocalGroupIdRelationCacheCallback(Datum argument, Oid relationId)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* InvalidateConnParamsCacheCallback sets isValid flag to false for all entries
|
||||
* in ConnParamsHash, a cache used during connection establishment.
|
||||
*/
|
||||
static void
|
||||
InvalidateConnParamsCacheCallback(Datum argument, Oid relationId)
|
||||
{
|
||||
if (relationId == MetadataCache.distAuthinfoRelationId ||
|
||||
relationId == MetadataCache.distPoolinfoRelationId ||
|
||||
relationId == InvalidOid)
|
||||
{
|
||||
ConnParamsHashEntry *entry = NULL;
|
||||
HASH_SEQ_STATUS status;
|
||||
|
||||
hash_seq_init(&status, ConnParamsHash);
|
||||
|
||||
while ((entry = (ConnParamsHashEntry *) hash_seq_search(&status)) != NULL)
|
||||
{
|
||||
entry->isValid = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
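
/*
 * Illustrative sketch (not part of this commit): the lazy-revalidation side
 * of the flag set above. A consumer of ConnParamsHash would typically treat
 * an entry with isValid == false as a cache miss and rebuild it before use;
 * "key" is a hypothetical hash key and only the shape of the check is shown.
 */
ConnParamsHashEntry *entry = hash_search(ConnParamsHash, &key, HASH_FIND, NULL);
if (entry == NULL || !entry->isValid)
{
    /* recompute connection parameters from pg_dist_authinfo / pg_dist_poolinfo */
}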

/*
 * CitusTableCacheFlushInvalidatedEntries frees invalidated cache entries.
 * Invalidated entries aren't freed immediately as callers expect their lifetime

@@ -4939,37 +5071,164 @@ role_exists(PG_FUNCTION_ARGS)


/*
 * authinfo_valid is a check constraint which errors on all rows, intended for
 * use in prohibiting writes to pg_dist_authinfo in Citus Community.
 * GetPoolinfoViaCatalog searches the pg_dist_poolinfo table for a row matching
 * the provided nodeId and returns the poolinfo field of this row if found.
 * Otherwise, this function returns NULL.
 */
Datum
authinfo_valid(PG_FUNCTION_ARGS)
char *
GetPoolinfoViaCatalog(int64 nodeId)
{
    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                    errmsg("cannot write to pg_dist_authinfo"),
                    errdetail("Citus Community Edition does not support the use of "
                              "custom authentication options."),
                    errhint("To learn more about using advanced authentication schemes "
                            "with Citus, please contact us at "
                            "https://citusdata.com/about/contact_us")));
    ScanKeyData scanKey[1];
    const int scanKeyCount = 1;
    const AttrNumber nodeIdIdx = 1, poolinfoIdx = 2;
    Relation pgDistPoolinfo = table_open(DistPoolinfoRelationId(), AccessShareLock);
    bool indexOK = true;
    char *poolinfo = NULL;

    /* set scan arguments */
    ScanKeyInit(&scanKey[0], nodeIdIdx, BTEqualStrategyNumber, F_INT4EQ,
                Int32GetDatum(nodeId));

    SysScanDesc scanDescriptor = systable_beginscan(pgDistPoolinfo, DistPoolinfoIndexId(),
                                                    indexOK,
                                                    NULL, scanKeyCount, scanKey);

    HeapTuple heapTuple = systable_getnext(scanDescriptor);
    if (HeapTupleIsValid(heapTuple))
    {
        TupleDesc tupleDescriptor = RelationGetDescr(pgDistPoolinfo);
        bool isNull = false;

        Datum poolinfoDatum = heap_getattr(heapTuple, poolinfoIdx, tupleDescriptor,
                                           &isNull);

        Assert(!isNull);

        poolinfo = TextDatumGetCString(poolinfoDatum);
    }

    systable_endscan(scanDescriptor);
    table_close(pgDistPoolinfo, AccessShareLock);

    return poolinfo;
}
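
/*
 * Illustrative sketch (not part of this commit): how a caller might consume
 * GetPoolinfoViaCatalog(). Assuming a hypothetical catalog row such as
 * (nodeid = 3, poolinfo = 'host=10.0.0.5 port=6432'), the returned string is
 * a libpq-style keyword/value list that can override the node's direct
 * host/port when establishing a connection through a pooler.
 */
char *poolinfo = GetPoolinfoViaCatalog(3);
if (poolinfo != NULL)
{
    /* merge 'host=10.0.0.5 port=6432' into the connection's key/value list */
}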


/*
 * poolinfo_valid is a check constraint which errors on all rows, intended for
 * use in prohibiting writes to pg_dist_poolinfo in Citus Community.
 * GetAuthinfoViaCatalog searches pg_dist_authinfo for a row matching a
 * provided role and node id. Three types of rules are currently permitted: those
 * matching a specific node (non-zero nodeid), those matching all nodes (a
 * nodeid of zero), and those denoting a loopback connection (nodeid of -1).
 * Rolename must always be specified. If both types of rules exist for a given
 * user/host, the more specific (host-specific) rule wins. This means that when
 * both a zero and non-zero row exist for a given rolename, the non-zero row
 * has precedence.
 *
 * In short, this function will return a rule matching nodeId, or if that's
 * absent the rule for 0, or if that's absent, an empty string. Callers can
 * just use the returned authinfo and know the precedence has been honored.
 */
char *
GetAuthinfoViaCatalog(const char *roleName, int64 nodeId)
{
    char *authinfo = "";
    Datum nodeIdDatumArray[2] = {
        Int32GetDatum(nodeId),
        Int32GetDatum(WILDCARD_NODE_ID)
    };
    ArrayType *nodeIdArrayType = DatumArrayToArrayType(nodeIdDatumArray,
                                                       lengthof(nodeIdDatumArray),
                                                       INT4OID);
    ScanKeyData scanKey[2];
    const AttrNumber nodeIdIdx = 1, roleIdx = 2, authinfoIdx = 3;

    /*
     * Our index's definition ensures correct precedence for positive nodeIds,
     * but when handling a negative value we need to traverse backwards to keep
     * the invariant that the zero rule has lowest precedence.
     */
    ScanDirection direction = (nodeId < 0) ? BackwardScanDirection : ForwardScanDirection;

    if (ReindexIsProcessingIndex(DistAuthinfoIndexId()))
    {
        ereport(ERROR, (errmsg("authinfo is being reindexed; try again")));
    }

    memset(&scanKey, 0, sizeof(scanKey));

    /* first column in index is rolename, need exact match there ... */
    ScanKeyInit(&scanKey[0], roleIdx, BTEqualStrategyNumber,
                F_NAMEEQ, CStringGetDatum(roleName));

    /* second column is nodeId, match against array of nodeid and zero (any node) ... */
    ScanKeyInit(&scanKey[1], nodeIdIdx, BTEqualStrategyNumber,
                F_INT4EQ, PointerGetDatum(nodeIdArrayType));
    scanKey[1].sk_flags |= SK_SEARCHARRAY;

    /*
     * It's important that we traverse the index in order: we need to ensure
     * that rules with nodeid 0 are encountered last. We'll use the first tuple
     * we find. This ordering defines the precedence order of authinfo rules.
     */
    Relation pgDistAuthinfo = table_open(DistAuthinfoRelationId(), AccessShareLock);
    Relation pgDistAuthinfoIdx = index_open(DistAuthinfoIndexId(), AccessShareLock);
    SysScanDesc scanDescriptor = systable_beginscan_ordered(pgDistAuthinfo,
                                                            pgDistAuthinfoIdx,
                                                            NULL, lengthof(scanKey),
                                                            scanKey);

    /* first tuple represents highest-precedence rule for this node */
    HeapTuple authinfoTuple = systable_getnext_ordered(scanDescriptor, direction);
    if (HeapTupleIsValid(authinfoTuple))
    {
        TupleDesc tupleDescriptor = RelationGetDescr(pgDistAuthinfo);
        bool isNull = false;

        Datum authinfoDatum = heap_getattr(authinfoTuple, authinfoIdx,
                                           tupleDescriptor, &isNull);

        Assert(!isNull);

        authinfo = TextDatumGetCString(authinfoDatum);
    }

    systable_endscan_ordered(scanDescriptor);
    index_close(pgDistAuthinfoIdx, AccessShareLock);
    table_close(pgDistAuthinfo, AccessShareLock);

    return authinfo;
}
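
/*
 * Illustrative sketch (not part of this commit): the precedence the comment
 * above describes, with two hypothetical pg_dist_authinfo rows
 *   ('alice', 0, 'password=zero')   -- wildcard rule, nodeid 0
 *   ('alice', 3, 'password=three')  -- node-specific rule
 */
char *forNode3 = GetAuthinfoViaCatalog("alice", 3);   /* "password=three" */
char *forNode7 = GetAuthinfoViaCatalog("alice", 7);   /* "password=zero" (wildcard) */
char *forBob = GetAuthinfoViaCatalog("bob", 3);       /* "" - no rule at all */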


/*
 * authinfo_valid is a check constraint to verify that an inserted authinfo row
 * uses only permitted libpq parameters.
 */
Datum
authinfo_valid(PG_FUNCTION_ARGS)
{
    char *authinfo = TextDatumGetCString(PG_GETARG_DATUM(0));

    /* this array _must_ be kept in an order usable by bsearch */
    const char *allowList[] = { "password", "sslcert", "sslkey" };
    bool authinfoValid = CheckConninfo(authinfo, allowList, lengthof(allowList), NULL);

    PG_RETURN_BOOL(authinfoValid);
}


/*
 * poolinfo_valid is a check constraint to verify that an inserted poolinfo row
 * uses only permitted libpq parameters.
 */
Datum
poolinfo_valid(PG_FUNCTION_ARGS)
{
    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                    errmsg("cannot write to pg_dist_poolinfo"),
                    errdetail("Citus Community Edition does not support the use of "
                              "pooler options."),
                    errhint("To learn more about using advanced pooling schemes "
                            "with Citus, please contact us at "
                            "https://citusdata.com/about/contact_us")));
    char *poolinfo = TextDatumGetCString(PG_GETARG_DATUM(0));

    /* this array _must_ be kept in an order usable by bsearch */
    const char *allowList[] = { "dbname", "host", "port" };
    bool poolinfoValid = CheckConninfo(poolinfo, allowList, lengthof(allowList), NULL);

    PG_RETURN_BOOL(poolinfoValid);
}

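
/*
 * Illustrative sketch (not part of this commit): why the allowlists above
 * must stay sorted. A validator in the spirit of CheckConninfo() can test
 * each parsed conninfo keyword with a binary search, and strcmp-ordering of
 * the array is exactly what makes bsearch() correct. Assumes <stdlib.h> and
 * <string.h>; the helper names are hypothetical.
 */
static int
CompareKeyword(const void *lhs, const void *rhs)
{
    return strcmp((const char *) lhs, *(const char *const *) rhs);
}

static bool
KeywordIsAllowed(const char *keyword, const char **allowList, Size allowListLength)
{
    return bsearch(keyword, allowList, allowListLength, sizeof(char *),
                   CompareKeyword) != NULL;
}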
@@ -73,6 +73,7 @@
#include "executor/spi.h"
#include "foreign/foreign.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/pg_list.h"
#include "pgstat.h"
#include "postmaster/bgworker.h"

@@ -115,6 +116,11 @@ static List * GetObjectsForGrantStmt(ObjectType objectType, Oid objectId);
static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission);
static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid,
                                                      AclItem *aclItem);
static List * GenerateGrantOnFunctionQueriesFromAclItem(Oid schemaOid,
                                                        AclItem *aclItem);
static List * GrantOnSequenceDDLCommands(Oid sequenceOid);
static List * GenerateGrantOnSequenceQueriesFromAclItem(Oid sequenceOid,
                                                        AclItem *aclItem);
static void SetLocalReplicateReferenceTablesOnActivate(bool state);
static char * GenerateSetRoleQuery(Oid roleOid);
static void MetadataSyncSigTermHandler(SIGNAL_ARGS);

@@ -513,6 +519,7 @@ ShouldSyncUserCommandForObject(ObjectAddress objectAddress)
{
    Oid relOid = objectAddress.objectId;
    return ShouldSyncTableMetadata(relOid) ||
           ShouldSyncSequenceMetadata(relOid) ||
           get_rel_relkind(relOid) == RELKIND_VIEW;
}


@@ -585,6 +592,26 @@ ShouldSyncTableMetadataInternal(bool hashDistributed, bool citusTableWithNoDistK


/*
 * ShouldSyncSequenceMetadata checks if the metadata of a sequence should be
 * propagated to metadata workers, i.e. the sequence is marked as distributed.
 */
bool
ShouldSyncSequenceMetadata(Oid relationId)
{
    if (!OidIsValid(relationId) || !(get_rel_relkind(relationId) == RELKIND_SEQUENCE))
    {
        return false;
    }

    ObjectAddress sequenceAddress = { 0 };
    ObjectAddressSet(sequenceAddress, RelationRelationId, relationId);

    return IsObjectDistributed(&sequenceAddress);
}


/*
 * SyncMetadataSnapshotToNode does the following:
 * SyncNodeMetadataSnapshotToNode does the following:
 * 1. Sets the localGroupId on the worker so the worker knows which tuple in
 *    pg_dist_node represents itself.

@@ -1346,6 +1373,23 @@ ShardListInsertCommand(List *shardIntervalList)
}


/*
 * ShardDeleteCommandList generates a command list that can be executed to delete
 * shard and shard placement metadata for the given shard.
 */
List *
ShardDeleteCommandList(ShardInterval *shardInterval)
{
    uint64 shardId = shardInterval->shardId;

    StringInfo deleteShardCommand = makeStringInfo();
    appendStringInfo(deleteShardCommand,
                     "SELECT citus_internal_delete_shard_metadata(%ld);", shardId);

    return list_make1(deleteShardCommand->data);
}


/*
 * NodeDeleteCommand generates a command that can be
 * executed to delete the metadata for a worker node.

@@ -1480,6 +1524,8 @@ DDLCommandsForSequence(Oid sequenceOid, char *ownerName)

    sequenceDDLList = lappend(sequenceDDLList, wrappedSequenceDef->data);
    sequenceDDLList = lappend(sequenceDDLList, sequenceGrantStmt->data);
    sequenceDDLList = list_concat(sequenceDDLList, GrantOnSequenceDDLCommands(
                                      sequenceOid));

    return sequenceDDLList;
}

@@ -1939,7 +1985,7 @@ GrantOnSchemaDDLCommands(Oid schemaOid)


/*
 * GenerateGrantOnSchemaQueryFromACL generates a query string for replicating a user's permissions
 * GenerateGrantOnSchemaQueryFromACLItem generates a query string for replicating a user's permissions
 * on a schema.
 */
List *

@@ -2023,6 +2069,34 @@ GetObjectsForGrantStmt(ObjectType objectType, Oid objectId)
            return list_make1(makeString(get_namespace_name(objectId)));
        }

        /* enterprise supported object types */
        case OBJECT_FUNCTION:
        case OBJECT_PROCEDURE:
        {
            ObjectWithArgs *owa = ObjectWithArgsFromOid(objectId);
            return list_make1(owa);
        }

        case OBJECT_FDW:
        {
            ForeignDataWrapper *fdw = GetForeignDataWrapper(objectId);
            return list_make1(makeString(fdw->fdwname));
        }

        case OBJECT_FOREIGN_SERVER:
        {
            ForeignServer *server = GetForeignServer(objectId);
            return list_make1(makeString(server->servername));
        }

        case OBJECT_SEQUENCE:
        {
            Oid namespaceOid = get_rel_namespace(objectId);
            RangeVar *sequence = makeRangeVar(get_namespace_name(namespaceOid),
                                              get_rel_name(objectId), -1);
            return list_make1(sequence);
        }

        default:
        {
            elog(ERROR, "unsupported object type for GRANT");

@@ -2033,6 +2107,211 @@ GetObjectsForGrantStmt(ObjectType objectType, Oid objectId)
}


/*
 * GrantOnFunctionDDLCommands creates a list of DDL commands for replicating the permissions
 * of roles on distributed functions.
 */
List *
GrantOnFunctionDDLCommands(Oid functionOid)
{
    HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionOid));

    bool isNull = true;
    Datum aclDatum = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_proacl,
                                     &isNull);
    if (isNull)
    {
        ReleaseSysCache(proctup);
        return NIL;
    }

    Acl *acl = DatumGetAclPCopy(aclDatum);
    AclItem *aclDat = ACL_DAT(acl);
    int aclNum = ACL_NUM(acl);
    List *commands = NIL;

    ReleaseSysCache(proctup);

    for (int i = 0; i < aclNum; i++)
    {
        commands = list_concat(commands,
                               GenerateGrantOnFunctionQueriesFromAclItem(
                                   functionOid,
                                   &aclDat[i]));
    }

    return commands;
}
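
/*
 * Illustrative sketch (not part of this commit): how an AclItem is decoded
 * in loops like the one above. An ACL entry printed as "alice=X*/bob" on a
 * function means bob granted EXECUTE to alice WITH GRANT OPTION; the
 * privilege and grant-option bitmasks come from the two macros below.
 */
AclItem *aclItem = &aclDat[0];                       /* hypothetical entry */
AclMode privs = ACLITEM_GET_PRIVS(*aclItem);         /* e.g. ACL_EXECUTE */
AclMode grantOptions = ACLITEM_GET_GOPTIONS(*aclItem);
Oid grantee = aclItem->ai_grantee;                   /* alice */
Oid grantor = aclItem->ai_grantor;                   /* bob */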


/*
 * GrantOnForeignServerDDLCommands creates a list of DDL commands for replicating the
 * permissions of roles on distributed foreign servers.
 */
List *
GrantOnForeignServerDDLCommands(Oid serverId)
{
    HeapTuple servertup = SearchSysCache1(FOREIGNSERVEROID, ObjectIdGetDatum(serverId));

    bool isNull = true;
    Datum aclDatum = SysCacheGetAttr(FOREIGNSERVEROID, servertup,
                                     Anum_pg_foreign_server_srvacl, &isNull);
    if (isNull)
    {
        ReleaseSysCache(servertup);
        return NIL;
    }

    Acl *aclEntry = DatumGetAclPCopy(aclDatum);
    AclItem *privileges = ACL_DAT(aclEntry);
    int numberOfPrivsGranted = ACL_NUM(aclEntry);
    List *commands = NIL;

    ReleaseSysCache(servertup);

    for (int i = 0; i < numberOfPrivsGranted; i++)
    {
        commands = list_concat(commands,
                               GenerateGrantOnForeignServerQueriesFromAclItem(
                                   serverId,
                                   &privileges[i]));
    }

    return commands;
}


/*
 * GenerateGrantOnForeignServerQueriesFromAclItem generates a query string for
 * replicating a user's permissions on a foreign server.
 */
List *
GenerateGrantOnForeignServerQueriesFromAclItem(Oid serverId, AclItem *aclItem)
{
    /* privileges to be granted */
    AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_FOREIGN_SERVER;

    /* WITH GRANT OPTION clause */
    AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_FOREIGN_SERVER;

    /*
     * seems unlikely but we check if there is a grant option in the list without the actual permission
     */
    Assert(!(grants & ACL_USAGE) || (permissions & ACL_USAGE));

    Oid granteeOid = aclItem->ai_grantee;
    List *queries = NIL;

    /* switch to the role which had granted the ACL */
    queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor));

    /* generate the GRANT stmt that will be executed by the grantor role */
    if (permissions & ACL_USAGE)
    {
        char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights(
                                          OBJECT_FOREIGN_SERVER, granteeOid, serverId,
                                          "USAGE", grants & ACL_USAGE));
        queries = lappend(queries, query);
    }

    /* reset the role back */
    queries = lappend(queries, "RESET ROLE");

    return queries;
}
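
/*
 * Illustrative sketch (not part of this commit): for a hypothetical AclItem
 * where bob granted USAGE WITH GRANT OPTION on server "remote_pg" to alice,
 * the list built above would deparse to roughly:
 *
 *   SET ROLE bob;
 *   GRANT USAGE ON FOREIGN SERVER remote_pg TO alice WITH GRANT OPTION;
 *   RESET ROLE;
 *
 * Running the GRANT as the original grantor keeps the grantor column of the
 * replicated ACL identical on the workers.
 */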


/*
 * GenerateGrantOnFunctionQueriesFromAclItem generates a query string for replicating a user's permissions
 * on a distributed function.
 */
List *
GenerateGrantOnFunctionQueriesFromAclItem(Oid functionOid, AclItem *aclItem)
{
    AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_FUNCTION;
    AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_FUNCTION;

    /*
     * seems unlikely but we check if there is a grant option in the list without the actual permission
     */
    Assert(!(grants & ACL_EXECUTE) || (permissions & ACL_EXECUTE));
    Oid granteeOid = aclItem->ai_grantee;
    List *queries = NIL;

    queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor));

    if (permissions & ACL_EXECUTE)
    {
        char prokind = get_func_prokind(functionOid);
        ObjectType objectType;

        if (prokind == PROKIND_FUNCTION)
        {
            objectType = OBJECT_FUNCTION;
        }
        else if (prokind == PROKIND_PROCEDURE)
        {
            objectType = OBJECT_PROCEDURE;
        }
        else
        {
            ereport(ERROR, (errmsg("unsupported prokind"),
                            errdetail("GRANT commands are propagated only "
                                      "for procedures and functions.")));
        }

        char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights(
                                          objectType, granteeOid, functionOid, "EXECUTE",
                                          grants & ACL_EXECUTE));
        queries = lappend(queries, query);
    }

    queries = lappend(queries, "RESET ROLE");

    return queries;
}


/*
 * GenerateGrantOnFDWQueriesFromAclItem generates a query string for
 * replicating a user's permissions on a foreign data wrapper.
 */
List *
GenerateGrantOnFDWQueriesFromAclItem(Oid FDWId, AclItem *aclItem)
{
    /* privileges to be granted */
    AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_FDW;

    /* WITH GRANT OPTION clause */
    AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_FDW;

    /*
     * seems unlikely but we check if there is a grant option in the list without the actual permission
     */
    Assert(!(grants & ACL_USAGE) || (permissions & ACL_USAGE));

    Oid granteeOid = aclItem->ai_grantee;
    List *queries = NIL;

    /* switch to the role which had granted the ACL */
    queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor));

    /* generate the GRANT stmt that will be executed by the grantor role */
    if (permissions & ACL_USAGE)
    {
        char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights(
                                          OBJECT_FDW, granteeOid, FDWId, "USAGE",
                                          grants & ACL_USAGE));
        queries = lappend(queries, query);
    }

    /* reset the role back */
    queries = lappend(queries, "RESET ROLE");

    return queries;
}


/*
 * GetAccessPrivObjectForGrantStmt creates an AccessPriv object for the given permission.
 * It will be used when creating GrantStmt objects.

@@ -2048,6 +2327,93 @@ GetAccessPrivObjectForGrantStmt(char *permission)
}


/*
 * GrantOnSequenceDDLCommands creates a list of DDL commands for replicating the permissions
 * of roles on distributed sequences.
 */
static List *
GrantOnSequenceDDLCommands(Oid sequenceOid)
{
    HeapTuple seqtup = SearchSysCache1(RELOID, ObjectIdGetDatum(sequenceOid));
    bool isNull = false;
    Datum aclDatum = SysCacheGetAttr(RELOID, seqtup, Anum_pg_class_relacl,
                                     &isNull);
    if (isNull)
    {
        ReleaseSysCache(seqtup);
        return NIL;
    }

    Acl *acl = DatumGetAclPCopy(aclDatum);
    AclItem *aclDat = ACL_DAT(acl);
    int aclNum = ACL_NUM(acl);
    List *commands = NIL;

    ReleaseSysCache(seqtup);

    for (int i = 0; i < aclNum; i++)
    {
        commands = list_concat(commands,
                               GenerateGrantOnSequenceQueriesFromAclItem(
                                   sequenceOid,
                                   &aclDat[i]));
    }

    return commands;
}


/*
 * GenerateGrantOnSequenceQueriesFromAclItem generates a query string for replicating a user's permissions
 * on a distributed sequence.
 */
static List *
GenerateGrantOnSequenceQueriesFromAclItem(Oid sequenceOid, AclItem *aclItem)
{
    AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_SEQUENCE;
    AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_SEQUENCE;

    /*
     * seems unlikely but we check if there is a grant option in the list without the actual permission
     */
    Assert(!(grants & ACL_USAGE) || (permissions & ACL_USAGE));
    Assert(!(grants & ACL_SELECT) || (permissions & ACL_SELECT));
    Assert(!(grants & ACL_UPDATE) || (permissions & ACL_UPDATE));

    Oid granteeOid = aclItem->ai_grantee;
    List *queries = NIL;
    queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor));

    if (permissions & ACL_USAGE)
    {
        char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights(
                                          OBJECT_SEQUENCE, granteeOid, sequenceOid,
                                          "USAGE", grants & ACL_USAGE));
        queries = lappend(queries, query);
    }

    if (permissions & ACL_SELECT)
    {
        char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights(
                                          OBJECT_SEQUENCE, granteeOid, sequenceOid,
                                          "SELECT", grants & ACL_SELECT));
        queries = lappend(queries, query);
    }

    if (permissions & ACL_UPDATE)
    {
        char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights(
                                          OBJECT_SEQUENCE, granteeOid, sequenceOid,
                                          "UPDATE", grants & ACL_UPDATE));
        queries = lappend(queries, query);
    }

    queries = lappend(queries, "RESET ROLE");

    return queries;
}


/*
 * SetLocalEnableMetadataSync sets the enable_metadata_sync locally
 */

@@ -66,6 +66,9 @@
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/syscache.h"
#if PG_VERSION_NUM < 120000
#include "utils/tqual.h"
#endif

#define DISK_SPACE_FIELDS 2

@@ -2175,11 +2178,8 @@ EnsureSuperUser(void)
}


/*
 * Return a table's owner as a string.
 */
char *
TableOwner(Oid relationId)
Oid
TableOwnerOid(Oid relationId)
{
    HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId));
    if (!HeapTupleIsValid(tuple))

@@ -2191,8 +2191,17 @@ TableOwner(Oid relationId)
    Oid userId = ((Form_pg_class) GETSTRUCT(tuple))->relowner;

    ReleaseSysCache(tuple);
    return userId;
}

    return GetUserNameFromId(userId, false);

/*
 * Return a table's owner as a string.
 */
char *
TableOwner(Oid relationId)
{
    return GetUserNameFromId(TableOwnerOid(relationId), false);
}

@@ -421,20 +421,9 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
        case OBJECT_EXTENSION:
        case OBJECT_COLLATION:
        case OBJECT_VIEW:
        {
            check_object_ownership(userId, type, *addr, node, *relation);
            break;
        }

        case OBJECT_ROLE:
        {
            /* Support only extension owner role with community */
            if (addr->objectId != CitusExtensionOwner())
            {
                ereport(ERROR, (errmsg("Current user does not have required "
                                       "access privileges on role %d with type %d",
                                       addr->objectId, type)));
            }
            check_object_ownership(userId, type, *addr, node, *relation);
            break;
        }

@@ -623,6 +623,17 @@ GetPreLoadTableCreationCommands(Oid relationId,
        }
    }

    List *tableACLList = pg_get_table_grants(relationId);
    if (tableACLList != NIL)
    {
        char *tableACLCommand = NULL;
        foreach_ptr(tableACLCommand, tableACLList)
        {
            tableDDLEventList = lappend(tableDDLEventList,
                                        makeTableDDLCommandString(tableACLCommand));
        }
    }

    char *tableOwnerDef = TableOwnerResetCommand(relationId);
    if (tableOwnerDef != NULL)
    {

@@ -630,6 +641,9 @@ GetPreLoadTableCreationCommands(Oid relationId,
                                    tableOwnerDef));
    }

    List *tableRowLevelSecurityCommands = GetTableRowLevelSecurityCommands(relationId);
    tableDDLEventList = list_concat(tableDDLEventList, tableRowLevelSecurityCommands);

    List *policyCommands = CreatePolicyCommands(relationId);
    tableDDLEventList = list_concat(tableDDLEventList, policyCommands);

@@ -778,6 +792,29 @@ GatherIndexAndConstraintDefinitionList(Form_pg_index indexForm, List **indexDDLE
}


/*
 * GetTableRowLevelSecurityCommands takes in a relationId, and returns the list of
 * commands needed to reconstruct the row level security policy.
 */
List *
GetTableRowLevelSecurityCommands(Oid relationId)
{
    List *rowLevelSecurityCommandList = NIL;

    List *rowLevelSecurityEnableCommands = pg_get_row_level_security_commands(relationId);

    char *rowLevelSecurityCommand = NULL;
    foreach_ptr(rowLevelSecurityCommand, rowLevelSecurityEnableCommands)
    {
        rowLevelSecurityCommandList = lappend(
            rowLevelSecurityCommandList,
            makeTableDDLCommandString(rowLevelSecurityCommand));
    }

    return rowLevelSecurityCommandList;
}
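
/*
 * Illustrative sketch (not part of this commit): the kind of statements the
 * list above would plausibly carry for a table with row level security
 * enabled and forced, e.g.:
 *
 *   ALTER TABLE public.events ENABLE ROW LEVEL SECURITY;
 *   ALTER TABLE public.events FORCE ROW LEVEL SECURITY;
 *
 * Each string is wrapped via makeTableDDLCommandString() so it can be
 * replayed on the shards like any other DDL event.
 */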

/*
 * IndexImpliedByAConstraint is a helper function to be used while scanning
 * pg_index. It returns true if the index identified by the given indexForm is

@@ -32,6 +32,7 @@
#include "distributed/metadata_cache.h"
#include "distributed/metadata_sync.h"
#include "distributed/multi_join_order.h"
#include "distributed/multi_logical_replication.h"
#include "distributed/multi_partitioning_utils.h"
#include "distributed/reference_table_utils.h"
#include "distributed/remote_commands.h"

@@ -53,6 +54,9 @@
#include "utils/syscache.h"

/* local function forward declarations */
static void VerifyTablesHaveReplicaIdentity(List *colocatedTableList);
static bool RelationCanPublishAllModifications(Oid relationId);
static bool CanUseLogicalReplication(Oid relationId, char shardReplicationMode);
static void ErrorIfTableCannotBeReplicated(Oid relationId);
static void RepairShardPlacement(int64 shardId, const char *sourceNodeName,
                                 int32 sourceNodePort, const char *targetNodeName,

@@ -64,6 +68,12 @@ static void ReplicateColocatedShardPlacement(int64 shardId, char *sourceNodeName
static void CopyShardTables(List *shardIntervalList, char *sourceNodeName,
                            int32 sourceNodePort, char *targetNodeName,
                            int32 targetNodePort, bool useLogicalReplication);
static void CopyShardTablesViaLogicalReplication(List *shardIntervalList,
                                                 char *sourceNodeName,
                                                 int32 sourceNodePort,
                                                 char *targetNodeName,
                                                 int32 targetNodePort);

static void CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName,
                                          int32 sourceNodePort,
                                          char *targetNodeName, int32 targetNodePort);

@@ -146,11 +156,10 @@ citus_copy_shard_placement(PG_FUNCTION_ARGS)
    char *targetNodeName = text_to_cstring(targetNodeNameText);

    char shardReplicationMode = LookupShardTransferMode(shardReplicationModeOid);
    if (shardReplicationMode == TRANSFER_MODE_FORCE_LOGICAL)
    if (doRepair && shardReplicationMode == TRANSFER_MODE_FORCE_LOGICAL)
    {
        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                        errmsg("the force_logical transfer mode is currently "
                               "unsupported")));
                        errmsg("logical replication cannot be used for repairs")));
    }

    ShardInterval *shardInterval = LoadShardInterval(shardId);

@@ -282,8 +291,7 @@ CheckSpaceConstraints(MultiConnection *connection, uint64 colocationSizeInBytes)
 * After that, there are two different paths. First one is blocking shard move in the
 * sense that during shard move all modifications are paused to the shard. The second
 * one relies on logical replication meaning that the writes blocked only for a very
 * short duration almost only when the metadata is actually being updated. This option
 * is currently only available in Citus Enterprise.
 * short duration almost only when the metadata is actually being updated.
 *
 * After successful move operation, shards in the source node gets deleted. If the move
 * fails at any point, this function throws an error, leaving the cluster without doing

@@ -354,23 +362,52 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
    }

    char shardReplicationMode = LookupShardTransferMode(shardReplicationModeOid);
    if (shardReplicationMode == TRANSFER_MODE_FORCE_LOGICAL)
    if (shardReplicationMode == TRANSFER_MODE_AUTOMATIC)
    {
        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                        errmsg("the force_logical transfer mode is currently "
                               "unsupported")));
        VerifyTablesHaveReplicaIdentity(colocatedTableList);
    }

    EnsureEnoughDiskSpaceForShardMove(colocatedShardList, sourceNodeName, sourceNodePort,
                                      targetNodeName, targetNodePort);

    BlockWritesToShardList(colocatedShardList);
    /*
     * At this point of the shard moves, we don't need to block the writes to
     * shards when logical replication is used.
     */
    bool useLogicalReplication = CanUseLogicalReplication(distributedTableId,
                                                          shardReplicationMode);
    if (!useLogicalReplication)
    {
        BlockWritesToShardList(colocatedShardList);
    }
    else
    {
        /*
         * We prevent multiple shard moves in a transaction that use logical
         * replication. That's because the first call opens a transaction block
         * on the worker to drop the old shard placement and replication slot
         * creation waits for pending transactions to finish, which will not
         * happen ever. In other words, we prevent a self-deadlock if both
         * source shard placements are on the same node.
         */
        if (PlacementMovedUsingLogicalReplicationInTX)
        {
            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                            errmsg("moving multiple shard placements via logical "
                                   "replication in the same transaction is currently "
                                   "not supported"),
                            errhint("If you wish to move multiple shard placements "
                                    "in a single transaction set the shard_transfer_mode "
                                    "to 'block_writes'.")));
        }

        PlacementMovedUsingLogicalReplicationInTX = true;
    }

    /*
     * CopyColocatedShardPlacement function copies given shard with its co-located
     * shards.
     */
    bool useLogicalReplication = false;
    CopyShardTables(colocatedShardList, sourceNodeName, sourceNodePort, targetNodeName,
                    targetNodePort, useLogicalReplication);

@@ -522,6 +559,74 @@ ErrorIfMoveUnsupportedTableType(Oid relationId)
}


/*
 * VerifyTablesHaveReplicaIdentity throws an error if any of the tables
 * do not have a replica identity, which is required for logical replication
 * to replicate UPDATE and DELETE commands.
 */
static void
VerifyTablesHaveReplicaIdentity(List *colocatedTableList)
{
    ListCell *colocatedTableCell = NULL;

    foreach(colocatedTableCell, colocatedTableList)
    {
        Oid colocatedTableId = lfirst_oid(colocatedTableCell);

        if (!RelationCanPublishAllModifications(colocatedTableId))
        {
            char *colocatedRelationName = get_rel_name(colocatedTableId);

            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                            errmsg("cannot use logical replication to transfer shards of "
                                   "the relation %s since it doesn't have a REPLICA "
                                   "IDENTITY or PRIMARY KEY", colocatedRelationName),
                            errdetail("UPDATE and DELETE commands on the shard will "
                                      "error out during logical replication unless "
                                      "there is a REPLICA IDENTITY or PRIMARY KEY."),
                            errhint("If you wish to continue without a replica "
                                    "identity set the shard_transfer_mode to "
                                    "'force_logical' or 'block_writes'.")));
        }
    }
}


/*
 * RelationCanPublishAllModifications returns true if the relation is safe to publish
 * all modification while being replicated via logical replication.
 */
static bool
RelationCanPublishAllModifications(Oid relationId)
{
    Relation relation = RelationIdGetRelation(relationId);
    bool canPublish = false;

    if (relation == NULL)
    {
        ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                        errmsg("could not open relation with OID %u", relationId)));
    }

    /* if relation has replica identity we are always good */
    if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL ||
        OidIsValid(RelationGetReplicaIndex(relation)))
    {
        canPublish = true;
    }

    /* partitioned tables do not contain any data themselves, can always replicate */
    if (PartitionedTable(relationId))
    {
        canPublish = true;
    }

    RelationClose(relation);

    return canPublish;
}
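
/*
 * Illustrative sketch (not part of this commit): two ways a user could make a
 * table pass the check above before an 'auto' transfer, shown here for a
 * hypothetical table public.events with a hypothetical unique index:
 *
 *   ALTER TABLE public.events ADD PRIMARY KEY (event_id);
 *   -- or, reusing an existing unique index:
 *   ALTER TABLE public.events REPLICA IDENTITY USING INDEX events_event_id_idx;
 */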

/*
 * BlockWritesToShardList blocks writes to all shards in the given shard
 * list. The function assumes that all the shards in the list are colocated.

@@ -567,6 +672,49 @@ BlockWritesToShardList(List *shardList)
}


/*
 * CanUseLogicalReplication returns true if the given table can be logically replicated.
 */
static bool
CanUseLogicalReplication(Oid relationId, char shardReplicationMode)
{
    if (shardReplicationMode == TRANSFER_MODE_BLOCK_WRITES)
    {
        /* user explicitly chose not to use logical replication */
        return false;
    }

    /*
     * Logical replication doesn't support replicating foreign tables and views.
     */
    if (!RegularTable(relationId))
    {
        ereport(LOG, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                      errmsg("Cannot use logical replication for "
                             "shard move since the relation %s is not "
                             "a regular relation",
                             get_rel_name(relationId))));

        return false;
    }

    /* Logical replication doesn't support inherited tables */
    if (IsParentTable(relationId))
    {
        ereport(LOG, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                      errmsg("Cannot use logical replication for "
                             "shard move since the relation %s is an "
                             "inherited relation",
                             get_rel_name(relationId))));

        return false;
    }

    return true;
}


/*
 * ErrorIfTableCannotBeReplicated function errors out if the given table is not suitable
 * for its shard being replicated. There are 2 cases in which shard replication is not

@@ -790,7 +938,16 @@ ReplicateColocatedShardPlacement(int64 shardId, char *sourceNodeName,
     */
    colocatedShardList = SortList(colocatedShardList, CompareShardIntervalsById);

    BlockWritesToShardList(colocatedShardList);
    /*
     * At this point of the shard replication, we don't need to block the writes to
     * shards when logical replication is used.
     */
    bool useLogicalReplication = CanUseLogicalReplication(distributedTableId,
                                                          shardReplicationMode);
    if (!useLogicalReplication)
    {
        BlockWritesToShardList(colocatedShardList);
    }

    ShardInterval *colocatedShard = NULL;
    foreach_ptr(colocatedShard, colocatedShardList)

@@ -805,6 +962,11 @@ ReplicateColocatedShardPlacement(int64 shardId, char *sourceNodeName,
                              targetNodeName, targetNodePort);
    }

    if (shardReplicationMode == TRANSFER_MODE_AUTOMATIC)
    {
        VerifyTablesHaveReplicaIdentity(colocatedTableList);
    }

    if (!IsCitusTableType(distributedTableId, REFERENCE_TABLE))
    {
        /*

@@ -818,7 +980,6 @@ ReplicateColocatedShardPlacement(int64 shardId, char *sourceNodeName,
        EnsureReferenceTablesExistOnAllNodesExtended(shardReplicationMode);
    }

    bool useLogicalReplication = false;
    CopyShardTables(colocatedShardList, sourceNodeName, sourceNodePort,
                    targetNodeName, targetNodePort, useLogicalReplication);

@@ -912,7 +1073,9 @@ CopyShardTables(List *shardIntervalList, char *sourceNodeName, int32 sourceNodeP

    if (useLogicalReplication)
    {
        /* only supported in Citus enterprise */
        CopyShardTablesViaLogicalReplication(shardIntervalList, sourceNodeName,
                                             sourceNodePort, targetNodeName,
                                             targetNodePort);
    }
    else
    {

@@ -922,6 +1085,50 @@ CopyShardTables(List *shardIntervalList, char *sourceNodeName, int32 sourceNodeP
}


/*
 * CopyShardTablesViaLogicalReplication copies a shard along with its co-located shards
 * from a source node to target node via logical replication.
 */
static void
CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeName,
                                     int32 sourceNodePort, char *targetNodeName,
                                     int32 targetNodePort)
{
    MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext,
                                                       "CopyShardTablesViaLogicalReplication",
                                                       ALLOCSET_DEFAULT_SIZES);
    MemoryContext oldContext = MemoryContextSwitchTo(localContext);

    /*
     * Iterate through the colocated shards and create them on the
     * target node. We do not create the indexes yet.
     */
    ShardInterval *shardInterval = NULL;
    foreach_ptr(shardInterval, shardIntervalList)
    {
        Oid relationId = shardInterval->relationId;
        uint64 shardId = shardInterval->shardId;
        List *tableRecreationCommandList = RecreateTableDDLCommandList(relationId);
        tableRecreationCommandList =
            WorkerApplyShardDDLCommandList(tableRecreationCommandList, shardId);

        char *tableOwner = TableOwner(shardInterval->relationId);

        SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort,
                                                  tableOwner,
                                                  tableRecreationCommandList);

        MemoryContextReset(localContext);
    }

    MemoryContextSwitchTo(oldContext);

    /* data copy is done separately when logical replication is used */
    LogicallyReplicateShards(shardIntervalList, sourceNodeName,
                             sourceNodePort, targetNodeName, targetNodePort);
}
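
/*
 * Illustrative sketch (not part of this commit): the per-iteration memory
 * context pattern used above, which keeps DDL command lists built for one
 * shard from accumulating across a long shard list. Names are generic and
 * shardCount is a hypothetical loop bound.
 */
MemoryContext loopContext = AllocSetContextCreate(CurrentMemoryContext,
                                                  "PerShardContext",
                                                  ALLOCSET_DEFAULT_SIZES);
MemoryContext oldContext = MemoryContextSwitchTo(loopContext);
for (int i = 0; i < shardCount; i++)
{
    /* ... palloc-heavy work for shard i ... */
    MemoryContextReset(loopContext);    /* frees this iteration's allocations */
}
MemoryContextSwitchTo(oldContext);
MemoryContextDelete(loopContext);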


/*
 * CopyShardTablesViaBlockWrites copies a shard along with its co-located shards
 * from a source node to target node via COPY command. While the command is in

@@ -989,12 +1196,12 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName,
    {
        List *shardForeignConstraintCommandList = NIL;
        List *referenceTableForeignConstraintList = NIL;
        List *commandList = NIL;

        CopyShardForeignConstraintCommandListGrouped(shardInterval,
                                                     &shardForeignConstraintCommandList,
                                                     &referenceTableForeignConstraintList);

        List *commandList = NIL;
        commandList = list_concat(commandList, shardForeignConstraintCommandList);
        commandList = list_concat(commandList, referenceTableForeignConstraintList);

@@ -5,7 +5,7 @@
 * left around. Shards that are left around are marked as state 4
 * (SHARD_STATE_TO_DELETE) in pg_dist_placement.
 *
 * Copyright (c), Citus Data, Inc.
 * Copyright (c) 2018, Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */

@@ -6,6 +8,8 @@
 *
 * Copyright (c) Citus Data, Inc.
 *
 * $Id$
 *
 *-------------------------------------------------------------------------
 */

@@ -317,7 +319,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state)

    /* Check that utilization field is up to date. */
    Assert(fillState->utilization == CalculateUtilization(fillState->totalCost,
                                                          fillState->capacity));
                                                          fillState->capacity)); /* lgtm[cpp/equality-on-floats] */

    /*
     * Check that fillState->totalCost is within 0.1% difference of

@@ -698,14 +700,6 @@ ExecutePlacementUpdates(List *placementUpdateList, Oid shardReplicationModeOid,

    ListCell *placementUpdateCell = NULL;

    char shardReplicationMode = LookupShardTransferMode(shardReplicationModeOid);
    if (shardReplicationMode == TRANSFER_MODE_FORCE_LOGICAL)
    {
        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                        errmsg("the force_logical transfer mode is currently "
                               "unsupported")));
    }

    DropOrphanedShardsInSeparateTransaction();

    foreach(placementUpdateCell, placementUpdateList)

@@ -2341,7 +2335,7 @@ FindAndMoveShardCost(float4 utilizationLowerBound,
    }
    if (newTargetUtilization == sourceFillState->utilization &&
        newSourceUtilization <= targetFillState->utilization
        )
        ) /* lgtm[cpp/equality-on-floats] */
    {
        /*
         * this can trigger when capacity of the nodes is not the

@@ -13,6 +13,7 @@
#include "postgres.h"
#include "c.h"
#include "fmgr.h"
#include "libpq-fe.h"

#include "catalog/pg_class.h"
#include "distributed/colocation_utils.h"

@@ -20,14 +21,17 @@
#include "distributed/metadata_cache.h"
#include "distributed/metadata_sync.h"
#include "distributed/multi_join_order.h"
#include "distributed/multi_partitioning_utils.h"
#include "distributed/multi_router_planner.h"
#include "distributed/pg_dist_partition.h"
#include "distributed/pg_dist_shard.h"
#include "distributed/remote_commands.h"
#include "distributed/reference_table_utils.h"
#include "distributed/resource_lock.h"
#include "distributed/worker_manager.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_transaction.h"
#include "distributed/version_compat.h"
#include "nodes/pg_list.h"
#include "storage/lock.h"
#include "utils/builtins.h"

@@ -42,6 +46,25 @@ PG_FUNCTION_INFO_V1(isolate_tenant_to_new_shard);
PG_FUNCTION_INFO_V1(worker_hash);


/* local function forward declarations */
static uint64 SplitShardByValue(ShardInterval *sourceShard, Datum distributionValueDatum);
static void ErrorIfCannotSplitShard(ShardInterval *sourceShard);
static void CreateSplitOffShards(ShardInterval *sourceShard, int hashedValue,
                                 List **splitOffShardList, int *isolatedShardId);
static List * ShardTemplateList(ShardInterval *sourceShard, int hashedValue,
                                int *isolatedShardIndex);
static ShardInterval * CreateSplitOffShardFromTemplate(ShardInterval *shardTemplate,
                                                       Oid relationId);
static List * SplitOffCommandList(ShardInterval *sourceShard,
                                  ShardInterval *splitOffShard);
static void ExecuteCommandListOnPlacements(List *commandList, List *placementList);
static void InsertSplitOffShardMetadata(List *splitOffShardList,
                                        List *sourcePlacementList);
static void CreateForeignConstraints(List *splitOffShardList, List *sourcePlacementList);
static void ExecuteCommandListOnWorker(char *nodeName, int nodePort, List *commandList);
static void DropShardList(List *shardIntervalList);


/*
 * isolate_tenant_to_new_shard isolates a tenant to its own shard by splitting
 * the current matching shard.

@@ -49,9 +72,100 @@ PG_FUNCTION_INFO_V1(worker_hash);
Datum
isolate_tenant_to_new_shard(PG_FUNCTION_ARGS)
{
    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                    errmsg("isolate_tenant_to_new_shard() is only supported on "
                           "Citus Enterprise")));
    CheckCitusVersion(ERROR);
    EnsureCoordinator();

    Oid relationId = PG_GETARG_OID(0);
    Datum inputDatum = PG_GETARG_DATUM(1);
    text *cascadeOptionText = PG_GETARG_TEXT_P(2);
    ListCell *colocatedTableCell = NULL;

    EnsureTableOwner(relationId);

    CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId);

    char partitionMethod = cacheEntry->partitionMethod;
    if (partitionMethod != DISTRIBUTE_BY_HASH)
    {
        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                        errmsg("cannot isolate tenant because tenant isolation "
                               "is only supported for hash distributed tables")));
    }

    if (PartitionedTable(relationId))
    {
        char *sourceRelationName = get_rel_name(relationId);

        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                        errmsg("cannot isolate shard placement of '%s', because it "
                               "is a partitioned table", sourceRelationName),
                        errdetail("Citus does not support isolating placements of "
                                  "partitioned tables.")));
    }

    List *colocatedTableList = ColocatedTableList(relationId);
    int colocatedTableCount = list_length(colocatedTableList);

    foreach(colocatedTableCell, colocatedTableList)
    {
        Oid colocatedTableId = lfirst_oid(colocatedTableCell);

        /*
         * At the moment, Citus does not support copying a shard if that shard's
         * relation is in a colocation group with a partitioned table or partition.
         */
        if (colocatedTableId != relationId &&
            PartitionedTable(colocatedTableId))
        {
            char *sourceRelationName = get_rel_name(relationId);
            char *colocatedRelationName = get_rel_name(colocatedTableId);

            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                            errmsg("cannot isolate shard placement of '%s', because it "
                                   "is a partitioned table", colocatedRelationName),
                            errdetail("In colocation group of '%s', a partitioned "
                                      "relation exists: '%s'. Citus does not support "
                                      "isolating placements of partitioned tables.",
                                      sourceRelationName, colocatedRelationName)));
        }
    }

    Oid inputDataType = get_fn_expr_argtype(fcinfo->flinfo, 1);
    char *tenantIdString = DatumToString(inputDatum, inputDataType);

    char *cascadeOptionString = text_to_cstring(cascadeOptionText);
    if (pg_strncasecmp(cascadeOptionString, "CASCADE", NAMEDATALEN) != 0 &&
        colocatedTableCount > 1)
    {
        char *relationName = get_rel_name(relationId);
        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                        errmsg("cannot isolate tenant because \"%s\" has colocated "
                               "tables", relationName),
                        errhint("Use CASCADE option to isolate tenants for the "
                                "colocated tables too. Example usage: "
                                "isolate_tenant_to_new_shard('%s', '%s', 'CASCADE')",
                                relationName, tenantIdString)));
    }

    EnsureReferenceTablesExistOnAllNodes();

    Var *distributionColumn = DistPartitionKey(relationId);

    /* earlier we checked that the table was hash partitioned, so there should be a distribution column */
    Assert(distributionColumn != NULL);

    Oid distributionColumnType = distributionColumn->vartype;

    Datum tenantIdDatum = StringToDatum(tenantIdString, distributionColumnType);
    ShardInterval *sourceShard = FindShardInterval(tenantIdDatum, cacheEntry);
    if (sourceShard == NULL)
    {
        ereport(ERROR, (errmsg("tenant does not have a shard")));
    }

    uint64 isolatedShardId = SplitShardByValue(sourceShard, tenantIdDatum);

    PG_RETURN_INT64(isolatedShardId);
}

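
/*
 * Illustrative sketch (not part of this commit): invoking the UDF above from
 * SQL. The table name and tenant value are hypothetical; CASCADE is required
 * here because 'orders' is assumed to have colocated tables.
 *
 *   SELECT isolate_tenant_to_new_shard('orders', 'tenant-42', 'CASCADE');
 *
 * The returned bigint is the id of the new shard that exclusively holds rows
 * whose distribution column hashes to the same value as 'tenant-42'.
 */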
@@ -86,3 +200,623 @@ worker_hash(PG_FUNCTION_ARGS)

    PG_RETURN_INT32(hashedValueDatum);
}


/*
 * SplitShardByValue gets a shard and a value which is in the range of
 * distribution column of this shard. Then, it splits this shard and all its
 * colocated shards into three; the lower range, the given value itself, and
 * the upper range. Finally, it returns the id of the shard which is created
 * for the given value.
 */
static uint64
SplitShardByValue(ShardInterval *sourceShard, Datum distributionValueDatum)
{
    Oid relationId = sourceShard->relationId;
    int isolatedShardId = 0;
    List *splitOffShardList = NIL;

    if (XactModificationLevel > XACT_MODIFICATION_NONE)
    {
        ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
                        errmsg("cannot isolate a tenant after other modifications "
                               "in the same transaction")));
    }

    /* sort the tables to avoid deadlocks */
    List *colocatedTableList = ColocatedTableList(relationId);
    colocatedTableList = SortList(colocatedTableList, CompareOids);

    Oid colocatedTableId = InvalidOid;
    foreach_oid(colocatedTableId, colocatedTableList)
    {
        /*
         * Block concurrent DDL / TRUNCATE commands on the relation. Similarly,
         * block concurrent citus_move_shard_placement()/isolate_tenant_to_new_shard()
         * on any shard of the same relation. This is OK for now since
         * we're executing shard moves/splits sequentially anyway.
         */
        LockRelationOid(colocatedTableId, ShareUpdateExclusiveLock);
    }

    /* get colocated shard list */
    List *colocatedShardList = ColocatedShardIntervalList(sourceShard);

    /* get locks */
    BlockWritesToShardList(colocatedShardList);

    ErrorIfCannotSplitShard(sourceShard);

    /* get hash function name */
    CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId);
    FmgrInfo *hashFunction = cacheEntry->hashFunction;

    /* get hashed value of the distribution value */
    Datum hashedValueDatum = FunctionCall1(hashFunction, distributionValueDatum);
    int hashedValue = DatumGetInt32(hashedValueDatum);

    /* create a list of nodes with source shard placements */
    List *sourcePlacementList = ActiveShardPlacementList(sourceShard->shardId);

    /* create new shards in a separate transaction and commit them */
    CreateSplitOffShards(sourceShard, hashedValue, &splitOffShardList, &isolatedShardId);

    /*
     * Drop old shards and delete related metadata. Have to do that before
     * creating the new shard metadata, because there's cross-checks
     * preventing inconsistent metadata (like overlapping shards).
     */
    DropShardList(colocatedShardList);

    /* insert new metadata */
    InsertSplitOffShardMetadata(splitOffShardList, sourcePlacementList);

    /*
     * Create foreign keys if exists after the metadata changes happening in
     * DropShardList() and InsertSplitOffShardMetadata() because the foreign
     * key creation depends on the new metadata.
     */
    CreateForeignConstraints(splitOffShardList, sourcePlacementList);

    CitusInvalidateRelcacheByRelid(DistShardRelationId());

    return isolatedShardId;
}
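
/*
 * Illustrative sketch (not part of this commit): the three-way range split
 * described above, in plain arithmetic. For a hash shard covering
 * [shardMinValue, shardMaxValue] and a tenant whose value hashes to h, the
 * replacement shards would cover the ranges noted below. Values are a
 * hypothetical first-of-four shard range.
 */
int32 shardMinValue = -2147483648;
int32 shardMaxValue = -1073741825;
int32 h = -2000000000;   /* hashed tenant value, within the source range */

/* [shardMinValue, h - 1]  -> lower remainder (skipped if h == shardMinValue) */
/* [h, h]                  -> the isolated tenant shard                       */
/* [h + 1, shardMaxValue]  -> upper remainder (skipped if h == shardMaxValue) */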

/*
 * CreateForeignConstraints creates the foreign constraints on the newly
 * created shards via the tenant isolation.
 *
 * The function treats foreign keys to reference tables and foreign keys to
 * co-located distributed tables differently. The former one needs to be
 * executed over a single connection to prevent self-deadlocks. The latter
 * one can be executed in parallel if there are multiple replicas.
 */
static void
CreateForeignConstraints(List *splitOffShardList, List *sourcePlacementList)
{
    ListCell *splitOffShardCell = NULL;

    List *colocatedShardForeignConstraintCommandList = NIL;
    List *referenceTableForeignConstraintList = NIL;

    foreach(splitOffShardCell, splitOffShardList)
    {
        ShardInterval *splitOffShard = (ShardInterval *) lfirst(splitOffShardCell);

        List *currentColocatedForeignKeyList = NIL;
        List *currentReferenceForeignKeyList = NIL;

        CopyShardForeignConstraintCommandListGrouped(splitOffShard,
                                                     &currentColocatedForeignKeyList,
                                                     &currentReferenceForeignKeyList);

        colocatedShardForeignConstraintCommandList =
            list_concat(colocatedShardForeignConstraintCommandList,
                        currentColocatedForeignKeyList);
        referenceTableForeignConstraintList =
            list_concat(referenceTableForeignConstraintList,
                        currentReferenceForeignKeyList);
    }

    /*
     * We can use parallel connections while creating co-located foreign keys
     * on the source placements.
     * However, foreign keys to reference tables need to be created using a single
     * connection per worker to prevent self-deadlocks.
     */
    if (colocatedShardForeignConstraintCommandList != NIL)
    {
        ExecuteCommandListOnPlacements(colocatedShardForeignConstraintCommandList,
                                       sourcePlacementList);
    }

    if (referenceTableForeignConstraintList != NIL)
    {
        ListCell *shardPlacementCell = NULL;
        foreach(shardPlacementCell, sourcePlacementList)
        {
            ShardPlacement *shardPlacement =
                (ShardPlacement *) lfirst(shardPlacementCell);

            char *nodeName = shardPlacement->nodeName;
            int32 nodePort = shardPlacement->nodePort;

            /*
             * We're using the connections that we've used for dropping the
             * source placements within the same coordinated transaction.
             */
            ExecuteCommandListOnWorker(nodeName, nodePort,
                                       referenceTableForeignConstraintList);
        }
    }
}


/*
 * ExecuteCommandListOnWorker executes the command on the given node within
 * the coordinated 2PC.
 */
static void
ExecuteCommandListOnWorker(char *nodeName, int nodePort, List *commandList)
{
    ListCell *commandCell = NULL;

    foreach(commandCell, commandList)
    {
        char *command = (char *) lfirst(commandCell);

        SendCommandToWorker(nodeName, nodePort, command);
    }
}
/*
|
||||
* ErrorIfCannotSplitShard checks relation kind and invalid shards. It errors
|
||||
* out if we are not able to split the given shard.
|
||||
*/
|
||||
static void
|
||||
ErrorIfCannotSplitShard(ShardInterval *sourceShard)
|
||||
{
|
||||
Oid relationId = sourceShard->relationId;
|
||||
ListCell *colocatedTableCell = NULL;
|
||||
ListCell *colocatedShardCell = NULL;
|
||||
|
||||
/* checks for table ownership and foreign tables */
|
||||
List *colocatedTableList = ColocatedTableList(relationId);
|
||||
foreach(colocatedTableCell, colocatedTableList)
|
||||
{
|
||||
Oid colocatedTableId = lfirst_oid(colocatedTableCell);
|
||||
|
||||
/* check that user has owner rights in all co-located tables */
|
||||
EnsureTableOwner(colocatedTableId);
|
||||
|
||||
char relationKind = get_rel_relkind(colocatedTableId);
|
||||
if (relationKind == RELKIND_FOREIGN_TABLE)
|
||||
{
|
||||
char *relationName = get_rel_name(colocatedTableId);
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot isolate tenant because \"%s\" is a "
|
||||
"foreign table", relationName),
|
||||
errdetail("Isolating shards backed by foreign tables "
|
||||
"is not supported.")));
|
||||
}
|
||||
}
|
||||
|
||||
/* check shards with inactive placements */
|
||||
List *colocatedShardList = ColocatedShardIntervalList(sourceShard);
|
||||
foreach(colocatedShardCell, colocatedShardList)
|
||||
{
|
||||
ShardInterval *shardInterval = (ShardInterval *) lfirst(colocatedShardCell);
|
||||
uint64 shardId = shardInterval->shardId;
|
||||
ListCell *shardPlacementCell = NULL;
|
||||
|
||||
List *shardPlacementList = ShardPlacementListWithoutOrphanedPlacements(shardId);
|
||||
foreach(shardPlacementCell, shardPlacementList)
|
||||
{
|
||||
ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell);
|
||||
if (placement->shardState != SHARD_STATE_ACTIVE)
|
||||
{
|
||||
char *relationName = get_rel_name(shardInterval->relationId);
|
||||
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot isolate tenant because relation "
|
||||
"\"%s\" has an inactive shard placement "
|
||||
"for the shard %lu", relationName, shardId),
|
||||
errhint("Use master_copy_shard_placement UDF to "
|
||||
"repair the inactive shard placement.")));
|
||||
}
|
||||
}
|
||||
}
|
||||
}


/*
 * CreateSplitOffShards gets a shard and a hashed value to pick the split point.
 * First, it creates templates for the new shards. Then, for every co-located
 * shard, it creates the new split shards' metadata and physically creates them
 * on the worker nodes. This function returns the newly created split off shards
 * and the matching shard id for the source shard and hashed value via the
 * passed parameters.
 */
static void
CreateSplitOffShards(ShardInterval *sourceShard, int hashedValue,
					 List **splitOffShardList, int *isolatedShardId)
{
	List *nodeCommandList = NIL;
	ListCell *sourceColocatedShardCell = NULL;
	int isolatedShardIndex = 0;

	List *sourceColocatedShardList = ColocatedShardIntervalList(sourceShard);
	List *shardTemplateList = ShardTemplateList(sourceShard, hashedValue,
												&isolatedShardIndex);

	foreach(sourceColocatedShardCell, sourceColocatedShardList)
	{
		ShardInterval *sourceColocatedShard =
			(ShardInterval *) lfirst(sourceColocatedShardCell);
		Oid relationId = sourceColocatedShard->relationId;
		ListCell *templateShardCell = NULL;
		int currentShardIndex = 0;

		foreach(templateShardCell, shardTemplateList)
		{
			ShardInterval *templateShard = (ShardInterval *) lfirst(templateShardCell);

			ShardInterval *splitOffShard = CreateSplitOffShardFromTemplate(templateShard,
																		   relationId);
			List *splitOffCommandList = SplitOffCommandList(sourceColocatedShard,
															splitOffShard);
			nodeCommandList = list_concat(nodeCommandList, splitOffCommandList);

			/* check if this is the isolated shard for the given table */
			if (splitOffShard->relationId == sourceShard->relationId &&
				currentShardIndex == isolatedShardIndex)
			{
				(*isolatedShardId) = splitOffShard->shardId;
			}

			/* add newly created split off shards to list */
			(*splitOffShardList) = lappend(*splitOffShardList, splitOffShard);

			currentShardIndex++;
		}
	}

	List *sourcePlacementList = ActiveShardPlacementList(sourceShard->shardId);
	ExecuteCommandListOnPlacements(nodeCommandList, sourcePlacementList);
}


/*
 * ShardTemplateList creates shard templates with new min and max values from
 * the given shard and the split point, which is the given hashed value.
 * It returns the list of shard templates, and passes the isolated shard index
 * via the isolatedShardIndex parameter.
 */
static List *
ShardTemplateList(ShardInterval *sourceShard, int hashedValue, int *isolatedShardIndex)
{
	List *shardTemplateList = NIL;

	/* get min and max values of the source shard */
	int32 shardMinValue = DatumGetInt32(sourceShard->minValue);
	int32 shardMaxValue = DatumGetInt32(sourceShard->maxValue);

	(*isolatedShardIndex) = 0;

	/* add a shard template for the lower range, if it exists */
	if (shardMinValue < hashedValue)
	{
		ShardInterval *lowerRangeShard = CopyShardInterval(sourceShard);

		lowerRangeShard->minValue = Int32GetDatum(shardMinValue);
		lowerRangeShard->maxValue = Int32GetDatum(hashedValue - 1);

		shardTemplateList = lappend(shardTemplateList, lowerRangeShard);
		(*isolatedShardIndex) = 1;
	}

	/* add the shard template for the isolated value */
	ShardInterval *isolatedShard = CopyShardInterval(sourceShard);

	isolatedShard->minValue = Int32GetDatum(hashedValue);
	isolatedShard->maxValue = Int32GetDatum(hashedValue);

	shardTemplateList = lappend(shardTemplateList, isolatedShard);

	/* add a shard template for the upper range, if it exists */
	if (shardMaxValue > hashedValue)
	{
		ShardInterval *upperRangeShard = CopyShardInterval(sourceShard);

		upperRangeShard->minValue = Int32GetDatum(hashedValue + 1);
		upperRangeShard->maxValue = Int32GetDatum(shardMaxValue);

		shardTemplateList = lappend(shardTemplateList, upperRangeShard);
	}

	if (list_length(shardTemplateList) == 1)
	{
		char *tableName = get_rel_name(sourceShard->relationId);
		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						errmsg("table \"%s\" has already been isolated for the "
							   "given value", tableName)));
	}

	return shardTemplateList;
}
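
For illustration only (this sketch is not part of the diff): for a source shard covering the full 32-bit hash range and a split point of 1000, ShardTemplateList produces three templates — a lower range, the single-value isolated range, and an upper range. A minimal standalone sketch, assuming 32-bit hash bounds:

```c
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	int32_t shardMinValue = INT32_MIN; /* source shard min value */
	int32_t shardMaxValue = INT32_MAX; /* source shard max value */
	int32_t hashedValue = 1000;        /* hash of the tenant id being isolated */

	/* lower range template exists only when there are values below the split point */
	if (shardMinValue < hashedValue)
	{
		printf("lower range:    [%" PRId32 ", %" PRId32 "]\n",
			   shardMinValue, hashedValue - 1);
	}

	/* the isolated shard covers exactly one hash value */
	printf("isolated value: [%" PRId32 ", %" PRId32 "]\n", hashedValue, hashedValue);

	/* upper range template exists only when there are values above the split point */
	if (shardMaxValue > hashedValue)
	{
		printf("upper range:    [%" PRId32 ", %" PRId32 "]\n",
			   hashedValue + 1, shardMaxValue);
	}
	return 0;
}
```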


/*
 * CreateSplitOffShardFromTemplate creates a new split off shard from the given
 * shard template by creating a new shard id and setting the relation id.
 */
static ShardInterval *
CreateSplitOffShardFromTemplate(ShardInterval *shardTemplate, Oid relationId)
{
	ShardInterval *splitOffShard = CopyShardInterval(shardTemplate);

	/* set new shard id and the relation id */
	splitOffShard->shardId = GetNextShardId();
	splitOffShard->relationId = relationId;

	return splitOffShard;
}


/*
 * SplitOffCommandList creates a command list to run on worker nodes to create
 * a new split off shard from the source shard.
 */
static List *
SplitOffCommandList(ShardInterval *sourceShard, ShardInterval *splitOffShard)
{
	List *splitOffCommandList = NIL;
	bool includeSequenceDefaults = false;

	Oid relationId = sourceShard->relationId;
	Var *partitionKey = DistPartitionKey(relationId);
	Assert(partitionKey != NULL);
	const char *partitionColumnName = get_attname(relationId,
												  partitionKey->varattno, false);
	const char *quotedPartitionColumnName = quote_identifier(partitionColumnName);

	char *splitOffShardName = ConstructQualifiedShardName(splitOffShard);
	char *sourceShardName = ConstructQualifiedShardName(sourceShard);

	int32 shardMinValue = DatumGetInt32(splitOffShard->minValue);
	int32 shardMaxValue = DatumGetInt32(splitOffShard->maxValue);

	List *tableCreationCommandList =
		GetPreLoadTableCreationCommands(relationId, includeSequenceDefaults, NULL);
	tableCreationCommandList = WorkerApplyShardDDLCommandList(tableCreationCommandList,
															  splitOffShard->shardId);

	splitOffCommandList = list_concat(splitOffCommandList, tableCreationCommandList);

	StringInfo splitOffShardCommand = makeStringInfo();
	appendStringInfo(splitOffShardCommand,
					 "INSERT INTO %s SELECT * FROM %s WHERE "
					 "worker_hash(%s) >= %d AND worker_hash(%s) <= %d",
					 splitOffShardName, sourceShardName, quotedPartitionColumnName,
					 shardMinValue, quotedPartitionColumnName, shardMaxValue);

	splitOffCommandList = lappend(splitOffCommandList, splitOffShardCommand->data);

	List *indexCommandList = GetPostLoadTableCreationCommands(relationId, true, true);
	indexCommandList = WorkerApplyShardDDLCommandList(indexCommandList,
													  splitOffShard->shardId);

	splitOffCommandList = list_concat(splitOffCommandList, indexCommandList);

	return splitOffCommandList;
}


/*
 * ExecuteCommandListOnPlacements runs the given command list on the nodes of
 * the given shard placement list. First, it creates connections. Then it sends
 * the commands one by one. For every command, it first sends the command to all
 * connections and then checks the results. This helps to run long running
 * commands in parallel. Finally, it sends commit messages to all connections
 * and closes them.
 */
static void
ExecuteCommandListOnPlacements(List *commandList, List *placementList)
{
	List *workerConnectionList = NIL;
	ListCell *workerConnectionCell = NULL;
	ListCell *shardPlacementCell = NULL;
	ListCell *commandCell = NULL;

	/* create connections and start transactions */
	foreach(shardPlacementCell, placementList)
	{
		ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell);
		char *nodeName = shardPlacement->nodeName;
		int32 nodePort = shardPlacement->nodePort;

		int connectionFlags = FORCE_NEW_CONNECTION;
		char *currentUser = CurrentUserName();

		/* create a new connection */
		MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags,
																		  nodeName,
																		  nodePort,
																		  currentUser,
																		  NULL);

		/* mark the connection as critical and start the transaction */
		MarkRemoteTransactionCritical(workerConnection);
		RemoteTransactionBegin(workerConnection);

		/* add the connection to the list */
		workerConnectionList = lappend(workerConnectionList, workerConnection);
	}

	/* send and check results for every command one by one */
	foreach(commandCell, commandList)
	{
		char *command = lfirst(commandCell);

		/* first, only send the command */
		foreach(workerConnectionCell, workerConnectionList)
		{
			MultiConnection *workerConnection =
				(MultiConnection *) lfirst(workerConnectionCell);

			int querySent = SendRemoteCommand(workerConnection, command);
			if (querySent == 0)
			{
				ReportConnectionError(workerConnection, ERROR);
			}
		}

		/* then check the results separately to run long running commands in parallel */
		foreach(workerConnectionCell, workerConnectionList)
		{
			MultiConnection *workerConnection =
				(MultiConnection *) lfirst(workerConnectionCell);
			bool raiseInterrupts = true;

			PGresult *result = GetRemoteCommandResult(workerConnection, raiseInterrupts);
			if (!IsResponseOK(result))
			{
				ReportResultError(workerConnection, result, ERROR);
			}

			PQclear(result);
			ForgetResults(workerConnection);
		}
	}

	/* finally, commit each transaction and close the connections */
	foreach(workerConnectionCell, workerConnectionList)
	{
		MultiConnection *workerConnection =
			(MultiConnection *) lfirst(workerConnectionCell);

		RemoteTransactionCommit(workerConnection);
		CloseConnection(workerConnection);
	}
}
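
The send-first, check-later loop above is what lets one long-running command proceed on every placement at once. A minimal standalone sketch of the same pattern in plain libpq follows (a hypothetical illustration, not part of this diff; Citus' own version goes through its connection management and 2PC machinery, and the `worker1`/`worker2` conninfo strings are assumptions):

```c
#include <stdio.h>
#include <libpq-fe.h>

/* send one command to every connection, then collect all of the results */
static int
RunOnAllConnections(PGconn *connections[], int connectionCount, const char *command)
{
	/* phase 1: dispatch without waiting, so the servers work in parallel */
	for (int i = 0; i < connectionCount; i++)
	{
		if (!PQsendQuery(connections[i], command))
		{
			fprintf(stderr, "send failed: %s", PQerrorMessage(connections[i]));
			return 0;
		}
	}

	/* phase 2: only now block on the results, one connection at a time */
	for (int i = 0; i < connectionCount; i++)
	{
		PGresult *result = NULL;
		while ((result = PQgetResult(connections[i])) != NULL)
		{
			ExecStatusType status = PQresultStatus(result);
			if (status != PGRES_COMMAND_OK && status != PGRES_TUPLES_OK)
			{
				fprintf(stderr, "command failed: %s", PQerrorMessage(connections[i]));
			}
			PQclear(result);
		}
	}

	return 1;
}

int
main(void)
{
	/* hypothetical placement connection strings */
	const char *conninfos[] = { "host=worker1 port=5432", "host=worker2 port=5432" };
	PGconn *connections[2];

	for (int i = 0; i < 2; i++)
	{
		connections[i] = PQconnectdb(conninfos[i]);
		if (PQstatus(connections[i]) != CONNECTION_OK)
		{
			fprintf(stderr, "connection failed: %s", PQerrorMessage(connections[i]));
			return 1;
		}
	}

	/* a slow command overlaps on both workers instead of running serially */
	int succeeded = RunOnAllConnections(connections, 2, "SELECT pg_sleep(10)");

	for (int i = 0; i < 2; i++)
	{
		PQfinish(connections[i]);
	}
	return succeeded ? 0 : 1;
}
```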


/*
 * InsertSplitOffShardMetadata inserts new shard and shard placement data into
 * the catalog tables on both the coordinator and the MX nodes.
 */
static void
InsertSplitOffShardMetadata(List *splitOffShardList, List *sourcePlacementList)
{
	List *syncedShardList = NIL;
	ListCell *shardCell = NULL;
	ListCell *commandCell = NULL;

	/* add new metadata */
	foreach(shardCell, splitOffShardList)
	{
		ShardInterval *splitOffShard = (ShardInterval *) lfirst(shardCell);
		Oid relationId = splitOffShard->relationId;
		uint64 shardId = splitOffShard->shardId;
		char storageType = splitOffShard->storageType;
		ListCell *shardPlacementCell = NULL;

		int32 shardMinValue = DatumGetInt32(splitOffShard->minValue);
		int32 shardMaxValue = DatumGetInt32(splitOffShard->maxValue);
		text *shardMinValueText = IntegerToText(shardMinValue);
		text *shardMaxValueText = IntegerToText(shardMaxValue);

		InsertShardRow(relationId, shardId, storageType, shardMinValueText,
					   shardMaxValueText);

		/* split off shard placement metadata */
		foreach(shardPlacementCell, sourcePlacementList)
		{
			ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell);
			uint64 shardSize = 0;

			InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID, SHARD_STATE_ACTIVE,
									shardSize, placement->groupId);
		}

		if (ShouldSyncTableMetadata(relationId))
		{
			syncedShardList = lappend(syncedShardList, splitOffShard);
		}
	}

	/* send the commands to the metadata-synced nodes one by one */
	List *splitOffShardMetadataCommandList = ShardListInsertCommand(syncedShardList);
	foreach(commandCell, splitOffShardMetadataCommandList)
	{
		char *command = (char *) lfirst(commandCell);
		SendCommandToWorkersWithMetadata(command);
	}
}


/*
 * DropShardList drops shards and their metadata from both the coordinator and
 * the MX nodes.
 */
static void
DropShardList(List *shardIntervalList)
{
	ListCell *shardIntervalCell = NULL;

	foreach(shardIntervalCell, shardIntervalList)
	{
		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
		ListCell *shardPlacementCell = NULL;
		Oid relationId = shardInterval->relationId;
		uint64 oldShardId = shardInterval->shardId;

		/* delete metadata from synced nodes */
		if (ShouldSyncTableMetadata(relationId))
		{
			ListCell *commandCell = NULL;

			/* send the commands one by one */
			List *shardMetadataDeleteCommandList = ShardDeleteCommandList(shardInterval);
			foreach(commandCell, shardMetadataDeleteCommandList)
			{
				char *command = (char *) lfirst(commandCell);
				SendCommandToWorkersWithMetadata(command);
			}
		}

		/* delete shard placements and drop shards */
		List *shardPlacementList = ActiveShardPlacementList(oldShardId);
		foreach(shardPlacementCell, shardPlacementList)
		{
			ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell);
			char *workerName = placement->nodeName;
			uint32 workerPort = placement->nodePort;
			StringInfo dropQuery = makeStringInfo();

			DeleteShardPlacementRow(placement->placementId);

			/* get shard name */
			char *qualifiedShardName = ConstructQualifiedShardName(shardInterval);

			char storageType = shardInterval->storageType;
			if (storageType == SHARD_STORAGE_TABLE)
			{
				appendStringInfo(dropQuery, DROP_REGULAR_TABLE_COMMAND,
								 qualifiedShardName);
			}
			else if (storageType == SHARD_STORAGE_FOREIGN)
			{
				appendStringInfo(dropQuery, DROP_FOREIGN_TABLE_COMMAND,
								 qualifiedShardName);
			}

			/* drop the old shard */
			SendCommandToWorker(workerName, workerPort, dropQuery->data);
		}

		/* delete the shard row */
		DeleteShardRow(oldShardId);
	}
}

@@ -40,6 +40,7 @@
 #include "distributed/multi_physical_planner.h"
 #include "distributed/pg_dist_partition.h"
 #include "distributed/query_pushdown_planning.h"
+#include "distributed/string_utils.h"
 #include "distributed/tdigest_extension.h"
 #include "distributed/worker_protocol.h"
 #include "distributed/version_compat.h"
@@ -58,9 +59,6 @@
 #include "utils/rel.h"
 #include "utils/syscache.h"

-#define StartsWith(msg, prefix) \
-	(strncmp(msg, prefix, strlen(prefix)) == 0)
-
 /* Config variable managed via guc.c */
 int LimitClauseRowFetchCount = -1; /* number of rows to fetch from each task */
 double CountDistinctErrorRate = 0.0; /* precision of count(distinct) approximate */
@@ -3418,7 +3416,7 @@ GetAggregateType(Aggref *aggregateExpression)
 	 * perform these checks if there is some chance it will actually result in a positive
 	 * hit.
 	 */
-	if (StartsWith(aggregateProcName, "tdigest"))
+	if (StringStartsWith(aggregateProcName, "tdigest"))
 	{
 		if (aggFunctionId == TDigestExtensionAggTDigest1())
 		{

(File diff suppressed because it is too large)

@@ -40,6 +40,7 @@
 #include "distributed/connection_management.h"
 #include "distributed/cte_inline.h"
 #include "distributed/distributed_deadlock_detection.h"
 #include "distributed/errormessage.h"
 #include "distributed/insert_select_executor.h"
 #include "distributed/intermediate_result_pruning.h"
 #include "distributed/local_multi_copy.h"
@@ -47,6 +48,7 @@
 #include "distributed/local_distributed_join_planner.h"
 #include "distributed/locally_reserved_shared_connections.h"
 #include "distributed/maintenanced.h"
+#include "distributed/shard_cleaner.h"
 #include "distributed/metadata_utility.h"
 #include "distributed/coordinator_protocol.h"
 #include "distributed/metadata_cache.h"
@@ -55,6 +57,7 @@
 #include "distributed/multi_executor.h"
 #include "distributed/multi_explain.h"
 #include "distributed/multi_join_order.h"
+#include "distributed/multi_logical_replication.h"
 #include "distributed/multi_logical_optimizer.h"
 #include "distributed/distributed_planner.h"
 #include "distributed/combine_query_planner.h"
@@ -62,6 +65,7 @@
 #include "distributed/multi_server_executor.h"
 #include "distributed/pg_dist_partition.h"
 #include "distributed/placement_connection.h"
+#include "distributed/query_stats.h"
 #include "distributed/recursive_planning.h"
 #include "distributed/reference_table_utils.h"
 #include "distributed/relation_access_tracking.h"
@@ -190,6 +194,12 @@ static const struct config_enum_entry propagate_set_commands_options[] = {
 };
+
+
+static const struct config_enum_entry stat_statements_track_options[] = {
+	{ "none", STAT_STATEMENTS_TRACK_NONE, false },
+	{ "all", STAT_STATEMENTS_TRACK_ALL, false },
+	{ NULL, 0, false }
+};

 static const struct config_enum_entry task_assignment_policy_options[] = {
 	{ "greedy", TASK_ASSIGNMENT_GREEDY, false },
 	{ "first-replica", TASK_ASSIGNMENT_FIRST_REPLICA, false },
@@ -857,7 +867,7 @@ RegisterCitusConfigVariables(void)
 					 "workers"),
 		NULL,
 		&EnableAlterDatabaseOwner,
-		false,
+		true,
 		PGC_USERSET,
 		GUC_NO_SHOW_ALL,
 		NULL, NULL, NULL);
@@ -910,6 +920,17 @@ RegisterCitusConfigVariables(void)
 		GUC_NO_SHOW_ALL,
 		NULL, NULL, NULL);

+	DefineCustomBoolVariable(
+		"citus.enable_create_role_propagation",
+		gettext_noop("Enables propagating CREATE ROLE "
+					 "and DROP ROLE statements to workers"),
+		NULL,
+		&EnableCreateRolePropagation,
+		true,
+		PGC_USERSET,
+		GUC_STANDARD,
+		NULL, NULL, NULL);
+
 	DefineCustomBoolVariable(
 		"citus.enable_create_type_propagation",
 		gettext_noop("Enables propagating of CREATE TYPE statements to workers"),
@@ -1357,6 +1378,18 @@ RegisterCitusConfigVariables(void)
 		GUC_STANDARD,
 		NULL, NULL, NULL);

+	DefineCustomIntVariable(
+		"citus.logical_replication_timeout",
+		gettext_noop("Sets the timeout to error out when logical replication is used"),
+		gettext_noop("Citus uses logical replication when it moves/replicates shards. "
+					 "This setting determines when Citus gives up waiting for progress "
+					 "during logical replication and errors out."),
+		&LogicalReplicationTimeout,
+		2 * 60 * 60 * 1000, 0, 7 * 24 * 3600 * 1000,
+		PGC_SIGHUP,
+		GUC_NO_SHOW_ALL | GUC_UNIT_MS,
+		NULL, NULL, NULL);
+
 	DefineCustomIntVariable(
 		"citus.max_adaptive_executor_pool_size",
 		gettext_noop("Sets the maximum number of connections per worker node used by "
@@ -1756,6 +1789,22 @@ RegisterCitusConfigVariables(void)
 		ShowShardsForAppNamePrefixesAssignHook,
 		NULL);

+	DefineCustomBoolVariable(
+		"citus.skip_jsonb_validation_in_copy",
+		gettext_noop("Skip validation of JSONB columns on the coordinator during COPY "
+					 "into a distributed table"),
+		gettext_noop("Parsing large JSON objects may incur significant CPU overhead, "
+					 "which can lower COPY throughput. If this GUC is set (the default), "
+					 "JSON parsing is skipped on the coordinator, which means you cannot "
+					 "see the line number in case of malformed JSON, but throughput will "
+					 "be higher. This setting does not apply if the input format is "
+					 "binary."),
+		&SkipJsonbValidationInCopy,
+		true,
+		PGC_USERSET,
+		0,
+		NULL, NULL, NULL);
+
 	DefineCustomBoolVariable(
 		"citus.sort_returning",
 		gettext_noop("Sorts the RETURNING clause to get consistent test output"),
@@ -1770,6 +1819,47 @@ RegisterCitusConfigVariables(void)
 		GUC_NO_SHOW_ALL,
 		NULL, NULL, NULL);

+	/*
+	 * It takes about 140 bytes of shared memory to store one row, therefore
+	 * this setting should be used responsibly. Setting it to 10M will require
+	 * 1.4GB of shared memory.
+	 */
+	DefineCustomIntVariable(
+		"citus.stat_statements_max",
+		gettext_noop("Determines maximum number of statements tracked by "
+					 "citus_stat_statements."),
+		NULL,
+		&StatStatementsMax,
+		50000, 1000, 10000000,
+		PGC_POSTMASTER,
+		GUC_NO_SHOW_ALL,
+		NULL, NULL, NULL);
+
+	DefineCustomIntVariable(
+		"citus.stat_statements_purge_interval",
+		gettext_noop("Determines time interval in seconds for "
+					 "citus_stat_statements to purge expired entries."),
+		NULL,
+		&StatStatementsPurgeInterval,
+		10, -1, INT_MAX,
+		PGC_SIGHUP,
+		GUC_UNIT_MS | GUC_NO_SHOW_ALL,
+		NULL, NULL, NULL);
+
+	DefineCustomEnumVariable(
+		"citus.stat_statements_track",
+		gettext_noop(
+			"Enables/Disables the stats collection for citus_stat_statements."),
+		gettext_noop("Enables the stats collection when set to 'all'. "
+					 "Disables when set to 'none'. Disabling can be useful for "
+					 "avoiding extra CPU cycles needed for the calculations."),
+		&StatStatementsTrack,
+		STAT_STATEMENTS_TRACK_NONE,
+		stat_statements_track_options,
+		PGC_SUSET,
+		GUC_STANDARD,
+		NULL, NULL, NULL);
+
 	DefineCustomBoolVariable(
 		"citus.subquery_pushdown",
 		gettext_noop("Usage of this GUC is highly discouraged, please read the long "
@@ -2116,8 +2206,10 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source)
 #if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
 		"krbsrvname",
 #endif
+		"sslcert",
 		"sslcompression",
 		"sslcrl",
+		"sslkey",
 		"sslmode",
 		"sslrootcert",
 		"tcp_user_timeout",

@@ -48,7 +48,7 @@ RETURNS boolean AS $$
       AND blocking_locks.pid != blocked_locks.pid
     JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
     WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%'
-  $two$) where result='citus_shard_move_subscription_' || pBlockedPid);
+  $two$) where result LIKE 'citus_shard_move_subscription_%_' || pBlockedPid);

 END;
 $$ LANGUAGE plpgsql;

@@ -48,7 +48,7 @@ RETURNS boolean AS $$
       AND blocking_locks.pid != blocked_locks.pid
     JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
     WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%'
-  $two$) where result='citus_shard_move_subscription_' || pBlockedPid);
+  $two$) where result LIKE 'citus_shard_move_subscription_%_' || pBlockedPid);

 END;
 $$ LANGUAGE plpgsql;

@@ -48,7 +48,7 @@ RETURNS boolean AS $$
       AND blocking_locks.pid != blocked_locks.pid
     JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
     WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%'
-  $two$) where result='citus_shard_move_subscription_' || pBlockedPid);
+  $two$) where result LIKE 'citus_shard_move_subscription_%_' || pBlockedPid);

 END;
 $$ LANGUAGE plpgsql;

@@ -5,7 +5,7 @@
 * This file contains UDFs for getting foreign constraint relationship between
 * distributed tables.
 *
- * Copyright (c), Citus Data, Inc.
+ * Copyright (c) Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */

@@ -0,0 +1,70 @@
/*-------------------------------------------------------------------------
 *
 * pg_send_cancellation.c
 *
 * This file contains functions to test pg_send_cancellation.
 *
 * Copyright (c) Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"
#include "miscadmin.h"
#include "fmgr.h"
#include "port.h"

#include "postmaster/postmaster.h"


#define PG_SEND_CANCELLATION_VERSION \
	"pg_send_cancellation (PostgreSQL) " PG_VERSION "\n"


/* exports for SQL callable functions */
PG_FUNCTION_INFO_V1(get_cancellation_key);
PG_FUNCTION_INFO_V1(run_pg_send_cancellation);


/*
 * get_cancellation_key returns the cancellation key of the current process
 * as an integer.
 */
Datum
get_cancellation_key(PG_FUNCTION_ARGS)
{
	PG_RETURN_INT32(MyCancelKey);
}


/*
 * run_pg_send_cancellation runs the pg_send_cancellation program with
 * the specified arguments.
 */
Datum
run_pg_send_cancellation(PG_FUNCTION_ARGS)
{
	int pid = PG_GETARG_INT32(0);
	int cancelKey = PG_GETARG_INT32(1);

	char sendCancellationPath[MAXPGPATH];
	char command[1024];

	/* locate executable backend before we change working directory */
	if (find_other_exec(my_exec_path, "pg_send_cancellation",
						PG_SEND_CANCELLATION_VERSION,
						sendCancellationPath) < 0)
	{
		ereport(ERROR, (errmsg("could not locate pg_send_cancellation")));
	}

	pg_snprintf(command, sizeof(command), "%s %d %d %s %d",
				sendCancellationPath, pid, cancelKey, "localhost", PostPortNumber);

	if (system(command) != 0)
	{
		ereport(ERROR, (errmsg("failed to run command: %s", command)));
	}

	PG_RETURN_VOID();
}

@@ -16,6 +16,7 @@
 #include "access/hash.h"
+#include "distributed/backend_data.h"
 #include "distributed/errormessage.h"
 #include "distributed/distributed_deadlock_detection.h"
 #include "distributed/errormessage.h"
 #include "distributed/hash_helpers.h"
@@ -31,6 +31,7 @@
 #include "distributed/locally_reserved_shared_connections.h"
 #include "distributed/maintenanced.h"
 #include "distributed/multi_executor.h"
+#include "distributed/multi_logical_replication.h"
 #include "distributed/multi_explain.h"
 #include "distributed/repartition_join_execution.h"
 #include "distributed/transaction_management.h"
@@ -316,6 +317,8 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)

 		UnSetDistributedTransactionId();

+		PlacementMovedUsingLogicalReplicationInTX = false;
+
 		/* empty the CommitContext to ensure we're not leaking memory */
 		MemoryContextSwitchTo(previousContext);
 		MemoryContextReset(CommitContext);
@@ -413,6 +416,8 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
 			 */
 			SubPlanLevel = 0;
 			UnSetDistributedTransactionId();
+
+			PlacementMovedUsingLogicalReplicationInTX = false;
 			break;
 		}

@@ -163,8 +163,6 @@ citus_check_defaults_for_sslmode(PG_FUNCTION_ARGS)
 		configChanged = true;
 	}

-	/* placeholder for extra changes to configuration before reloading */
-
 	if (configChanged)
 	{
 		GloballyReloadConfig();

@@ -8,9 +8,12 @@
 #include "postgres.h"
 #include "utils/memutils.h"

 #include "common/sha2.h"
 #include "distributed/citus_nodes.h"
 #include "distributed/errormessage.h"
 #include "distributed/log_utils.h"
 #include "utils/builtins.h"


 /*
  * DeferredErrorInternal is a helper function for DeferredError().

@@ -16,6 +16,7 @@
 #include "utils/builtins.h"
 #include "utils/regproc.h"


 /*
  * FunctionOid searches for a function that has the given name and the given
  * number of arguments, and returns the corresponding function's oid. The

@@ -9,8 +9,18 @@

 #include "postgres.h"

 #include "distributed/pg_version_constants.h"

 #include "utils/guc.h"
 #include "distributed/log_utils.h"
 #include "distributed/errormessage.h"
 #include "common/sha2.h"

 #include "utils/builtins.h"

 #if PG_VERSION_NUM >= PG_VERSION_14
 #include "common/cryptohash.h"
 #endif


 /*
@@ -26,7 +36,8 @@ IsLoggableLevel(int logLevel)


 /*
- * HashLogMessage is only supported in Citus Enterprise
+ * HashLogMessage is deprecated and doesn't do anything anymore. Its indirect
+ * usage will be removed later.
  */
 char *
 HashLogMessage(const char *logText)

@@ -37,8 +37,9 @@
 #include "distributed/maintenanced.h"
 #include "distributed/coordinator_protocol.h"
 #include "distributed/metadata_cache.h"
+#include "distributed/metadata_sync.h"
 #include "distributed/shard_cleaner.h"
-#include "distributed/metadata_sync.h"
+#include "distributed/query_stats.h"
 #include "distributed/statistics_collection.h"
 #include "distributed/transaction_recovery.h"
 #include "distributed/version_compat.h"
@@ -288,6 +289,7 @@ CitusMaintenanceDaemonMain(Datum main_arg)
 	ErrorContextCallback errorCallback;
 	TimestampTz lastRecoveryTime = 0;
 	TimestampTz lastShardCleanTime = 0;
+	TimestampTz lastStatStatementsPurgeTime = 0;
 	TimestampTz nextMetadataSyncTime = 0;


@@ -661,6 +663,34 @@ CitusMaintenanceDaemonMain(Datum main_arg)
 			timeout = Min(timeout, DeferShardDeleteInterval);
 		}

+		if (StatStatementsPurgeInterval > 0 &&
+			StatStatementsTrack != STAT_STATEMENTS_TRACK_NONE &&
+			TimestampDifferenceExceeds(lastStatStatementsPurgeTime, GetCurrentTimestamp(),
+									   (StatStatementsPurgeInterval * 1000)))
+		{
+			StartTransactionCommand();
+
+			if (!LockCitusExtension())
+			{
+				ereport(DEBUG1, (errmsg("could not lock the citus extension, "
+										"skipping stat statements purging")));
+			}
+			else if (CheckCitusVersion(DEBUG1) && CitusHasBeenLoaded())
+			{
+				/*
+				 * Record the last time we performed the purge, to ensure we
+				 * run once per StatStatementsPurgeInterval.
+				 */
+				lastStatStatementsPurgeTime = GetCurrentTimestamp();
+
+				CitusQueryStatsSynchronizeEntries();
+			}
+
+			CommitTransactionCommand();
+
+			/* make sure we don't wait too long; need to convert seconds to milliseconds */
+			timeout = Min(timeout, (StatStatementsPurgeInterval * 1000));
+		}
+
 		/*
 		 * Wait until timeout, or until somebody wakes us up. Also cast the timeout to

@@ -346,9 +346,9 @@ ReplicateReferenceTableShardToNode(ShardInterval *shardInterval, char *nodeName,
 	ShardPlacement *sourceShardPlacement = ActiveShardPlacement(shardId, missingOk);
 	char *srcNodeName = sourceShardPlacement->nodeName;
 	uint32 srcNodePort = sourceShardPlacement->nodePort;
-	bool includeData = true;
-	List *ddlCommandList =
-		CopyShardCommandList(shardInterval, srcNodeName, srcNodePort, includeData);
+	bool includeDataCopy = true; /* TODO: consider using logical replication */
+	List *ddlCommandList = CopyShardCommandList(shardInterval, srcNodeName, srcNodePort,
+												includeDataCopy);

 	ereport(NOTICE, (errmsg("Replicating reference table \"%s\" to the node %s:%d",
 							get_rel_name(shardInterval->relationId), nodeName,

@@ -37,6 +37,7 @@
 #include "distributed/resource_lock.h"
 #include "distributed/shardinterval_utils.h"
 #include "distributed/worker_protocol.h"
+#include "distributed/worker_transaction.h"
 #include "distributed/utils/array_type.h"
 #include "distributed/version_compat.h"
 #include "distributed/local_executor.h"
@@ -0,0 +1 @@
pg_send_cancellation

@@ -0,0 +1,20 @@
citus_top_builddir = ../../..

PROGRAM = pg_send_cancellation
PGFILEDESC = "pg_send_cancellation sends a custom cancellation message"
OBJS = $(citus_abs_srcdir)/src/bin/pg_send_cancellation/pg_send_cancellation.o
PG_CPPFLAGS = -I$(libpq_srcdir)
PG_LIBS_INTERNAL = $(libpq_pgport)
PG_LDFLAGS += $(LDFLAGS)

include $(citus_top_builddir)/Makefile.global

# We reuse all the Citus flags (incl. security flags), but we are building a program not a shared library
override CFLAGS := $(filter-out -shared,$(CFLAGS))

# Filter out unneeded dependencies
override LIBS := $(filter-out -lz -lreadline -ledit -ltermcap -lncurses -lcurses -lpam, $(LIBS))

clean: clean-pg_send_cancellation
clean-pg_send_cancellation:
	rm -f $(PROGRAM) $(OBJS)

@@ -0,0 +1,47 @@
# pg_send_cancellation

pg_send_cancellation is a program for manually sending a cancellation
to a Postgres endpoint. It is effectively a command-line version of
PQcancel in libpq, but it can use any PID or cancellation key.

We use pg_send_cancellation primarily to propagate cancellations between pgbouncers
behind a load balancer. Since the cancellation protocol involves
opening a new connection, the new connection may go to a different
node that does not recognize the cancellation key. To handle that
scenario, we modified pgbouncer to pass unrecognized cancellation
keys to a shell command.

Users can configure the cancellation_command, which will be run with:
```
<cancellation_command> <client ip> <client port> <pid> <cancel key>
```

Note that pgbouncer does not use actual PIDs. Instead, it generates the PID and
cancellation key together as a random 8-byte number. This makes the chance of
collisions exceedingly small.

By providing pg_send_cancellation as part of Citus, we can use a shell script
that pgbouncer invokes to propagate the cancellation to all *other* worker nodes
in the same cluster, for example:

```bash
#!/bin/sh
remote_ip=$1
remote_port=$2
pid=$3
cancel_key=$4

postgres_path=/usr/pgsql-14/bin
pgbouncer_port=6432

nodes_query="select nodename from pg_dist_node where groupid > 0 and groupid not in (select groupid from pg_dist_local_group) and nodecluster = current_setting('citus.cluster_name')"

# Get hostnames of other worker nodes in the cluster, and send a cancellation to their pgbouncers
$postgres_path/psql -c "$nodes_query" -tAX | xargs -n 1 sh -c "$postgres_path/pg_send_cancellation $pid $cancel_key \$0 $pgbouncer_port"
```

One thing we need to be careful about is that cancellations do not get forwarded
back and forth. This is handled in pgbouncer by setting the last bit of all generated
cancellation keys (sent to clients) to 1, and setting the last bit of all forwarded
keys to 0. That way, when a pgbouncer receives a cancellation key with the last bit
set to 0, it knows it came from another pgbouncer and should not forward it further,
and it should set the last bit back to 1 when comparing to stored cancellation keys.

Another thing we need to be careful about is that the integers should be encoded
as big endian on the wire.
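
To make the convention concrete, here is a small standalone sketch of the bit-flipping and wire encoding described above (an illustration only, not pgbouncer's actual code; it uses the 4-byte cancellation-key half and a made-up key value):

```c
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h> /* htonl: the wire format is big endian */

int
main(void)
{
	/* keys handed out to clients always have the last bit set to 1 */
	uint32_t clientKey = 0x1234ABCDu | 1u;

	/* before forwarding to another pgbouncer, clear the last bit */
	uint32_t forwardedKey = clientKey & ~1u;

	/* a receiver seeing last bit 0 knows the key was forwarded and stops the chain */
	if ((forwardedKey & 1u) == 0)
	{
		/* set the last bit back to 1 when comparing to stored keys */
		uint32_t lookupKey = forwardedKey | 1u;
		uint32_t wireKey = htonl(lookupKey); /* big endian on the wire */
		printf("lookup key: 0x%08x, wire encoding: 0x%08x\n", lookupKey, wireKey);
	}
	return 0;
}
```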

@@ -0,0 +1,261 @@
/*
 * pg_send_cancellation is a program for manually sending a cancellation
 * to a Postgres endpoint. It is effectively a command-line version of
 * PQcancel in libpq, but it can use any PID or cancellation key.
 *
 * Portions Copyright (c) Citus Data, Inc.
 *
 * For the internal_cancel function:
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation for any purpose, without fee, and without a written agreement
 * is hereby granted, provided that the above copyright notice and this
 * paragraph and the following two paragraphs appear in all copies.
 *
 * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
 * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
 * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 */
#include "postgres_fe.h"

#include <sys/stat.h>
#include <fcntl.h>
#include <ctype.h>
#include <time.h>
#include <unistd.h>

#include "common/ip.h"
#include "common/link-canary.h"
#include "common/scram-common.h"
#include "common/string.h"
#include "libpq-fe.h"
#include "libpq-int.h"
#include "mb/pg_wchar.h"
#include "port/pg_bswap.h"


#define ERROR_BUFFER_SIZE 256


static int internal_cancel(SockAddr *raddr, int be_pid, int be_key,
						   char *errbuf, int errbufsize);


/*
 * main entry point into the pg_send_cancellation program.
 */
int
main(int argc, char *argv[])
{
	if (argc == 2 && strcmp(argv[1], "-V") == 0)
	{
		pg_fprintf(stdout, "pg_send_cancellation (PostgreSQL) " PG_VERSION "\n");
		return 0;
	}

	if (argc < 4 || argc > 5)
	{
		char *program = argv[0];
		pg_fprintf(stderr, "%s requires 4 arguments\n\n", program);
		pg_fprintf(stderr, "Usage: %s <pid> <cancel key> <hostname> [port]\n", program);
		return 1;
	}

	char *pidString = argv[1];
	char *cancelKeyString = argv[2];
	char *host = argv[3];
	char *portString = "5432";

	if (argc >= 5)
	{
		portString = argv[4];
	}

	/* parse the PID and cancellation key */
	int pid = strtol(pidString, NULL, 10);
	int cancelAuthCode = strtol(cancelKeyString, NULL, 10);

	char errorBuffer[ERROR_BUFFER_SIZE] = { 0 };

	struct addrinfo *ipAddressList;
	struct addrinfo hint;
	int ipAddressListFamily = AF_UNSPEC;
	SockAddr socketAddress;

	memset(&hint, 0, sizeof(hint));
	hint.ai_socktype = SOCK_STREAM;
	hint.ai_family = ipAddressListFamily;

	/* resolve the hostname to an IP */
	int ret = pg_getaddrinfo_all(host, portString, &hint, &ipAddressList);
	if (ret || !ipAddressList)
	{
		pg_fprintf(stderr, "could not translate host name \"%s\" to address: %s\n",
				   host, gai_strerror(ret));
		return 1;
	}

	if (ipAddressList->ai_addrlen > sizeof(socketAddress.addr))
	{
		pg_fprintf(stderr, "invalid address length");
		return 1;
	}

	/*
	 * Explanation of IGNORE-BANNED:
	 * This is a common pattern when using getaddrinfo. The system guarantees
	 * that ai_addrlen < sizeof(socketAddress.addr); out of an abundance of
	 * caution, we also check it above.
	 */
	memcpy(&socketAddress.addr, ipAddressList->ai_addr, ipAddressList->ai_addrlen); /* IGNORE-BANNED */
	socketAddress.salen = ipAddressList->ai_addrlen;

	/* send the cancellation */
	bool cancelSucceeded = internal_cancel(&socketAddress, pid, cancelAuthCode,
										   errorBuffer, sizeof(errorBuffer));
	if (!cancelSucceeded)
	{
		pg_fprintf(stderr, "sending cancellation to %s:%s failed: %s",
				   host, portString, errorBuffer);
		return 1;
	}

	pg_freeaddrinfo_all(ipAddressListFamily, ipAddressList);

	return 0;
}


/* *INDENT-OFF* */

/*
 * internal_cancel is copied from fe-connect.c
 *
 * The return value is true if the cancel request was successfully
 * dispatched, false if not (in which case an error message is available).
 * Note: successful dispatch is no guarantee that there will be any effect at
 * the backend. The application must read the operation result as usual.
 *
 * CAUTION: we want this routine to be safely callable from a signal handler
 * (for example, an application might want to call it in a SIGINT handler).
 * This means we cannot use any C library routine that might be non-reentrant.
 * malloc/free are often non-reentrant, and anything that might call them is
 * just as dangerous. We avoid sprintf here for that reason. Building up
 * error messages with strcpy/strcat is tedious but should be quite safe.
 * We also save/restore errno in case the signal handler support doesn't.
 *
 * internal_cancel() is an internal helper function to make code-sharing
 * between the two versions of the cancel function possible.
 */
static int
internal_cancel(SockAddr *raddr, int be_pid, int be_key,
				char *errbuf, int errbufsize)
{
	int save_errno = SOCK_ERRNO;
	pgsocket tmpsock = PGINVALID_SOCKET;
	char sebuf[PG_STRERROR_R_BUFLEN];
	int maxlen;
	struct
	{
		uint32 packetlen;
		CancelRequestPacket cp;
	} crp;

	/*
	 * We need to open a temporary connection to the postmaster. Do this with
	 * only kernel calls.
	 */
	if ((tmpsock = socket(raddr->addr.ss_family, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
	{
		strlcpy(errbuf, "PQcancel() -- socket() failed: ", errbufsize);
		goto cancel_errReturn;
	}
retry3:
	if (connect(tmpsock, (struct sockaddr *) &raddr->addr, raddr->salen) < 0)
	{
		if (SOCK_ERRNO == EINTR)
			/* Interrupted system call - we'll just try again */
			goto retry3;
		strlcpy(errbuf, "PQcancel() -- connect() failed: ", errbufsize);
		goto cancel_errReturn;
	}

	/*
	 * We needn't set nonblocking I/O or NODELAY options here.
	 */

	/* Create and send the cancel request packet. */

	crp.packetlen = pg_hton32((uint32) sizeof(crp));
	crp.cp.cancelRequestCode = (MsgType) pg_hton32(CANCEL_REQUEST_CODE);
	crp.cp.backendPID = pg_hton32(be_pid);
	crp.cp.cancelAuthCode = pg_hton32(be_key);

retry4:
	if (send(tmpsock, (char *) &crp, sizeof(crp), 0) != (int) sizeof(crp))
	{
		if (SOCK_ERRNO == EINTR)
			/* Interrupted system call - we'll just try again */
			goto retry4;
		strlcpy(errbuf, "PQcancel() -- send() failed: ", errbufsize);
		goto cancel_errReturn;
	}

	/*
	 * Wait for the postmaster to close the connection, which indicates that
	 * it's processed the request. Without this delay, we might issue another
	 * command only to find that our cancel zaps that command instead of the
	 * one we thought we were canceling. Note we don't actually expect this
	 * read to obtain any data, we are just waiting for EOF to be signaled.
	 */
retry5:
	if (recv(tmpsock, (char *) &crp, 1, 0) < 0)
	{
		if (SOCK_ERRNO == EINTR)
			/* Interrupted system call - we'll just try again */
			goto retry5;
		/* we ignore other error conditions */
	}

	/* All done */
	closesocket(tmpsock);
	SOCK_ERRNO_SET(save_errno);
	return true;

cancel_errReturn:

	/*
	 * Make sure we don't overflow the error buffer. Leave space for the \n at
	 * the end, and for the terminating zero.
	 */
	maxlen = errbufsize - strlen(errbuf) - 2;
	if (maxlen >= 0)
	{
		/*
		 * Explanation of IGNORE-BANNED:
		 * This is well-tested libpq code that we would like to preserve in its
		 * original form. The appropriate length calculation is done above.
		 */
		strncat(errbuf, SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)), /* IGNORE-BANNED */
				maxlen);
		strcat(errbuf, "\n"); /* IGNORE-BANNED */
	}
	if (tmpsock != PGINVALID_SOCKET)
		closesocket(tmpsock);
	SOCK_ERRNO_SET(save_errno);
	return false;
}

/* *INDENT-ON* */

@@ -43,8 +43,10 @@ extern void deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid,
 extern void deparse_shard_reindex_statement(ReindexStmt *origStmt, Oid distrelid,
											int64 shardid, StringInfo buffer);
 extern char * pg_get_indexclusterdef_string(Oid indexRelationId);
+extern List * pg_get_table_grants(Oid relationId);
 extern bool contain_nextval_expression_walker(Node *node, void *context);
 extern char * pg_get_replica_identity_command(Oid tableRelationId);
+extern List * pg_get_row_level_security_commands(Oid relationId);
 extern const char * RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier);
 extern char * flatten_reloptions(Oid relid);

@@ -19,6 +19,7 @@
 #include "nodes/parsenodes.h"
 #include "tcop/dest.h"
 #include "tcop/utility.h"
+#include "utils/acl.h"


 extern bool AddAllLocalTablesToMetadata;
@@ -180,6 +181,7 @@ extern Oid get_constraint_typid(Oid conoid);

 /* extension.c - forward declarations */
 extern bool IsDropCitusExtensionStmt(Node *parsetree);
+extern List * GetDependentFDWsToExtension(Oid extensionId);
 extern bool IsCreateAlterExtensionUpdateCitusStmt(Node *parsetree);
 extern bool ShouldMarkRelationDistributed(Oid relationId);
 extern void ErrorIfUnstableCreateOrAlterExtensionStmt(Node *parsetree);
@@ -244,7 +246,17 @@ extern Oid GetReferencedTableId(Oid foreignKeyId);
 extern Oid GetReferencingTableId(Oid foreignKeyId);
 extern bool RelationInvolvedInAnyNonInheritedForeignKeys(Oid relationId);


+/* foreign_data_wrapper.c - forward declarations */
+extern List * PreprocessGrantOnFDWStmt(Node *node, const char *queryString,
+									   ProcessUtilityContext processUtilityContext);
+extern Acl * GetPrivilegesForFDW(Oid FDWOid);
+
+
 /* foreign_server.c - forward declarations */
+extern List * PreprocessGrantOnForeignServerStmt(Node *node, const char *queryString,
+												 ProcessUtilityContext
+												 processUtilityContext);
 extern ObjectAddress CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok);
 extern ObjectAddress AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok);
 extern ObjectAddress RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok);
@@ -284,11 +296,16 @@ extern List * PreprocessAlterFunctionDependsStmt(Node *stmt,
												 processUtilityContext);
 extern ObjectAddress AlterFunctionDependsStmtObjectAddress(Node *stmt,
														   bool missing_ok);
+extern List * PreprocessGrantOnFunctionStmt(Node *node, const char *queryString,
+											ProcessUtilityContext processUtilityContext);
+extern List * PostprocessGrantOnFunctionStmt(Node *node, const char *queryString);


 /* grant.c - forward declarations */
 extern List * PreprocessGrantStmt(Node *node, const char *queryString,
								  ProcessUtilityContext processUtilityContext);
+extern void deparsePrivileges(StringInfo privsString, GrantStmt *grantStmt);
+extern void deparseGrantees(StringInfo granteesString, GrantStmt *grantStmt);


 /* index.c - forward declarations */
@@ -323,8 +340,8 @@ extern ObjectAddress CreateExtensionStmtObjectAddress(Node *stmt, bool missing_o
 /* policy.c - forward declarations */
 extern List * CreatePolicyCommands(Oid relationId);
-extern void ErrorIfUnsupportedPolicy(Relation relation);
+extern List * PreprocessCreatePolicyStmt(Node *node, const char *queryString,
+										 ProcessUtilityContext processUtilityContext);
-extern void ErrorIfUnsupportedPolicyExpr(Node *expr);
+extern List * PostprocessCreatePolicyStmt(Node *node, const char *queryString);
 extern List * PreprocessAlterPolicyStmt(Node *node, const char *queryString,
										 ProcessUtilityContext processUtilityContext);
 extern List * PreprocessDropPolicyStmt(Node *stmt, const char *queryString,
@@ -357,7 +374,17 @@ extern ObjectAddress AlterRoleStmtObjectAddress(Node *node,
												bool missing_ok);
 extern ObjectAddress AlterRoleSetStmtObjectAddress(Node *node,
												   bool missing_ok);
+extern List * PreprocessCreateRoleStmt(Node *stmt, const char *queryString,
+									   ProcessUtilityContext processUtilityContext);
+extern List * PreprocessDropRoleStmt(Node *stmt, const char *queryString,
+									 ProcessUtilityContext processUtilityContext);
+extern List * PreprocessGrantRoleStmt(Node *stmt, const char *queryString,
+									  ProcessUtilityContext processUtilityContext);
+extern List * PostprocessGrantRoleStmt(Node *stmt, const char *queryString);
 extern List * GenerateCreateOrAlterRoleCommand(Oid roleOid);
+ObjectAddress CreateRoleStmtObjectAddress(Node *stmt, bool missing_ok);
+extern void UnmarkRolesDistributed(List *roles);
+extern List * FilterDistributedRoles(List *roles);

 /* schema.c - forward declarations */
 extern List * PreprocessCreateSchemaStmt(Node *node, const char *queryString,
@@ -386,6 +413,9 @@ extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString,
										 ProcessUtilityContext processUtilityContext);
 extern List * PreprocessRenameSequenceStmt(Node *node, const char *queryString,
										   ProcessUtilityContext processUtilityContext);
+extern List * PreprocessGrantOnSequenceStmt(Node *node, const char *queryString,
+											ProcessUtilityContext processUtilityContext);
+extern List * PostprocessGrantOnSequenceStmt(Node *node, const char *queryString);
 extern ObjectAddress AlterSequenceStmtObjectAddress(Node *node, bool missing_ok);
 extern ObjectAddress AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok);
 extern ObjectAddress AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok);

@@ -149,6 +149,9 @@ typedef struct CitusCopyDestReceiver
 } CitusCopyDestReceiver;


+/* GUCs */
+extern bool SkipJsonbValidationInCopy;
+
 /* managed via GUC, the default is 4MB */
 extern int CopySwitchOverThresholdBytes;

@@ -41,6 +41,7 @@ extern PropSetCmdBehavior PropagateSetCommands;
 extern bool EnableDDLPropagation;
 extern int CreateObjectPropagationMode;
 extern bool EnableCreateTypePropagation;
+extern bool EnableCreateRolePropagation;
 extern bool EnableAlterRolePropagation;
 extern bool EnableAlterRoleSetPropagation;
 extern bool EnableAlterDatabaseOwner;
|
@ -29,6 +29,10 @@
|
|||
/* used for libpq commands that get an error buffer. Postgres docs recommend 256. */
|
||||
#define ERROR_BUFFER_SIZE 256
|
||||
|
||||
/* values with special behavior for authinfo lookup */
|
||||
#define WILDCARD_NODE_ID 0
|
||||
#define LOCALHOST_NODE_ID -1
|
||||
|
||||
/* application name used for internal connections in Citus */
|
||||
#define CITUS_APPLICATION_NAME_PREFIX "citus_internal gpid="
|
||||
|
||||
|
@ -250,6 +254,7 @@ extern struct MemoryContextData *ConnectionContext;
|
|||
extern void AfterXactConnectionHandling(bool isCommit);
|
||||
extern void InitializeConnectionManagement(void);
|
||||
|
||||
extern char * GetAuthinfo(char *hostname, int32 port, char *user);
|
||||
extern void InitConnParams(void);
|
||||
extern void ResetConnParams(void);
|
||||
extern void InvalidateConnParamsHashEntries(void);
|
||||
|
|
|

@@ -221,6 +221,7 @@ extern List * GetPostLoadTableCreationCommands(Oid relationId, bool includeIndex
 extern List * GetPreLoadTableCreationCommands(Oid relationId, IncludeSequenceDefaults
											  includeSequenceDefaults,
											  char *accessMethod);
+extern List * GetTableRowLevelSecurityCommands(Oid relationId);
 extern List * GetTableIndexAndConstraintCommands(Oid relationId, int indexFlags);
 extern List * GetTableIndexAndConstraintCommandsExcludingReplicaIdentity(Oid relationId,
																		 int indexFlags);
@@ -67,12 +67,16 @@ extern void QualifyAlterDomainOwnerStmt(Node *node);
extern void QualifyRenameDomainStmt(Node *node);
extern void QualifyAlterDomainSchemaStmt(Node *node);

/* forward declarations for deparse_foreign_data_wrapper_stmts.c */
extern char * DeparseGrantOnFDWStmt(Node *node);

/* forward declarations for deparse_foreign_server_stmts.c */
extern char * DeparseCreateForeignServerStmt(Node *node);
extern char * DeparseAlterForeignServerStmt(Node *node);
extern char * DeparseAlterForeignServerRenameStmt(Node *node);
extern char * DeparseAlterForeignServerOwnerStmt(Node *node);
extern char * DeparseDropForeignServerStmt(Node *node);
extern char * DeparseGrantOnForeignServerStmt(Node *node);

/* forward declarations for deparse_table_stmts.c */
extern char * DeparseAlterTableSchemaStmt(Node *stmt);

@@ -102,6 +106,9 @@ extern char * DeparseDropSchemaStmt(Node *node);
extern char * DeparseGrantOnSchemaStmt(Node *stmt);
extern char * DeparseAlterSchemaRenameStmt(Node *stmt);

extern void AppendGrantPrivileges(StringInfo buf, GrantStmt *stmt);
extern void AppendGrantGrantees(StringInfo buf, GrantStmt *stmt);

/* forward declarations for deparse_statistics_stmts.c */
extern char * DeparseCreateStatisticsStmt(Node *node);
extern char * DeparseDropStatisticsStmt(List *nameList, bool ifExists);

@@ -156,6 +163,8 @@ extern char * DeparseAlterViewSchemaStmt(Node *node);


/* forward declarations for deparse_function_stmts.c */
extern bool isFunction(ObjectType objectType);

extern char * DeparseDropFunctionStmt(Node *stmt);
extern char * DeparseAlterFunctionStmt(Node *stmt);

@@ -164,6 +173,8 @@ extern char * DeparseAlterFunctionSchemaStmt(Node *stmt);
extern char * DeparseAlterFunctionOwnerStmt(Node *stmt);
extern char * DeparseAlterFunctionDependsStmt(Node *stmt);

extern char * DeparseGrantOnFunctionStmt(Node *node);

extern void AppendVariableSet(StringInfo buf, VariableSetStmt *setStmt);

extern void QualifyAlterFunctionStmt(Node *stmt);

@@ -179,6 +190,9 @@ extern char * DeparseAlterRoleSetStmt(Node *stmt);
extern List * MakeSetStatementArguments(char *configurationName,
                                        char *configurationValue);
extern void QualifyAlterRoleSetStmt(Node *stmt);
extern char * DeparseCreateRoleStmt(Node *stmt);
extern char * DeparseDropRoleStmt(Node *stmt);
extern char * DeparseGrantRoleStmt(Node *stmt);

/* forward declarations for deparse_extension_stmts.c */
extern DefElem * GetExtensionOption(List *extensionOptions,

@@ -210,11 +224,13 @@ extern char * DeparseDropSequenceStmt(Node *node);
extern char * DeparseRenameSequenceStmt(Node *node);
extern char * DeparseAlterSequenceSchemaStmt(Node *node);
extern char * DeparseAlterSequenceOwnerStmt(Node *node);
extern char * DeparseGrantOnSequenceStmt(Node *node);

/* forward declarations for qualify_sequence_stmt.c */
extern void QualifyRenameSequenceStmt(Node *node);
extern void QualifyDropSequenceStmt(Node *node);
extern void QualifyAlterSequenceSchemaStmt(Node *node);
extern void QualifyAlterSequenceOwnerStmt(Node *node);
extern void QualifyGrantOnSequenceStmt(Node *node);

#endif /* CITUS_DEPARSER_H */

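As a hedged example of what this deparser round-trips: DeparseGrantOnSequenceStmt should turn a parsed GRANT of this shape back into SQL text for propagation to workers (sequence and role names are illustrative):

    GRANT USAGE, SELECT ON SEQUENCE user_id_seq TO app_user;
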
@@ -10,7 +10,6 @@
#ifndef ERRORMESSAGE_H
#define ERRORMESSAGE_H


#include "c.h"
#include "distributed/citus_nodes.h"

@@ -23,6 +23,7 @@ extern char * HashLogMessage(const char *text);
	(log_min_messages <= ereport_loglevel ? HashLogMessage(text) : text)

#undef ereport

#define ereport(elevel, rest) \
	do { \
		int ereport_loglevel = elevel; \

@@ -35,4 +35,6 @@ extern List * GetDistributedObjectAddressList(void);
extern RoleSpec * GetRoleSpecObjectForUser(Oid roleOid);
extern void UpdateDistributedObjectColocationId(uint32 oldColocationId, uint32
                                                newColocationId);
extern List * DistributedFunctionList(void);
extern List * DistributedSequenceList(void);
#endif /* CITUS_METADATA_DISTOBJECT_H */

@@ -256,6 +256,8 @@ extern Oid CitusReadIntermediateResultFuncId(void);
Oid CitusReadIntermediateResultArrayFuncId(void);
extern Oid CitusExtraDataContainerFuncId(void);
extern Oid CitusAnyValueFunctionId(void);
extern Oid CitusTextSendAsJsonbFunctionId(void);
extern Oid TextOutFunctionId(void);
extern Oid PgTableVisibleFuncId(void);
extern Oid CitusTableVisibleFuncId(void);
extern Oid RelationIsAKnownShardFuncId(void);

@@ -275,4 +277,8 @@ extern char * CitusExtensionOwnerName(void);
extern char * CurrentUserName(void);
extern const char * CurrentDatabaseName(void);

/* connection-related functions */
extern char * GetAuthinfoViaCatalog(const char *roleName, int64 nodeId);
extern char * GetPoolinfoViaCatalog(int64 nodeId);

#endif /* METADATA_CACHE_H */

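GetPoolinfoViaCatalog reads pg_dist_poolinfo (feature 9 in the PR description), which maps a node id to replacement connection parameters, e.g. a connection pooler sitting between coordinator and worker. A hedged sketch; the pooler port is illustrative:

    INSERT INTO pg_dist_poolinfo (nodeid, poolinfo)
    SELECT nodeid, 'host=localhost port=6432'
    FROM pg_dist_node
    WHERE nodeport = 57637;
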
@@ -38,6 +38,7 @@ extern char * LocalGroupIdUpdateCommand(int32 groupId);
extern bool ShouldSyncUserCommandForObject(ObjectAddress objectAddress);
extern bool ShouldSyncTableMetadata(Oid relationId);
extern bool ShouldSyncTableMetadataViaCatalog(Oid relationId);
extern bool ShouldSyncSequenceMetadata(Oid relationId);
extern List * NodeMetadataCreateCommands(void);
extern List * DistributedObjectMetadataSyncCommandList(void);
extern List * ColocationGroupCreateCommandList(void);

@@ -53,12 +54,18 @@ extern char * DistributionDeleteCommand(const char *schemaName,
extern char * TableOwnerResetCommand(Oid distributedRelationId);
extern char * NodeListInsertCommand(List *workerNodeList);
extern List * ShardListInsertCommand(List *shardIntervalList);
extern List * ShardDeleteCommandList(ShardInterval *shardInterval);
extern char * NodeDeleteCommand(uint32 nodeId);
extern char * NodeStateUpdateCommand(uint32 nodeId, bool isActive);
extern char * ShouldHaveShardsUpdateCommand(uint32 nodeId, bool shouldHaveShards);
extern char * ColocationIdUpdateCommand(Oid relationId, uint32 colocationId);
extern char * CreateSchemaDDLCommand(Oid schemaId);
extern List * GrantOnSchemaDDLCommands(Oid schemaId);
extern List * GrantOnFunctionDDLCommands(Oid functionOid);
extern List * GrantOnForeignServerDDLCommands(Oid serverId);
extern List * GenerateGrantOnForeignServerQueriesFromAclItem(Oid serverId,
                                                             AclItem *aclItem);
extern List * GenerateGrantOnFDWQueriesFromAclItem(Oid serverId, AclItem *aclItem);
extern char * PlacementUpsertCommand(uint64 shardId, uint64 placementId, int shardState,
                                     uint64 shardLength, int32 groupId);
extern TableDDLCommand * TruncateTriggerCreateCommand(Oid relationId);

@@ -270,6 +270,7 @@ extern bool ShouldPropagateObject(const ObjectAddress *address);
extern List * ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort);

/* Remaining metadata utility functions */
extern Oid TableOwnerOid(Oid relationId);
extern char * TableOwner(Oid relationId);
extern void EnsureTablePermissions(Oid relationId, AclMode mode);
extern void EnsureTableOwner(Oid relationId);

@@ -0,0 +1,33 @@
/*-------------------------------------------------------------------------
 *
 * multi_logical_replication.h
 *
 * Declarations for public functions and variables used in logical replication
 * on the distributed tables while moving shards.
 *
 * Copyright (c) 2017, Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */
#ifndef MULTI_LOGICAL_REPLICATION_H_
#define MULTI_LOGICAL_REPLICATION_H_


#include "nodes/pg_list.h"


/* Config variables managed via guc.c */
extern int LogicalReplicationTimeout;

extern bool PlacementMovedUsingLogicalReplicationInTX;


extern void LogicallyReplicateShards(List *shardList, char *sourceNodeName,
                                     int sourceNodePort, char *targetNodeName,
                                     int targetNodePort);

#define SHARD_MOVE_PUBLICATION_PREFIX "citus_shard_move_publication_"
#define SHARD_MOVE_SUBSCRIPTION_PREFIX "citus_shard_move_subscription_"
#define SHARD_MOVE_SUBSCRIPTION_ROLE_PREFIX "citus_shard_move_subscription_role_"

#endif /* MULTI_LOGICAL_REPLICATION_H_ */
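A hedged usage sketch for the non-blocking shard moves this header backs: LogicalReplicationTimeout surfaces as citus.logical_replication_timeout (named in the PR description), and a logical move drives the publication/subscription pair named with the prefixes above. The shard id and timeout value here are illustrative; master_move_shard_placement is the UDF exercised by the tests later in this diff.

    SET citus.logical_replication_timeout TO '2h';
    SELECT master_move_shard_placement(102008,
        'localhost', 57637, 'localhost', 57638,
        shard_transfer_mode => 'force_logical');
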
@@ -15,5 +15,16 @@
extern void InitializeCitusQueryStats(void);
extern void CitusQueryStatsExecutorsEntry(uint64 queryId, MultiExecutorType executorType,
                                          char *partitionKey);
extern void CitusQueryStatsSynchronizeEntries(void);
extern int StatStatementsPurgeInterval;
extern int StatStatementsMax;
extern int StatStatementsTrack;


typedef enum
{
	STAT_STATEMENTS_TRACK_NONE = 0,
	STAT_STATEMENTS_TRACK_ALL = 1
} StatStatementsTrackType;

#endif /* QUERY_STATS_H */

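The three externs surface as citus.stat_statements_max, citus.stat_statements_purge_interval, and citus.stat_statements_track (per the PR description; tracking is disabled by default). A hedged sketch of turning tracking on and reading the view:

    SET citus.stat_statements_track TO 'all';  -- maps to STAT_STATEMENTS_TRACK_ALL
    SELECT queryid, query, executor, partition_key, calls
    FROM citus_stat_statements
    ORDER BY calls DESC
    LIMIT 5;
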
@@ -40,7 +40,8 @@ typedef enum AdvisoryLocktagClass
	ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION = 7,
	ADV_LOCKTAG_CLASS_CITUS_COLOCATED_SHARDS_METADATA = 8,
	ADV_LOCKTAG_CLASS_CITUS_OPERATIONS = 9,
	ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP = 10
	ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP = 10,
	ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12
} AdvisoryLocktagClass;

/* CitusOperations has constants for citus operations */

@@ -110,6 +111,16 @@ typedef enum CitusOperations
						 (uint32) 0, \
						 ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP)

/* reuse advisory lock, but with different, unused field 4 (12)
 * Also it has the database hardcoded to MyDatabaseId, to ensure the locks
 * are local to each database */
#define SET_LOCKTAG_LOGICAL_REPLICATION(tag) \
	SET_LOCKTAG_ADVISORY(tag, \
						 MyDatabaseId, \
						 (uint32) 0, \
						 (uint32) 0, \
						 ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION)

/*
 * DistLockConfigs are used to configure the locking behaviour of AcquireDistributedLockOnRelations
 */

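Locks taken through SET_LOCKTAG_LOGICAL_REPLICATION are ordinary advisory locks, so they can be observed from SQL; a hedged sketch (for advisory locks, pg_locks reports lock field 1 in database and field 4, the locktag class, in objsubid):

    SELECT database, classid, objid, objsubid, granted
    FROM pg_locks
    WHERE locktype = 'advisory';
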
@@ -156,6 +167,7 @@ extern void UnlockColocationId(int colocationId, LOCKMODE lockMode);

/* Lock multiple shards for safe modification */
extern void LockShardListMetadata(List *shardIntervalList, LOCKMODE lockMode);
extern void LockShardListMetadataOnWorkers(LOCKMODE lockmode, List *shardIntervalList);
extern void LockShardsInPlacementListMetadata(List *shardPlacementList,
                                              LOCKMODE lockMode);

@@ -4,7 +4,9 @@
 *
 * Type and function declarations for the shard rebalancer tool.
 *
 * Copyright (c), Citus Data, Inc.
 * Copyright (c) 2016, Citus Data, Inc.
 *
 * $Id$
 *
 *-------------------------------------------------------------------------
 */

@@ -15,4 +15,7 @@

extern char * ConvertIntToString(int val);

#define StringStartsWith(str, prefix) \
	(strncmp(str, prefix, strlen(prefix)) == 0)

#endif /* CITUS_STRING_UTILS_H */

@@ -120,6 +120,7 @@ RelationGetSmgr(Relation rel)
#define PQ_LARGE_MESSAGE_LIMIT 0
#define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(b)
#define pull_varnos_compat(a, b) pull_varnos(b)
#define ROLE_PG_READ_ALL_STATS DEFAULT_ROLE_READ_ALL_STATS
#endif

#if PG_VERSION_NUM >= PG_VERSION_13

@@ -16,9 +16,9 @@ MAKEFILE_DIR := $(dir $(realpath $(firstword $(MAKEFILE_LIST))))
export PATH := $(MAKEFILE_DIR)/bin:$(PATH)
export PG_REGRESS_DIFF_OPTS = -dU10 -w
# Use lower isolation test timeout, the 5 minute default is waaay too long for
# us so we use 5 seconds instead. We should detect blockages very quickly and
# us so we use 20 seconds instead. We should detect blockages very quickly and
# the queries we run are also very fast.
export PGISOLATIONTIMEOUT = 5
export PGISOLATIONTIMEOUT = 20

##
## Citus regression support

@@ -41,9 +41,11 @@ output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $

# have make check actually run all tests, but keep check-full as an
# intermediate, for muscle memory backward compatibility.
check: check-full
check: check-full check-enterprise-full
# check-full triggers all tests that ought to be run routinely
check-full: check-multi check-multi-mx check-multi-1 check-operations check-follower-cluster check-isolation check-failure
# check-enterprise-full triggers all enterprise specific tests
check-enterprise-full: check-enterprise check-enterprise-isolation check-enterprise-failure


ISOLATION_DEPDIR=.deps/isolation

@@ -130,6 +132,9 @@ check-multi: all
	$(pg_regress_multi_check) --load-extension=citus \
	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS)

check-enterprise: all
	$(pg_regress_multi_check) --load-extension=citus \
	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/enterprise_schedule $(EXTRA_TESTS)

check-multi-1: all
	$(pg_regress_multi_check) --load-extension=citus \
	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_1_schedule $(EXTRA_TESTS)

@@ -161,6 +166,24 @@ check-isolation: all $(isolation_test_files)
	$(pg_regress_multi_check) --load-extension=citus --isolationtester \
	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS)

check-enterprise-isolation: all $(isolation_test_files)
	$(pg_regress_multi_check) --load-extension=citus --isolationtester \
	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/enterprise_isolation_schedule $(EXTRA_TESTS)

# we have separate targets for logical replication tests because they take very long to complete;
# this increases parallelism a lot without sacrificing any coverage.
check-enterprise-isolation-logicalrep-1: all $(isolation_test_files)
	$(pg_regress_multi_check) --load-extension=citus --isolationtester \
	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/enterprise_isolation_logicalrep_1_schedule $(EXTRA_TESTS)

check-enterprise-isolation-logicalrep-2: all $(isolation_test_files)
	$(pg_regress_multi_check) --load-extension=citus --isolationtester \
	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/enterprise_isolation_logicalrep_2_schedule $(EXTRA_TESTS)

check-enterprise-isolation-logicalrep-3: all $(isolation_test_files)
	$(pg_regress_multi_check) --load-extension=citus --isolationtester \
	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/enterprise_isolation_logicalrep_3_schedule $(EXTRA_TESTS)

check-isolation-base: all $(isolation_test_files)
	$(pg_regress_multi_check) --load-extension=citus --isolationtester \
	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/base_isolation_schedule $(EXTRA_TESTS)

@@ -201,6 +224,10 @@ check-failure-base: all
	$(pg_regress_multi_check) --load-extension=citus --mitmproxy \
	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/failure_base_schedule $(EXTRA_TESTS)

check-enterprise-failure: all
	$(pg_regress_multi_check) --load-extension=citus --mitmproxy \
	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/enterprise_failure_schedule $(EXTRA_TESTS)

check-pg-upgrade:
	$(pg_upgrade_check) --old-bindir=$(old-bindir) --new-bindir=$(new-bindir) --pgxsdir=$(pgxsdir)

@@ -95,6 +95,7 @@ s/ *$//g
# pg12 changes
s/Partitioned table "/Table "/g
s/\) TABLESPACE pg_default$/\)/g
s/invalid input syntax for type bigint:/invalid input syntax for integer:/g
s/invalid input syntax for type /invalid input syntax for /g
s/_id_ref_id_fkey/_id_fkey/g
s/_ref_id_id_fkey_/_ref_id_fkey_/g

@@ -250,6 +251,8 @@ s/ERROR: parallel workers for vacuum must/ERROR: parallel vacuum degree must/g
s/(CONTEXT: PL\/pgSQL function .* line )([0-9]+)/\1XX/g
s/^(PL\/pgSQL function .* line) [0-9]+ (.*)/\1 XX \2/g

# normalize a test difference in multi_move_mx
s/ connection to server at "\w+" \(127\.0\.0\.1\), port [0-9]+ failed://g
# can be removed after dropping PG13 support
s/ERROR: parallel workers for vacuum must be between/ERROR: parallel vacuum degree must be between/g
s/ERROR: fake_fetch_row_version not implemented/ERROR: fake_tuple_update not implemented/g

@@ -265,6 +268,8 @@ s/TRIM\(BOTH FROM value\)/btrim\(value\)/g
s/pg14\.idx.*/pg14\.xxxxx/g

s/CREATE TABLESPACE test_tablespace LOCATION.*/CREATE TABLESPACE test_tablespace LOCATION XXXX/g
/DETAIL: Subqueries are not supported in policies on distributed tables/d
s/ERROR: unexpected non-SELECT command in SubLink/ERROR: cannot create policy/g

# columnar log for var correlation
s/(.*absolute correlation \()([0,1]\.[0-9]+)(\) of var attribute [0-9]+ is smaller than.*)/\1X\.YZ\3/g

@@ -0,0 +1,12 @@
# import this file (from psql you can use \i) to use mitmproxy manually
test: failure_test_helpers

# this should only be run by pg_regress_multi, you don't need it
test: failure_setup
test: multi_test_helpers multi_test_helpers_superuser

test: failure_online_move_shard_placement
test: failure_on_create_subscription
test: failure_offline_move_shard_placement
test: failure_tenant_isolation
test: check_mx

@@ -0,0 +1,7 @@
# tests that change node metadata should precede
# isolation_cluster_management such that tests
# that come later can be parallelized
test: isolation_cluster_management

test: isolation_logical_replication_single_shard_commands
test: isolation_logical_replication_multi_shard_commands

@@ -0,0 +1,7 @@
# tests that change node metadata should precede
# isolation_cluster_management such that tests
# that come later can be parallelized
test: isolation_cluster_management

test: isolation_logical_replication_single_shard_commands_on_mx
test: isolation_logical_replication_multi_shard_commands_on_mx

@@ -0,0 +1,6 @@
# tests that change node metadata should precede
# isolation_cluster_management such that tests
# that come later can be parallelized
test: isolation_cluster_management

test: isolation_logical_replication_with_partitioning

@@ -0,0 +1,13 @@
# tests that change node metadata should precede
# isolation_cluster_management such that tests
# that come later can be parallelized
test: isolation_cluster_management

test: isolation_move_placement_vs_move_placement
test: isolation_move_placement_vs_modification
test: isolation_move_placement_vs_modification_fk
test: isolation_tenant_isolation_with_fkey_to_reference
test: isolation_ref2ref_foreign_keys_enterprise
test: isolation_pg_send_cancellation
test: isolation_shard_move_vs_start_metadata_sync
test: isolation_tenant_isolation

@@ -0,0 +1,38 @@
# should come before multi_cluster_management
# as it touches node metadata
test: single_node_enterprise

test: multi_test_helpers multi_test_helpers_superuser
test: multi_cluster_management
test: multi_test_catalog_views
test: multi_data_types
test: multi_tenant_isolation

# --------
# Tests that require adding coordinator to pg_dist_node
# --------
test: add_coordinator

test: citus_local_tables_ent

test: remove_coordinator
# --------

test: logical_replication
test: multi_create_table
test: multi_create_table_superuser
test: multi_create_role_dependency
test: multi_create_users
test: multi_multiuser_master_protocol
test: multi_multiuser_load_data
test: multi_multiuser_basic_queries
test: multi_multiuser_grant
test: multi_multiuser_copy
test: multi_multiuser_auth
test: multi_poolinfo_usage
test: multi_alter_table_row_level_security
test: multi_alter_table_row_level_security_escape
test: stat_statements
test: shard_move_constraints
test: logical_rep_consistency
test: check_mx

@@ -5,6 +5,7 @@
/multi_agg_distinct.out
/multi_agg_type_conversion.out
/multi_alter_table_statements.out
/multi_alter_table_statements_0.out
/multi_behavioral_analytics_create_table.out
/multi_behavioral_analytics_create_table_superuser.out
/multi_complex_count_distinct.out

@@ -12,6 +13,8 @@
/multi_load_data.out
/multi_load_data_superuser.out
/multi_load_more_data.out
/multi_multiuser_load_data.out
/multi_multiuser_load_data_0.out
/multi_mx_copy_data.out
/multi_outer_join.out
/multi_outer_join_reference.out

@@ -645,15 +645,6 @@ select array_collect_sort(val) from aggdata;

-- Test multiuser scenario
create user notsuper;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
select run_command_on_workers($$create user notsuper$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"CREATE ROLE")
 (localhost,57638,t,"CREATE ROLE")
(2 rows)

grant all on schema aggregate_support to notsuper;
grant all on all tables in schema aggregate_support to notsuper;
select 1 from run_command_on_workers($$

@@ -1,27 +1,11 @@
CREATE SCHEMA alter_database_owner;
SET search_path TO alter_database_owner, public;
CREATE USER database_owner_1;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER database_owner_2;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
SELECT run_command_on_workers('CREATE USER database_owner_1');
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"CREATE ROLE")
 (localhost,57638,t,"CREATE ROLE")
(2 rows)

SELECT run_command_on_workers('CREATE USER database_owner_2');
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"CREATE ROLE")
 (localhost,57638,t,"CREATE ROLE")
(2 rows)

-- make sure the propagation of ALTER DATABASE ... OWNER TO ... is on
SET citus.enable_alter_database_owner TO on;
-- in enterprise we default to on, to verify this we don't set it explicitly to on and run
-- the rest of the tests exactly as on community
-- SET citus.enable_alter_database_owner TO on;
-- list the owners of the current database on all nodes
SELECT run_command_on_workers($$
SELECT u.rolname

@@ -149,6 +133,48 @@ $$);
 (localhost,57638,t,database_owner_1)
(2 rows)

-- verify that a user not present on the newly added node will automatically be propagated
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------------------------------------------------------

(1 row)

CREATE USER database_owner_3;
ALTER DATABASE regression OWNER TO database_owner_3;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
        1
(1 row)

-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
 rolname
---------------------------------------------------------------------
 database_owner_3
(1 row)

SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,database_owner_3)
 (localhost,57638,t,database_owner_3)
(2 rows)

-- reset back to original owner to not break subsequent tests and remove enterprise test user
ALTER DATABASE regression OWNER TO database_owner_1;
DROP USER database_owner_3;
CREATE TABLE t (a int PRIMARY KEY);
SELECT create_distributed_table('t', 'a');
 create_distributed_table

@@ -283,19 +309,5 @@ $$);

DROP USER database_owner_1;
DROP USER database_owner_2;
SELECT run_command_on_workers('DROP USER database_owner_1');
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"DROP ROLE")
 (localhost,57638,t,"DROP ROLE")
(2 rows)

SELECT run_command_on_workers('DROP USER database_owner_2');
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"DROP ROLE")
 (localhost,57638,t,"DROP ROLE")
(2 rows)

SET client_min_messages TO warning;
DROP SCHEMA alter_database_owner CASCADE;

@@ -629,6 +629,74 @@ NOTICE: renaming the new table to alter_distributed_table.par_table

(1 row)

-- test changing shard count into a default colocation group with shard split
-- ensure there is no colocation group with 23 shards
SELECT count(*) FROM pg_dist_colocation WHERE shardcount = 23;
 count
---------------------------------------------------------------------
     0
(1 row)

SET citus.shard_count TO 23;
CREATE TABLE shard_split_table (a int, b int);
SELECT create_distributed_table ('shard_split_table', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT 1 FROM isolate_tenant_to_new_shard('shard_split_table', 5);
 ?column?
---------------------------------------------------------------------
        1
(1 row)

-- show the difference in pg_dist_colocation and citus_tables shard counts
SELECT
	(
		SELECT shardcount FROM pg_dist_colocation WHERE colocationid IN
		(
			SELECT colocation_id FROM public.citus_tables WHERE table_name = 'shard_split_table'::regclass
		)
	) AS "pg_dist_colocation",
	(SELECT shard_count FROM public.citus_tables WHERE table_name = 'shard_split_table'::regclass) AS "citus_tables";
 pg_dist_colocation | citus_tables
---------------------------------------------------------------------
                 23 |           25
(1 row)

SET citus.shard_count TO 4;
-- distribute another table and then change shard count to 23
CREATE TABLE shard_split_table_2 (a int, b int);
SELECT create_distributed_table ('shard_split_table_2', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table ('shard_split_table_2', shard_count:=23, cascade_to_colocated:=false);
NOTICE: creating a new table for alter_distributed_table.shard_split_table_2
NOTICE: moving the data of alter_distributed_table.shard_split_table_2
NOTICE: dropping the old alter_distributed_table.shard_split_table_2
NOTICE: renaming the new table to alter_distributed_table.shard_split_table_2
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT a.colocation_id = b.colocation_id FROM public.citus_tables a, public.citus_tables b
WHERE a.table_name = 'shard_split_table'::regclass AND b.table_name = 'shard_split_table_2'::regclass;
 ?column?
---------------------------------------------------------------------
 f
(1 row)

SELECT shard_count FROM public.citus_tables WHERE table_name = 'shard_split_table_2'::regclass;
 shard_count
---------------------------------------------------------------------
          23
(1 row)

-- test messages
-- test nothing to change
SELECT alter_distributed_table('dist_table');

@@ -1193,17 +1193,8 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
(4 rows)

-- test DROP OWNED BY
-- Citus does not support "ALTER TABLE OWNER TO" commands. Also, not to deal with tests output
-- difference between community and enterprise, let's disable enable_ddl_propagation here.
SET citus.enable_ddl_propagation to OFF;
CREATE USER another_user;
SELECT run_command_on_workers('CREATE USER another_user');
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"CREATE ROLE")
 (localhost,57638,t,"CREATE ROLE")
(2 rows)

-- Citus does not support "ALTER TABLE OWNER TO" commands.
ALTER TABLE reference_table_1 OWNER TO another_user;
SELECT run_command_on_placements('reference_table_1', 'ALTER TABLE %s OWNER TO another_user');
 run_command_on_placements

@@ -1213,7 +1204,6 @@ SELECT run_command_on_placements('reference_table_1', 'ALTER TABLE %s OWNER TO a
 (localhost,57638,1810093,t,"ALTER TABLE")
(3 rows)

SET citus.enable_ddl_propagation to ON;
BEGIN;
DROP OWNED BY another_user cascade;
SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid IN ( 'citus_local_table_1'::regclass, 'citus_local_table_2'::regclass, 'citus_local_table_3'::regclass) ORDER BY logicalrelid;

@@ -170,17 +170,19 @@ NOTICE: executing the command locally: SELECT value FROM citus_local_tables_tes
(1 row)

ROLLBACK;
-- show that we do not support policies in citus community --
-- show that we support policies in citus enterprise --
BEGIN;
CREATE TABLE citus_local_table_3 (table_user text);
ALTER TABLE citus_local_table_3 ENABLE ROW LEVEL SECURITY;
CREATE ROLE table_users;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
CREATE POLICY table_policy ON citus_local_table_3 TO table_users
    USING (table_user = current_user);
-- this should error out
SELECT citus_add_local_table_to_metadata('citus_local_table_3');
ERROR: policies on distributed tables are only supported in Citus Enterprise
 citus_add_local_table_to_metadata
---------------------------------------------------------------------

(1 row)

ROLLBACK;
-- show that we properly handle sequences on citus local tables --
BEGIN;

@@ -291,13 +293,13 @@ ERROR: relation "citus_local_table_1" is a local table
SELECT get_shard_id_for_distribution_column('citus_local_table_1', 'not_checking_this_arg_for_non_dist_tables');
 get_shard_id_for_distribution_column
---------------------------------------------------------------------
                              1504013
                              1504014
(1 row)

SELECT get_shard_id_for_distribution_column('citus_local_table_1');
 get_shard_id_for_distribution_column
---------------------------------------------------------------------
                              1504013
                              1504014
(1 row)

-- master_copy_shard_placement is not supported

@@ -309,7 +311,7 @@ BEGIN;
SELECT undistribute_table('citus_local_table_1');
NOTICE: creating a new table for citus_local_tables_test_schema.citus_local_table_1
NOTICE: moving the data of citus_local_tables_test_schema.citus_local_table_1
NOTICE: executing the command locally: SELECT a FROM citus_local_tables_test_schema.citus_local_table_1_1504013 citus_local_table_1
NOTICE: executing the command locally: SELECT a FROM citus_local_tables_test_schema.citus_local_table_1_1504014 citus_local_table_1
NOTICE: dropping the old citus_local_tables_test_schema.citus_local_table_1
NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.citus_local_table_1_xxxxx CASCADE
NOTICE: renaming the new table to citus_local_tables_test_schema.citus_local_table_1

@@ -352,7 +354,7 @@ SELECT citus_add_local_table_to_metadata('"CiTUS!LocalTables"."LocalTabLE.1!?!"'
-- drop the table before creating it when the search path is set
SET search_path to "CiTUS!LocalTables" ;
DROP TABLE "LocalTabLE.1!?!";
NOTICE: executing the command locally: DROP TABLE IF EXISTS "CiTUS!LocalTables"."LocalTabLE.1!?!_1504021" CASCADE
NOTICE: executing the command locally: DROP TABLE IF EXISTS "CiTUS!LocalTables"."LocalTabLE.1!?!_1504022" CASCADE
-- have a custom type in the local table
CREATE TYPE local_type AS (key int, value jsonb);
-- create btree_gist for GiST index

@@ -402,10 +404,10 @@ SELECT citus_add_local_table_to_metadata('"LocalTabLE.1!?!9012345678901234567890
-- create some objects after citus_add_local_table_to_metadata
CREATE INDEX "my!Index2" ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id) WITH ( fillfactor = 90 ) WHERE id < 20;
NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456"
NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504022" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) WITH (fillfactor = '90' ) WHERE (id < 20)
NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504023" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504023" USING btree (id ) WITH (fillfactor = '90' ) WHERE (id < 20)
CREATE UNIQUE INDEX uniqueIndex2 ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id);
NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456"
NOTICE: executing the command locally: CREATE UNIQUE INDEX uniqueindex2_1504022 ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id )
NOTICE: executing the command locally: CREATE UNIQUE INDEX uniqueindex2_1504023 ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504023" USING btree (id )
---------------------------------------------------------------------
---- utility command execution ----
---------------------------------------------------------------------

@@ -498,7 +500,7 @@ CREATE TABLE local_table_4 (
    b int references local_table_4(a));
NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (xxxxx, 'citus_local_tables_test_schema', xxxxx, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_tables_test_schema.local_table_4 ADD CONSTRAINT local_table_4_a_fkey FOREIGN KEY (a) REFERENCES citus_local_tables_test_schema.citus_local_table_1(a)')
ALTER TABLE citus_local_table_1 ADD COLUMN b int NOT NULL;
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504013, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD COLUMN b int NOT NULL;')
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504014, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD COLUMN b int NOT NULL;')
-- show that we added column with NOT NULL
SELECT table_name, column_name, is_nullable
FROM INFORMATION_SCHEMA.COLUMNS

@@ -507,11 +509,11 @@ ORDER BY 1;
 table_name | column_name | is_nullable
---------------------------------------------------------------------
 citus_local_table_1 | b | NO
 citus_local_table_1_1504013 | b | NO
 citus_local_table_1_1504014 | b | NO
(2 rows)

ALTER TABLE citus_local_table_1 ADD CONSTRAINT unique_a_b UNIQUE (a, b);
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504013, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD CONSTRAINT unique_a_b UNIQUE (a, b);')
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504014, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD CONSTRAINT unique_a_b UNIQUE (a, b);')
-- show that we defined unique constraints
SELECT conrelid::regclass, conname, conkey
FROM pg_constraint

@@ -519,12 +521,12 @@ WHERE conrelid::regclass::text LIKE 'citus_local_table_1%' AND contype = 'u'
ORDER BY 1;
 conrelid | conname | conkey
---------------------------------------------------------------------
 citus_local_table_1_1504013 | unique_a_b_1504013 | {1,2}
 citus_local_table_1_1504014 | unique_a_b_1504014 | {1,2}
 citus_local_table_1 | unique_a_b | {1,2}
(2 rows)

CREATE UNIQUE INDEX citus_local_table_1_idx ON citus_local_table_1(b);
NOTICE: executing the command locally: CREATE UNIQUE INDEX citus_local_table_1_idx_1504013 ON citus_local_tables_test_schema.citus_local_table_1_1504013 USING btree (b )
NOTICE: executing the command locally: CREATE UNIQUE INDEX citus_local_table_1_idx_1504014 ON citus_local_tables_test_schema.citus_local_table_1_1504014 USING btree (b )
-- show that we successfully defined the unique index
SELECT indexrelid::regclass, indrelid::regclass, indkey
FROM pg_index

@@ -533,7 +535,7 @@ ORDER BY 1;
 indexrelid | indrelid | indkey
---------------------------------------------------------------------
 unique_a_b | citus_local_table_1 | 1 2
 unique_a_b_1504013 | citus_local_table_1_1504013 | 1 2
 unique_a_b_1504014 | citus_local_table_1_1504014 | 1 2
(2 rows)

-- test creating citus local table with an index from non-default schema

@@ -541,7 +543,7 @@ CREATE SCHEMA "test_\'index_schema";
CREATE TABLE "test_\'index_schema".testindex (a int, b int);
CREATE INDEX ind ON "test_\'index_schema".testindex (a);
ALTER TABLE "test_\'index_schema".testindex ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a);
NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1504026, E'test_\\''index_schema', 1504023, 'citus_local_tables_test_schema', E'ALTER TABLE "test_\\''index_schema".testindex ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a);')
NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1504027, E'test_\\''index_schema', 1504024, 'citus_local_tables_test_schema', E'ALTER TABLE "test_\\''index_schema".testindex ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a);')
SELECT COUNT(*)=2 FROM pg_indexes WHERE tablename LIKE 'testindex%' AND indexname LIKE 'ind%';
 ?column?
---------------------------------------------------------------------

@@ -566,7 +568,7 @@ DROP TABLE citus_local_table_1, citus_local_table_2, distributed_table, local_ta
NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.local_table_4_xxxxx CASCADE
NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.reference_table_xxxxx CASCADE
NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.local_table_xxxxx CASCADE
NOTICE: drop cascades to constraint fkey_c_to_local_1504013 on table citus_local_tables_test_schema.citus_local_table_1_1504013
NOTICE: drop cascades to constraint fkey_c_to_local_1504014 on table citus_local_tables_test_schema.citus_local_table_1_1504014
NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.citus_local_table_2_xxxxx CASCADE
NOTICE: executing the command locally: DROP TABLE IF EXISTS citus_local_tables_test_schema.citus_local_table_1_xxxxx CASCADE
-- test some other udf's with citus local tables

@@ -739,7 +741,7 @@ CREATE STATISTICS stx1 ON a, b FROM test_citus_local_table_with_stats;
ALTER TABLE test_citus_local_table_with_stats ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a);
NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (xxxxx, 'citus_local_tables_test_schema', xxxxx, 'citus_local_tables_test_schema', 'ALTER TABLE test_citus_local_table_with_stats ADD CONSTRAINT fkey_to_dummy_ref FOREIGN KEY (a) REFERENCES dummy_reference_table(a);')
CREATE STATISTICS "CiTUS!LocalTables"."Bad\'StatName" ON a, b FROM test_citus_local_table_with_stats;
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504030, 'citus_local_tables_test_schema', E'CREATE STATISTICS "CiTUS!LocalTables"."Bad\\''StatName" ON a, b FROM citus_local_tables_test_schema.test_citus_local_table_with_stats')
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504031, 'citus_local_tables_test_schema', E'CREATE STATISTICS "CiTUS!LocalTables"."Bad\\''StatName" ON a, b FROM citus_local_tables_test_schema.test_citus_local_table_with_stats')
SELECT COUNT(*)=4 FROM pg_statistic_ext WHERE stxname LIKE 'stx1%' or stxname LIKE 'Bad\\''StatName%' ;
 ?column?
---------------------------------------------------------------------

@@ -790,11 +792,11 @@ SELECT partition, from_value, to_value, access_method
 partition | from_value | to_value | access_method
---------------------------------------------------------------------
 partition1 | Thu Apr 12 17:00:00 2018 PDT | Fri Apr 13 17:00:00 2018 PDT | heap
 partition1_1504031 | Thu Apr 12 17:00:00 2018 PDT | Fri Apr 13 17:00:00 2018 PDT | heap
 partition1_1504032 | Thu Apr 12 17:00:00 2018 PDT | Fri Apr 13 17:00:00 2018 PDT | heap
 partition2 | Fri Apr 13 17:00:00 2018 PDT | Sat Apr 14 17:00:00 2018 PDT | heap
 partition2_1504032 | Fri Apr 13 17:00:00 2018 PDT | Sat Apr 14 17:00:00 2018 PDT | heap
 partition2_1504033 | Fri Apr 13 17:00:00 2018 PDT | Sat Apr 14 17:00:00 2018 PDT | heap
 partition3 | Sat Apr 14 17:00:00 2018 PDT | Sun Apr 15 17:00:00 2018 PDT | heap
 partition3_1504034 | Sat Apr 14 17:00:00 2018 PDT | Sun Apr 15 17:00:00 2018 PDT | heap
 partition3_1504035 | Sat Apr 14 17:00:00 2018 PDT | Sun Apr 15 17:00:00 2018 PDT | heap
(6 rows)

-- undistribute successfully

@@ -850,22 +852,22 @@ SELECT relname FROM pg_class
---------------------------------------------------------------------
 partitioned_distributed
 partitioned_distributed_1
 partitioned_distributed_1504038
 partitioned_distributed_1504040
 partitioned_distributed_1_1504042
 partitioned_distributed_1_1504044
 partitioned_distributed_1504039
 partitioned_distributed_1504041
 partitioned_distributed_1_1504043
 partitioned_distributed_1_1504045
 partitioned_distributed_1_a_key
 partitioned_distributed_1_a_key_1504042
 partitioned_distributed_1_a_key_1504044
 partitioned_distributed_1_a_key_1504043
 partitioned_distributed_1_a_key_1504045
 partitioned_distributed_2
 partitioned_distributed_2_1504046
 partitioned_distributed_2_1504048
 partitioned_distributed_2_1504047
 partitioned_distributed_2_1504049
 partitioned_distributed_2_a_key
 partitioned_distributed_2_a_key_1504046
 partitioned_distributed_2_a_key_1504048
 partitioned_distributed_2_a_key_1504047
 partitioned_distributed_2_a_key_1504049
 partitioned_distributed_a_key
 partitioned_distributed_a_key_1504038
 partitioned_distributed_a_key_1504040
 partitioned_distributed_a_key_1504039
 partitioned_distributed_a_key_1504041
(18 rows)

\c - - - :master_port

@@ -0,0 +1,88 @@
\set VERBOSITY terse
SET citus.next_shard_id TO 1511000;
SET citus.shard_replication_factor TO 1;
SET citus.enable_local_execution TO ON;
SET citus.log_local_commands TO ON;
CREATE SCHEMA citus_local_tables_ent;
SET search_path TO citus_local_tables_ent;
-- ensure that coordinator is added to pg_dist_node
SET client_min_messages to ERROR;
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
 ?column?
---------------------------------------------------------------------
        1
(1 row)

RESET client_min_messages;
CREATE TABLE citus_local_table (a int, b int);
SELECT citus_add_local_table_to_metadata('citus_local_table');
 citus_add_local_table_to_metadata
---------------------------------------------------------------------

(1 row)

-- isolate_tenant_to_new_shard is not supported
SELECT isolate_tenant_to_new_shard('citus_local_table', 100);
ERROR: cannot isolate tenant because tenant isolation is only support for hash distributed tables
-- master_copy_shard_placement is not supported
SELECT master_copy_shard_placement(shardid, 'localhost', :master_port, 'localhost', :worker_1_port, false)
FROM (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='citus_local_table'::regclass) as shardid;
ERROR: Table 'citus_local_table' is a local table. Replicating shard of a local table added to metadata currently is not supported
-- master_move_shard_placement is not supported
SELECT master_move_shard_placement(shardid, 'localhost', :master_port, 'localhost', :worker_1_port)
FROM (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='citus_local_table'::regclass) as shardid;
ERROR: table citus_local_tables_ent.citus_local_table is a local table, moving shard of a local table added to metadata is currently not supported
-- replicate_table_shards is not supported
SELECT replicate_table_shards('citus_local_table'::regclass, 2);
NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
ERROR: Table 'citus_local_table' is a local table. Replicating shard of a local table added to metadata currently is not supported
-- rebalance_table_shards is not supported
SELECT rebalance_table_shards('citus_local_table');
ERROR: table citus_local_tables_ent.citus_local_table is a local table, moving shard of a local table added to metadata is currently not supported
-- get_rebalance_table_shards_plan is not supported
SELECT get_rebalance_table_shards_plan('citus_local_table');
ERROR: table citus_local_tables_ent.citus_local_table is a local table, moving shard of a local table added to metadata is currently not supported
-- test a policy defined after creating a citus local table
-- create another user for policy test
CREATE USER user_can_select_a_1;
ALTER ROLE user_can_select_a_1 SET search_path TO citus_local_tables_ent;
GRANT USAGE ON SCHEMA citus_local_tables_ent TO user_can_select_a_1;
INSERT INTO citus_local_table VALUES (1,1);
NOTICE: executing the command locally: INSERT INTO citus_local_tables_ent.citus_local_table_1511000 (a, b) VALUES (1, 1)
INSERT INTO citus_local_table VALUES (2,2);
NOTICE: executing the command locally: INSERT INTO citus_local_tables_ent.citus_local_table_1511000 (a, b) VALUES (2, 2)
-- grant access
GRANT SELECT ON TABLE citus_local_table TO user_can_select_a_1;
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511000, 'citus_local_tables_ent', 'GRANT select ON citus_local_table TO user_can_select_a_1')
-- enable row level security
ALTER TABLE citus_local_table ENABLE ROW LEVEL SECURITY;
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511000, 'citus_local_tables_ent', 'ALTER TABLE citus_local_table ENABLE ROW LEVEL SECURITY;')
-- switch user, it should not be able to see any rows since row level security is enabled
SET ROLE user_can_select_a_1;
SELECT * FROM citus_local_table ORDER BY 1, 2;
NOTICE: executing the command locally: SELECT a, b FROM citus_local_tables_ent.citus_local_table_1511000 citus_local_table ORDER BY a, b
 a | b
---------------------------------------------------------------------
(0 rows)

RESET ROLE;
-- create a policy granting the user read access to rows with a=1
CREATE POLICY user_mod ON citus_local_table
FOR SELECT
TO user_can_select_a_1
USING (current_user = 'user_can_select_a_1' and a=1);
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511000, 'citus_local_tables_ent', 'CREATE POLICY user_mod ON citus_local_tables_ent.citus_local_table FOR SELECT TO user_can_select_a_1 USING (((CURRENT_USER = ''user_can_select_a_1''::name) AND (a = 1)))')
-- switch user, it should be able to see rows with a=1
SET ROLE user_can_select_a_1;
SELECT * FROM citus_local_table ORDER BY 1, 2;
NOTICE: executing the command locally: SELECT a, b FROM citus_local_tables_ent.citus_local_table_1511000 citus_local_table ORDER BY a, b
 a | b
---------------------------------------------------------------------
 1 | 1
(1 row)

-- reset role
RESET ROLE;
-- cleanup at exit
DROP SCHEMA citus_local_tables_ent CASCADE;
NOTICE: drop cascades to 2 other objects

@@ -433,11 +433,7 @@ ERROR: unsupported access method for the index on columnar table circles
-- Row level security
CREATE TABLE public.row_level_security_col (id int, pgUser CHARACTER VARYING) USING columnar;
CREATE USER user1;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER user2;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
INSERT INTO public.row_level_security_col VALUES (1, 'user1'), (2, 'user2');
GRANT SELECT, UPDATE, INSERT, DELETE ON public.row_level_security_col TO user1;
GRANT SELECT, UPDATE, INSERT, DELETE ON public.row_level_security_col TO user2;
Some files were not shown because too many files have changed in this diff.