mirror of https://github.com/citusdata/citus.git
Sync pg_dist_object on an update and propagate while syncing to a new node
Before that PR we were updating citus.pg_dist_object metadata, which keeps the metadata related to objects on Citus, only on the coordinator node. In order to allow using those objects from worker nodes (or erroring out with a proper error message) we've started to propagate that metadata to worker nodes as well.
pull/5415/head
parent
d4ed94d2f2
commit
ed8e32de5e
|
@ -61,6 +61,7 @@ CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *dest)
|
|||
|
||||
DistObjectCacheEntry *procedure = LookupDistObjectCacheEntry(ProcedureRelationId,
|
||||
functionId, 0);
|
||||
|
||||
if (procedure == NULL || !procedure->isDistributed)
|
||||
{
|
||||
return false;
|
||||
|
|
|
@ -1246,7 +1246,8 @@ FinalizeCitusLocalTableCreation(Oid relationId, List *dependentSequenceList)
|
|||
* Ensure sequence dependencies and mark them as distributed
|
||||
* before creating table metadata on workers
|
||||
*/
|
||||
MarkSequenceListDistributedAndPropagateDependencies(dependentSequenceList);
|
||||
MarkSequenceListDistributedAndPropagateWithDependencies(relationId,
|
||||
dependentSequenceList);
|
||||
}
|
||||
CreateTableMetadataOnWorkers(relationId);
|
||||
}
|
||||
|
|
|
@ -320,6 +320,8 @@ PreprocessRenameCollationStmt(Node *node, const char *queryString,
|
|||
return NIL;
|
||||
}
|
||||
|
||||
EnsureCoordinator();
|
||||
|
||||
/* fully qualify */
|
||||
QualifyTreeNode((Node *) stmt);
|
||||
|
||||
|
@ -588,9 +590,12 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString)
|
|||
ObjectAddress collationAddress =
|
||||
DefineCollationStmtObjectAddress(node, false);
|
||||
|
||||
EnsureDependenciesExistOnAllNodes(&collationAddress);
|
||||
if (IsObjectDistributed(&collationAddress))
|
||||
{
|
||||
EnsureCoordinator();
|
||||
}
|
||||
|
||||
MarkObjectDistributed(&collationAddress);
|
||||
EnsureDependenciesExistOnAllNodes(&collationAddress);
|
||||
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, CreateCollationDDLsIdempotent(
|
||||
collationAddress.objectId));
|
||||
|
|
|
@ -113,6 +113,8 @@ static void EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMe
|
|||
static bool ShouldLocalTableBeEmpty(Oid relationId, char distributionMethod, bool
|
||||
viaDeprecatedAPI);
|
||||
static void EnsureCitusTableCanBeCreated(Oid relationOid);
|
||||
static void EnsureSequenceExistOnMetadataWorkersForRelation(Oid relationId,
|
||||
Oid sequenceOid);
|
||||
static List * GetFKeyCreationCommandsRelationInvolvedWithTableType(Oid relationId,
|
||||
int tableTypeFlag);
|
||||
static Oid DropFKeysAndUndistributeTable(Oid relationId);
|
||||
|
@ -536,10 +538,11 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio
|
|||
if (ClusterHasKnownMetadataWorkers())
|
||||
{
|
||||
/*
|
||||
* Ensure sequence dependencies and mark them as distributed
|
||||
* Ensure both the sequence and its dependencies and mark them as distributed
|
||||
* before creating table metadata on workers
|
||||
*/
|
||||
MarkSequenceListDistributedAndPropagateDependencies(dependentSequenceList);
|
||||
MarkSequenceListDistributedAndPropagateWithDependencies(relationId,
|
||||
dependentSequenceList);
|
||||
}
|
||||
|
||||
CreateTableMetadataOnWorkers(relationId);
|
||||
|
@ -670,37 +673,62 @@ AlterSequenceType(Oid seqOid, Oid typeOid)
|
|||
|
||||
|
||||
/*
|
||||
* MarkSequenceListDistributedAndPropagateDependencies ensures dependencies
|
||||
* for the given sequence list exist on all nodes and marks the sequences
|
||||
* as distributed.
|
||||
* MarkSequenceListDistributedAndPropagateWithDependencies ensures sequences and their
|
||||
* dependencies for the given sequence list exist on all nodes and marks them as distributed.
|
||||
*/
|
||||
void
|
||||
MarkSequenceListDistributedAndPropagateDependencies(List *sequenceList)
|
||||
MarkSequenceListDistributedAndPropagateWithDependencies(Oid relationId,
|
||||
List *sequenceList)
|
||||
{
|
||||
Oid sequenceOid = InvalidOid;
|
||||
foreach_oid(sequenceOid, sequenceList)
|
||||
{
|
||||
MarkSequenceDistributedAndPropagateDependencies(sequenceOid);
|
||||
MarkSequenceDistributedAndPropagateWithDependencies(relationId, sequenceOid);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* MarkSequenceDistributedAndPropagateDependencies ensures dependencies
|
||||
* for the given sequence exist on all nodes and marks the sequence
|
||||
* as distributed.
|
||||
* MarkSequenceDistributedAndPropagateWithDependencies ensures the sequence and its
|
||||
* dependencies for the given sequence exist on all nodes and marks them as distributed.
|
||||
*/
|
||||
void
|
||||
MarkSequenceDistributedAndPropagateDependencies(Oid sequenceOid)
|
||||
MarkSequenceDistributedAndPropagateWithDependencies(Oid relationId, Oid sequenceOid)
|
||||
{
|
||||
/* get sequence address */
|
||||
ObjectAddress sequenceAddress = { 0 };
|
||||
ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid);
|
||||
EnsureDependenciesExistOnAllNodes(&sequenceAddress);
|
||||
EnsureSequenceExistOnMetadataWorkersForRelation(relationId, sequenceOid);
|
||||
MarkObjectDistributed(&sequenceAddress);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* EnsureSequenceExistOnMetadataWorkersForRelation ensures sequence for the given relation
|
||||
* exists on each worker node with metadata.
|
||||
*/
|
||||
static void
|
||||
EnsureSequenceExistOnMetadataWorkersForRelation(Oid relationId, Oid sequenceOid)
|
||||
{
|
||||
Assert(ShouldSyncTableMetadata(relationId));
|
||||
|
||||
char *ownerName = TableOwner(relationId);
|
||||
List *sequenceDDLList = DDLCommandsForSequence(sequenceOid, ownerName);
|
||||
|
||||
/* prevent recursive propagation */
|
||||
SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);
|
||||
|
||||
const char *sequenceCommand = NULL;
|
||||
foreach_ptr(sequenceCommand, sequenceDDLList)
|
||||
{
|
||||
SendCommandToWorkersWithMetadata(sequenceCommand);
|
||||
}
|
||||
|
||||
SendCommandToWorkersWithMetadata(ENABLE_DDL_PROPAGATION);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* EnsureDistributedSequencesHaveOneType first ensures that the type of the column
|
||||
* in which the sequence is used as default is supported for each sequence in input
|
||||
|
|
|
@ -41,11 +41,10 @@ bool EnableDependencyCreation = true;
|
|||
* workers via a separate session that will be committed directly so that the objects are
|
||||
* visible to potentially multiple sessions creating the shards.
|
||||
*
|
||||
* Note; only the actual objects are created via a separate session, the local records to
|
||||
* Note; only the actual objects are created via a separate session, the records to
|
||||
* pg_dist_object are created in this session. As a side effect the objects could be
|
||||
* created on the workers without a catalog entry on the coordinator. Updates to the
|
||||
* objects on the coordinator are not propagated to the workers until the record is
|
||||
* visible on the coordinator.
|
||||
* created on the workers without a catalog entry. Updates to the objects on the coordinator
|
||||
* are not propagated to the workers until the record is visible on the coordinator.
|
||||
*
|
||||
* This is solved by creating the dependencies in an idempotent manner, either via
|
||||
* postgres native CREATE IF NOT EXISTS, or citus helper functions.
|
||||
|
@ -104,31 +103,6 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
|
|||
dependency->objectSubId, ExclusiveLock);
|
||||
}
|
||||
|
||||
/*
|
||||
* right after we acquired the lock we mark our objects as distributed, these changes
|
||||
* will not become visible before we have successfully created all the objects on our
|
||||
* workers.
|
||||
*
|
||||
* It is possible to create distributed tables which depend on other dependencies
|
||||
* before any node is in the cluster. If we would wait till we actually had connected
|
||||
* to the nodes before marking the objects as distributed these objects would never be
|
||||
* created on the workers when they get added, causing shards to fail to create.
|
||||
*/
|
||||
foreach_ptr(dependency, dependenciesWithCommands)
|
||||
{
|
||||
MarkObjectDistributed(dependency);
|
||||
}
|
||||
|
||||
/*
|
||||
* collect and connect to all applicable nodes
|
||||
*/
|
||||
if (list_length(workerNodeList) <= 0)
|
||||
{
|
||||
/* no nodes to execute on */
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
WorkerNode *workerNode = NULL;
|
||||
foreach_ptr(workerNode, workerNodeList)
|
||||
{
|
||||
|
@ -139,6 +113,16 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
|
|||
CitusExtensionOwnerName(),
|
||||
ddlCommands);
|
||||
}
|
||||
|
||||
/*
|
||||
* We do this after creating the objects on the workers, we make sure
|
||||
* that objects have been created on worker nodes before marking them
|
||||
* distributed, so MarkObjectDistributed wouldn't fail.
|
||||
*/
|
||||
foreach_ptr(dependency, dependenciesWithCommands)
|
||||
{
|
||||
MarkObjectDistributed(dependency);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -23,6 +23,7 @@ static DistributeObjectOps NoDistributeOps = {
|
|||
.preprocess = NULL,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Aggregate_AlterObjectSchema = {
|
||||
.deparse = DeparseAlterFunctionSchemaStmt,
|
||||
|
@ -30,6 +31,7 @@ static DistributeObjectOps Aggregate_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterFunctionSchemaStmt,
|
||||
.postprocess = PostprocessAlterFunctionSchemaStmt,
|
||||
.address = AlterFunctionSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Aggregate_AlterOwner = {
|
||||
.deparse = DeparseAlterFunctionOwnerStmt,
|
||||
|
@ -37,6 +39,7 @@ static DistributeObjectOps Aggregate_AlterOwner = {
|
|||
.preprocess = PreprocessAlterFunctionOwnerStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterFunctionOwnerObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Aggregate_Define = {
|
||||
.deparse = NULL,
|
||||
|
@ -44,6 +47,7 @@ static DistributeObjectOps Aggregate_Define = {
|
|||
.preprocess = NULL,
|
||||
.postprocess = NULL,
|
||||
.address = DefineAggregateStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Aggregate_Drop = {
|
||||
.deparse = DeparseDropFunctionStmt,
|
||||
|
@ -51,6 +55,7 @@ static DistributeObjectOps Aggregate_Drop = {
|
|||
.preprocess = PreprocessDropFunctionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Aggregate_Rename = {
|
||||
.deparse = DeparseRenameFunctionStmt,
|
||||
|
@ -58,6 +63,7 @@ static DistributeObjectOps Aggregate_Rename = {
|
|||
.preprocess = PreprocessRenameFunctionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = RenameFunctionStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_AlterEnum = {
|
||||
.deparse = DeparseAlterEnumStmt,
|
||||
|
@ -65,6 +71,7 @@ static DistributeObjectOps Any_AlterEnum = {
|
|||
.preprocess = PreprocessAlterEnumStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterEnumStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_AlterExtension = {
|
||||
.deparse = DeparseAlterExtensionStmt,
|
||||
|
@ -72,6 +79,7 @@ static DistributeObjectOps Any_AlterExtension = {
|
|||
.preprocess = PreprocessAlterExtensionUpdateStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterExtensionUpdateStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_AlterExtensionContents = {
|
||||
.deparse = NULL,
|
||||
|
@ -79,6 +87,7 @@ static DistributeObjectOps Any_AlterExtensionContents = {
|
|||
.preprocess = PreprocessAlterExtensionContentsStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_AlterFunction = {
|
||||
.deparse = DeparseAlterFunctionStmt,
|
||||
|
@ -86,6 +95,7 @@ static DistributeObjectOps Any_AlterFunction = {
|
|||
.preprocess = PreprocessAlterFunctionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterFunctionStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_AlterPolicy = {
|
||||
.deparse = NULL,
|
||||
|
@ -93,6 +103,7 @@ static DistributeObjectOps Any_AlterPolicy = {
|
|||
.preprocess = PreprocessAlterPolicyStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_AlterRole = {
|
||||
.deparse = DeparseAlterRoleStmt,
|
||||
|
@ -100,6 +111,7 @@ static DistributeObjectOps Any_AlterRole = {
|
|||
.preprocess = NULL,
|
||||
.postprocess = PostprocessAlterRoleStmt,
|
||||
.address = AlterRoleStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_AlterRoleSet = {
|
||||
.deparse = DeparseAlterRoleSetStmt,
|
||||
|
@ -107,6 +119,7 @@ static DistributeObjectOps Any_AlterRoleSet = {
|
|||
.preprocess = PreprocessAlterRoleSetStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterRoleSetStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_AlterTableMoveAll = {
|
||||
.deparse = NULL,
|
||||
|
@ -114,6 +127,7 @@ static DistributeObjectOps Any_AlterTableMoveAll = {
|
|||
.preprocess = PreprocessAlterTableMoveAllStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_Cluster = {
|
||||
.deparse = NULL,
|
||||
|
@ -121,6 +135,7 @@ static DistributeObjectOps Any_Cluster = {
|
|||
.preprocess = PreprocessClusterStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_CompositeType = {
|
||||
.deparse = DeparseCompositeTypeStmt,
|
||||
|
@ -128,6 +143,7 @@ static DistributeObjectOps Any_CompositeType = {
|
|||
.preprocess = PreprocessCompositeTypeStmt,
|
||||
.postprocess = PostprocessCompositeTypeStmt,
|
||||
.address = CompositeTypeStmtObjectAddress,
|
||||
.markDistributed = true,
|
||||
};
|
||||
static DistributeObjectOps Any_CreateEnum = {
|
||||
.deparse = DeparseCreateEnumStmt,
|
||||
|
@ -135,6 +151,7 @@ static DistributeObjectOps Any_CreateEnum = {
|
|||
.preprocess = PreprocessCreateEnumStmt,
|
||||
.postprocess = PostprocessCreateEnumStmt,
|
||||
.address = CreateEnumStmtObjectAddress,
|
||||
.markDistributed = true,
|
||||
};
|
||||
static DistributeObjectOps Any_CreateExtension = {
|
||||
.deparse = DeparseCreateExtensionStmt,
|
||||
|
@ -142,6 +159,7 @@ static DistributeObjectOps Any_CreateExtension = {
|
|||
.preprocess = NULL,
|
||||
.postprocess = PostprocessCreateExtensionStmt,
|
||||
.address = CreateExtensionStmtObjectAddress,
|
||||
.markDistributed = true,
|
||||
};
|
||||
static DistributeObjectOps Any_CreateFunction = {
|
||||
.deparse = NULL,
|
||||
|
@ -149,6 +167,7 @@ static DistributeObjectOps Any_CreateFunction = {
|
|||
.preprocess = PreprocessCreateFunctionStmt,
|
||||
.postprocess = PostprocessCreateFunctionStmt,
|
||||
.address = CreateFunctionStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_CreatePolicy = {
|
||||
.deparse = NULL,
|
||||
|
@ -156,6 +175,7 @@ static DistributeObjectOps Any_CreatePolicy = {
|
|||
.preprocess = PreprocessCreatePolicyStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_CreateStatistics = {
|
||||
.deparse = DeparseCreateStatisticsStmt,
|
||||
|
@ -163,6 +183,7 @@ static DistributeObjectOps Any_CreateStatistics = {
|
|||
.preprocess = PreprocessCreateStatisticsStmt,
|
||||
.postprocess = PostprocessCreateStatisticsStmt,
|
||||
.address = CreateStatisticsStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_CreateTrigger = {
|
||||
.deparse = NULL,
|
||||
|
@ -170,6 +191,7 @@ static DistributeObjectOps Any_CreateTrigger = {
|
|||
.preprocess = NULL,
|
||||
.postprocess = PostprocessCreateTriggerStmt,
|
||||
.address = CreateTriggerStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_Grant = {
|
||||
.deparse = NULL,
|
||||
|
@ -177,6 +199,7 @@ static DistributeObjectOps Any_Grant = {
|
|||
.preprocess = PreprocessGrantStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_Index = {
|
||||
.deparse = NULL,
|
||||
|
@ -184,6 +207,7 @@ static DistributeObjectOps Any_Index = {
|
|||
.preprocess = PreprocessIndexStmt,
|
||||
.postprocess = PostprocessIndexStmt,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_Reindex = {
|
||||
.deparse = NULL,
|
||||
|
@ -191,6 +215,7 @@ static DistributeObjectOps Any_Reindex = {
|
|||
.preprocess = PreprocessReindexStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Any_Rename = {
|
||||
.deparse = NULL,
|
||||
|
@ -198,6 +223,7 @@ static DistributeObjectOps Any_Rename = {
|
|||
.preprocess = PreprocessRenameStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Attribute_Rename = {
|
||||
.deparse = DeparseRenameAttributeStmt,
|
||||
|
@ -205,6 +231,7 @@ static DistributeObjectOps Attribute_Rename = {
|
|||
.preprocess = PreprocessRenameAttributeStmt,
|
||||
.postprocess = NULL,
|
||||
.address = RenameAttributeStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Collation_AlterObjectSchema = {
|
||||
.deparse = DeparseAlterCollationSchemaStmt,
|
||||
|
@ -212,6 +239,7 @@ static DistributeObjectOps Collation_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterCollationSchemaStmt,
|
||||
.postprocess = PostprocessAlterCollationSchemaStmt,
|
||||
.address = AlterCollationSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Collation_AlterOwner = {
|
||||
.deparse = DeparseAlterCollationOwnerStmt,
|
||||
|
@ -219,6 +247,7 @@ static DistributeObjectOps Collation_AlterOwner = {
|
|||
.preprocess = PreprocessAlterCollationOwnerStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterCollationOwnerObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Collation_Define = {
|
||||
.deparse = NULL,
|
||||
|
@ -226,6 +255,7 @@ static DistributeObjectOps Collation_Define = {
|
|||
.preprocess = NULL,
|
||||
.postprocess = PostprocessDefineCollationStmt,
|
||||
.address = DefineCollationStmtObjectAddress,
|
||||
.markDistributed = true,
|
||||
};
|
||||
static DistributeObjectOps Collation_Drop = {
|
||||
.deparse = DeparseDropCollationStmt,
|
||||
|
@ -233,6 +263,7 @@ static DistributeObjectOps Collation_Drop = {
|
|||
.preprocess = PreprocessDropCollationStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Collation_Rename = {
|
||||
.deparse = DeparseRenameCollationStmt,
|
||||
|
@ -240,6 +271,7 @@ static DistributeObjectOps Collation_Rename = {
|
|||
.preprocess = PreprocessRenameCollationStmt,
|
||||
.postprocess = NULL,
|
||||
.address = RenameCollationStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Database_AlterOwner = {
|
||||
.deparse = DeparseAlterDatabaseOwnerStmt,
|
||||
|
@ -247,6 +279,7 @@ static DistributeObjectOps Database_AlterOwner = {
|
|||
.preprocess = PreprocessAlterDatabaseOwnerStmt,
|
||||
.postprocess = PostprocessAlterDatabaseOwnerStmt,
|
||||
.address = AlterDatabaseOwnerObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Extension_AlterObjectSchema = {
|
||||
.deparse = DeparseAlterExtensionSchemaStmt,
|
||||
|
@ -254,6 +287,7 @@ static DistributeObjectOps Extension_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterExtensionSchemaStmt,
|
||||
.postprocess = PostprocessAlterExtensionSchemaStmt,
|
||||
.address = AlterExtensionSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Extension_Drop = {
|
||||
.deparse = DeparseDropExtensionStmt,
|
||||
|
@ -261,6 +295,7 @@ static DistributeObjectOps Extension_Drop = {
|
|||
.preprocess = PreprocessDropExtensionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps ForeignTable_AlterTable = {
|
||||
.deparse = NULL,
|
||||
|
@ -268,6 +303,7 @@ static DistributeObjectOps ForeignTable_AlterTable = {
|
|||
.preprocess = PreprocessAlterTableStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Function_AlterObjectDepends = {
|
||||
.deparse = DeparseAlterFunctionDependsStmt,
|
||||
|
@ -275,6 +311,7 @@ static DistributeObjectOps Function_AlterObjectDepends = {
|
|||
.preprocess = PreprocessAlterFunctionDependsStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterFunctionDependsStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Function_AlterObjectSchema = {
|
||||
.deparse = DeparseAlterFunctionSchemaStmt,
|
||||
|
@ -282,6 +319,7 @@ static DistributeObjectOps Function_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterFunctionSchemaStmt,
|
||||
.postprocess = PostprocessAlterFunctionSchemaStmt,
|
||||
.address = AlterFunctionSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Function_AlterOwner = {
|
||||
.deparse = DeparseAlterFunctionOwnerStmt,
|
||||
|
@ -289,6 +327,7 @@ static DistributeObjectOps Function_AlterOwner = {
|
|||
.preprocess = PreprocessAlterFunctionOwnerStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterFunctionOwnerObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Function_Drop = {
|
||||
.deparse = DeparseDropFunctionStmt,
|
||||
|
@ -296,6 +335,7 @@ static DistributeObjectOps Function_Drop = {
|
|||
.preprocess = PreprocessDropFunctionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Function_Rename = {
|
||||
.deparse = DeparseRenameFunctionStmt,
|
||||
|
@ -303,6 +343,7 @@ static DistributeObjectOps Function_Rename = {
|
|||
.preprocess = PreprocessRenameFunctionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = RenameFunctionStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Index_AlterTable = {
|
||||
.deparse = NULL,
|
||||
|
@ -310,6 +351,7 @@ static DistributeObjectOps Index_AlterTable = {
|
|||
.preprocess = PreprocessAlterTableStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Index_Drop = {
|
||||
.deparse = NULL,
|
||||
|
@ -317,6 +359,7 @@ static DistributeObjectOps Index_Drop = {
|
|||
.preprocess = PreprocessDropIndexStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Policy_Drop = {
|
||||
.deparse = NULL,
|
||||
|
@ -324,6 +367,7 @@ static DistributeObjectOps Policy_Drop = {
|
|||
.preprocess = PreprocessDropPolicyStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Procedure_AlterObjectDepends = {
|
||||
.deparse = DeparseAlterFunctionDependsStmt,
|
||||
|
@ -331,6 +375,7 @@ static DistributeObjectOps Procedure_AlterObjectDepends = {
|
|||
.preprocess = PreprocessAlterFunctionDependsStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterFunctionDependsStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Procedure_AlterObjectSchema = {
|
||||
.deparse = DeparseAlterFunctionSchemaStmt,
|
||||
|
@ -338,6 +383,7 @@ static DistributeObjectOps Procedure_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterFunctionSchemaStmt,
|
||||
.postprocess = PostprocessAlterFunctionSchemaStmt,
|
||||
.address = AlterFunctionSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Procedure_AlterOwner = {
|
||||
.deparse = DeparseAlterFunctionOwnerStmt,
|
||||
|
@ -345,6 +391,7 @@ static DistributeObjectOps Procedure_AlterOwner = {
|
|||
.preprocess = PreprocessAlterFunctionOwnerStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterFunctionOwnerObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Procedure_Drop = {
|
||||
.deparse = DeparseDropFunctionStmt,
|
||||
|
@ -352,6 +399,7 @@ static DistributeObjectOps Procedure_Drop = {
|
|||
.preprocess = PreprocessDropFunctionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Procedure_Rename = {
|
||||
.deparse = DeparseRenameFunctionStmt,
|
||||
|
@ -359,6 +407,7 @@ static DistributeObjectOps Procedure_Rename = {
|
|||
.preprocess = PreprocessRenameFunctionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = RenameFunctionStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Routine_AlterObjectDepends = {
|
||||
.deparse = DeparseAlterFunctionDependsStmt,
|
||||
|
@ -366,6 +415,7 @@ static DistributeObjectOps Routine_AlterObjectDepends = {
|
|||
.preprocess = PreprocessAlterFunctionDependsStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterFunctionDependsStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Sequence_Alter = {
|
||||
.deparse = NULL,
|
||||
|
@ -373,6 +423,7 @@ static DistributeObjectOps Sequence_Alter = {
|
|||
.preprocess = PreprocessAlterSequenceStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterSequenceStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Sequence_AlterObjectSchema = {
|
||||
.deparse = DeparseAlterSequenceSchemaStmt,
|
||||
|
@ -380,6 +431,7 @@ static DistributeObjectOps Sequence_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterSequenceSchemaStmt,
|
||||
.postprocess = PostprocessAlterSequenceSchemaStmt,
|
||||
.address = AlterSequenceSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Sequence_AlterOwner = {
|
||||
.deparse = DeparseAlterSequenceOwnerStmt,
|
||||
|
@ -387,6 +439,7 @@ static DistributeObjectOps Sequence_AlterOwner = {
|
|||
.preprocess = PreprocessAlterSequenceOwnerStmt,
|
||||
.postprocess = PostprocessAlterSequenceOwnerStmt,
|
||||
.address = AlterSequenceOwnerStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Sequence_Drop = {
|
||||
.deparse = DeparseDropSequenceStmt,
|
||||
|
@ -394,6 +447,7 @@ static DistributeObjectOps Sequence_Drop = {
|
|||
.preprocess = PreprocessDropSequenceStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Sequence_Rename = {
|
||||
.deparse = DeparseRenameSequenceStmt,
|
||||
|
@ -401,6 +455,7 @@ static DistributeObjectOps Sequence_Rename = {
|
|||
.preprocess = PreprocessRenameSequenceStmt,
|
||||
.postprocess = NULL,
|
||||
.address = RenameSequenceStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Trigger_AlterObjectDepends = {
|
||||
.deparse = NULL,
|
||||
|
@ -408,6 +463,7 @@ static DistributeObjectOps Trigger_AlterObjectDepends = {
|
|||
.preprocess = NULL,
|
||||
.postprocess = PostprocessAlterTriggerDependsStmt,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Routine_AlterObjectSchema = {
|
||||
.deparse = DeparseAlterFunctionSchemaStmt,
|
||||
|
@ -415,6 +471,7 @@ static DistributeObjectOps Routine_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterFunctionSchemaStmt,
|
||||
.postprocess = PostprocessAlterFunctionSchemaStmt,
|
||||
.address = AlterFunctionSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Routine_AlterOwner = {
|
||||
.deparse = DeparseAlterFunctionOwnerStmt,
|
||||
|
@ -422,6 +479,7 @@ static DistributeObjectOps Routine_AlterOwner = {
|
|||
.preprocess = PreprocessAlterFunctionOwnerStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterFunctionOwnerObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Routine_Drop = {
|
||||
.deparse = DeparseDropFunctionStmt,
|
||||
|
@ -429,6 +487,7 @@ static DistributeObjectOps Routine_Drop = {
|
|||
.preprocess = PreprocessDropFunctionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Routine_Rename = {
|
||||
.deparse = DeparseRenameFunctionStmt,
|
||||
|
@ -436,6 +495,7 @@ static DistributeObjectOps Routine_Rename = {
|
|||
.preprocess = PreprocessRenameFunctionStmt,
|
||||
.postprocess = NULL,
|
||||
.address = RenameFunctionStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Schema_Drop = {
|
||||
.deparse = NULL,
|
||||
|
@ -443,6 +503,7 @@ static DistributeObjectOps Schema_Drop = {
|
|||
.preprocess = PreprocessDropSchemaStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Schema_Grant = {
|
||||
.deparse = DeparseGrantOnSchemaStmt,
|
||||
|
@ -450,6 +511,7 @@ static DistributeObjectOps Schema_Grant = {
|
|||
.preprocess = PreprocessGrantOnSchemaStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Schema_Rename = {
|
||||
.deparse = DeparseAlterSchemaRenameStmt,
|
||||
|
@ -457,6 +519,7 @@ static DistributeObjectOps Schema_Rename = {
|
|||
.preprocess = PreprocessAlterSchemaRenameStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterSchemaRenameStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
#if PG_VERSION_NUM >= PG_VERSION_13
|
||||
static DistributeObjectOps Statistics_Alter = {
|
||||
|
@ -465,6 +528,7 @@ static DistributeObjectOps Statistics_Alter = {
|
|||
.preprocess = PreprocessAlterStatisticsStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
#endif
|
||||
static DistributeObjectOps Statistics_AlterObjectSchema = {
|
||||
|
@ -473,6 +537,7 @@ static DistributeObjectOps Statistics_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterStatisticsSchemaStmt,
|
||||
.postprocess = PostprocessAlterStatisticsSchemaStmt,
|
||||
.address = AlterStatisticsSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Statistics_AlterOwner = {
|
||||
.deparse = DeparseAlterStatisticsOwnerStmt,
|
||||
|
@ -480,6 +545,7 @@ static DistributeObjectOps Statistics_AlterOwner = {
|
|||
.preprocess = PreprocessAlterStatisticsOwnerStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Statistics_Drop = {
|
||||
.deparse = NULL,
|
||||
|
@ -487,6 +553,7 @@ static DistributeObjectOps Statistics_Drop = {
|
|||
.preprocess = PreprocessDropStatisticsStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Statistics_Rename = {
|
||||
.deparse = DeparseAlterStatisticsRenameStmt,
|
||||
|
@ -494,6 +561,7 @@ static DistributeObjectOps Statistics_Rename = {
|
|||
.preprocess = PreprocessAlterStatisticsRenameStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Table_AlterTable = {
|
||||
.deparse = DeparseAlterTableStmt,
|
||||
|
@ -501,6 +569,7 @@ static DistributeObjectOps Table_AlterTable = {
|
|||
.preprocess = PreprocessAlterTableStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Table_AlterObjectSchema = {
|
||||
.deparse = DeparseAlterTableSchemaStmt,
|
||||
|
@ -508,6 +577,7 @@ static DistributeObjectOps Table_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterTableSchemaStmt,
|
||||
.postprocess = PostprocessAlterTableSchemaStmt,
|
||||
.address = AlterTableSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Table_Drop = {
|
||||
.deparse = NULL,
|
||||
|
@ -515,6 +585,7 @@ static DistributeObjectOps Table_Drop = {
|
|||
.preprocess = PreprocessDropTableStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Type_AlterObjectSchema = {
|
||||
.deparse = DeparseAlterTypeSchemaStmt,
|
||||
|
@ -522,6 +593,7 @@ static DistributeObjectOps Type_AlterObjectSchema = {
|
|||
.preprocess = PreprocessAlterTypeSchemaStmt,
|
||||
.postprocess = PostprocessAlterTypeSchemaStmt,
|
||||
.address = AlterTypeSchemaStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Type_AlterOwner = {
|
||||
.deparse = DeparseAlterTypeOwnerStmt,
|
||||
|
@ -529,6 +601,7 @@ static DistributeObjectOps Type_AlterOwner = {
|
|||
.preprocess = PreprocessAlterTypeOwnerStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterTypeOwnerObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Type_AlterTable = {
|
||||
.deparse = DeparseAlterTypeStmt,
|
||||
|
@ -536,6 +609,7 @@ static DistributeObjectOps Type_AlterTable = {
|
|||
.preprocess = PreprocessAlterTypeStmt,
|
||||
.postprocess = NULL,
|
||||
.address = AlterTypeStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Type_Drop = {
|
||||
.deparse = DeparseDropTypeStmt,
|
||||
|
@ -543,6 +617,7 @@ static DistributeObjectOps Type_Drop = {
|
|||
.preprocess = PreprocessDropTypeStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Trigger_Drop = {
|
||||
.deparse = NULL,
|
||||
|
@ -550,6 +625,7 @@ static DistributeObjectOps Trigger_Drop = {
|
|||
.preprocess = PreprocessDropTriggerStmt,
|
||||
.postprocess = NULL,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Type_Rename = {
|
||||
.deparse = DeparseRenameTypeStmt,
|
||||
|
@ -557,6 +633,7 @@ static DistributeObjectOps Type_Rename = {
|
|||
.preprocess = PreprocessRenameTypeStmt,
|
||||
.postprocess = NULL,
|
||||
.address = RenameTypeStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
static DistributeObjectOps Trigger_Rename = {
|
||||
.deparse = NULL,
|
||||
|
@ -564,6 +641,7 @@ static DistributeObjectOps Trigger_Rename = {
|
|||
.preprocess = NULL,
|
||||
.postprocess = PostprocessAlterTriggerRenameStmt,
|
||||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -188,8 +188,6 @@ PostprocessCreateExtensionStmt(Node *node, const char *queryString)
|
|||
|
||||
EnsureDependenciesExistOnAllNodes(&extensionAddress);
|
||||
|
||||
MarkObjectDistributed(&extensionAddress);
|
||||
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
@ -510,15 +508,6 @@ PreprocessAlterExtensionUpdateStmt(Node *node, const char *queryString,
|
|||
void
|
||||
PostprocessAlterExtensionCitusUpdateStmt(Node *node)
|
||||
{
|
||||
/*
|
||||
* We should not postprocess this command in workers as they do not keep track
|
||||
* of citus.pg_dist_object.
|
||||
*/
|
||||
if (!IsCoordinator())
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
bool citusIsUpdatedToLatestVersion = InstalledAndAvailableVersionsSame();
|
||||
|
||||
/*
|
||||
|
|
|
@ -76,9 +76,6 @@ static int GetFunctionColocationId(Oid functionOid, char *colocateWithName, Oid
|
|||
static void EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid
|
||||
distributionColumnType, Oid
|
||||
sourceRelationId);
|
||||
static void UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
|
||||
int *distribution_argument_index,
|
||||
int *colocationId);
|
||||
static void EnsureSequentialModeForFunctionDDL(void);
|
||||
static void TriggerSyncMetadataToPrimaryNodes(void);
|
||||
static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt);
|
||||
|
@ -188,7 +185,8 @@ create_distributed_function(PG_FUNCTION_ARGS)
|
|||
const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true);
|
||||
const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
|
||||
initStringInfo(&ddlCommand);
|
||||
appendStringInfo(&ddlCommand, "%s;%s", createFunctionSQL, alterFunctionOwnerSQL);
|
||||
appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_OBJECT_PROPAGATION,
|
||||
createFunctionSQL, alterFunctionOwnerSQL, ENABLE_OBJECT_PROPAGATION);
|
||||
SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(), ddlCommand.data);
|
||||
|
||||
MarkObjectDistributed(&functionAddress);
|
||||
|
@ -564,8 +562,9 @@ EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid distributionColumnTyp
|
|||
/*
|
||||
* UpdateFunctionDistributionInfo gets object address of a function and
|
||||
* updates its distribution_argument_index and colocationId in pg_dist_object.
|
||||
* Then update pg_dist_object on nodes with metadata if object propagation is on.
|
||||
*/
|
||||
static void
|
||||
void
|
||||
UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
|
||||
int *distribution_argument_index,
|
||||
int *colocationId)
|
||||
|
@ -638,6 +637,37 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
|
|||
systable_endscan(scanDescriptor);
|
||||
|
||||
table_close(pgDistObjectRel, NoLock);
|
||||
|
||||
if (EnableDependencyCreation)
|
||||
{
|
||||
List *objectAddressList = list_make1((ObjectAddress *) distAddress);
|
||||
List *distArgumentIndexList = NIL;
|
||||
List *colocationIdList = NIL;
|
||||
|
||||
if (distribution_argument_index == NULL)
|
||||
{
|
||||
distArgumentIndexList = list_make1_int(INVALID_DISTRIBUTION_ARGUMENT_INDEX);
|
||||
}
|
||||
else
|
||||
{
|
||||
distArgumentIndexList = list_make1_int(*distribution_argument_index);
|
||||
}
|
||||
|
||||
if (colocationId == NULL)
|
||||
{
|
||||
colocationIdList = list_make1_int(INVALID_COLOCATION_ID);
|
||||
}
|
||||
else
|
||||
{
|
||||
colocationIdList = list_make1_int(*colocationId);
|
||||
}
|
||||
|
||||
char *workerPgDistObjectUpdateCommand =
|
||||
MarkObjectsDistributedCreateCommand(objectAddressList,
|
||||
distArgumentIndexList,
|
||||
colocationIdList);
|
||||
SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -213,6 +213,15 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString,
|
|||
return NIL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Since roles need to be handled manually on community, we need to support such queries
|
||||
* by handling them locally on worker nodes
|
||||
*/
|
||||
if (!IsCoordinator())
|
||||
{
|
||||
return NIL;
|
||||
}
|
||||
|
||||
QualifyTreeNode((Node *) stmt);
|
||||
const char *sql = DeparseTreeNode((Node *) stmt);
|
||||
|
||||
|
|
|
@ -147,6 +147,15 @@ PreprocessGrantOnSchemaStmt(Node *node, const char *queryString,
|
|||
return NIL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Since access control needs to be handled manually on community, we need to support
|
||||
* such queries by handling them locally on worker nodes.
|
||||
*/
|
||||
if (!IsCoordinator())
|
||||
{
|
||||
return NIL;
|
||||
}
|
||||
|
||||
List *originalObjects = stmt->objects;
|
||||
|
||||
stmt->objects = distributedSchemas;
|
||||
|
@ -177,6 +186,8 @@ PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString,
|
|||
return NIL;
|
||||
}
|
||||
|
||||
EnsureCoordinator();
|
||||
|
||||
/* fully qualify */
|
||||
QualifyTreeNode(node);
|
||||
|
||||
|
|
|
@ -2054,8 +2054,8 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
|
|||
ClusterHasKnownMetadataWorkers())
|
||||
{
|
||||
needMetadataSyncForNewSequences = true;
|
||||
MarkSequenceDistributedAndPropagateDependencies(
|
||||
seqOid);
|
||||
MarkSequenceDistributedAndPropagateWithDependencies(
|
||||
relationId, seqOid);
|
||||
alterTableDefaultNextvalCmd =
|
||||
GetAddColumnWithNextvalDefaultCmd(seqOid,
|
||||
relationId,
|
||||
|
@ -2095,7 +2095,8 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
|
|||
ClusterHasKnownMetadataWorkers())
|
||||
{
|
||||
needMetadataSyncForNewSequences = true;
|
||||
MarkSequenceDistributedAndPropagateDependencies(seqOid);
|
||||
MarkSequenceDistributedAndPropagateWithDependencies(relationId,
|
||||
seqOid);
|
||||
alterTableDefaultNextvalCmd = GetAlterColumnWithNextvalDefaultCmd(
|
||||
seqOid, relationId, command->name);
|
||||
}
|
||||
|
@ -2106,22 +2107,9 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
|
|||
|
||||
if (needMetadataSyncForNewSequences)
|
||||
{
|
||||
List *sequenceCommandList = NIL;
|
||||
|
||||
/* commands to create sequences */
|
||||
List *sequenceDDLCommands = SequenceDDLCommandsForTable(relationId);
|
||||
sequenceCommandList = list_concat(sequenceCommandList, sequenceDDLCommands);
|
||||
|
||||
/* prevent recursive propagation */
|
||||
SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);
|
||||
|
||||
/* send the commands one by one */
|
||||
const char *sequenceCommand = NULL;
|
||||
foreach_ptr(sequenceCommand, sequenceCommandList)
|
||||
{
|
||||
SendCommandToWorkersWithMetadata(sequenceCommand);
|
||||
}
|
||||
|
||||
/*
|
||||
* It's easy to retrieve the sequence id to create the proper commands
|
||||
* in postprocess, after the dependency between the sequence and the table
|
||||
|
|
|
@ -189,8 +189,6 @@ PostprocessCompositeTypeStmt(Node *node, const char *queryString)
|
|||
ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false);
|
||||
EnsureDependenciesExistOnAllNodes(&typeAddress);
|
||||
|
||||
MarkObjectDistributed(&typeAddress);
|
||||
|
||||
return NIL;
|
||||
}
|
||||
|
||||
|
@ -299,13 +297,6 @@ PostprocessCreateEnumStmt(Node *node, const char *queryString)
|
|||
ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false);
|
||||
EnsureDependenciesExistOnAllNodes(&typeAddress);
|
||||
|
||||
/*
|
||||
* now that the object has been created and distributed to the workers we mark them as
|
||||
* distributed so we know to keep them up to date and recreate on a new node in the
|
||||
* future
|
||||
*/
|
||||
MarkObjectDistributed(&typeAddress);
|
||||
|
||||
return NIL;
|
||||
}
|
||||
|
||||
|
@ -443,6 +434,8 @@ PreprocessRenameTypeStmt(Node *node, const char *queryString,
|
|||
return NIL;
|
||||
}
|
||||
|
||||
EnsureCoordinator();
|
||||
|
||||
/* fully qualify */
|
||||
QualifyTreeNode(node);
|
||||
|
||||
|
@ -481,6 +474,8 @@ PreprocessRenameTypeAttributeStmt(Node *node, const char *queryString,
|
|||
return NIL;
|
||||
}
|
||||
|
||||
EnsureCoordinator();
|
||||
|
||||
QualifyTreeNode((Node *) stmt);
|
||||
|
||||
const char *sql = DeparseTreeNode((Node *) stmt);
|
||||
|
|
|
@ -54,6 +54,7 @@
|
|||
#include "distributed/multi_partitioning_utils.h"
|
||||
#include "distributed/metadata_cache.h"
|
||||
#include "distributed/metadata_sync.h"
|
||||
#include "distributed/metadata/distobject.h"
|
||||
#include "distributed/multi_executor.h"
|
||||
#include "distributed/multi_explain.h"
|
||||
#include "distributed/multi_physical_planner.h"
|
||||
|
@ -718,6 +719,16 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
|
|||
FixPartitionShardIndexNames(relationId, indexRelationId);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Since we must have objects on workers before distributing them,
|
||||
* mark object distributed as the last step.
|
||||
*/
|
||||
if (ops && ops->markDistributed)
|
||||
{
|
||||
ObjectAddress address = GetObjectAddressFromParseTree(parsetree, false);
|
||||
MarkObjectDistributed(&address);
|
||||
}
|
||||
}
|
||||
|
||||
/* TODO: fold VACUUM's processing into the above block */
|
||||
|
|
|
@ -28,10 +28,14 @@
|
|||
#include "catalog/pg_type.h"
|
||||
#include "citus_version.h"
|
||||
#include "commands/extension.h"
|
||||
#include "distributed/colocation_utils.h"
|
||||
#include "distributed/commands/utility_hook.h"
|
||||
#include "distributed/metadata/distobject.h"
|
||||
#include "distributed/metadata/pg_dist_object.h"
|
||||
#include "distributed/metadata_cache.h"
|
||||
#include "distributed/metadata_sync.h"
|
||||
#include "distributed/version_compat.h"
|
||||
#include "distributed/worker_transaction.h"
|
||||
#include "executor/spi.h"
|
||||
#include "nodes/makefuncs.h"
|
||||
#include "nodes/pg_list.h"
|
||||
|
@ -139,6 +143,9 @@ ObjectExists(const ObjectAddress *address)
|
|||
/*
|
||||
* MarkObjectDistributed marks an object as a distributed object by citus. Marking is done
|
||||
* by adding appropriate entries to citus.pg_dist_object.
|
||||
*
|
||||
* This also marks the object as distributed on all of the workers with metadata
|
||||
* if object propagation is on.
|
||||
*/
|
||||
void
|
||||
MarkObjectDistributed(const ObjectAddress *distAddress)
|
||||
|
@ -164,6 +171,20 @@ MarkObjectDistributed(const ObjectAddress *distAddress)
|
|||
{
|
||||
ereport(ERROR, (errmsg("failed to insert object into citus.pg_dist_object")));
|
||||
}
|
||||
|
||||
if (EnableDependencyCreation)
|
||||
{
|
||||
/* create a list by adding the address of value to not to have warning */
|
||||
List *objectAddressList = list_make1((ObjectAddress *) distAddress);
|
||||
List *distArgumetIndexList = list_make1_int(INVALID_DISTRIBUTION_ARGUMENT_INDEX);
|
||||
List *colocationIdList = list_make1_int(INVALID_COLOCATION_ID);
|
||||
|
||||
char *workerPgDistObjectUpdateCommand =
|
||||
MarkObjectsDistributedCreateCommand(objectAddressList,
|
||||
distArgumetIndexList,
|
||||
colocationIdList);
|
||||
SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -47,6 +47,7 @@
|
|||
#include "distributed/metadata_cache.h"
|
||||
#include "distributed/metadata_sync.h"
|
||||
#include "distributed/metadata/distobject.h"
|
||||
#include "distributed/metadata/pg_dist_object.h"
|
||||
#include "distributed/multi_executor.h"
|
||||
#include "distributed/multi_join_order.h"
|
||||
#include "distributed/multi_partitioning_utils.h"
|
||||
|
@ -60,6 +61,7 @@
|
|||
#include "distributed/worker_protocol.h"
|
||||
#include "distributed/worker_transaction.h"
|
||||
#include "distributed/version_compat.h"
|
||||
#include "distributed/commands/utility_hook.h"
|
||||
#include "executor/spi.h"
|
||||
#include "foreign/foreign.h"
|
||||
#include "miscadmin.h"
|
||||
|
@ -67,6 +69,7 @@
|
|||
#include "pgstat.h"
|
||||
#include "postmaster/bgworker.h"
|
||||
#include "postmaster/postmaster.h"
|
||||
#include "parser/parse_type.h"
|
||||
#include "storage/lmgr.h"
|
||||
#include "utils/builtins.h"
|
||||
#include "utils/fmgroids.h"
|
||||
|
@ -81,7 +84,10 @@ char *EnableManualMetadataChangesForUser = "";
|
|||
|
||||
|
||||
static void EnsureSequentialModeMetadataOperations(void);
|
||||
static List * DistributedObjectMetadataSyncCommandList(void);
|
||||
static List * GetDistributedTableDDLEvents(Oid relationId);
|
||||
static void EnsureObjectMetadataIsSane(int distributionArgumentIndex,
|
||||
int colocationId);
|
||||
static char * LocalGroupIdUpdateCommand(int32 groupId);
|
||||
static List * SequenceDependencyCommandList(Oid relationId);
|
||||
static char * TruncateTriggerCreateCommand(Oid relationId);
|
||||
|
@ -100,6 +106,7 @@ static GrantStmt * GenerateGrantOnSchemaStmtForRights(Oid roleOid,
|
|||
Oid schemaOid,
|
||||
char *permission,
|
||||
bool withGrantOption);
|
||||
static void SetLocalEnableDependencyCreation(bool state);
|
||||
static char * GenerateSetRoleQuery(Oid roleOid);
|
||||
static void MetadataSyncSigTermHandler(SIGNAL_ARGS);
|
||||
static void MetadataSyncSigAlrmHandler(SIGNAL_ARGS);
|
||||
|
@ -133,6 +140,7 @@ PG_FUNCTION_INFO_V1(citus_internal_add_placement_metadata);
|
|||
PG_FUNCTION_INFO_V1(citus_internal_update_placement_metadata);
|
||||
PG_FUNCTION_INFO_V1(citus_internal_delete_shard_metadata);
|
||||
PG_FUNCTION_INFO_V1(citus_internal_update_relation_colocation);
|
||||
PG_FUNCTION_INFO_V1(citus_internal_add_object_metadata);
|
||||
|
||||
|
||||
static bool got_SIGTERM = false;
|
||||
|
@ -520,6 +528,7 @@ DropMetadataSnapshotOnNode(WorkerNode *workerNode)
|
|||
* (iii) Queries that populate pg_dist_partition table referenced by (ii)
|
||||
* (iv) Queries that populate pg_dist_shard table referenced by (iii)
|
||||
* (v) Queries that populate pg_dist_placement table referenced by (iv)
|
||||
* (vi) Queries that populate pg_dist_object table
|
||||
*/
|
||||
List *
|
||||
MetadataCreateCommands(void)
|
||||
|
@ -571,6 +580,14 @@ MetadataCreateCommands(void)
|
|||
* and committed immediately so they become visible to all sessions creating shards.
|
||||
*/
|
||||
ObjectAddressSet(tableAddress, RelationRelationId, relationId);
|
||||
|
||||
/*
|
||||
* Set object propagation to off as we will mark objects distributed
|
||||
* at the end of this function.
|
||||
*/
|
||||
bool prevDependencyCreationValue = EnableDependencyCreation;
|
||||
SetLocalEnableDependencyCreation(false);
|
||||
|
||||
EnsureDependenciesExistOnAllNodes(&tableAddress);
|
||||
|
||||
/*
|
||||
|
@ -580,7 +597,16 @@ MetadataCreateCommands(void)
|
|||
List *dependentSequenceList = NIL;
|
||||
GetDependentSequencesWithRelation(relationId, &attnumList,
|
||||
&dependentSequenceList, 0);
|
||||
MarkSequenceListDistributedAndPropagateDependencies(dependentSequenceList);
|
||||
|
||||
Oid sequenceOid = InvalidOid;
|
||||
foreach_oid(sequenceOid, dependentSequenceList)
|
||||
{
|
||||
ObjectAddress sequenceAddress = { 0 };
|
||||
ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid);
|
||||
EnsureDependenciesExistOnAllNodes(&sequenceAddress);
|
||||
}
|
||||
|
||||
SetLocalEnableDependencyCreation(prevDependencyCreationValue);
|
||||
|
||||
List *workerSequenceDDLCommands = SequenceDDLCommandsForTable(relationId);
|
||||
metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList,
|
||||
|
@ -667,10 +693,108 @@ MetadataCreateCommands(void)
|
|||
shardCreateCommandList);
|
||||
}
|
||||
|
||||
/* As the last step, propagate the pg_dist_object entities */
|
||||
if (ShouldPropagate())
|
||||
{
|
||||
List *distributedObjectSyncCommandList =
|
||||
DistributedObjectMetadataSyncCommandList();
|
||||
metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList,
|
||||
distributedObjectSyncCommandList);
|
||||
}
|
||||
|
||||
return metadataSnapshotCommandList;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DistributedObjectMetadataSyncCommandList returns the necessary commands to create
|
||||
* pg_dist_object entries on the new node.
|
||||
*/
|
||||
static List *
|
||||
DistributedObjectMetadataSyncCommandList(void)
|
||||
{
|
||||
HeapTuple pgDistObjectTup = NULL;
|
||||
Relation pgDistObjectRel = table_open(DistObjectRelationId(), AccessShareLock);
|
||||
Relation pgDistObjectIndexRel = index_open(DistObjectPrimaryKeyIndexId(),
|
||||
AccessShareLock);
|
||||
TupleDesc pgDistObjectDesc = RelationGetDescr(pgDistObjectRel);
|
||||
|
||||
List *objectAddressList = NIL;
|
||||
List *distArgumentIndexList = NIL;
|
||||
List *colocationIdList = NIL;
|
||||
|
||||
/* It is not strictly necessary to read the tuples in order.
|
||||
* However, it is useful to get consistent behavior, both for regression
|
||||
* tests and also in production systems.
|
||||
*/
|
||||
SysScanDesc pgDistObjectScan = systable_beginscan_ordered(pgDistObjectRel,
|
||||
pgDistObjectIndexRel, NULL,
|
||||
0, NULL);
|
||||
while (HeapTupleIsValid(pgDistObjectTup = systable_getnext_ordered(pgDistObjectScan,
|
||||
ForwardScanDirection)))
|
||||
{
|
||||
Form_pg_dist_object pg_dist_object = (Form_pg_dist_object) GETSTRUCT(
|
||||
pgDistObjectTup);
|
||||
|
||||
ObjectAddress *address = palloc(sizeof(ObjectAddress));
|
||||
|
||||
ObjectAddressSubSet(*address, pg_dist_object->classid, pg_dist_object->objid,
|
||||
pg_dist_object->objsubid);
|
||||
|
||||
bool distributionArgumentIndexIsNull = false;
|
||||
Datum distributionArgumentIndexDatum =
|
||||
heap_getattr(pgDistObjectTup,
|
||||
Anum_pg_dist_object_distribution_argument_index,
|
||||
pgDistObjectDesc,
|
||||
&distributionArgumentIndexIsNull);
|
||||
int32 distributionArgumentIndex = DatumGetInt32(distributionArgumentIndexDatum);
|
||||
|
||||
bool colocationIdIsNull = false;
|
||||
Datum colocationIdDatum =
|
||||
heap_getattr(pgDistObjectTup,
|
||||
Anum_pg_dist_object_colocationid,
|
||||
pgDistObjectDesc,
|
||||
&colocationIdIsNull);
|
||||
int32 colocationId = DatumGetInt32(colocationIdDatum);
|
||||
|
||||
objectAddressList = lappend(objectAddressList, address);
|
||||
|
||||
if (distributionArgumentIndexIsNull)
|
||||
{
|
||||
distArgumentIndexList = lappend_int(distArgumentIndexList,
|
||||
INVALID_DISTRIBUTION_ARGUMENT_INDEX);
|
||||
}
|
||||
else
|
||||
{
|
||||
distArgumentIndexList = lappend_int(distArgumentIndexList,
|
||||
distributionArgumentIndex);
|
||||
}
|
||||
|
||||
if (colocationIdIsNull)
|
||||
{
|
||||
colocationIdList = lappend_int(colocationIdList,
|
||||
INVALID_COLOCATION_ID);
|
||||
}
|
||||
else
|
||||
{
|
||||
colocationIdList = lappend_int(colocationIdList, colocationId);
|
||||
}
|
||||
}
|
||||
|
||||
systable_endscan_ordered(pgDistObjectScan);
|
||||
index_close(pgDistObjectIndexRel, AccessShareLock);
|
||||
relation_close(pgDistObjectRel, NoLock);
|
||||
|
||||
char *workerMetadataUpdateCommand =
|
||||
MarkObjectsDistributedCreateCommand(objectAddressList,
|
||||
distArgumentIndexList,
|
||||
colocationIdList);
|
||||
List *commandList = list_make1(workerMetadataUpdateCommand);
|
||||
|
||||
return commandList;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* GetDistributedTableDDLEvents returns the full set of DDL commands necessary to
|
||||
* create the given distributed table on a worker. The list includes setting up any
|
||||
|
@ -689,10 +813,6 @@ GetDistributedTableDDLEvents(Oid relationId)
|
|||
bool tableOwnedByExtension = IsTableOwnedByExtension(relationId);
|
||||
if (!tableOwnedByExtension)
|
||||
{
|
||||
/* commands to create sequences */
|
||||
List *sequenceDDLCommands = SequenceDDLCommandsForTable(relationId);
|
||||
commandList = list_concat(commandList, sequenceDDLCommands);
|
||||
|
||||
/*
|
||||
* Commands to create the table, these commands are TableDDLCommands so lets
|
||||
* materialize to the non-sharded version
|
||||
|
@ -760,6 +880,7 @@ GetDistributedTableDDLEvents(Oid relationId)
|
|||
* (v) Queries that delete all the rows from pg_dist_shard table referenced by (iv)
|
||||
* (vi) Queries that delete all the rows from pg_dist_placement table
|
||||
* referenced by (v)
|
||||
* (vii) Queries that delete all the rows from pg_dist_object table
|
||||
*/
|
||||
List *
|
||||
MetadataDropCommands(void)
|
||||
|
@ -774,6 +895,8 @@ MetadataDropCommands(void)
|
|||
REMOVE_ALL_CLUSTERED_TABLES_COMMAND);
|
||||
|
||||
dropSnapshotCommandList = lappend(dropSnapshotCommandList, DELETE_ALL_NODES);
|
||||
dropSnapshotCommandList = lappend(dropSnapshotCommandList,
|
||||
DELETE_ALL_DISTRIBUTED_OBJECTS);
|
||||
|
||||
return dropSnapshotCommandList;
|
||||
}
|
||||
|
@ -849,6 +972,194 @@ NodeListInsertCommand(List *workerNodeList)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* MarkObjectsDistributedCreateCommand generates a command that can be executed to
|
||||
* insert or update the provided objects into pg_dist_object on a worker node.
|
||||
*/
|
||||
char *
|
||||
MarkObjectsDistributedCreateCommand(List *addresses,
|
||||
List *distributionArgumentIndexes,
|
||||
List *colocationIds)
|
||||
{
|
||||
StringInfo insertDistributedObjectsCommand = makeStringInfo();
|
||||
|
||||
Assert(list_length(addresses) == list_length(distributionArgumentIndexes));
|
||||
Assert(list_length(distributionArgumentIndexes) == list_length(colocationIds));
|
||||
|
||||
appendStringInfo(insertDistributedObjectsCommand,
|
||||
"WITH distributed_object_data(typetext, objnames, "
|
||||
"objargs, distargumentindex, colocationid) AS (VALUES ");
|
||||
|
||||
bool isFirstObject = true;
|
||||
for (int currentObjectCounter = 0; currentObjectCounter < list_length(addresses);
|
||||
currentObjectCounter++)
|
||||
{
|
||||
ObjectAddress *address = list_nth(addresses, currentObjectCounter);
|
||||
int distributionArgumentIndex = list_nth_int(distributionArgumentIndexes,
|
||||
currentObjectCounter);
|
||||
int colocationId = list_nth_int(colocationIds, currentObjectCounter);
|
||||
List *names = NIL;
|
||||
List *args = NIL;
|
||||
char *objectType = NULL;
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_14
|
||||
objectType = getObjectTypeDescription(address, false);
|
||||
getObjectIdentityParts(address, &names, &args, false);
|
||||
#else
|
||||
objectType = getObjectTypeDescription(address);
|
||||
getObjectIdentityParts(address, &names, &args);
|
||||
#endif
|
||||
|
||||
if (!isFirstObject)
|
||||
{
|
||||
appendStringInfo(insertDistributedObjectsCommand, ", ");
|
||||
}
|
||||
isFirstObject = false;
|
||||
|
||||
appendStringInfo(insertDistributedObjectsCommand,
|
||||
"(%s, ARRAY[",
|
||||
quote_literal_cstr(objectType));
|
||||
|
||||
char *name = NULL;
|
||||
bool firstInNameLoop = true;
|
||||
foreach_ptr(name, names)
|
||||
{
|
||||
if (!firstInNameLoop)
|
||||
{
|
||||
appendStringInfo(insertDistributedObjectsCommand, ", ");
|
||||
}
|
||||
firstInNameLoop = false;
|
||||
appendStringInfoString(insertDistributedObjectsCommand,
|
||||
quote_literal_cstr(name));
|
||||
}
|
||||
|
||||
appendStringInfo(insertDistributedObjectsCommand, "]::text[], ARRAY[");
|
||||
|
||||
char *arg;
|
||||
bool firstInArgLoop = true;
|
||||
foreach_ptr(arg, args)
|
||||
{
|
||||
if (!firstInArgLoop)
|
||||
{
|
||||
appendStringInfo(insertDistributedObjectsCommand, ", ");
|
||||
}
|
||||
firstInArgLoop = false;
|
||||
appendStringInfoString(insertDistributedObjectsCommand,
|
||||
quote_literal_cstr(arg));
|
||||
}
|
||||
|
||||
appendStringInfo(insertDistributedObjectsCommand, "]::text[], ");
|
||||
|
||||
appendStringInfo(insertDistributedObjectsCommand, "%d, ",
|
||||
distributionArgumentIndex);
|
||||
|
||||
appendStringInfo(insertDistributedObjectsCommand, "%d)",
|
||||
colocationId);
|
||||
}
|
||||
|
||||
appendStringInfo(insertDistributedObjectsCommand, ") ");
|
||||
|
||||
appendStringInfo(insertDistributedObjectsCommand,
|
||||
"SELECT citus_internal_add_object_metadata("
|
||||
"typetext, objnames, objargs, distargumentindex::int, colocationid::int) "
|
||||
"FROM distributed_object_data;");
|
||||
|
||||
return insertDistributedObjectsCommand->data;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* citus_internal_add_object_metadata is an internal UDF to
|
||||
* add a row to pg_dist_object.
|
||||
*/
|
||||
Datum
|
||||
citus_internal_add_object_metadata(PG_FUNCTION_ARGS)
|
||||
{
|
||||
char *textType = TextDatumGetCString(PG_GETARG_DATUM(0));
|
||||
ArrayType *nameArray = PG_GETARG_ARRAYTYPE_P(1);
|
||||
ArrayType *argsArray = PG_GETARG_ARRAYTYPE_P(2);
|
||||
int distributionArgumentIndex = PG_GETARG_INT32(3);
|
||||
int colocationId = PG_GETARG_INT32(4);
|
||||
|
||||
if (!ShouldSkipMetadataChecks())
|
||||
{
|
||||
/* this UDF is not allowed for executing as a separate command */
|
||||
EnsureCoordinatorInitiatedOperation();
|
||||
|
||||
/*
|
||||
* Ensure given distributionArgumentIndex and colocationId values are
|
||||
* sane. Since we check sanity of object related parameters within
|
||||
* PgGetObjectAddress below, we are not checking them here.
|
||||
*/
|
||||
EnsureObjectMetadataIsSane(distributionArgumentIndex, colocationId);
|
||||
}
|
||||
|
||||
/*
|
||||
* We check the acl/ownership while getting the object address. That
|
||||
* funtion also checks the sanity of given textType, nameArray and
|
||||
* argsArray parameters
|
||||
*/
|
||||
ObjectAddress objectAddress = PgGetObjectAddress(textType, nameArray,
|
||||
argsArray);
|
||||
|
||||
/* First, disable propagation off to not to cause infinite propagation */
|
||||
bool prevDependencyCreationValue = EnableDependencyCreation;
|
||||
SetLocalEnableDependencyCreation(false);
|
||||
|
||||
MarkObjectDistributed(&objectAddress);
|
||||
|
||||
if (distributionArgumentIndex != INVALID_DISTRIBUTION_ARGUMENT_INDEX ||
|
||||
colocationId != INVALID_COLOCATION_ID)
|
||||
{
|
||||
int *distributionArgumentIndexAddress =
|
||||
distributionArgumentIndex == INVALID_DISTRIBUTION_ARGUMENT_INDEX ?
|
||||
NULL :
|
||||
&distributionArgumentIndex;
|
||||
|
||||
int *colocationIdAddress =
|
||||
colocationId == INVALID_COLOCATION_ID ?
|
||||
NULL :
|
||||
&colocationId;
|
||||
|
||||
UpdateFunctionDistributionInfo(&objectAddress,
|
||||
distributionArgumentIndexAddress,
|
||||
colocationIdAddress);
|
||||
}
|
||||
|
||||
SetLocalEnableDependencyCreation(prevDependencyCreationValue);
|
||||
|
||||
PG_RETURN_VOID();
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* EnsureObjectMetadataIsSane checks whether the distribution argument index and
|
||||
* colocation id metadata params for distributed object is sane. You can look
|
||||
* PgGetObjectAddress to find checks related to object sanity.
|
||||
*/
|
||||
static void
|
||||
EnsureObjectMetadataIsSane(int distributionArgumentIndex, int colocationId)
|
||||
{
|
||||
if (distributionArgumentIndex != INVALID_DISTRIBUTION_ARGUMENT_INDEX)
|
||||
{
|
||||
if (distributionArgumentIndex < 0 ||
|
||||
distributionArgumentIndex > FUNC_MAX_ARGS)
|
||||
{
|
||||
ereport(ERROR, errmsg("distribution_argument_index must be between"
|
||||
" 0 and %d", FUNC_MAX_ARGS));
|
||||
}
|
||||
}
|
||||
|
||||
if (colocationId != INVALID_COLOCATION_ID)
|
||||
{
|
||||
if (colocationId < 0)
|
||||
{
|
||||
ereport(ERROR, errmsg("colocationId must be a positive number"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DistributionCreateCommands generates a commands that can be
|
||||
* executed to replicate the metadata for a distributed table.
|
||||
|
@ -1187,7 +1498,7 @@ LocalGroupIdUpdateCommand(int32 groupId)
|
|||
List *
|
||||
SequenceDDLCommandsForTable(Oid relationId)
|
||||
{
|
||||
List *sequenceDDLList = NIL;
|
||||
List *allSequencesDDLList = NIL;
|
||||
|
||||
List *attnumList = NIL;
|
||||
List *dependentSequenceList = NIL;
|
||||
|
@ -1198,29 +1509,44 @@ SequenceDDLCommandsForTable(Oid relationId)
|
|||
Oid sequenceOid = InvalidOid;
|
||||
foreach_oid(sequenceOid, dependentSequenceList)
|
||||
{
|
||||
char *sequenceDef = pg_get_sequencedef_string(sequenceOid);
|
||||
char *escapedSequenceDef = quote_literal_cstr(sequenceDef);
|
||||
StringInfo wrappedSequenceDef = makeStringInfo();
|
||||
StringInfo sequenceGrantStmt = makeStringInfo();
|
||||
char *sequenceName = generate_qualified_relation_name(sequenceOid);
|
||||
Form_pg_sequence sequenceData = pg_get_sequencedef(sequenceOid);
|
||||
Oid sequenceTypeOid = sequenceData->seqtypid;
|
||||
char *typeName = format_type_be(sequenceTypeOid);
|
||||
|
||||
/* create schema if needed */
|
||||
appendStringInfo(wrappedSequenceDef,
|
||||
WORKER_APPLY_SEQUENCE_COMMAND,
|
||||
escapedSequenceDef,
|
||||
quote_literal_cstr(typeName));
|
||||
|
||||
appendStringInfo(sequenceGrantStmt,
|
||||
"ALTER SEQUENCE %s OWNER TO %s", sequenceName,
|
||||
quote_identifier(ownerName));
|
||||
|
||||
sequenceDDLList = lappend(sequenceDDLList, wrappedSequenceDef->data);
|
||||
sequenceDDLList = lappend(sequenceDDLList, sequenceGrantStmt->data);
|
||||
List *sequenceDDLCommands = DDLCommandsForSequence(sequenceOid, ownerName);
|
||||
allSequencesDDLList = list_concat(allSequencesDDLList, sequenceDDLCommands);
|
||||
}
|
||||
|
||||
return allSequencesDDLList;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* DDLCommandsForSequence returns the DDL commands needs to be run to create the
|
||||
* sequence and alter the owner to the given owner name.
|
||||
*/
|
||||
List *
|
||||
DDLCommandsForSequence(Oid sequenceOid, char *ownerName)
|
||||
{
|
||||
List *sequenceDDLList = NIL;
|
||||
char *sequenceDef = pg_get_sequencedef_string(sequenceOid);
|
||||
char *escapedSequenceDef = quote_literal_cstr(sequenceDef);
|
||||
StringInfo wrappedSequenceDef = makeStringInfo();
|
||||
StringInfo sequenceGrantStmt = makeStringInfo();
|
||||
char *sequenceName = generate_qualified_relation_name(sequenceOid);
|
||||
Form_pg_sequence sequenceData = pg_get_sequencedef(sequenceOid);
|
||||
Oid sequenceTypeOid = sequenceData->seqtypid;
|
||||
char *typeName = format_type_be(sequenceTypeOid);
|
||||
|
||||
/* create schema if needed */
|
||||
appendStringInfo(wrappedSequenceDef,
|
||||
WORKER_APPLY_SEQUENCE_COMMAND,
|
||||
escapedSequenceDef,
|
||||
quote_literal_cstr(typeName));
|
||||
|
||||
appendStringInfo(sequenceGrantStmt,
|
||||
"ALTER SEQUENCE %s OWNER TO %s", sequenceName,
|
||||
quote_identifier(ownerName));
|
||||
|
||||
sequenceDDLList = lappend(sequenceDDLList, wrappedSequenceDef->data);
|
||||
sequenceDDLList = lappend(sequenceDDLList, sequenceGrantStmt->data);
|
||||
|
||||
return sequenceDDLList;
|
||||
}
|
||||
|
||||
|
@ -1639,6 +1965,18 @@ GenerateGrantOnSchemaStmtForRights(Oid roleOid,
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* SetLocalEnableDependencyCreation sets the enable_object_propagation locally
|
||||
*/
|
||||
static void
|
||||
SetLocalEnableDependencyCreation(bool state)
|
||||
{
|
||||
set_config_option("citus.enable_object_propagation", state == true ? "on" : "off",
|
||||
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
|
||||
GUC_ACTION_LOCAL, true, 0, false);
|
||||
}
|
||||
|
||||
|
||||
static char *
|
||||
GenerateSetRoleQuery(Oid roleOid)
|
||||
{
|
||||
|
@ -1794,12 +2132,15 @@ DetachPartitionCommandList(void)
|
|||
* SyncMetadataToNodes tries recreating the metadata snapshot in the
|
||||
* metadata workers that are out of sync. Returns the result of
|
||||
* synchronization.
|
||||
*
|
||||
* This function must be called within coordinated transaction
|
||||
* since updates on the pg_dist_node metadata must be rollbacked if anything
|
||||
* goes wrong.
|
||||
*/
|
||||
static MetadataSyncResult
|
||||
SyncMetadataToNodes(void)
|
||||
{
|
||||
MetadataSyncResult result = METADATA_SYNC_SUCCESS;
|
||||
|
||||
if (!IsCoordinator())
|
||||
{
|
||||
return METADATA_SYNC_SUCCESS;
|
||||
|
@ -1902,8 +2243,8 @@ SyncMetadataToNodesMain(Datum main_arg)
|
|||
else if (CheckCitusVersion(DEBUG1) && CitusHasBeenLoaded())
|
||||
{
|
||||
UseCoordinatedTransaction();
|
||||
MetadataSyncResult result = SyncMetadataToNodes();
|
||||
|
||||
MetadataSyncResult result = SyncMetadataToNodes();
|
||||
syncedAllNodes = (result == METADATA_SYNC_SUCCESS);
|
||||
|
||||
/* we use LISTEN/NOTIFY to wait for metadata syncing in tests */
|
||||
|
|
|
@ -0,0 +1,642 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* pg_get_object_address_12_13_14.c
|
||||
*
|
||||
* Copied functions from Postgres pg_get_object_address with acl/owner check.
|
||||
* Since we need to use intermediate data types Relation and Node from
|
||||
* the pg_get_object_address, we've copied that function from PG code and
|
||||
* added required owner/acl checks for our own purposes.
|
||||
*
|
||||
* We need to make sure that function works with future PG versions. Update
|
||||
* the function name according to supported PG versions as well.
|
||||
*
|
||||
* Copyright (c) Citus Data, Inc.
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#include "postgres.h"
|
||||
#include "miscadmin.h"
|
||||
|
||||
#include "catalog/objectaddress.h"
|
||||
#include "catalog/pg_type.h"
|
||||
#include "distributed/citus_ruleutils.h"
|
||||
#include "distributed/citus_safe_lib.h"
|
||||
#include "distributed/metadata/dependency.h"
|
||||
#include "distributed/metadata/distobject.h"
|
||||
#include "distributed/pg_version_constants.h"
|
||||
#include "distributed/version_compat.h"
|
||||
#include "nodes/value.h"
|
||||
#include "utils/array.h"
|
||||
#include "utils/builtins.h"
|
||||
#include "utils/fmgroids.h"
|
||||
#include "utils/varlena.h"
|
||||
#include "mb/pg_wchar.h"
|
||||
#include "parser/parse_type.h"
|
||||
|
||||
static void ErrorIfCurrentUserCanNotDistributeObject(ObjectType type,
|
||||
ObjectAddress *addr,
|
||||
Node *node,
|
||||
Relation *relation);
|
||||
static void ErrorIfUserNotAllowedToPropagateExtension(char *extensionName);
|
||||
static List * textarray_to_strvaluelist(ArrayType *arr);
|
||||
|
||||
/* It is defined on PG >= 13 versions by default */
|
||||
#if PG_VERSION_NUM < PG_VERSION_13
|
||||
#define TYPALIGN_INT 'i'
|
||||
#endif
|
||||
|
||||
/*
 * PgGetObjectAddress gets the object address. This function is mostly copied from
 * pg_get_object_address of the PG code. We need to copy that function to use
 * intermediate data types Relation and Node to check acl or ownership.
 *
 * ttype is the textual object type (as accepted by read_objtype_from_string),
 * namearr the qualified name parts and argsarr the arguments, both as text
 * arrays. Errors out if the input does not identify an existing object or the
 * current user may not distribute it.
 *
 * Codes added by Citus are tagged with CITUS CODE BEGIN/END.
 */
ObjectAddress
PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
{
	List *name = NIL;
	TypeName *typename = NULL;
	List *args = NIL;
	Node *objnode = NULL;

	/* filled in by get_object_address below; closed before returning when set */
	Relation relation;

	/* Decode object type, raise error if unknown */
	int itype = read_objtype_from_string(ttype);
	if (itype < 0)
	{
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("unsupported object type \"%s\"", ttype)));
	}
	ObjectType type = (ObjectType) itype;

	/*
	 * Convert the text array to the representation appropriate for the given
	 * object type. Most use a simple string Values list, but there are some
	 * exceptions.
	 */
	if (type == OBJECT_TYPE || type == OBJECT_DOMAIN || type == OBJECT_CAST ||
		type == OBJECT_TRANSFORM || type == OBJECT_DOMCONSTRAINT)
	{
		/* these types take a single type name, parsed into a TypeName node */
		Datum *elems;
		bool *nulls;
		int nelems;

		deconstruct_array(namearr, TEXTOID, -1, false, TYPALIGN_INT,
						  &elems, &nulls, &nelems);
		if (nelems != 1)
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("name list length must be exactly %d", 1)));
		}
		if (nulls[0])
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("name or argument lists may not contain nulls")));
		}
		typename = typeStringToTypeName(TextDatumGetCString(elems[0]));
	}
	else if (type == OBJECT_LARGEOBJECT)
	{
		/* large objects are identified by a single OID, carried as a Float node */
		Datum *elems;
		bool *nulls;
		int nelems;

		deconstruct_array(namearr, TEXTOID, -1, false, TYPALIGN_INT,
						  &elems, &nulls, &nelems);
		if (nelems != 1)
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("name list length must be exactly %d", 1)));
		}
		if (nulls[0])
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("large object OID may not be null")));
		}
		objnode = (Node *) makeFloat(TextDatumGetCString(elems[0]));
	}
	else
	{
		/* all other object types use a plain list of string Values */
		name = textarray_to_strvaluelist(namearr);
		if (list_length(name) < 1)
		{
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("name list length must be at least %d", 1)));
		}
	}

	/*
	 * If args are given, decode them according to the object type.
	 */
	if (type == OBJECT_AGGREGATE ||
		type == OBJECT_FUNCTION ||
		type == OBJECT_PROCEDURE ||
		type == OBJECT_ROUTINE ||
		type == OBJECT_OPERATOR ||
		type == OBJECT_CAST ||
		type == OBJECT_AMOP ||
		type == OBJECT_AMPROC)
	{
		/* in these cases, the args list must be of TypeName */
		Datum *elems;
		bool *nulls;
		int nelems;
		int i;

		deconstruct_array(argsarr, TEXTOID, -1, false,
						  TYPALIGN_INT,
						  &elems, &nulls, &nelems);

		args = NIL;
		for (i = 0; i < nelems; i++)
		{
			if (nulls[i])
			{
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("name or argument lists may not contain nulls")));
			}
			args = lappend(args,
						   typeStringToTypeName(TextDatumGetCString(elems[i])));
		}
	}
	else
	{
		/* For all other object types, use string Values */
		args = textarray_to_strvaluelist(argsarr);
	}

	/*
	 * get_object_address is pretty sensitive to the length of its input
	 * lists; check that they're what it wants.
	 */
	switch (type)
	{
		case OBJECT_DOMCONSTRAINT:
		case OBJECT_CAST:
		case OBJECT_USER_MAPPING:
		case OBJECT_PUBLICATION_REL:
		case OBJECT_DEFACL:
		case OBJECT_TRANSFORM:
		{
			if (list_length(args) != 1)
			{
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("argument list length must be exactly %d", 1)));
			}
			break;
		}

		case OBJECT_OPFAMILY:
		case OBJECT_OPCLASS:
		{
			if (list_length(name) < 2)
			{
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("name list length must be at least %d", 2)));
			}
			break;
		}

		case OBJECT_AMOP:
		case OBJECT_AMPROC:
		{
			if (list_length(name) < 3)
			{
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("name list length must be at least %d", 3)));
			}

			if (list_length(args) != 2)
			{
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("argument list length must be exactly %d", 2)));
			}
			break;
		}

		case OBJECT_OPERATOR:
		{
			if (list_length(args) != 2)
			{
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("argument list length must be exactly %d", 2)));
			}
			break;
		}

		default:
		{
			break;
		}
	}

	/*
	 * Now build the Node type that get_object_address() expects for the given
	 * type.
	 */
	switch (type)
	{
		case OBJECT_TABLE:
		case OBJECT_SEQUENCE:
		case OBJECT_VIEW:
		case OBJECT_MATVIEW:
		case OBJECT_INDEX:
		case OBJECT_FOREIGN_TABLE:
		case OBJECT_COLUMN:
		case OBJECT_ATTRIBUTE:
		case OBJECT_COLLATION:
		case OBJECT_CONVERSION:
		case OBJECT_STATISTIC_EXT:
		case OBJECT_TSPARSER:
		case OBJECT_TSDICTIONARY:
		case OBJECT_TSTEMPLATE:
		case OBJECT_TSCONFIGURATION:
		case OBJECT_DEFAULT:
		case OBJECT_POLICY:
		case OBJECT_RULE:
		case OBJECT_TRIGGER:
		case OBJECT_TABCONSTRAINT:
		case OBJECT_OPCLASS:
		case OBJECT_OPFAMILY:
		{
			/* these take the (qualified) name list directly */
			objnode = (Node *) name;
			break;
		}

		case OBJECT_ACCESS_METHOD:
		case OBJECT_DATABASE:
		case OBJECT_EVENT_TRIGGER:
		case OBJECT_EXTENSION:
		case OBJECT_FDW:
		case OBJECT_FOREIGN_SERVER:
		case OBJECT_LANGUAGE:
		case OBJECT_PUBLICATION:
		case OBJECT_ROLE:
		case OBJECT_SCHEMA:
		case OBJECT_SUBSCRIPTION:
		case OBJECT_TABLESPACE:
		{
			/* unqualified object types: exactly one name element */
			if (list_length(name) != 1)
			{
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("name list length must be exactly %d", 1)));
			}
			objnode = linitial(name);
			break;
		}

		case OBJECT_TYPE:
		case OBJECT_DOMAIN:
		{
			objnode = (Node *) typename;
			break;
		}

		case OBJECT_CAST:
		case OBJECT_DOMCONSTRAINT:
		case OBJECT_TRANSFORM:
		{
			objnode = (Node *) list_make2(typename, linitial(args));
			break;
		}

		case OBJECT_PUBLICATION_REL:
		{
			objnode = (Node *) list_make2(name, linitial(args));
			break;
		}

		case OBJECT_USER_MAPPING:
		{
			objnode = (Node *) list_make2(linitial(name), linitial(args));
			break;
		}

		case OBJECT_DEFACL:
		{
			objnode = (Node *) lcons(linitial(args), name);
			break;
		}

		case OBJECT_AMOP:
		case OBJECT_AMPROC:
		{
			objnode = (Node *) list_make2(name, args);
			break;
		}

		case OBJECT_FUNCTION:
		case OBJECT_PROCEDURE:
		case OBJECT_ROUTINE:
		case OBJECT_AGGREGATE:
		case OBJECT_OPERATOR:
		{
			ObjectWithArgs *owa = makeNode(ObjectWithArgs);

			owa->objname = name;
			owa->objargs = args;
			objnode = (Node *) owa;
			break;
		}

		case OBJECT_LARGEOBJECT:
		{
			/* already handled above */
			break;
		}

		/* no default, to let compiler warn about missing case */
	}

	if (objnode == NULL)
	{
		elog(ERROR, "unrecognized object type: %d", type);
	}

	/* errors when the object does not exist (missing_ok = false) */
	ObjectAddress addr = get_object_address(type, objnode,
											&relation, AccessShareLock, false);

	/* CITUS CODE BEGIN */
	ErrorIfCurrentUserCanNotDistributeObject(type, &addr, objnode, &relation);

	/* CITUS CODE END */

	/* We don't need the relcache entry, thank you very much */
	if (relation)
	{
		relation_close(relation, AccessShareLock);
	}

	/* CITUS CODE BEGIN */
	return addr;

	/* CITUS CODE END */
}
|
||||
|
||||
|
||||
/*
 * ErrorIfCurrentUserCanNotDistributeObject checks whether current user can
 * distribute object, if not errors out.
 *
 * Depending on the object type the check is either an ownership check
 * (check_object_ownership) or an ACL-mask check against the relevant catalog;
 * the latter is evaluated in the shared check at the bottom.
 */
static void
ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
										 Node *node, Relation *relation)
{
	Oid userId = GetUserId();
	AclMode aclMaskResult = 0;
	bool skipAclCheck = false;
	Oid idToCheck = InvalidOid;

	/* Since we don't handle sequences like object, add it separately */
	if (!(SupportedDependencyByCitus(addr) || type == OBJECT_SEQUENCE))
	{
		ereport(ERROR, (errmsg("Object type %d can not be distributed by Citus", type)));
	}

	switch (type)
	{
		case OBJECT_SCHEMA:
		{
			/* USAGE on the schema is sufficient to distribute it */
			idToCheck = addr->objectId;
			aclMaskResult = pg_namespace_aclmask(idToCheck, userId, ACL_USAGE,
												 ACLMASK_ANY);
			break;
		}

		case OBJECT_FUNCTION:
		case OBJECT_PROCEDURE:
		case OBJECT_AGGREGATE:
		{
			/* routines require ownership, not just a privilege bit */
			check_object_ownership(userId, type, *addr, node, *relation);
			skipAclCheck = true;
			break;
		}

		case OBJECT_DATABASE:
		{
			idToCheck = addr->objectId;
			aclMaskResult = pg_database_aclmask(idToCheck, userId, ACL_CONNECT,
												ACLMASK_ANY);
			break;
		}

		case OBJECT_ROLE:
		{
			/* Support only extension owner role with community */
			if (addr->objectId != CitusExtensionOwner())
			{
				ereport(ERROR, (errmsg("Current user does not have required "
									   "access privileges on role %d with type %d",
									   addr->objectId, type)));
			}
			skipAclCheck = true;
			break;
		}

		case OBJECT_TYPE:
		{
			idToCheck = addr->objectId;
			aclMaskResult = pg_type_aclmask(idToCheck, userId, ACL_USAGE,
											ACLMASK_ANY);
			break;
		}

		case OBJECT_SEQUENCE:
		{
			/* sequences are relations, so their ACL lives in pg_class */
			idToCheck = addr->objectId;
			aclMaskResult = pg_class_aclmask(idToCheck, userId, ACL_USAGE, ACLMASK_ANY);
			break;
		}

		case OBJECT_TABLE:
		{
			/* table distribution already does the ownership check, so we can stick to that over acl_check */
			check_object_ownership(userId, type, *addr, node, *relation);
			skipAclCheck = true;
			break;
		}

		case OBJECT_EXTENSION:
		{
			/* for extensions the object node is the extension name Value */
			Value *valueNode = (Value *) node;
			char *extensionName = strVal(valueNode);
			ErrorIfUserNotAllowedToPropagateExtension(extensionName);
			skipAclCheck = true;
			break;
		}

		case OBJECT_COLLATION:
		{
			/* no extra privilege check is performed for collations */
			skipAclCheck = true;
			break;
		}

		default:
		{
			ereport(ERROR, (errmsg("%d object type is not supported within "
								   "object propagation", type)));
			break;
		}
	}

	/* shared ACL-mask evaluation for the catalog-based branches above */
	if (!skipAclCheck && aclMaskResult == ACL_NO_RIGHTS)
	{
		ereport(ERROR, (errmsg("Current user does not have required privileges "
							   "on %d with type id %d to distribute it",
							   idToCheck, type)));
	}
}
|
||||
|
||||
|
||||
/*
 * ErrorIfUserNotAllowedToPropagateExtension errors out if the current user does
 * not have required privileges to propagate extension
 *
 * It scans the result set of pg_available_extension_versions for the given
 * extension name and applies the superuser/trusted checks of the matching row.
 * NOTE(review): if the extension name is not found in the result set, the
 * function returns without erroring — presumably the caller has already
 * verified the extension exists.
 */
static void
ErrorIfUserNotAllowedToPropagateExtension(char *extensionName)
{
	/* 1-based attribute positions in pg_available_extension_versions' tuple */
	const int nameAttributeIndex = 1;
	const int superuserAttributeIndex = 4;
#if PG_VERSION_NUM >= PG_VERSION_13
	const int trustedAttributeIndex = 5;
#endif

	LOCAL_FCINFO(fcinfo, 0);
	FmgrInfo flinfo;

	bool goForward = true;
	bool doCopy = false;

	/* set up a materialized result set to call the SRF directly */
	EState *estate = CreateExecutorState();
	ReturnSetInfo *extensionsResultSet = makeNode(ReturnSetInfo);
	extensionsResultSet->econtext = GetPerTupleExprContext(estate);
	extensionsResultSet->allowedModes = SFRM_Materialize;

	fmgr_info(F_PG_AVAILABLE_EXTENSION_VERSIONS, &flinfo);
	InitFunctionCallInfoData(*fcinfo, &flinfo, 0, InvalidOid, NULL,
							 (Node *) extensionsResultSet);

	/*
	 * pg_available_extensions_versions returns result set containing all
	 * available extension versions with whether the extension requires
	 * superuser and it is trusted information.
	 */
	(*pg_available_extension_versions)(fcinfo);

	TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat(
		extensionsResultSet->setDesc,
		&TTSOpsMinimalTuple);
	bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult,
											goForward,
											doCopy,
											tupleTableSlot);
	while (hasTuple)
	{
		bool isNull = false;
		Datum curExtensionNameDatum = slot_getattr(tupleTableSlot,
												   nameAttributeIndex,
												   &isNull);
		char *curExtensionName = NameStr(*DatumGetName(curExtensionNameDatum));
		if (strcmp(extensionName, curExtensionName) == 0)
		{
			Datum superuserExpectedDatum = slot_getattr(tupleTableSlot,
														superuserAttributeIndex,
														&isNull);
			bool superuserExpected = DatumGetBool(superuserExpectedDatum);

#if PG_VERSION_NUM < PG_VERSION_13
			/* before PG 13 there is no trusted flag: superuser is required */
			if (superuserExpected)
			{
				EnsureSuperUser();
			}
#else
			if (superuserExpected)
			{
				/*
				 * After PG 13, if the extension is trusted it can be created
				 * by the user having CREATE privilege on the database even if
				 * the extension requires superuser.
				 */
				Datum trustedExtensionDatum = slot_getattr(tupleTableSlot,
														   trustedAttributeIndex,
														   &isNull);
				bool trustedExtension = DatumGetBool(trustedExtensionDatum);

				if (trustedExtension)
				{
					/* Allow if user has CREATE privilege on current database */
					AclResult aclresult = pg_database_aclcheck(MyDatabaseId,
															   GetUserId(),
															   ACL_CREATE);
					if (aclresult != ACLCHECK_OK)
					{
						ereport(ERROR, (errmsg("operation is not allowed"),
										errhint("Must have CREATE privilege "
												"on database to propagate "
												"extension %s", curExtensionName)));
					}
				}
				else
				{
					EnsureSuperUser();
				}
			}
#endif

			/* found the extension; stop scanning */
			break;
		}

		ExecClearTuple(tupleTableSlot);
		hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward,
										   doCopy, tupleTableSlot);
	}

	ExecDropSingleTupleTableSlot(tupleTableSlot);
}
|
||||
|
||||
|
||||
/*
|
||||
* Copied from PG code.
|
||||
*
|
||||
* Convert an array of TEXT into a List of string Values, as emitted by the
|
||||
* parser, which is what get_object_address uses as input.
|
||||
*/
|
||||
static List *
|
||||
textarray_to_strvaluelist(ArrayType *arr)
|
||||
{
|
||||
Datum *elems;
|
||||
bool *nulls;
|
||||
int nelems;
|
||||
List *list = NIL;
|
||||
int i;
|
||||
|
||||
deconstruct_array(arr, TEXTOID, -1, false, TYPALIGN_INT,
|
||||
&elems, &nulls, &nelems);
|
||||
|
||||
for (i = 0; i < nelems; i++)
|
||||
{
|
||||
if (nulls[i])
|
||||
{
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
||||
errmsg("name or argument lists may not contain nulls")));
|
||||
}
|
||||
list = lappend(list, makeString(TextDatumGetCString(elems[i])));
|
||||
}
|
||||
|
||||
return list;
|
||||
}
|
|
@ -4,6 +4,7 @@
|
|||
#include "udfs/citus_disable_node/11.0-1.sql"
|
||||
|
||||
#include "udfs/citus_check_connection_to_node/11.0-1.sql"
|
||||
#include "udfs/citus_internal_add_object_metadata/11.0-1.sql"
|
||||
|
||||
DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text);
|
||||
DROP FUNCTION pg_catalog.master_get_table_metadata(text);
|
||||
|
|
|
@ -41,3 +41,4 @@ COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport intege
|
|||
IS 'removes node from the cluster temporarily';
|
||||
|
||||
DROP FUNCTION pg_catalog.citus_check_connection_to_node (text, integer);
|
||||
DROP FUNCTION pg_catalog.citus_internal_add_object_metadata(text, text[], text[], integer, integer);
|
||||
|
|
13
src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/11.0-1.sql
generated
Normal file
13
src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/11.0-1.sql
generated
Normal file
|
@ -0,0 +1,13 @@
|
|||
-- citus_internal_add_object_metadata inserts the given distributed object,
-- identified by its textual type, name parts and argument types, into
-- citus.pg_dist_object together with its distribution argument index and
-- colocation id. Internal UDF used during metadata syncing.
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata(
                            typeText text,
                            objNames text[],
                            objArgs text[],
                            distribution_argument_index int,
                            colocationid int)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_add_object_metadata(text,text[],text[],int,int) IS
    'Inserts distributed object into pg_dist_object';
|
|
@ -0,0 +1,13 @@
|
|||
-- citus_internal_add_object_metadata inserts the given distributed object,
-- identified by its textual type, name parts and argument types, into
-- citus.pg_dist_object together with its distribution argument index and
-- colocation id. Internal UDF used during metadata syncing.
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata(
                            typeText text,
                            objNames text[],
                            objArgs text[],
                            distribution_argument_index int,
                            colocationid int)
    RETURNS void
    LANGUAGE C
    STRICT
    AS 'MODULE_PATHNAME';

COMMENT ON FUNCTION pg_catalog.citus_internal_add_object_metadata(text,text[],text[],int,int) IS
    'Inserts distributed object into pg_dist_object';
|
|
@ -48,6 +48,7 @@ extern void SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(Oid
|
|||
* postprocess: executed after standard_ProcessUtility.
|
||||
* address: return an ObjectAddress for the subject of the statement.
|
||||
* 2nd parameter is missing_ok.
|
||||
 * markDistributed: true if the object will be distributed.
|
||||
*
|
||||
* preprocess/postprocess return a List of DDLJobs.
|
||||
*/
|
||||
|
@ -58,6 +59,7 @@ typedef struct DistributeObjectOps
|
|||
List * (*preprocess)(Node *, const char *, ProcessUtilityContext);
|
||||
List * (*postprocess)(Node *, const char *);
|
||||
ObjectAddress (*address)(Node *, bool);
|
||||
bool markDistributed;
|
||||
} DistributeObjectOps;
|
||||
|
||||
#define CITUS_TRUNCATE_TRIGGER_NAME "citus_truncate_trigger"
|
||||
|
@ -474,6 +476,9 @@ extern List * CreateFunctionDDLCommandsIdempotent(const ObjectAddress *functionA
|
|||
extern char * GetFunctionDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace);
|
||||
extern char * GenerateBackupNameForProcCollision(const ObjectAddress *address);
|
||||
extern ObjectWithArgs * ObjectWithArgsFromOid(Oid funcOid);
|
||||
extern void UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
|
||||
int *distribution_argument_index,
|
||||
int *colocationId);
|
||||
|
||||
/* vacuum.c - forward declarations */
|
||||
extern void PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand);
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
#include "catalog/objectaddress.h"
|
||||
|
||||
#define INVALID_DISTRIBUTION_ARGUMENT_INDEX -1
|
||||
|
||||
extern bool ObjectExists(const ObjectAddress *address);
|
||||
extern bool CitusExtensionObject(const ObjectAddress *objectAddress);
|
||||
|
@ -25,6 +26,8 @@ extern void UnmarkObjectDistributed(const ObjectAddress *address);
|
|||
extern bool IsTableOwnedByExtension(Oid relationId);
|
||||
extern bool IsObjectAddressOwnedByExtension(const ObjectAddress *target,
|
||||
ObjectAddress *extensionAddress);
|
||||
extern ObjectAddress PgGetObjectAddress(char *ttype, ArrayType *namearr,
|
||||
ArrayType *argsarr);
|
||||
|
||||
extern List * GetDistributedObjectAddressList(void);
|
||||
extern void UpdateDistributedObjectColocationId(uint32 oldColocationId, uint32
|
||||
|
|
|
@ -34,6 +34,9 @@ extern bool ShouldSyncTableMetadata(Oid relationId);
|
|||
extern bool ShouldSyncTableMetadataViaCatalog(Oid relationId);
|
||||
extern List * MetadataCreateCommands(void);
|
||||
extern List * MetadataDropCommands(void);
|
||||
extern char * MarkObjectsDistributedCreateCommand(List *addresses,
|
||||
List *distributionArgumentIndexes,
|
||||
List *colocationIds);
|
||||
extern char * DistributionCreateCommand(CitusTableCacheEntry *cacheEntry);
|
||||
extern char * DistributionDeleteCommand(const char *schemaName,
|
||||
const char *tableName);
|
||||
|
@ -54,6 +57,7 @@ extern void SyncMetadataToNodesMain(Datum main_arg);
|
|||
extern void SignalMetadataSyncDaemon(Oid database, int sig);
|
||||
extern bool ShouldInitiateMetadataSync(bool *lockFailure);
|
||||
|
||||
extern List * DDLCommandsForSequence(Oid sequenceOid, char *ownerName);
|
||||
extern List * SequenceDDLCommandsForTable(Oid relationId);
|
||||
extern List * GetSequencesFromAttrDef(Oid attrdefOid);
|
||||
extern void GetDependentSequencesWithRelation(Oid relationId, List **attnumList,
|
||||
|
@ -62,10 +66,13 @@ extern void GetDependentSequencesWithRelation(Oid relationId, List **attnumList,
|
|||
extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum);
|
||||
|
||||
#define DELETE_ALL_NODES "TRUNCATE pg_dist_node CASCADE"
|
||||
#define DELETE_ALL_DISTRIBUTED_OBJECTS "TRUNCATE citus.pg_dist_object"
|
||||
#define REMOVE_ALL_CLUSTERED_TABLES_COMMAND \
|
||||
"SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition"
|
||||
#define DISABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'off'"
|
||||
#define ENABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'on'"
|
||||
#define DISABLE_OBJECT_PROPAGATION "SET citus.enable_object_propagation TO 'off'"
|
||||
#define ENABLE_OBJECT_PROPAGATION "SET citus.enable_object_propagation TO 'on'"
|
||||
#define WORKER_APPLY_SEQUENCE_COMMAND "SELECT worker_apply_sequence_command (%s,%s)"
|
||||
#define UPSERT_PLACEMENT \
|
||||
"INSERT INTO pg_dist_placement " \
|
||||
|
|
|
@ -287,8 +287,10 @@ extern bool GetNodeDiskSpaceStatsForConnection(MultiConnection *connection,
|
|||
extern void ExecuteQueryViaSPI(char *query, int SPIOK);
|
||||
extern void EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId);
|
||||
extern void AlterSequenceType(Oid seqOid, Oid typeOid);
|
||||
extern void MarkSequenceListDistributedAndPropagateDependencies(List *sequenceList);
|
||||
extern void MarkSequenceDistributedAndPropagateDependencies(Oid sequenceOid);
|
||||
extern void MarkSequenceListDistributedAndPropagateWithDependencies(Oid relationId,
|
||||
List *sequenceList);
|
||||
extern void MarkSequenceDistributedAndPropagateWithDependencies(Oid relationId, Oid
|
||||
sequenceOid);
|
||||
extern void EnsureDistributedSequencesHaveOneType(Oid relationId,
|
||||
List *dependentSequenceList,
|
||||
List *attnumList);
|
||||
|
|
|
@ -613,14 +613,14 @@ select run_command_on_workers($$create user notsuper$$);
|
|||
|
||||
grant all on schema aggregate_support to notsuper;
|
||||
grant all on all tables in schema aggregate_support to notsuper;
|
||||
select run_command_on_workers($$
|
||||
select 1 from run_command_on_workers($$
|
||||
grant all on schema aggregate_support to notsuper;
|
||||
grant all on all tables in schema aggregate_support to notsuper;
|
||||
$$);
|
||||
run_command_on_workers
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
1
|
||||
1
|
||||
(2 rows)
|
||||
|
||||
set role notsuper;
|
||||
|
|
|
@ -25,6 +25,13 @@ SELECT create_distributed_table('notices', 'id');
|
|||
(1 row)
|
||||
|
||||
INSERT INTO notices VALUES (1, 'hello world');
|
||||
-- Create the necessary test utility function
|
||||
CREATE OR REPLACE FUNCTION master_metadata_snapshot()
|
||||
RETURNS text[]
|
||||
LANGUAGE C STRICT
|
||||
AS 'citus';
|
||||
COMMENT ON FUNCTION master_metadata_snapshot()
|
||||
IS 'commands to create the metadata snapshot';
|
||||
CREATE FUNCTION notice(text)
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql AS $$
|
||||
|
@ -185,14 +192,6 @@ SELECT create_distributed_table('streaming_table','id');
|
|||
|
||||
(1 row)
|
||||
|
||||
-- make sure that none of the active and primary nodes hasmetadata
|
||||
-- at the start of the test
|
||||
select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
bool_or
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
-- if not paremeters are supplied, we'd see that function doesn't have
|
||||
-- distribution_argument_index and colocationid
|
||||
SELECT create_distributed_function('"eq_mi''xed_param_names"(macaddr, macaddr)');
|
||||
|
@ -216,14 +215,6 @@ SELECT * FROM run_command_on_workers($$SELECT function_tests."eq_mi'xed_param_na
|
|||
localhost | 57638 | t | f
|
||||
(2 rows)
|
||||
|
||||
-- make sure that none of the active and primary nodes hasmetadata
|
||||
-- since the function doesn't have a parameter
|
||||
select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
bool_or
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
-- try to co-locate with a table that uses statement-based replication
|
||||
SELECT create_distributed_function('increment(int2)', '$1');
|
||||
ERROR: cannot distribute the function "increment" since there is no table to colocate with
|
||||
|
@ -252,6 +243,12 @@ SELECT * FROM run_command_on_workers($$SELECT function_tests.dup('0123456789ab')
|
|||
localhost | 57638 | t | (01:23:45:67:89:ab,"01:23:45:67:89:ab is text")
|
||||
(2 rows)
|
||||
|
||||
SELECT public.wait_until_metadata_sync(30000);
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('eq(macaddr,macaddr)', '$1', colocate_with := 'streaming_table');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
@ -582,13 +579,6 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='eq_w
|
|||
(localhost,57638,t,0)
|
||||
(2 rows)
|
||||
|
||||
-- make sure that none of the active and primary nodes hasmetadata
|
||||
select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
bool_or
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- valid distribution with distribution_arg_name
|
||||
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', distribution_arg_name:='val1');
|
||||
create_distributed_function
|
||||
|
@ -618,6 +608,13 @@ SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', dist
|
|||
|
||||
(1 row)
|
||||
|
||||
-- show that we are able to propagate objects with multiple item on address arrays
|
||||
SELECT * FROM (SELECT unnest(master_metadata_snapshot()) as metadata_command order by 1) as innerResult WHERE metadata_command like '%distributed_object_data%';
|
||||
metadata_command
|
||||
---------------------------------------------------------------------
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('type', ARRAY['public.usage_access_type']::text[], ARRAY[]::text[], -1, 0), ('type', ARRAY['function_tests.dup_result']::text[], ARRAY[]::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func']::text[], ARRAY['public.usage_access_type', 'integer[]']::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func_third']::text[], ARRAY['integer', 'integer[]']::text[], 0, 50), ('function', ARRAY['function_tests', 'notice']::text[], ARRAY['pg_catalog.text']::text[], -1, 0), ('function', ARRAY['function_tests', 'dup']::text[], ARRAY['pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_with_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_mi''xed_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_sfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_invfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_finalfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'my_rank']::text[], ARRAY['pg_catalog."any"']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_sfunc']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_finalfunc']::text[], ARRAY['function_tests.dup_result']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'agg_names']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', 
ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['schema_colocation']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests2']::text[], ARRAY[]::text[], -1, 0), ('extension', ARRAY['plpgsql']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
|
||||
(1 row)
|
||||
|
||||
-- valid distribution with distribution_arg_index
|
||||
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)','$1');
|
||||
create_distributed_function
|
||||
|
|
|
@ -1702,7 +1702,9 @@ COMMIT;
|
|||
TRUNCATE collections_list;
|
||||
-- make sure that even if local execution is used, the sequence values
|
||||
-- are generated locally
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
ALTER SEQUENCE collections_list_key_seq NO MINVALUE NO MAXVALUE;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
PREPARE serial_prepared_local AS INSERT INTO collections_list (collection_id) VALUES (0) RETURNING key, ser;
|
||||
SELECT setval('collections_list_key_seq', 4);
|
||||
setval
|
||||
|
@ -2281,6 +2283,7 @@ CALL register_for_event(19, 1, 'yes');
|
|||
SET citus.log_local_commands TO ON;
|
||||
SET client_min_messages TO DEBUG2;
|
||||
CALL register_for_event(19, 1, 'yes');
|
||||
DEBUG: not pushing down procedure to the same node
|
||||
NOTICE: executing the command locally: INSERT INTO public.event_responses_1480001 AS citus_table_alias (event_id, user_id, response) VALUES (19, 1, 'yes'::public.invite_resp) ON CONFLICT(event_id, user_id) DO UPDATE SET response = excluded.response
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM public.event_responses_1480001 event_responses WHERE (event_id OPERATOR(pg_catalog.=) 19)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT NULL::integer AS event_id, NULL::integer AS user_id, NULL::public.invite_resp AS response WHERE false) event_responses(event_id, user_id, response) WHERE ((event_id OPERATOR(pg_catalog.=) 19) AND false)
|
||||
|
@ -2338,6 +2341,7 @@ NOTICE: executing the command locally: INSERT INTO public.event_responses_14800
|
|||
SET citus.enable_local_execution TO false;
|
||||
SET citus.enable_fast_path_router_planner TO false;
|
||||
CALL register_for_event(19, 1, 'yes');
|
||||
DEBUG: not pushing down procedure to the same node
|
||||
-- should be fine even if no parameters exists in the query
|
||||
SELECT count(*) FROM event_responses WHERE event_id = 16;
|
||||
DEBUG: Creating router plan
|
||||
|
|
|
@ -1706,7 +1706,9 @@ COMMIT;
|
|||
TRUNCATE collections_list;
|
||||
-- make sure that even if local execution is used, the sequence values
|
||||
-- are generated locally
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
ALTER SEQUENCE collections_list_key_seq NO MINVALUE NO MAXVALUE;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
PREPARE serial_prepared_local AS INSERT INTO collections_list (collection_id) VALUES (0) RETURNING key, ser;
|
||||
SELECT setval('collections_list_key_seq', 4);
|
||||
setval
|
||||
|
@ -2278,6 +2280,7 @@ CALL register_for_event(19, 1, 'yes');
|
|||
SET citus.log_local_commands TO ON;
|
||||
SET client_min_messages TO DEBUG2;
|
||||
CALL register_for_event(19, 1, 'yes');
|
||||
DEBUG: stored procedure does not have co-located tables
|
||||
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.event_responses_1501001 AS citus_table_alias (event_id, user_id, response) VALUES (19, 1, 'yes'::local_shard_execution_replicated.invite_resp) ON CONFLICT(event_id, user_id) DO UPDATE SET response = excluded.response
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution_replicated.event_responses_1501001 event_responses WHERE (event_id OPERATOR(pg_catalog.=) 19)
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT NULL::integer AS event_id, NULL::integer AS user_id, NULL::local_shard_execution_replicated.invite_resp AS response WHERE false) event_responses(event_id, user_id, response) WHERE ((event_id OPERATOR(pg_catalog.=) 19) AND false)
|
||||
|
@ -2335,6 +2338,7 @@ NOTICE: executing the command locally: INSERT INTO local_shard_execution_replic
|
|||
SET citus.enable_local_execution TO false;
|
||||
SET citus.enable_fast_path_router_planner TO false;
|
||||
CALL register_for_event(19, 1, 'yes');
|
||||
DEBUG: stored procedure does not have co-located tables
|
||||
-- should be fine even if no parameters exists in the query
|
||||
SELECT count(*) FROM event_responses WHERE event_id = 16;
|
||||
DEBUG: Creating router plan
|
||||
|
|
|
@ -544,8 +544,126 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
|||
('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text))
|
||||
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
ERROR: Shard intervals overlap for table "test_2": 1420001 and 1420000
|
||||
ROLLBACK;
|
||||
-- Now let's check valid pg_dist_object updates
|
||||
-- check with non-existing object type
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ERROR: unrecognized object type "non_existing_type"
|
||||
ROLLBACK;
|
||||
-- check the sanity of distributionArgumentIndex and colocationId
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ERROR: distribution_argument_index must be between 0 and 100
|
||||
ROLLBACK;
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ERROR: colocationId must be a positive number
|
||||
ROLLBACK;
|
||||
-- check with non-existing object
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ERROR: role "non_existing_user" does not exist
|
||||
ROLLBACK;
|
||||
-- since citus_internal_add_object_metadata is strict function returns NULL
|
||||
-- if any parameter is NULL
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
citus_internal_add_object_metadata
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
ROLLBACK;
|
||||
\c - postgres - :worker_1_port
|
||||
-- Show that citus_internal_add_object_metadata only works for object types
|
||||
-- which is known how to distribute
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
CREATE TABLE publication_test_table(id int);
|
||||
CREATE PUBLICATION publication_test FOR TABLE publication_test_table;
|
||||
SET ROLE metadata_sync_helper_role;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('publication', ARRAY['publication_test']::text[], ARRAY[]::text[], -1, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ERROR: Object type 29 can not be distributed by Citus
|
||||
ROLLBACK;
|
||||
-- Show that citus_internal_add_object_metadata checks the priviliges
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
assign_distributed_transaction_id
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
CREATE FUNCTION distribution_test_function(int) RETURNS int
|
||||
AS $$ SELECT $1 $$
|
||||
LANGUAGE SQL;
|
||||
SET ROLE metadata_sync_helper_role;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ERROR: must be owner of function distribution_test_function
|
||||
ROLLBACK;
|
||||
-- we do not allow wrong partmethod
|
||||
-- so manually insert wrong partmethod for the sake of the test
|
||||
SET search_path TO metadata_sync_helpers;
|
||||
|
|
|
@ -228,6 +228,20 @@ SELECT run_command_on_workers('GRANT ALL ON pg_dist_local_group TO node_metadata
|
|||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA citus TO node_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('GRANT ALL ON SCHEMA citus TO node_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
@ -236,6 +250,7 @@ SELECT master_remove_node('localhost', :worker_2_port);
|
|||
|
||||
-- Removing public schema from pg_dist_object because it breaks the next tests
|
||||
DELETE FROM citus.pg_dist_object WHERE objid = 'public'::regnamespace::oid;
|
||||
DELETE FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'plpgsql');
|
||||
-- try to manipulate node metadata via non-super user
|
||||
SET ROLE non_super_user;
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
|
||||
|
|
|
@ -61,12 +61,17 @@ CREATE FUNCTION add(integer, integer) RETURNS integer
|
|||
LANGUAGE SQL
|
||||
IMMUTABLE
|
||||
RETURNS NULL ON NULL INPUT;
|
||||
-- Since deparse logic on workers can not work for if function
|
||||
-- is distributed on workers, we are disabling object propagation
|
||||
-- first. Same trick has been applied multiple times in this test.
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('add(int,int)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_object_propagation;
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION add CALLED ON NULL INPUT
|
||||
$cmd$);
|
||||
|
@ -535,6 +540,7 @@ CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() RETURNS TEXT
|
|||
CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text) RETURNS TEXT
|
||||
AS $$ SELECT 'Overloaded function called with param: ' || $1 $$
|
||||
LANGUAGE SQL;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"()');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
@ -547,6 +553,7 @@ SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text)');
|
|||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_object_propagation;
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2"
|
||||
$cmd$);
|
||||
|
@ -574,12 +581,14 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE
|
|||
CREATE FUNCTION func_default_param(param INT DEFAULT 0) RETURNS TEXT
|
||||
AS $$ SELECT 'supplied param is : ' || param; $$
|
||||
LANGUAGE SQL;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('func_default_param(INT)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_object_propagation;
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION func_default_param RENAME TO func_with_default_param;
|
||||
$cmd$);
|
||||
|
@ -595,12 +604,14 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE
|
|||
CREATE FUNCTION func_out_param(IN param INT, OUT result TEXT)
|
||||
AS $$ SELECT 'supplied param is : ' || param; $$
|
||||
LANGUAGE SQL;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('func_out_param(INT)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_object_propagation;
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION func_out_param RENAME TO func_in_and_out_param;
|
||||
$cmd$);
|
||||
|
@ -619,12 +630,14 @@ BEGIN
|
|||
a := a * a;
|
||||
END; $$
|
||||
LANGUAGE plpgsql;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('square(NUMERIC)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_object_propagation;
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION square SET search_path TO DEFAULT;
|
||||
$cmd$);
|
||||
|
@ -650,12 +663,14 @@ BEGIN
|
|||
FROM generate_subscripts(list, 1) g(i);
|
||||
END; $$
|
||||
LANGUAGE plpgsql;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('sum_avg(NUMERIC[])');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_object_propagation;
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION sum_avg COST 10000;
|
||||
$cmd$);
|
||||
|
@ -668,16 +683,20 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE
|
|||
(2 rows)
|
||||
|
||||
-- a function with a custom type IN parameter
|
||||
SET citus.enable_ddl_propagation TO on;
|
||||
CREATE TYPE intpair AS (x int, y int);
|
||||
RESET citus.enable_ddl_propagation;
|
||||
CREATE FUNCTION func_custom_param(IN param intpair, OUT total INT)
|
||||
AS $$ SELECT param.x + param.y $$
|
||||
LANGUAGE SQL;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('func_custom_param(intpair)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_object_propagation;
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param;
|
||||
$cmd$);
|
||||
|
@ -694,12 +713,14 @@ CREATE FUNCTION func_returns_table(IN count INT)
|
|||
RETURNS TABLE (x INT, y INT)
|
||||
AS $$ SELECT i,i FROM generate_series(1,count) i $$
|
||||
LANGUAGE SQL;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('func_returns_table(INT)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_object_propagation;
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION func_returns_table ROWS 100;
|
||||
$cmd$);
|
||||
|
|
|
@ -49,12 +49,14 @@ BEGIN
|
|||
RAISE INFO 'information message %', $1;
|
||||
END;
|
||||
$proc$;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('raise_info(text)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET citus.enable_object_propagation;
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER PROCEDURE raise_info CALLED ON NULL INPUT
|
||||
$cmd$);
|
||||
|
|
|
@ -974,7 +974,8 @@ SELECT * FROM multi_extension.print_extension_changes();
|
|||
function master_get_table_metadata(text) record |
|
||||
| function citus_check_connection_to_node(text,integer) boolean
|
||||
| function citus_disable_node(text,integer,boolean) void
|
||||
(6 rows)
|
||||
| function citus_internal_add_object_metadata(text,text[],text[],integer,integer) void
|
||||
(7 rows)
|
||||
|
||||
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
|
||||
-- show running version
|
||||
|
|
|
@ -26,12 +26,14 @@ SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s';
|
|||
-- Show that, with no MX tables, metadata snapshot contains only the delete commands,
|
||||
-- pg_dist_node entries and reference tables
|
||||
SELECT unnest(master_metadata_snapshot()) order by 1;
|
||||
unnest
|
||||
unnest
|
||||
---------------------------------------------------------------------
|
||||
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
|
||||
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
TRUNCATE citus.pg_dist_object
|
||||
TRUNCATE pg_dist_node CASCADE
|
||||
(3 rows)
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
|
||||
(5 rows)
|
||||
|
||||
-- this function is dropped in Citus10, added here for tests
|
||||
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
|
||||
|
@ -85,10 +87,12 @@ SELECT unnest(master_metadata_snapshot()) order by 1;
|
|||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_create_truncate_trigger('public.mx_test_table')
|
||||
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
TRUNCATE citus.pg_dist_object
|
||||
TRUNCATE pg_dist_node CASCADE
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(16 rows)
|
||||
(18 rows)
|
||||
|
||||
-- Show that CREATE INDEX commands are included in the metadata snapshot
|
||||
CREATE INDEX mx_index ON mx_test_table(col_2);
|
||||
|
@ -109,10 +113,12 @@ SELECT unnest(master_metadata_snapshot()) order by 1;
|
|||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_create_truncate_trigger('public.mx_test_table')
|
||||
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
TRUNCATE citus.pg_dist_object
|
||||
TRUNCATE pg_dist_node CASCADE
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(17 rows)
|
||||
(19 rows)
|
||||
|
||||
-- Show that schema changes are included in the metadata snapshot
|
||||
CREATE SCHEMA mx_testing_schema;
|
||||
|
@ -134,10 +140,12 @@ SELECT unnest(master_metadata_snapshot()) order by 1;
|
|||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
|
||||
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
TRUNCATE citus.pg_dist_object
|
||||
TRUNCATE pg_dist_node CASCADE
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(17 rows)
|
||||
(19 rows)
|
||||
|
||||
-- Show that append distributed tables are not included in the metadata snapshot
|
||||
CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
|
||||
|
@ -165,10 +173,12 @@ SELECT unnest(master_metadata_snapshot()) order by 1;
|
|||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
|
||||
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
TRUNCATE citus.pg_dist_object
|
||||
TRUNCATE pg_dist_node CASCADE
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(17 rows)
|
||||
(19 rows)
|
||||
|
||||
-- Show that range distributed tables are not included in the metadata snapshot
|
||||
UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
|
||||
|
@ -189,10 +199,12 @@ SELECT unnest(master_metadata_snapshot()) order by 1;
|
|||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
|
||||
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
|
||||
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
TRUNCATE citus.pg_dist_object
|
||||
TRUNCATE pg_dist_node CASCADE
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(17 rows)
|
||||
(19 rows)
|
||||
|
||||
-- Test start_metadata_sync_to_node UDF
|
||||
-- Ensure that hasmetadata=false for all nodes
|
||||
|
@ -1195,8 +1207,10 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence;
|
|||
-- owner
|
||||
CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement;
|
||||
CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition;
|
||||
CREATE TABLE pg_dist_object_temp AS SELECT * FROM citus.pg_dist_object;
|
||||
DELETE FROM pg_dist_placement;
|
||||
DELETE FROM pg_dist_partition;
|
||||
DELETE FROM citus.pg_dist_object;
|
||||
SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
master_remove_node
|
||||
|
@ -1279,8 +1293,10 @@ DROP TABLE mx_table;
|
|||
\c - postgres - :master_port
|
||||
INSERT INTO pg_dist_placement SELECT * FROM pg_dist_placement_temp;
|
||||
INSERT INTO pg_dist_partition SELECT * FROM pg_dist_partition_temp;
|
||||
INSERT INTO citus.pg_dist_object SELECT * FROM pg_dist_object_temp ON CONFLICT ON CONSTRAINT pg_dist_object_pkey DO NOTHING;
|
||||
DROP TABLE pg_dist_placement_temp;
|
||||
DROP TABLE pg_dist_partition_temp;
|
||||
DROP TABLE pg_dist_object_temp;
|
||||
UPDATE pg_dist_placement
|
||||
SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
|
||||
WHERE groupid = :old_worker_2_group;
|
||||
|
@ -1660,7 +1676,7 @@ ALTER TABLE test_table ADD COLUMN id2 int DEFAULT nextval('mx_test_sequence_1');
|
|||
ALTER TABLE test_table ALTER COLUMN id2 DROP DEFAULT;
|
||||
ALTER TABLE test_table ALTER COLUMN id2 SET DEFAULT nextval('mx_test_sequence_1');
|
||||
SELECT unnest(master_metadata_snapshot()) order by 1;
|
||||
unnest
|
||||
unnest
|
||||
---------------------------------------------------------------------
|
||||
ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
|
||||
ALTER SEQUENCE public.mx_test_sequence_0 OWNER TO postgres
|
||||
|
@ -1711,7 +1727,9 @@ SELECT unnest(master_metadata_snapshot()) order by 1;
|
|||
SELECT worker_create_truncate_trigger('public.mx_ref')
|
||||
SELECT worker_create_truncate_trigger('public.test_table')
|
||||
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
|
||||
TRUNCATE citus.pg_dist_object
|
||||
TRUNCATE pg_dist_node CASCADE
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
|
||||
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 5, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 5, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 5, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310020, 1, 0, 1, 100020), (1310021, 1, 0, 5, 100021), (1310022, 1, 0, 1, 100022), (1310023, 1, 0, 5, 100023), (1310024, 1, 0, 1, 100024)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
|
||||
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310025, 1, 0, 1, 100025), (1310026, 1, 0, 5, 100026), (1310027, 1, 0, 1, 100027), (1310028, 1, 0, 5, 100028), (1310029, 1, 0, 1, 100029)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
|
||||
|
@ -1724,7 +1742,7 @@ SELECT unnest(master_metadata_snapshot()) order by 1;
|
|||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
(62 rows)
|
||||
(64 rows)
|
||||
|
||||
-- shouldn't work since test_table is MX
|
||||
ALTER TABLE test_table ADD COLUMN id3 bigserial;
|
||||
|
@ -1806,6 +1824,12 @@ GRANT EXECUTE ON FUNCTION start_metadata_sync_to_node(text,int) TO non_super_met
|
|||
GRANT EXECUTE ON FUNCTION stop_metadata_sync_to_node(text,int,bool) TO non_super_metadata_user;
|
||||
GRANT ALL ON pg_dist_node TO non_super_metadata_user;
|
||||
GRANT ALL ON pg_dist_local_group TO non_super_metadata_user;
|
||||
GRANT ALL ON SCHEMA citus TO non_super_metadata_user;
|
||||
GRANT INSERT ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user;
|
||||
GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user;
|
||||
GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user;
|
||||
GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user;
|
||||
GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user;
|
||||
SELECT run_command_on_workers('GRANT ALL ON pg_dist_node TO non_super_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
|
@ -1820,6 +1844,55 @@ SELECT run_command_on_workers('GRANT ALL ON pg_dist_local_group TO non_super_met
|
|||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('GRANT ALL ON SCHEMA citus TO non_super_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('ALTER SEQUENCE user_defined_seq OWNER TO non_super_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"ALTER SEQUENCE")
|
||||
(localhost,57638,t,"ALTER SEQUENCE")
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
(2 rows)
|
||||
|
||||
SET ROLE non_super_metadata_user;
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
NOTICE: dropping metadata on the node (localhost,57637)
|
||||
|
|
|
@ -411,6 +411,15 @@ SELECT id, val FROM mx_call_dist_table_1 ORDER BY id, val;
|
|||
21 | 3
|
||||
(9 rows)
|
||||
|
||||
-- Show that function delegation works from worker nodes as well
|
||||
\c - - - :worker_1_port
|
||||
SET search_path to multi_mx_call, public;
|
||||
SET client_min_messages TO DEBUG1;
|
||||
CALL multi_mx_call.mx_call_proc_tx(9);
|
||||
DEBUG: pushing down the procedure
|
||||
\c - - - :master_port
|
||||
SET search_path to multi_mx_call, public;
|
||||
SET client_min_messages TO DEBUG1;
|
||||
-- Test that we properly propagate errors raised from procedures.
|
||||
CREATE PROCEDURE mx_call_proc_raise(x int) LANGUAGE plpgsql AS $$
|
||||
BEGIN
|
||||
|
|
|
@ -411,6 +411,15 @@ SELECT id, val FROM mx_call_dist_table_1 ORDER BY id, val;
|
|||
21 | 3
|
||||
(9 rows)
|
||||
|
||||
-- Show that function delegation works from worker nodes as well
|
||||
\c - - - :worker_1_port
|
||||
SET search_path to multi_mx_call, public;
|
||||
SET client_min_messages TO DEBUG1;
|
||||
CALL multi_mx_call.mx_call_proc_tx(9);
|
||||
DEBUG: pushing down the procedure
|
||||
\c - - - :master_port
|
||||
SET search_path to multi_mx_call, public;
|
||||
SET client_min_messages TO DEBUG1;
|
||||
-- Test that we properly propagate errors raised from procedures.
|
||||
CREATE PROCEDURE mx_call_proc_raise(x int) LANGUAGE plpgsql AS $$
|
||||
BEGIN
|
||||
|
|
|
@ -1019,3 +1019,7 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR
|
|||
supplier_mx_1220087 | supplier_mx | reference | 0
|
||||
(469 rows)
|
||||
|
||||
-- Show that altering type name is not supported from worker node
|
||||
ALTER TYPE order_side_mx RENAME TO temp_order_side_mx;
|
||||
ERROR: operation is not allowed on this node
|
||||
HINT: Connect to the coordinator and run it again.
|
||||
|
|
|
@ -722,6 +722,15 @@ SET search_path TO multi_mx_function_call_delegation, public;
|
|||
select create_distributed_function('mx_call_func(int,int)');
|
||||
ERROR: operation is not allowed on this node
|
||||
HINT: Connect to the coordinator and run it again.
|
||||
-- show that functions can be delegated from worker nodes
|
||||
SET client_min_messages TO DEBUG1;
|
||||
SELECT mx_call_func(2, 0);
|
||||
DEBUG: pushing down the function call
|
||||
mx_call_func
|
||||
---------------------------------------------------------------------
|
||||
28
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO multi_mx_function_call_delegation, public;
|
||||
RESET client_min_messages;
|
||||
|
|
|
@ -722,6 +722,15 @@ SET search_path TO multi_mx_function_call_delegation, public;
|
|||
select create_distributed_function('mx_call_func(int,int)');
|
||||
ERROR: operation is not allowed on this node
|
||||
HINT: Connect to the coordinator and run it again.
|
||||
-- show that functions can be delegated from worker nodes
|
||||
SET client_min_messages TO DEBUG1;
|
||||
SELECT mx_call_func(2, 0);
|
||||
DEBUG: pushing down the function call
|
||||
mx_call_func
|
||||
---------------------------------------------------------------------
|
||||
28
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO multi_mx_function_call_delegation, public;
|
||||
RESET client_min_messages;
|
||||
|
|
|
@ -426,7 +426,9 @@ SELECT minimum_value::bigint AS min_value,
|
|||
FROM information_schema.sequences
|
||||
WHERE sequence_name = 'app_analytics_events_mx_id_seq' \gset
|
||||
SELECT last_value FROM app_analytics_events_mx_id_seq \gset
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
ALTER SEQUENCE app_analytics_events_mx_id_seq NO MINVALUE NO MAXVALUE;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
SELECT setval('app_analytics_events_mx_id_seq'::regclass, 3940649673949184);
|
||||
setval
|
||||
---------------------------------------------------------------------
|
||||
|
@ -458,5 +460,7 @@ SELECT 1 FROM setval('app_analytics_events_mx_id_seq'::regclass, :last_value);
|
|||
1
|
||||
(1 row)
|
||||
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
ALTER SEQUENCE app_analytics_events_mx_id_seq
|
||||
MINVALUE :min_value MAXVALUE :max_value;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
|
|
|
@ -484,6 +484,10 @@ SELECT table_schema AS "Shards' Schema"
|
|||
mx_old_schema
|
||||
(1 row)
|
||||
|
||||
-- Show that altering distributed schema is not allowed on worker nodes
|
||||
ALTER SCHEMA mx_old_schema RENAME TO temp_mx_old_schema;
|
||||
ERROR: operation is not allowed on this node
|
||||
HINT: Connect to the coordinator and run it again.
|
||||
\c - - - :master_port
|
||||
ALTER TABLE mx_old_schema.table_set_schema SET SCHEMA mx_new_schema;
|
||||
SELECT objid::oid::regnamespace as "Distributed Schemas"
|
||||
|
|
|
@ -11,7 +11,7 @@ SELECT pg_reload_conf();
|
|||
t
|
||||
(1 row)
|
||||
|
||||
-- Verifies pg_dist_node and pg_dist_palcement in the given worker matches the ones in coordinator
|
||||
-- Verifies pg_dist_node and pg_dist_placement in the given worker matches the ones in coordinator
|
||||
CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
|
||||
RETURNS BOOLEAN
|
||||
LANGUAGE sql
|
||||
|
|
|
@ -343,29 +343,10 @@ CREATE TABLE some_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL);
|
|||
DROP TABLE some_table_with_sequence;
|
||||
CREATE SEQUENCE some_sequence;
|
||||
DROP SEQUENCE some_sequence;
|
||||
-- Show that dropping the sequence of an MX table with cascade harms the table and shards
|
||||
BEGIN;
|
||||
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass;
|
||||
Column | Type | Modifiers
|
||||
---------------------------------------------------------------------
|
||||
col_1 | integer |
|
||||
col_2 | text |
|
||||
col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass)
|
||||
(3 rows)
|
||||
|
||||
-- suppress notice message caused by DROP ... CASCADE to prevent pg version difference
|
||||
SET client_min_messages TO 'WARNING';
|
||||
-- Show that dropping the sequence of an MX table is not supported on worker nodes
|
||||
DROP SEQUENCE mx_table_col_3_seq CASCADE;
|
||||
RESET client_min_messages;
|
||||
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass;
|
||||
Column | Type | Modifiers
|
||||
---------------------------------------------------------------------
|
||||
col_1 | integer |
|
||||
col_2 | text |
|
||||
col_3 | bigint | not null
|
||||
(3 rows)
|
||||
|
||||
ROLLBACK;
|
||||
ERROR: operation is not allowed on this node
|
||||
HINT: Connect to the coordinator and run it again.
|
||||
-- Cleanup
|
||||
\c - - - :master_port
|
||||
DROP TABLE mx_table;
|
||||
|
|
|
@ -0,0 +1,455 @@
|
|||
SET citus.enable_ddl_propagation TO OFF;
|
||||
CREATE SCHEMA local_schema;
|
||||
SET search_path TO local_schema;
|
||||
-- Create type and function that depends on it
|
||||
CREATE TYPE test_type AS (f1 int, f2 text);
|
||||
CREATE FUNCTION test_function(int) RETURNS test_type
|
||||
AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
|
||||
LANGUAGE SQL;
|
||||
-- Create various objects
|
||||
CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');
|
||||
-- Create a sequence under a different schema
|
||||
CREATE SCHEMA test_sequence_schema;
|
||||
CREATE SEQUENCE test_sequence_schema.test_sequence;
|
||||
-- show that none of the objects above are marked as distributed
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.mood'::regtype::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_type'::regtype::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SET client_min_messages TO ERROR;
|
||||
CREATE USER non_super_user_test_user;
|
||||
SELECT 1 FROM run_command_on_workers($$CREATE USER non_super_user_test_user;$$);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
1
|
||||
(2 rows)
|
||||
|
||||
RESET client_min_messages;
|
||||
GRANT ALL ON SCHEMA local_schema TO non_super_user_test_user;
|
||||
GRANT ALL ON SCHEMA test_sequence_schema TO non_super_user_test_user;
|
||||
SET ROLE non_super_user_test_user;
|
||||
SET search_path TO local_schema;
|
||||
CREATE TABLE dist_table(a int, b mood, c test_type, d int DEFAULT nextval('test_sequence_schema.test_sequence'), e bigserial);
|
||||
-- Citus requires that user must own the dependent sequence
|
||||
-- https://github.com/citusdata/citus/issues/5494
|
||||
SELECT create_distributed_table('local_schema.dist_table', 'a');
|
||||
ERROR: must be owner of sequence test_sequence
|
||||
-- Citus requires that user must own the function to distribute
|
||||
SELECT create_distributed_function('test_function(int)');
|
||||
ERROR: must be owner of function test_function
|
||||
RESET ROLE;
|
||||
SET search_path TO local_schema;
|
||||
ALTER SEQUENCE test_sequence_schema.test_sequence OWNER TO non_super_user_test_user;
|
||||
ALTER FUNCTION test_function(int) OWNER TO non_super_user_test_user;
|
||||
SET ROLE non_super_user_test_user;
|
||||
SET search_path TO local_schema;
|
||||
-- Show that we can distribute table and function after
|
||||
-- having required ownerships
|
||||
SELECT create_distributed_table('dist_table', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('test_function(int)');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show that schema, types, function and sequence has marked as distributed
|
||||
-- on the coordinator node
|
||||
RESET ROLE;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(schema,{local_schema},{})
|
||||
(1 row)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema'::regnamespace::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(schema,{test_sequence_schema},{})
|
||||
(1 row)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.mood'::regtype::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(type,{local_schema.mood},{})
|
||||
(1 row)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_type'::regtype::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(type,{local_schema.test_type},{})
|
||||
(1 row)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(sequence,"{test_sequence_schema,test_sequence}",{})
|
||||
(1 row)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.dist_table_e_seq'::regclass::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(sequence,"{local_schema,dist_table_e_seq}",{})
|
||||
(1 row)
|
||||
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;
|
||||
pg_identify_object_as_address
|
||||
---------------------------------------------------------------------
|
||||
(function,"{local_schema,test_function}",{integer})
|
||||
(1 row)
|
||||
|
||||
-- show those objects marked as distributed on metadata worker node as well
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (schema,{local_schema},{})
|
||||
localhost | 57638 | t | (schema,{local_schema},{})
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema'::regnamespace::oid;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (schema,{test_sequence_schema},{})
|
||||
localhost | 57638 | t | (schema,{test_sequence_schema},{})
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.mood'::regtype::oid;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (type,{local_schema.mood},{})
|
||||
localhost | 57638 | t | (type,{local_schema.mood},{})
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_type'::regtype::oid;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (type,{local_schema.test_type},{})
|
||||
localhost | 57638 | t | (type,{local_schema.test_type},{})
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (sequence,"{test_sequence_schema,test_sequence}",{})
|
||||
localhost | 57638 | t | (sequence,"{test_sequence_schema,test_sequence}",{})
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.dist_table_e_seq'::regclass::oid;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (sequence,"{local_schema,dist_table_e_seq}",{})
|
||||
localhost | 57638 | t | (sequence,"{local_schema,dist_table_e_seq}",{})
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (function,"{local_schema,test_function}",{integer})
|
||||
localhost | 57638 | t | (function,"{local_schema,test_function}",{integer})
|
||||
(2 rows)
|
||||
|
||||
-- show that schema is owned by the superuser
|
||||
SELECT rolname FROM pg_roles JOIN pg_namespace ON(pg_namespace.nspowner = pg_roles.oid) WHERE nspname = 'local_schema';
|
||||
rolname
|
||||
---------------------------------------------------------------------
|
||||
postgres
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT rolname FROM pg_roles JOIN pg_namespace ON(pg_namespace.nspowner = pg_roles.oid) WHERE nspname = 'local_schema';$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | postgres
|
||||
localhost | 57638 | t | postgres
|
||||
(2 rows)
|
||||
|
||||
-- show that types are owned by the superuser
|
||||
SELECT DISTINCT(rolname) FROM pg_roles JOIN pg_type ON(pg_type.typowner = pg_roles.oid) WHERE typname IN ('test_type', 'mood');
|
||||
rolname
|
||||
---------------------------------------------------------------------
|
||||
postgres
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT DISTINCT(rolname) FROM pg_roles JOIN pg_type ON(pg_type.typowner = pg_roles.oid) WHERE typname IN ('test_type', 'mood');$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | postgres
|
||||
localhost | 57638 | t | postgres
|
||||
(2 rows)
|
||||
|
||||
-- show that table is owned by the non_super_user_test_user
|
||||
SELECT rolname FROM pg_roles JOIN pg_class ON(pg_class.relowner = pg_roles.oid) WHERE relname = 'dist_table';
|
||||
rolname
|
||||
---------------------------------------------------------------------
|
||||
non_super_user_test_user
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT rolname FROM pg_roles JOIN pg_class ON(pg_class.relowner = pg_roles.oid) WHERE relname = 'dist_table'$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | non_super_user_test_user
|
||||
localhost | 57638 | t | non_super_user_test_user
|
||||
(2 rows)
|
||||
|
||||
SET ROLE non_super_user_test_user;
|
||||
SET search_path TO local_schema;
|
||||
-- ensure we can load data
|
||||
INSERT INTO dist_table VALUES (1, 'sad', (1,'onder')::test_type),
|
||||
(2, 'ok', (1,'burak')::test_type),
|
||||
(3, 'happy', (1,'marco')::test_type);
|
||||
SELECT a, b, c , d FROM dist_table ORDER BY 1,2,3,4;
|
||||
a | b | c | d
|
||||
---------------------------------------------------------------------
|
||||
1 | sad | (1,onder) | 1
|
||||
2 | ok | (1,burak) | 2
|
||||
3 | happy | (1,marco) | 3
|
||||
(3 rows)
|
||||
|
||||
-- Show that dropping the table removes the dependent sequence from pg_dist_object
|
||||
-- on both coordinator and metadata worker nodes when ddl propagation is on
|
||||
SET citus.enable_ddl_propagation TO ON;
|
||||
DROP TABLE dist_table CASCADE;
|
||||
RESET ROLE;
|
||||
SET search_path TO local_schema;
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%dist_table_e_seq%';
|
||||
obj_identifier
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%dist_table_e_seq%';$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t |
|
||||
localhost | 57638 | t |
|
||||
(2 rows)
|
||||
|
||||
-- Show that altering the function's schema marks the schema distributed
|
||||
CREATE SCHEMA schema_to_prop_with_function;
|
||||
ALTER FUNCTION test_function SET SCHEMA schema_to_prop_with_function;
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{schema_to_prop_with_function}%';
|
||||
obj_identifier
|
||||
---------------------------------------------------------------------
|
||||
(schema,{schema_to_prop_with_function},{})
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{schema_to_prop_with_function}%';$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (schema,{schema_to_prop_with_function},{})
|
||||
localhost | 57638 | t | (schema,{schema_to_prop_with_function},{})
|
||||
(2 rows)
|
||||
|
||||
-- Show that dropping the function removes the metadata from pg_dist_object
|
||||
-- on both coordinator and metadata worker node
|
||||
DROP FUNCTION schema_to_prop_with_function.test_function;
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%test_function%';
|
||||
obj_identifier
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%test_function%';$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t |
|
||||
localhost | 57638 | t |
|
||||
(2 rows)
|
||||
|
||||
-- Show that altering the type's schema marks the schema distributed
|
||||
CREATE SCHEMA schema_to_prop_with_type;
|
||||
ALTER TYPE test_type SET SCHEMA schema_to_prop_with_type;
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{schema_to_prop_with_type}%';
|
||||
obj_identifier
|
||||
---------------------------------------------------------------------
|
||||
(schema,{schema_to_prop_with_type},{})
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{schema_to_prop_with_type}%';$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (schema,{schema_to_prop_with_type},{})
|
||||
localhost | 57638 | t | (schema,{schema_to_prop_with_type},{})
|
||||
(2 rows)
|
||||
|
||||
-- Show that dropping type removes the metadata from pg_dist_object
|
||||
-- on both coordinator and metadata worker node
|
||||
DROP TYPE mood CASCADE;
|
||||
DROP TYPE schema_to_prop_with_type.test_type CASCADE;
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%test_type%' or obj_identifier::text like '%mood%';
|
||||
obj_identifier
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%test_type%' or obj_identifier::text like '%mood%'$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t |
|
||||
localhost | 57638 | t |
|
||||
(2 rows)
|
||||
|
||||
-- Show that distributed function related metadata are also propagated
|
||||
set citus.shard_replication_factor to 1;
|
||||
CREATE TABLE metadata_dist_test_table (a int, b int);
|
||||
SELECT create_distributed_table('metadata_dist_test_table', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE PROCEDURE metadata_dist_test_proc(dist_key integer, dist_key_2 integer)
|
||||
LANGUAGE plpgsql
|
||||
AS $$ DECLARE
|
||||
res INT := 0;
|
||||
BEGIN
|
||||
INSERT INTO metadata_dist_test_table VALUES (dist_key);
|
||||
SELECT count(*) INTO res FROM metadata_dist_test_table;
|
||||
RAISE NOTICE 'Res: %', res;
|
||||
COMMIT;
|
||||
END;$$;
|
||||
-- create a distributed function and show its distribution_argument_index
|
||||
SELECT create_distributed_function('metadata_dist_test_proc(integer, integer)', 'dist_key', 'metadata_dist_test_table');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'metadata_dist_test_proc'::regproc;
|
||||
distribution_argument_index
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'local_schema.metadata_dist_test_proc'::regproc;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | 0
|
||||
localhost | 57638 | t | 0
|
||||
(2 rows)
|
||||
|
||||
-- re-distribute and show that now the distribution_argument_index is updated on both the coordinator and workers
|
||||
SELECT create_distributed_function('metadata_dist_test_proc(integer, integer)', 'dist_key_2', 'metadata_dist_test_table');
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'metadata_dist_test_proc'::regproc;
|
||||
distribution_argument_index
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$ SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'local_schema.metadata_dist_test_proc'::regproc;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | 1
|
||||
localhost | 57638 | t | 1
|
||||
(2 rows)
|
||||
|
||||
-- Show that dropping schema doesn't affect the worker node
|
||||
DROP SCHEMA local_schema CASCADE;
|
||||
NOTICE: drop cascades to 2 other objects
|
||||
DETAIL: drop cascades to table metadata_dist_test_table
|
||||
drop cascades to function metadata_dist_test_proc(integer,integer)
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%';
|
||||
obj_identifier
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%';$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (schema,{local_schema},{})
|
||||
localhost | 57638 | t | (schema,{local_schema},{})
|
||||
(2 rows)
|
||||
|
||||
-- Show that extension and dependent sequence also created and marked as distributed
|
||||
CREATE SCHEMA extension_schema;
|
||||
CREATE EXTENSION ltree WITH SCHEMA extension_schema;
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{extension_schema}%';
|
||||
obj_identifier
|
||||
---------------------------------------------------------------------
|
||||
(schema,{extension_schema},{})
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{extension_schema}%';$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (schema,{extension_schema},{})
|
||||
localhost | 57638 | t | (schema,{extension_schema},{})
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{ltree}%';
|
||||
obj_identifier
|
||||
---------------------------------------------------------------------
|
||||
(extension,{ltree},{})
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{ltree}%';$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (extension,{ltree},{})
|
||||
localhost | 57638 | t | (extension,{ltree},{})
|
||||
(2 rows)
|
||||
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
NOTICE: dropping metadata on the node (localhost,57637)
|
||||
stop_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
NOTICE: dropping metadata on the node (localhost,57638)
|
||||
stop_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Show that we don't have any object metadata after stopping syncing
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object;$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t |
|
||||
localhost | 57638 | t |
|
||||
(2 rows)
|
||||
|
||||
-- Revert the settings for following tests
|
||||
RESET citus.enable_ddl_propagation;
|
||||
RESET citus.shard_replication_factor;
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
start_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
start_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
|
@ -68,6 +68,7 @@ ORDER BY 1;
|
|||
function citus_internal.replace_isolation_tester_func()
|
||||
function citus_internal.restore_isolation_tester_func()
|
||||
function citus_internal.upgrade_columnar_storage(regclass)
|
||||
function citus_internal_add_object_metadata(text,text[],text[],integer,integer)
|
||||
function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char")
|
||||
function citus_internal_add_placement_metadata(bigint,integer,bigint,integer,bigint)
|
||||
function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text)
|
||||
|
@ -260,5 +261,5 @@ ORDER BY 1;
|
|||
view citus_worker_stat_activity
|
||||
view pg_dist_shard_placement
|
||||
view time_partitions
|
||||
(244 rows)
|
||||
(245 rows)
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ test: multi_cluster_management
|
|||
|
||||
# below tests are placed right after multi_cluster_management as we do
|
||||
# remove/add node operations and we do not want any preexisting objects
|
||||
test: non_super_user_object_metadata
|
||||
test: alter_role_propagation
|
||||
test: propagate_extension_commands
|
||||
test: escape_extension_name
|
||||
|
@ -339,7 +340,6 @@ test: ssl_by_default
|
|||
# ---------
|
||||
test: distributed_types distributed_types_conflict disable_object_propagation distributed_types_xact_add_enum_value
|
||||
test: check_mx
|
||||
test: turn_mx_off
|
||||
test: distributed_functions distributed_functions_conflict
|
||||
test: distributed_collations distributed_collations_conflict
|
||||
test: distributed_procedure
|
||||
|
|
|
@ -353,7 +353,7 @@ create user notsuper;
|
|||
select run_command_on_workers($$create user notsuper$$);
|
||||
grant all on schema aggregate_support to notsuper;
|
||||
grant all on all tables in schema aggregate_support to notsuper;
|
||||
select run_command_on_workers($$
|
||||
select 1 from run_command_on_workers($$
|
||||
grant all on schema aggregate_support to notsuper;
|
||||
grant all on all tables in schema aggregate_support to notsuper;
|
||||
$$);
|
||||
|
|
|
@ -17,6 +17,15 @@ CREATE TABLE notices (
|
|||
SELECT create_distributed_table('notices', 'id');
|
||||
INSERT INTO notices VALUES (1, 'hello world');
|
||||
|
||||
-- Create the necessary test utility function
|
||||
CREATE OR REPLACE FUNCTION master_metadata_snapshot()
|
||||
RETURNS text[]
|
||||
LANGUAGE C STRICT
|
||||
AS 'citus';
|
||||
|
||||
COMMENT ON FUNCTION master_metadata_snapshot()
|
||||
IS 'commands to create the metadata snapshot';
|
||||
|
||||
CREATE FUNCTION notice(text)
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql AS $$
|
||||
|
@ -175,10 +184,6 @@ CREATE TABLE streaming_table(id macaddr);
|
|||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT create_distributed_table('streaming_table','id');
|
||||
|
||||
-- make sure that none of the active and primary nodes hasmetadata
|
||||
-- at the start of the test
|
||||
select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
|
||||
-- if not paremeters are supplied, we'd see that function doesn't have
|
||||
-- distribution_argument_index and colocationid
|
||||
SELECT create_distributed_function('"eq_mi''xed_param_names"(macaddr, macaddr)');
|
||||
|
@ -188,10 +193,6 @@ WHERE objid = 'eq_mi''xed_param_names(macaddr, macaddr)'::regprocedure;
|
|||
-- also show that we can use the function
|
||||
SELECT * FROM run_command_on_workers($$SELECT function_tests."eq_mi'xed_param_names"('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
|
||||
|
||||
-- make sure that none of the active and primary nodes hasmetadata
|
||||
-- since the function doesn't have a parameter
|
||||
select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
|
||||
-- try to co-locate with a table that uses statement-based replication
|
||||
SELECT create_distributed_function('increment(int2)', '$1');
|
||||
SELECT create_distributed_function('increment(int2)', '$1', colocate_with := 'statement_table');
|
||||
|
@ -204,6 +205,8 @@ END;
|
|||
SELECT create_distributed_function('dup(macaddr)', '$1', colocate_with := 'streaming_table');
|
||||
SELECT * FROM run_command_on_workers($$SELECT function_tests.dup('0123456789ab');$$) ORDER BY 1,2;
|
||||
|
||||
SELECT public.wait_until_metadata_sync(30000);
|
||||
|
||||
SELECT create_distributed_function('eq(macaddr,macaddr)', '$1', colocate_with := 'streaming_table');
|
||||
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('012345689ab','0123456789ab');$$) ORDER BY 1,2;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.eq(macaddr,macaddr)');
|
||||
|
@ -346,9 +349,6 @@ ROLLBACK;
|
|||
-- make sure that none of the nodes have the function because we've rollbacked
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='eq_with_param_names';$$);
|
||||
|
||||
-- make sure that none of the active and primary nodes hasmetadata
|
||||
select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
|
||||
-- valid distribution with distribution_arg_name
|
||||
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', distribution_arg_name:='val1');
|
||||
|
||||
|
@ -361,6 +361,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='eq_w
|
|||
-- valid distribution with distribution_arg_name -- case insensitive
|
||||
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', distribution_arg_name:='VaL1');
|
||||
|
||||
-- show that we are able to propagate objects with multiple item on address arrays
|
||||
SELECT * FROM (SELECT unnest(master_metadata_snapshot()) as metadata_command order by 1) as innerResult WHERE metadata_command like '%distributed_object_data%';
|
||||
|
||||
-- valid distribution with distribution_arg_index
|
||||
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)','$1');
|
||||
|
||||
|
|
|
@ -818,7 +818,9 @@ TRUNCATE collections_list;
|
|||
|
||||
-- make sure that even if local execution is used, the sequence values
|
||||
-- are generated locally
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
ALTER SEQUENCE collections_list_key_seq NO MINVALUE NO MAXVALUE;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
|
||||
PREPARE serial_prepared_local AS INSERT INTO collections_list (collection_id) VALUES (0) RETURNING key, ser;
|
||||
|
||||
|
|
|
@ -784,7 +784,9 @@ TRUNCATE collections_list;
|
|||
|
||||
-- make sure that even if local execution is used, the sequence values
|
||||
-- are generated locally
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
ALTER SEQUENCE collections_list_key_seq NO MINVALUE NO MAXVALUE;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
|
||||
PREPARE serial_prepared_local AS INSERT INTO collections_list (collection_id) VALUES (0) RETURNING key, ser;
|
||||
|
||||
|
|
|
@ -339,8 +339,92 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
|||
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
|
||||
ROLLBACK;
|
||||
|
||||
-- Now let's check valid pg_dist_object updates
|
||||
|
||||
-- check with non-existing object type
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ROLLBACK;
|
||||
|
||||
-- check the sanity of distributionArgumentIndex and colocationId
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ROLLBACK;
|
||||
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ROLLBACK;
|
||||
|
||||
-- check with non-existing object
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ROLLBACK;
|
||||
|
||||
-- since citus_internal_add_object_metadata is strict function returns NULL
|
||||
-- if any parameter is NULL
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ROLLBACK;
|
||||
|
||||
\c - postgres - :worker_1_port
|
||||
|
||||
-- Show that citus_internal_add_object_metadata only works for object types
|
||||
-- which Citus knows how to distribute
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
|
||||
CREATE TABLE publication_test_table(id int);
|
||||
CREATE PUBLICATION publication_test FOR TABLE publication_test_table;
|
||||
|
||||
SET ROLE metadata_sync_helper_role;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('publication', ARRAY['publication_test']::text[], ARRAY[]::text[], -1, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ROLLBACK;
|
||||
|
||||
-- Show that citus_internal_add_object_metadata checks the privileges
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
\set VERBOSITY terse
|
||||
|
||||
CREATE FUNCTION distribution_test_function(int) RETURNS int
|
||||
AS $$ SELECT $1 $$
|
||||
LANGUAGE SQL;
|
||||
|
||||
SET ROLE metadata_sync_helper_role;
|
||||
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
|
||||
AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0))
|
||||
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
|
||||
ROLLBACK;
|
||||
|
||||
-- we do not allow wrong partmethod
|
||||
-- so manually insert wrong partmethod for the sake of the test
|
||||
SET search_path TO metadata_sync_helpers;
|
||||
|
|
|
@ -104,11 +104,14 @@ GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_meta
|
|||
-- user needs permission for the pg_dist_node and pg_dist_local_group for metadata syncing
|
||||
SELECT run_command_on_workers('GRANT ALL ON pg_dist_node TO node_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT ALL ON pg_dist_local_group TO node_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA citus TO node_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT ALL ON SCHEMA citus TO node_metadata_user');
|
||||
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
|
||||
-- Removing public schema from pg_dist_object because it breaks the next tests
|
||||
DELETE FROM citus.pg_dist_object WHERE objid = 'public'::regnamespace::oid;
|
||||
DELETE FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'plpgsql');
|
||||
|
||||
-- try to manipulate node metadata via non-super user
|
||||
SET ROLE non_super_user;
|
||||
|
|
|
@ -67,7 +67,12 @@ CREATE FUNCTION add(integer, integer) RETURNS integer
|
|||
LANGUAGE SQL
|
||||
IMMUTABLE
|
||||
RETURNS NULL ON NULL INPUT;
|
||||
-- Since deparse logic on workers can not work for if function
|
||||
-- is distributed on workers, we are disabling object propagation
|
||||
-- first. Same trick has been applied multiple times in this test.
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('add(int,int)');
|
||||
RESET citus.enable_object_propagation;
|
||||
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION add CALLED ON NULL INPUT
|
||||
|
@ -271,8 +276,10 @@ CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text) RETURNS TEXT
|
|||
AS $$ SELECT 'Overloaded function called with param: ' || $1 $$
|
||||
LANGUAGE SQL;
|
||||
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"()');
|
||||
SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text)');
|
||||
RESET citus.enable_object_propagation;
|
||||
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2"
|
||||
|
@ -287,7 +294,9 @@ $cmd$);
|
|||
CREATE FUNCTION func_default_param(param INT DEFAULT 0) RETURNS TEXT
|
||||
AS $$ SELECT 'supplied param is : ' || param; $$
|
||||
LANGUAGE SQL;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('func_default_param(INT)');
|
||||
RESET citus.enable_object_propagation;
|
||||
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION func_default_param RENAME TO func_with_default_param;
|
||||
|
@ -297,7 +306,9 @@ $cmd$);
|
|||
CREATE FUNCTION func_out_param(IN param INT, OUT result TEXT)
|
||||
AS $$ SELECT 'supplied param is : ' || param; $$
|
||||
LANGUAGE SQL;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('func_out_param(INT)');
|
||||
RESET citus.enable_object_propagation;
|
||||
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION func_out_param RENAME TO func_in_and_out_param;
|
||||
|
@ -310,7 +321,9 @@ BEGIN
|
|||
a := a * a;
|
||||
END; $$
|
||||
LANGUAGE plpgsql;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('square(NUMERIC)');
|
||||
RESET citus.enable_object_propagation;
|
||||
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION square SET search_path TO DEFAULT;
|
||||
|
@ -330,18 +343,24 @@ BEGIN
|
|||
FROM generate_subscripts(list, 1) g(i);
|
||||
END; $$
|
||||
LANGUAGE plpgsql;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('sum_avg(NUMERIC[])');
|
||||
RESET citus.enable_object_propagation;
|
||||
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION sum_avg COST 10000;
|
||||
$cmd$);
|
||||
|
||||
-- a function with a custom type IN parameter
|
||||
SET citus.enable_ddl_propagation TO on;
|
||||
CREATE TYPE intpair AS (x int, y int);
|
||||
RESET citus.enable_ddl_propagation;
|
||||
CREATE FUNCTION func_custom_param(IN param intpair, OUT total INT)
|
||||
AS $$ SELECT param.x + param.y $$
|
||||
LANGUAGE SQL;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('func_custom_param(intpair)');
|
||||
RESET citus.enable_object_propagation;
|
||||
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param;
|
||||
|
@ -353,7 +372,9 @@ CREATE FUNCTION func_returns_table(IN count INT)
|
|||
RETURNS TABLE (x INT, y INT)
|
||||
AS $$ SELECT i,i FROM generate_series(1,count) i $$
|
||||
LANGUAGE SQL;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('func_returns_table(INT)');
|
||||
RESET citus.enable_object_propagation;
|
||||
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER FUNCTION func_returns_table ROWS 100;
|
||||
|
|
|
@ -56,7 +56,9 @@ BEGIN
|
|||
RAISE INFO 'information message %', $1;
|
||||
END;
|
||||
$proc$;
|
||||
SET citus.enable_object_propagation TO OFF;
|
||||
SELECT create_distributed_function('raise_info(text)');
|
||||
RESET citus.enable_object_propagation;
|
||||
|
||||
SELECT deparse_and_run_on_workers($cmd$
|
||||
ALTER PROCEDURE raise_info CALLED ON NULL INPUT
|
||||
|
|
|
@ -532,8 +532,10 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence;
|
|||
-- owner
|
||||
CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement;
|
||||
CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition;
|
||||
CREATE TABLE pg_dist_object_temp AS SELECT * FROM citus.pg_dist_object;
|
||||
DELETE FROM pg_dist_placement;
|
||||
DELETE FROM pg_dist_partition;
|
||||
DELETE FROM citus.pg_dist_object;
|
||||
SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
|
||||
|
@ -573,8 +575,10 @@ DROP TABLE mx_table;
|
|||
\c - postgres - :master_port
|
||||
INSERT INTO pg_dist_placement SELECT * FROM pg_dist_placement_temp;
|
||||
INSERT INTO pg_dist_partition SELECT * FROM pg_dist_partition_temp;
|
||||
INSERT INTO citus.pg_dist_object SELECT * FROM pg_dist_object_temp ON CONFLICT ON CONSTRAINT pg_dist_object_pkey DO NOTHING;
|
||||
DROP TABLE pg_dist_placement_temp;
|
||||
DROP TABLE pg_dist_partition_temp;
|
||||
DROP TABLE pg_dist_object_temp;
|
||||
UPDATE pg_dist_placement
|
||||
SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
|
||||
WHERE groupid = :old_worker_2_group;
|
||||
|
@ -825,8 +829,21 @@ GRANT EXECUTE ON FUNCTION start_metadata_sync_to_node(text,int) TO non_super_met
|
|||
GRANT EXECUTE ON FUNCTION stop_metadata_sync_to_node(text,int,bool) TO non_super_metadata_user;
|
||||
GRANT ALL ON pg_dist_node TO non_super_metadata_user;
|
||||
GRANT ALL ON pg_dist_local_group TO non_super_metadata_user;
|
||||
GRANT ALL ON SCHEMA citus TO non_super_metadata_user;
|
||||
GRANT INSERT ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user;
|
||||
GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user;
|
||||
GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user;
|
||||
GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user;
|
||||
GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user;
|
||||
SELECT run_command_on_workers('GRANT ALL ON pg_dist_node TO non_super_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT ALL ON pg_dist_local_group TO non_super_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT ALL ON SCHEMA citus TO non_super_metadata_user');
|
||||
SELECT run_command_on_workers('ALTER SEQUENCE user_defined_seq OWNER TO non_super_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user');
|
||||
SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user');
|
||||
|
||||
SET ROLE non_super_metadata_user;
|
||||
|
||||
|
|
|
@ -184,6 +184,16 @@ select create_distributed_function('mx_call_proc_tx(int)', '$1', 'mx_call_dist_t
|
|||
CALL multi_mx_call.mx_call_proc_tx(20);
|
||||
SELECT id, val FROM mx_call_dist_table_1 ORDER BY id, val;
|
||||
|
||||
-- Show that function delegation works from worker nodes as well
|
||||
\c - - - :worker_1_port
|
||||
SET search_path to multi_mx_call, public;
|
||||
SET client_min_messages TO DEBUG1;
|
||||
CALL multi_mx_call.mx_call_proc_tx(9);
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path to multi_mx_call, public;
|
||||
SET client_min_messages TO DEBUG1;
|
||||
|
||||
-- Test that we properly propagate errors raised from procedures.
|
||||
CREATE PROCEDURE mx_call_proc_raise(x int) LANGUAGE plpgsql AS $$
|
||||
BEGIN
|
||||
|
|
|
@ -438,5 +438,7 @@ SELECT table_name, citus_table_type, distribution_column, shard_count, table_own
|
|||
FROM citus_tables
|
||||
ORDER BY table_name::text;
|
||||
|
||||
|
||||
SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards ORDER BY shard_name::text;
|
||||
|
||||
-- Show that altering type name is not supported from worker node
|
||||
ALTER TYPE order_side_mx RENAME TO temp_order_side_mx;
|
||||
|
|
|
@ -272,6 +272,10 @@ SET search_path TO multi_mx_function_call_delegation, public;
|
|||
-- create_distributed_function is disallowed from worker nodes
|
||||
select create_distributed_function('mx_call_func(int,int)');
|
||||
|
||||
-- show that functions can be delegated from worker nodes
|
||||
SET client_min_messages TO DEBUG1;
|
||||
SELECT mx_call_func(2, 0);
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO multi_mx_function_call_delegation, public;
|
||||
|
||||
|
|
|
@ -293,7 +293,9 @@ SELECT minimum_value::bigint AS min_value,
|
|||
FROM information_schema.sequences
|
||||
WHERE sequence_name = 'app_analytics_events_mx_id_seq' \gset
|
||||
SELECT last_value FROM app_analytics_events_mx_id_seq \gset
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
ALTER SEQUENCE app_analytics_events_mx_id_seq NO MINVALUE NO MAXVALUE;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
SELECT setval('app_analytics_events_mx_id_seq'::regclass, 3940649673949184);
|
||||
|
||||
INSERT INTO app_analytics_events_mx VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id;
|
||||
|
@ -302,5 +304,8 @@ INSERT INTO app_analytics_events_mx (app_id, name) VALUES (103, 'Mynt') RETURNIN
|
|||
|
||||
-- clean up
|
||||
SELECT 1 FROM setval('app_analytics_events_mx_id_seq'::regclass, :last_value);
|
||||
|
||||
SET citus.enable_ddl_propagation TO OFF;
|
||||
ALTER SEQUENCE app_analytics_events_mx_id_seq
|
||||
MINVALUE :min_value MAXVALUE :max_value;
|
||||
RESET citus.enable_ddl_propagation;
|
||||
|
|
|
@ -314,6 +314,9 @@ SELECT table_schema AS "Shards' Schema"
|
|||
FROM information_schema.tables
|
||||
WHERE table_name LIKE 'table\_set\_schema\_%'
|
||||
GROUP BY table_schema;
|
||||
|
||||
-- Show that altering distributed schema is not allowed on worker nodes
|
||||
ALTER SCHEMA mx_old_schema RENAME TO temp_mx_old_schema;
|
||||
\c - - - :master_port
|
||||
|
||||
ALTER TABLE mx_old_schema.table_set_schema SET SCHEMA mx_new_schema;
|
||||
|
|
|
@ -8,7 +8,7 @@ ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
|
|||
ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
|
||||
SELECT pg_reload_conf();
|
||||
|
||||
-- Verifies pg_dist_node and pg_dist_palcement in the given worker matches the ones in coordinator
|
||||
-- Verifies pg_dist_node and pg_dist_placement in the given worker matches the ones in coordinator
|
||||
CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
|
||||
RETURNS BOOLEAN
|
||||
LANGUAGE sql
|
||||
|
|
|
@ -212,15 +212,8 @@ DROP TABLE some_table_with_sequence;
|
|||
CREATE SEQUENCE some_sequence;
|
||||
DROP SEQUENCE some_sequence;
|
||||
|
||||
-- Show that dropping the sequence of an MX table with cascade harms the table and shards
|
||||
BEGIN;
|
||||
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass;
|
||||
-- suppress notice message caused by DROP ... CASCADE to prevent pg version difference
|
||||
SET client_min_messages TO 'WARNING';
|
||||
-- Show that dropping the sequence of an MX table is not supported on worker nodes
|
||||
DROP SEQUENCE mx_table_col_3_seq CASCADE;
|
||||
RESET client_min_messages;
|
||||
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass;
|
||||
ROLLBACK;
|
||||
|
||||
-- Cleanup
|
||||
\c - - - :master_port
|
||||
|
|
|
@ -0,0 +1,190 @@
|
|||
SET citus.enable_ddl_propagation TO OFF;
|
||||
|
||||
CREATE SCHEMA local_schema;
|
||||
SET search_path TO local_schema;
|
||||
|
||||
-- Create type and function that depends on it
|
||||
CREATE TYPE test_type AS (f1 int, f2 text);
|
||||
CREATE FUNCTION test_function(int) RETURNS test_type
|
||||
AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
|
||||
LANGUAGE SQL;
|
||||
|
||||
-- Create various objects
|
||||
CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');
|
||||
|
||||
-- Create a sequence under a different schema
|
||||
CREATE SCHEMA test_sequence_schema;
|
||||
CREATE SEQUENCE test_sequence_schema.test_sequence;
|
||||
|
||||
-- show that none of the objects above are marked as distributed
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.mood'::regtype::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_type'::regtype::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;
|
||||
|
||||
SET client_min_messages TO ERROR;
|
||||
CREATE USER non_super_user_test_user;
|
||||
SELECT 1 FROM run_command_on_workers($$CREATE USER non_super_user_test_user;$$);
|
||||
RESET client_min_messages;
|
||||
|
||||
GRANT ALL ON SCHEMA local_schema TO non_super_user_test_user;
|
||||
GRANT ALL ON SCHEMA test_sequence_schema TO non_super_user_test_user;
|
||||
|
||||
SET ROLE non_super_user_test_user;
|
||||
SET search_path TO local_schema;
|
||||
CREATE TABLE dist_table(a int, b mood, c test_type, d int DEFAULT nextval('test_sequence_schema.test_sequence'), e bigserial);
|
||||
|
||||
-- Citus requires that user must own the dependent sequence
|
||||
-- https://github.com/citusdata/citus/issues/5494
|
||||
SELECT create_distributed_table('local_schema.dist_table', 'a');
|
||||
|
||||
-- Citus requires that user must own the function to distribute
|
||||
SELECT create_distributed_function('test_function(int)');
|
||||
|
||||
RESET ROLE;
|
||||
SET search_path TO local_schema;
|
||||
ALTER SEQUENCE test_sequence_schema.test_sequence OWNER TO non_super_user_test_user;
|
||||
ALTER FUNCTION test_function(int) OWNER TO non_super_user_test_user;
|
||||
|
||||
SET ROLE non_super_user_test_user;
|
||||
SET search_path TO local_schema;
|
||||
|
||||
-- Show that we can distribute table and function after
|
||||
-- having required ownerships
|
||||
SELECT create_distributed_table('dist_table', 'a');
|
||||
SELECT create_distributed_function('test_function(int)');
|
||||
|
||||
-- show that schema, types, function and sequence has marked as distributed
|
||||
-- on the coordinator node
|
||||
RESET ROLE;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema'::regnamespace::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.mood'::regtype::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_type'::regtype::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.dist_table_e_seq'::regclass::oid;
|
||||
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;
|
||||
|
||||
-- show those objects marked as distributed on metadata worker node as well
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;$$) ORDER BY 1,2;
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema'::regnamespace::oid;$$) ORDER BY 1,2;
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.mood'::regtype::oid;$$) ORDER BY 1,2;
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_type'::regtype::oid;$$) ORDER BY 1,2;
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;$$) ORDER BY 1,2;
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.dist_table_e_seq'::regclass::oid;$$) ORDER BY 1,2;
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;$$) ORDER BY 1,2;
|
||||
|
||||
-- show that schema is owned by the superuser
|
||||
SELECT rolname FROM pg_roles JOIN pg_namespace ON(pg_namespace.nspowner = pg_roles.oid) WHERE nspname = 'local_schema';
|
||||
SELECT * FROM run_command_on_workers($$SELECT rolname FROM pg_roles JOIN pg_namespace ON(pg_namespace.nspowner = pg_roles.oid) WHERE nspname = 'local_schema';$$) ORDER BY 1,2;
|
||||
|
||||
-- show that types are owned by the superuser
|
||||
SELECT DISTINCT(rolname) FROM pg_roles JOIN pg_type ON(pg_type.typowner = pg_roles.oid) WHERE typname IN ('test_type', 'mood');
|
||||
SELECT * FROM run_command_on_workers($$SELECT DISTINCT(rolname) FROM pg_roles JOIN pg_type ON(pg_type.typowner = pg_roles.oid) WHERE typname IN ('test_type', 'mood');$$) ORDER BY 1,2;
|
||||
|
||||
-- show that table is owned by the non_super_user_test_user
|
||||
SELECT rolname FROM pg_roles JOIN pg_class ON(pg_class.relowner = pg_roles.oid) WHERE relname = 'dist_table';
|
||||
SELECT * FROM run_command_on_workers($$SELECT rolname FROM pg_roles JOIN pg_class ON(pg_class.relowner = pg_roles.oid) WHERE relname = 'dist_table'$$) ORDER BY 1,2;
|
||||
|
||||
SET ROLE non_super_user_test_user;
|
||||
SET search_path TO local_schema;
|
||||
|
||||
-- ensure we can load data
|
||||
INSERT INTO dist_table VALUES (1, 'sad', (1,'onder')::test_type),
|
||||
(2, 'ok', (1,'burak')::test_type),
|
||||
(3, 'happy', (1,'marco')::test_type);
|
||||
|
||||
SELECT a, b, c , d FROM dist_table ORDER BY 1,2,3,4;
|
||||
|
||||
-- Show that dropping the table removes the dependent sequence from pg_dist_object
|
||||
-- on both coordinator and metadata worker nodes when ddl propagation is on
|
||||
SET citus.enable_ddl_propagation TO ON;
|
||||
DROP TABLE dist_table CASCADE;
|
||||
|
||||
RESET ROLE;
|
||||
SET search_path TO local_schema;
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%dist_table_e_seq%';
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%dist_table_e_seq%';$$) ORDER BY 1,2;
|
||||
|
||||
-- Show that altering the function's schema marks the schema distributed
|
||||
CREATE SCHEMA schema_to_prop_with_function;
|
||||
ALTER FUNCTION test_function SET SCHEMA schema_to_prop_with_function;
|
||||
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{schema_to_prop_with_function}%';
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{schema_to_prop_with_function}%';$$) ORDER BY 1,2;
|
||||
|
||||
-- Show that dropping the function removes the metadata from pg_dist_object
|
||||
-- on both coordinator and metadata worker node
|
||||
DROP FUNCTION schema_to_prop_with_function.test_function;
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%test_function%';
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%test_function%';$$) ORDER BY 1,2;
|
||||
|
||||
-- Show that altering the type's schema marks the schema distributed
|
||||
CREATE SCHEMA schema_to_prop_with_type;
|
||||
ALTER TYPE test_type SET SCHEMA schema_to_prop_with_type;
|
||||
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{schema_to_prop_with_type}%';
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{schema_to_prop_with_type}%';$$) ORDER BY 1,2;
|
||||
|
||||
-- Show that dropping type removes the metadata from pg_dist_object
|
||||
-- on both coordinator and metadata worker node
|
||||
DROP TYPE mood CASCADE;
|
||||
DROP TYPE schema_to_prop_with_type.test_type CASCADE;
|
||||
|
||||
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%test_type%' or obj_identifier::text like '%mood%';
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%test_type%' or obj_identifier::text like '%mood%'$$) ORDER BY 1,2;
|
||||
|
||||
-- Show that distributed function related metadata are also propagated
|
||||
set citus.shard_replication_factor to 1;
|
||||
|
||||
CREATE TABLE metadata_dist_test_table (a int, b int);
|
||||
SELECT create_distributed_table('metadata_dist_test_table', 'a');
|
||||
CREATE OR REPLACE PROCEDURE metadata_dist_test_proc(dist_key integer, dist_key_2 integer)
|
||||
LANGUAGE plpgsql
|
||||
AS $$ DECLARE
|
||||
res INT := 0;
|
||||
BEGIN
|
||||
INSERT INTO metadata_dist_test_table VALUES (dist_key);
|
||||
SELECT count(*) INTO res FROM metadata_dist_test_table;
|
||||
RAISE NOTICE 'Res: %', res;
|
||||
COMMIT;
|
||||
END;$$;
|
||||
|
||||
-- create a distributed function and show its distribution_argument_index
|
||||
-- distribute on the first argument (dist_key) -> expected index 0
SELECT create_distributed_function('metadata_dist_test_proc(integer, integer)', 'dist_key', 'metadata_dist_test_table');
|
||||
SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'metadata_dist_test_proc'::regproc;
|
||||
-- same check on the workers; the procedure lives in local_schema there, so it
-- must be schema-qualified inside the remote command
SELECT * FROM run_command_on_workers($$SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'local_schema.metadata_dist_test_proc'::regproc;$$) ORDER BY 1,2;
|
||||
|
||||
-- re-distribute and show that now the distribution_argument_index is updated on both the coordinator and workers
|
||||
-- switch the distribution argument to dist_key_2 -> expected index 1
SELECT create_distributed_function('metadata_dist_test_proc(integer, integer)', 'dist_key_2', 'metadata_dist_test_table');
|
||||
SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'metadata_dist_test_proc'::regproc;
|
||||
SELECT * FROM run_command_on_workers($$ SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'local_schema.metadata_dist_test_proc'::regproc;$$) ORDER BY 1,2;
|
||||
|
||||
-- Show that dropping schema doesn't affect the worker node
|
||||
DROP SCHEMA local_schema CASCADE;
|
||||
|
||||
-- after the cascade drop, no pg_dist_object row should mention local_schema,
-- neither on the coordinator nor on the workers
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%';
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%';$$) ORDER BY 1,2;
|
||||
|
||||
-- Show that extension and dependent sequence also created and marked as distributed
|
||||
CREATE SCHEMA extension_schema;
|
||||
CREATE EXTENSION ltree WITH SCHEMA extension_schema;
|
||||
|
||||
-- the schema hosting the extension must appear in pg_dist_object everywhere
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{extension_schema}%';
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{extension_schema}%';$$) ORDER BY 1,2;
|
||||
|
||||
-- the extension itself must also be recorded as distributed everywhere
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{ltree}%';
|
||||
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{ltree}%';$$) ORDER BY 1,2;
|
||||
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
|
||||
-- Show that we don't have any object metadata after stopping syncing
|
||||
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object;$$) ORDER BY 1,2;
|
||||
|
||||
-- Revert the settings for following tests
|
||||
RESET citus.enable_ddl_propagation;
|
||||
RESET citus.shard_replication_factor;
|
||||
-- re-enable metadata sync so subsequent test files start from the default state
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
Loading…
Reference in New Issue