mirror of https://github.com/citusdata/citus.git
Merge branch 'master' into dev/yanwjin/separate_columnar2
commit 1483b9d5c0

CHANGELOG.md | 29
@@ -1,3 +1,32 @@
+### citus v10.2.4 (February 1, 2022) ###
+
+* Adds support for operator class parameters in indexes
+
+* Fixes a bug with distributed functions that have `OUT` parameters or
+  return `TABLE`
+
+* Fixes a build error that happens when `lz4` is not installed
+
+* Improves self-deadlock prevention for `CREATE INDEX` &
+  `REINDEX CONCURRENTLY` commands for builds using PG14 or higher
+
+* Fixes a bug that causes commands to fail when `application_name` is set
+
+### citus v10.1.4 (February 1, 2022) ###
+
+* Adds missing version checks for columnar tables
+
+* Fixes a bug that could break `DROP SCHEMA/EXTENSION` commands when there is
+  a columnar table
+
+* Fixes a build error that happens when `lz4` is not installed
+
+* Fixes a missing `FROM` clause entry error
+
+* Reinstates optimisation for uniform shard interval ranges
+
+* Fixes a bug that causes commands to fail when `application_name` is set
+
 ### citus v10.2.3 (November 29, 2021) ###
 
 * Adds `fix_partition_shard_index_names` udf to fix currently broken
@@ -186,17 +186,17 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId)
            (char *) &metapage, sizeof(ColumnarMetapage));
    phdr->pd_lower += sizeof(ColumnarMetapage);
 
+   PageSetChecksumInplace(page, COLUMNAR_METAPAGE_BLOCKNO);
    log_newpage(&srel->smgr_rnode.node, MAIN_FORKNUM,
                COLUMNAR_METAPAGE_BLOCKNO, page, true);
-   PageSetChecksumInplace(page, COLUMNAR_METAPAGE_BLOCKNO);
    smgrextend(srel, MAIN_FORKNUM, COLUMNAR_METAPAGE_BLOCKNO, page, true);
 
    /* write empty page */
    PageInit(page, BLCKSZ, 0);
 
+   PageSetChecksumInplace(page, COLUMNAR_EMPTY_BLOCKNO);
    log_newpage(&srel->smgr_rnode.node, MAIN_FORKNUM,
                COLUMNAR_EMPTY_BLOCKNO, page, true);
-   PageSetChecksumInplace(page, COLUMNAR_EMPTY_BLOCKNO);
    smgrextend(srel, MAIN_FORKNUM, COLUMNAR_EMPTY_BLOCKNO, page, true);
 
    /*
@@ -1913,13 +1913,6 @@ columnar_tableam_init()
 }
 
 
-void
-columnar_tableam_finish()
-{
-   object_access_hook = PrevObjectAccessHook;
-}
-
-
 /*
  * Get the number of chunks filtered out during the given scan.
  */
@@ -2334,7 +2327,11 @@ alter_columnar_table_set(PG_FUNCTION_ARGS)
                        quote_identifier(RelationGetRelationName(rel)))));
    }
 
-   EnsureTableOwner(relationId);
+   if (!pg_class_ownercheck(relationId, GetUserId()))
+   {
+       aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE,
+                      get_rel_name(relationId));
+   }
 
    ColumnarOptions options = { 0 };
    if (!ReadColumnarOptions(relationId, &options))
@@ -2454,7 +2451,11 @@ alter_columnar_table_reset(PG_FUNCTION_ARGS)
                        quote_identifier(RelationGetRelationName(rel)))));
    }
 
-   EnsureTableOwner(relationId);
+   if (!pg_class_ownercheck(relationId, GetUserId()))
+   {
+       aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE,
+                      get_rel_name(relationId));
+   }
 
    ColumnarOptions options = { 0 };
    if (!ReadColumnarOptions(relationId, &options))
@@ -22,13 +22,11 @@
 #include "columnar/columnar_tableam.h"
 
-
 PG_MODULE_MAGIC;
 
 void _PG_init(void);
-
 void
 _PG_init(void)
 {
    columnar_init_gucs();
    columnar_tableam_init();
 }
@@ -68,7 +68,7 @@ CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *dest)
        return false;
    }
 
-   if (IsCitusInitiatedRemoteBackend())
+   if (IsCitusInternalBackend())
    {
        /*
         * We are in a citus-initiated backend handling a CALL to a distributed
@@ -34,8 +34,6 @@ static List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency);
 static List * FilterObjectAddressListByPredicate(List *objectAddressList,
                                                  AddressPredicate predicate);
 
-bool EnableDependencyCreation = true;
-
 /*
  * EnsureDependenciesExistOnAllNodes finds all the dependencies that we support and makes
  * sure these are available on all workers. If not available they will be created on the
@@ -122,7 +120,15 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
     */
    foreach_ptr(dependency, dependenciesWithCommands)
    {
-       MarkObjectDistributed(dependency);
+       /*
+        * pg_dist_object entries must be propagated with the super user, since
+        * the owner of the target object may not own dependencies but we must
+        * propagate as we send objects itself with the superuser.
+        *
+        * Only dependent object's metadata should be propagated with super user.
+        * Metadata of the table itself must be propagated with the current user.
+        */
+       MarkObjectDistributedViaSuperUser(dependency);
    }
 }
 
@@ -364,7 +370,7 @@ ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort)
    List *dependencies = GetDistributedObjectAddressList();
 
    /*
-    * Depending on changes in the environment, such as the enable_object_propagation guc
+    * Depending on changes in the environment, such as the enable_metadata_sync guc
     * there might be objects in the distributed object address list that should currently
     * not be propagated by citus as they are 'not supported'.
     */
@@ -415,7 +421,7 @@ ShouldPropagate(void)
        return false;
    }
 
-   if (!EnableDependencyCreation)
+   if (!EnableMetadataSync)
    {
        /*
         * we are configured to disable object propagation, should not propagate anything
@@ -649,7 +649,7 @@ static bool
 ShouldPropagateExtensionCommand(Node *parseTree)
 {
    /* if we disabled object propagation, then we should not propagate anything. */
-   if (!EnableDependencyCreation)
+   if (!EnableMetadataSync)
    {
        return false;
    }
@@ -200,8 +200,8 @@ create_distributed_function(PG_FUNCTION_ARGS)
    const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true);
    const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
    initStringInfo(&ddlCommand);
-   appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_OBJECT_PROPAGATION,
-                    createFunctionSQL, alterFunctionOwnerSQL, ENABLE_OBJECT_PROPAGATION);
+   appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC,
+                    createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC);
    SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(), ddlCommand.data);
 
    MarkObjectDistributed(&functionAddress);
@@ -698,7 +698,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
 
    table_close(pgDistObjectRel, NoLock);
 
-   if (EnableDependencyCreation)
+   if (EnableMetadataSync)
    {
        List *objectAddressList = list_make1((ObjectAddress *) distAddress);
        List *distArgumentIndexList = NIL;
@@ -1206,7 +1206,7 @@ ShouldPropagateCreateFunction(CreateFunctionStmt *stmt)
        return false;
    }
 
-   if (!EnableDependencyCreation)
+   if (!EnableMetadataSync)
    {
        /*
         * we are configured to disable object propagation, should not propagate anything
@@ -1254,7 +1254,7 @@ ShouldPropagateAlterFunction(const ObjectAddress *address)
        return false;
    }
 
-   if (!EnableDependencyCreation)
+   if (!EnableMetadataSync)
    {
        /*
         * we are configured to disable object propagation, should not propagate anything
@@ -1556,7 +1556,7 @@ PreprocessDropFunctionStmt(Node *node, const char *queryString,
        return NIL;
    }
 
-   if (!EnableDependencyCreation)
+   if (!EnableMetadataSync)
    {
        /*
         * we are configured to disable object propagation, should not propagate anything
@@ -1657,7 +1657,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString,
        return NIL;
    }
 
-   if (!EnableDependencyCreation)
+   if (!EnableMetadataSync)
    {
        /*
         * we are configured to disable object propagation, should not propagate anything
@@ -241,7 +241,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString,
        return NIL;
    }
 
-   if (!EnableDependencyCreation)
+   if (!EnableMetadataSync)
    {
        /*
         * we are configured to disable object propagation, should not propagate anything
@@ -437,8 +437,12 @@ AcquireDistributedLockOnRelations(List *relationIdList, LOCKMODE lockMode)
        /*
         * We only acquire distributed lock on relation if
         * the relation is sync'ed between mx nodes.
+        *
+        * Even if users disable metadata sync, we cannot
+        * allow them not to acquire the remote locks.
+        * Hence, we have !IsCoordinator() check.
         */
-       if (ShouldSyncTableMetadata(relationId))
+       if (ShouldSyncTableMetadata(relationId) || !IsCoordinator())
        {
            char *qualifiedRelationName = generate_qualified_relation_name(relationId);
            StringInfo lockRelationCommand = makeStringInfo();
@@ -897,7 +897,7 @@ ShouldCheckUndistributeCitusLocalTables(void)
        return false;
    }
 
-   if (IsCitusInitiatedRemoteBackend())
+   if (IsCitusInternalBackend() || IsRebalancerInternalBackend())
    {
        /* connection from the coordinator operating on a shard */
        return false;
@@ -1429,7 +1429,7 @@ ShouldShutdownConnection(MultiConnection *connection, const int cachedConnection
     * escalating the number of cached connections. We can recognize such backends
     * from their application name.
     */
-   return IsCitusInitiatedRemoteBackend() ||
+   return (IsCitusInternalBackend() || IsRebalancerInternalBackend()) ||
           connection->initilizationState != POOL_STATE_INITIALIZED ||
           cachedConnectionCount >= MaxCachedConnectionsPerWorker ||
           connection->forceCloseAtTransactionEnd ||
@@ -1441,12 +1441,23 @@ ShouldShutdownConnection(MultiConnection *connection, const int cachedConnection
 }
 
 
+/*
+ * IsRebalancerInitiatedBackend returns true if we are in a backend that citus
+ * rebalancer initiated.
+ */
+bool
+IsRebalancerInternalBackend(void)
+{
+   return application_name && strcmp(application_name, CITUS_REBALANCER_NAME) == 0;
+}
+
+
 /*
  * IsCitusInitiatedRemoteBackend returns true if we are in a backend that citus
  * initiated via remote connection.
  */
 bool
-IsCitusInitiatedRemoteBackend(void)
+IsCitusInternalBackend(void)
 {
    return application_name && strcmp(application_name, CITUS_APPLICATION_NAME) == 0;
 }
@@ -779,7 +779,7 @@ InTaskExecution(void)
     * is considered a task execution, but an exception is when we
     * are in a delegated function/procedure call.
     */
-   return IsCitusInitiatedRemoteBackend() &&
+   return IsCitusInternalBackend() &&
           !InTopLevelDelegatedFunctionCall &&
           !InDelegatedProcedureCall;
 }
@@ -558,7 +558,7 @@ IsObjectAddressCollected(ObjectAddress findAddress,
 bool
 SupportedDependencyByCitus(const ObjectAddress *address)
 {
-   if (!EnableDependencyCreation)
+   if (!EnableMetadataSync)
    {
        /*
         * If the user has disabled object propagation we need to fall back to the legacy
@@ -46,6 +46,8 @@
 #include "utils/rel.h"
 
 
+static void MarkObjectDistributedLocally(const ObjectAddress *distAddress);
+static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress);
 static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes,
                                      Datum *paramValues);
 
@@ -141,14 +143,60 @@ ObjectExists(const ObjectAddress *address)
 
 
 /*
- * MarkObjectDistributed marks an object as a distributed object by citus. Marking is done
- * by adding appropriate entries to citus.pg_dist_object.
+ * MarkObjectDistributed marks an object as a distributed object. Marking is done
+ * by adding appropriate entries to citus.pg_dist_object and also marking the object
+ * as distributed by opening a connection using current user to all of the workers
+ * with metadata if object propagation is on.
  *
- * This also marks the object as distributed on all of the workers with metadata
- * if object propagation is on.
+ * This function should be used if the user creating the given object. If you want
+ * to mark dependent objects as distributed check MarkObjectDistributedViaSuperUser.
  */
 void
 MarkObjectDistributed(const ObjectAddress *distAddress)
+{
+   MarkObjectDistributedLocally(distAddress);
+
+   if (EnableMetadataSync)
+   {
+       char *workerPgDistObjectUpdateCommand =
+           CreatePgDistObjectEntryCommand(distAddress);
+       SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand);
+   }
+}
+
+
+/*
+ * MarkObjectDistributedViaSuperUser marks an object as a distributed object. Marking
+ * is done by adding appropriate entries to citus.pg_dist_object and also marking the
+ * object as distributed by opening a connection using super user to all of the workers
+ * with metadata if object propagation is on.
+ *
+ * This function should be used to mark dependent object as distributed. If you want
+ * to mark the object you are creating please check MarkObjectDistributed.
+ */
+void
+MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress)
+{
+   MarkObjectDistributedLocally(distAddress);
+
+   if (EnableMetadataSync)
+   {
+       char *workerPgDistObjectUpdateCommand =
+           CreatePgDistObjectEntryCommand(distAddress);
+       SendCommandToWorkersWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand);
+   }
+}
+
+
+/*
+ * MarkObjectDistributedLocally marks an object as a distributed object by citus.
+ * Marking is done by adding appropriate entries to citus.pg_dist_object.
+ *
+ * This function should never be called alone, MarkObjectDistributed() or
+ * MarkObjectDistributedViaSuperUser() should be called.
+ */
+static void
+MarkObjectDistributedLocally(const ObjectAddress *distAddress)
 {
    int paramCount = 3;
    Oid paramTypes[3] = {
@@ -161,32 +209,38 @@ MarkObjectDistributed(const ObjectAddress *distAddress)
        ObjectIdGetDatum(distAddress->objectId),
        Int32GetDatum(distAddress->objectSubId)
    };
 
    char *insertQuery = "INSERT INTO citus.pg_dist_object (classid, objid, objsubid) "
                        "VALUES ($1, $2, $3) ON CONFLICT DO NOTHING";
 
    int spiStatus = ExecuteCommandAsSuperuser(insertQuery, paramCount, paramTypes,
                                              paramValues);
    if (spiStatus < 0)
    {
        ereport(ERROR, (errmsg("failed to insert object into citus.pg_dist_object")));
    }
+}
 
-   if (EnableDependencyCreation)
-   {
-       /* create a list by adding the address of value to not to have warning */
-       List *objectAddressList = list_make1((ObjectAddress *) distAddress);
-       List *distArgumetIndexList = list_make1_int(INVALID_DISTRIBUTION_ARGUMENT_INDEX);
-       List *colocationIdList = list_make1_int(INVALID_COLOCATION_ID);
-       List *forceDelegationList = list_make1_int(NO_FORCE_PUSHDOWN);
-
-       char *workerPgDistObjectUpdateCommand =
-           MarkObjectsDistributedCreateCommand(objectAddressList,
-                                               distArgumetIndexList,
-                                               colocationIdList,
-                                               forceDelegationList);
-       SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand);
-   }
+
+/*
+ * CreatePgDistObjectEntryCommand creates command to insert pg_dist_object tuple
+ * for the given object address.
+ */
+static char *
+CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress)
+{
+   /* create a list by adding the address of value to not to have warning */
+   List *objectAddressList =
+       list_make1((ObjectAddress *) objectAddress);
+   List *distArgumetIndexList = list_make1_int(INVALID_DISTRIBUTION_ARGUMENT_INDEX);
+   List *colocationIdList = list_make1_int(INVALID_COLOCATION_ID);
+   List *forceDelegationList = list_make1_int(NO_FORCE_PUSHDOWN);
+
+   char *workerPgDistObjectUpdateCommand =
+       MarkObjectsDistributedCreateCommand(objectAddressList,
+                                           distArgumetIndexList,
+                                           colocationIdList,
+                                           forceDelegationList);
+
+   return workerPgDistObjectUpdateCommand;
 }
@@ -105,7 +105,7 @@ static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission);
 static RoleSpec * GetRoleSpecObjectForGrantStmt(Oid roleOid);
 static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid,
                                                       AclItem *aclItem);
-static void SetLocalEnableDependencyCreation(bool state);
+static void SetLocalEnableMetadataSync(bool state);
 static void SetLocalReplicateReferenceTablesOnActivate(bool state);
 static char * GenerateSetRoleQuery(Oid roleOid);
 static void MetadataSyncSigTermHandler(SIGNAL_ARGS);
@@ -417,7 +417,8 @@ ClusterHasKnownMetadataWorkers()
 bool
 ShouldSyncTableMetadata(Oid relationId)
 {
-   if (!OidIsValid(relationId) || !IsCitusTable(relationId))
+   if (!EnableMetadataSync ||
+       !OidIsValid(relationId) || !IsCitusTable(relationId))
    {
        return false;
    }
@@ -950,8 +951,8 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS)
                                            argsArray);
 
    /* First, disable propagation off to not to cause infinite propagation */
-   bool prevDependencyCreationValue = EnableDependencyCreation;
-   SetLocalEnableDependencyCreation(false);
+   bool prevDependencyCreationValue = EnableMetadataSync;
+   SetLocalEnableMetadataSync(false);
 
    MarkObjectDistributed(&objectAddress);
 
@@ -978,7 +979,7 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS)
                                        forceDelegationAddress);
    }
 
-   SetLocalEnableDependencyCreation(prevDependencyCreationValue);
+   SetLocalEnableMetadataSync(prevDependencyCreationValue);
 
    PG_RETURN_VOID();
 }
@@ -1847,12 +1848,12 @@ GetRoleSpecObjectForGrantStmt(Oid roleOid)
 
 
 /*
- * SetLocalEnableDependencyCreation sets the enable_object_propagation locally
+ * SetLocalEnableMetadataSync sets the enable_metadata_sync locally
  */
 static void
-SetLocalEnableDependencyCreation(bool state)
+SetLocalEnableMetadataSync(bool state)
 {
-   set_config_option("citus.enable_object_propagation", state == true ? "on" : "off",
+   set_config_option("citus.enable_metadata_sync", state == true ? "on" : "off",
                      (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
                      GUC_ACTION_LOCAL, true, 0, false);
 }
@@ -2636,7 +2637,8 @@ EnsureCoordinatorInitiatedOperation(void)
     * check. The other two checks are to ensure that the operation is initiated
     * by the coordinator.
     */
-   if (!IsCitusInitiatedRemoteBackend() || !MyBackendIsInDisributedTransaction() ||
+   if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend()) ||
+       !MyBackendIsInDisributedTransaction() ||
        GetLocalGroupId() == COORDINATOR_GROUP_ID)
    {
        ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
@@ -77,7 +77,7 @@ bool ReplicateReferenceTablesOnActivate = true;
 /* did current transaction modify pg_dist_node? */
 bool TransactionModifiedNodeMetadata = false;
 
-bool EnableMetadataSyncByDefault = true;
+bool EnableMetadataSync = true;
 
 typedef struct NodeMetadata
 {
@@ -787,7 +787,6 @@ SyncDistributedObjectsToNode(WorkerNode *workerNode)
        return;
    }
 
-   EnsureNoModificationsHaveBeenDone();
    EnsureSequentialModeMetadataOperations();
 
    Assert(ShouldPropagate());
@@ -1096,9 +1095,9 @@ ActivateNode(char *nodeName, int nodePort)
                                          BoolGetDatum(isActive));
 
    /* TODO: Once all tests will be enabled for MX, we can remove sync by default check */
-   bool syncMetadata = EnableMetadataSyncByDefault && NodeIsPrimary(workerNode);
+   bool syncMetadata = EnableMetadataSync && NodeIsPrimary(workerNode);
 
-   if (syncMetadata && EnableDependencyCreation)
+   if (syncMetadata)
    {
        /*
         * We are going to sync the metadata anyway in this transaction, so do
@@ -38,7 +38,6 @@ static void ErrorIfCurrentUserCanNotDistributeObject(ObjectType type,
                                                      ObjectAddress *addr,
                                                      Node *node,
                                                      Relation *relation);
-static void ErrorIfUserNotAllowedToPropagateExtension(char *extensionName);
 static List * textarray_to_strvaluelist(ArrayType *arr);
 
 /* It is defined on PG >= 13 versions by default */
@@ -398,12 +397,8 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
                                          Node *node, Relation *relation)
 {
    Oid userId = GetUserId();
-   AclMode aclMaskResult = 0;
-   bool skipAclCheck = false;
-   Oid idToCheck = InvalidOid;
 
-   /* Since we don't handle sequences like object, add it separately */
-   if (!(SupportedDependencyByCitus(addr) || type == OBJECT_SEQUENCE))
+   if (!SupportedDependencyByCitus(addr))
    {
        ereport(ERROR, (errmsg("Object type %d can not be distributed by Citus", type)));
    }
@@ -411,27 +406,19 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
    switch (type)
    {
        case OBJECT_SCHEMA:
-       {
-           idToCheck = addr->objectId;
-           aclMaskResult = pg_namespace_aclmask(idToCheck, userId, ACL_USAGE,
-                                                ACLMASK_ANY);
-           break;
-       }
-
+       case OBJECT_DATABASE:
        case OBJECT_FUNCTION:
        case OBJECT_PROCEDURE:
        case OBJECT_AGGREGATE:
+       case OBJECT_TYPE:
+       case OBJECT_FOREIGN_SERVER:
+       case OBJECT_SEQUENCE:
+       case OBJECT_FOREIGN_TABLE:
+       case OBJECT_TABLE:
+       case OBJECT_EXTENSION:
+       case OBJECT_COLLATION:
        {
            check_object_ownership(userId, type, *addr, node, *relation);
-           skipAclCheck = true;
            break;
        }
 
-       case OBJECT_DATABASE:
-       {
-           idToCheck = addr->objectId;
-           aclMaskResult = pg_database_aclmask(idToCheck, userId, ACL_CONNECT,
-                                               ACLMASK_ANY);
-           break;
-       }
-
@@ -444,54 +431,6 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
                                   "access privileges on role %d with type %d",
                                   addr->objectId, type)));
            }
-           skipAclCheck = true;
            break;
        }
 
-       case OBJECT_TYPE:
-       {
-           idToCheck = addr->objectId;
-           aclMaskResult = pg_type_aclmask(idToCheck, userId, ACL_USAGE,
-                                           ACLMASK_ANY);
-           break;
-       }
-
-       case OBJECT_FOREIGN_SERVER:
-       {
-           idToCheck = addr->objectId;
-           aclMaskResult = pg_foreign_server_aclmask(idToCheck, userId, ACL_USAGE,
-                                                     ACLMASK_ANY);
-           break;
-       }
-
-       case OBJECT_SEQUENCE:
-       {
-           idToCheck = addr->objectId;
-           aclMaskResult = pg_class_aclmask(idToCheck, userId, ACL_USAGE, ACLMASK_ANY);
-           break;
-       }
-
-       case OBJECT_FOREIGN_TABLE:
-       case OBJECT_TABLE:
-       {
-           /* table distribution already does the ownership check, so we can stick to that over acl_check */
-           check_object_ownership(userId, type, *addr, node, *relation);
-           skipAclCheck = true;
-           break;
-       }
-
-       case OBJECT_EXTENSION:
-       {
-           Value *valueNode = (Value *) node;
-           char *extensionName = strVal(valueNode);
-           ErrorIfUserNotAllowedToPropagateExtension(extensionName);
-           skipAclCheck = true;
-           break;
-       }
-
-       case OBJECT_COLLATION:
-       {
-           skipAclCheck = true;
-           break;
-       }
-
@@ -502,119 +441,6 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
            break;
        }
    }
 
-   if (!skipAclCheck && aclMaskResult == ACL_NO_RIGHTS)
-   {
-       ereport(ERROR, (errmsg("Current user does not have required privileges "
-                              "on %d with type id %d to distribute it",
-                              idToCheck, type)));
-   }
 }
 
 
-/*
- * ErrorIfUserNotAllowedToPropagateExtension errors out if the current user does
- * not have required privileges to propagate extension
- */
-static void
-ErrorIfUserNotAllowedToPropagateExtension(char *extensionName)
-{
-   const int nameAttributeIndex = 1;
-   const int superuserAttributeIndex = 4;
-#if PG_VERSION_NUM >= PG_VERSION_13
-   const int trustedAttributeIndex = 5;
-#endif
-
-   LOCAL_FCINFO(fcinfo, 0);
-   FmgrInfo flinfo;
-
-   bool goForward = true;
-   bool doCopy = false;
-
-   EState *estate = CreateExecutorState();
-   ReturnSetInfo *extensionsResultSet = makeNode(ReturnSetInfo);
-   extensionsResultSet->econtext = GetPerTupleExprContext(estate);
-   extensionsResultSet->allowedModes = SFRM_Materialize;
-
-   fmgr_info(F_PG_AVAILABLE_EXTENSION_VERSIONS, &flinfo);
-   InitFunctionCallInfoData(*fcinfo, &flinfo, 0, InvalidOid, NULL,
-                            (Node *) extensionsResultSet);
-
-   /*
-    * pg_available_extensions_versions returns result set containing all
-    * available extension versions with whether the extension requires
-    * superuser and it is trusted information.
-    */
-   (*pg_available_extension_versions)(fcinfo);
-
-   TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat(
-       extensionsResultSet->setDesc,
-       &TTSOpsMinimalTuple);
-   bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult,
-                                           goForward,
-                                           doCopy,
-                                           tupleTableSlot);
-   while (hasTuple)
-   {
-       bool isNull = false;
-       Datum curExtensionNameDatum = slot_getattr(tupleTableSlot,
-                                                  nameAttributeIndex,
-                                                  &isNull);
-       char *curExtensionName = NameStr(*DatumGetName(curExtensionNameDatum));
-       if (strcmp(extensionName, curExtensionName) == 0)
-       {
-           Datum superuserExpectedDatum = slot_getattr(tupleTableSlot,
-                                                       superuserAttributeIndex,
-                                                       &isNull);
-           bool superuserExpected = DatumGetBool(superuserExpectedDatum);
-
-#if PG_VERSION_NUM < PG_VERSION_13
-           if (superuserExpected)
-           {
-               EnsureSuperUser();
-           }
-#else
-           if (superuserExpected)
-           {
-               /*
-                * After PG 13, if the extension is trusted it can be created
-                * by the user having CREATE privilege on the database even if
-                * the extension requires superuser.
-                */
-               Datum trustedExtensionDatum = slot_getattr(tupleTableSlot,
-                                                          trustedAttributeIndex,
-                                                          &isNull);
-               bool trustedExtension = DatumGetBool(trustedExtensionDatum);
-
-               if (trustedExtension)
-               {
-                   /* Allow if user has CREATE privilege on current database */
-                   AclResult aclresult = pg_database_aclcheck(MyDatabaseId,
-                                                              GetUserId(),
-                                                              ACL_CREATE);
-                   if (aclresult != ACLCHECK_OK)
-                   {
-                       ereport(ERROR, (errmsg("operation is not allowed"),
-                                       errhint("Must have CREATE privilege "
-                                               "on database to propagate "
-                                               "extension %s", curExtensionName)));
-                   }
-               }
-               else
-               {
-                   EnsureSuperUser();
-               }
-           }
-#endif
-           break;
-       }
-
-       ExecClearTuple(tupleTableSlot);
-       hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward,
-                                          doCopy, tupleTableSlot);
-   }
-
-   ExecDropSingleTupleTableSlot(tupleTableSlot);
-}
@@ -67,7 +67,6 @@
 /* Shard related configuration */
 int ShardCount = 32;
 int ShardReplicationFactor = 1;    /* desired replication factor for shards */
-int ShardPlacementPolicy = SHARD_PLACEMENT_ROUND_ROBIN;
 int NextShardId = 0;
 int NextPlacementId = 0;
 
@@ -556,8 +556,12 @@ BlockWritesToShardList(List *shardList)
    Oid firstDistributedTableId = firstShardInterval->relationId;
 
    bool shouldSyncMetadata = ShouldSyncTableMetadata(firstDistributedTableId);
-   if (shouldSyncMetadata)
+   if (shouldSyncMetadata || !IsCoordinator())
    {
+       /*
+        * Even if users disable metadata sync, we cannot allow them not to
+        * acquire the remote locks. Hence, we have !IsCoordinator() check.
+        */
        LockShardListMetadataOnWorkers(ExclusiveLock, shardList);
    }
 }
@@ -96,7 +96,7 @@ isolation_cleanup_orphaned_shards(PG_FUNCTION_ARGS)
 void
 DropOrphanedShardsInSeparateTransaction(void)
 {
-   ExecuteCriticalCommandInSeparateTransaction("CALL citus_cleanup_orphaned_shards()");
+   ExecuteRebalancerCommandInSeparateTransaction("CALL citus_cleanup_orphaned_shards()");
 }
 
 
@@ -919,10 +919,10 @@ citus_drain_node(PG_FUNCTION_ARGS)
     * This is done in a separate session. This way it's not undone if the
     * draining fails midway through.
     */
-   ExecuteCriticalCommandInSeparateTransaction(psprintf(
-                                                   "SELECT master_set_node_property(%s, %i, 'shouldhaveshards', false)",
-                                                   quote_literal_cstr(nodeName),
-                                                   nodePort));
+   ExecuteRebalancerCommandInSeparateTransaction(psprintf(
+                                                     "SELECT master_set_node_property(%s, %i, 'shouldhaveshards', false)",
+                                                     quote_literal_cstr(nodeName),
+                                                     nodePort));
 
    RebalanceTableShards(&options, shardTransferModeOid);
 
@@ -1696,7 +1696,7 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent,
     * In case of failure, we throw an error such that rebalance_table_shards
     * fails early.
     */
-   ExecuteCriticalCommandInSeparateTransaction(placementUpdateCommand->data);
+   ExecuteRebalancerCommandInSeparateTransaction(placementUpdateCommand->data);
 
    UpdateColocatedShardPlacementProgress(shardId,
                                          sourceNode->workerName,
@@ -1711,12 +1711,18 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent,
  * don't want to rollback when the current transaction is rolled back.
  */
 void
-ExecuteCriticalCommandInSeparateTransaction(char *command)
+ExecuteRebalancerCommandInSeparateTransaction(char *command)
 {
    int connectionFlag = FORCE_NEW_CONNECTION;
    MultiConnection *connection = GetNodeConnection(connectionFlag, LocalHostName,
                                                    PostPortNumber);
+   StringInfo setApplicationName = makeStringInfo();
+   appendStringInfo(setApplicationName, "SET application_name TO %s",
+                    CITUS_REBALANCER_NAME);
+
+   ExecuteCriticalRemoteCommand(connection, setApplicationName->data);
    ExecuteCriticalRemoteCommand(connection, command);
 
    CloseConnection(connection);
 }
@@ -171,26 +171,9 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
    /* first retrieve a list of random nodes for shard placements */
    while (candidateNodeIndex < attemptableNodeCount)
    {
-       WorkerNode *candidateNode = NULL;
-
-       if (ShardPlacementPolicy == SHARD_PLACEMENT_LOCAL_NODE_FIRST)
-       {
-           candidateNode = WorkerGetLocalFirstCandidateNode(candidateNodeList);
-       }
-       else if (ShardPlacementPolicy == SHARD_PLACEMENT_ROUND_ROBIN)
-       {
-           candidateNode = WorkerGetRoundRobinCandidateNode(workerNodeList, shardId,
-                                                            candidateNodeIndex);
-       }
-       else if (ShardPlacementPolicy == SHARD_PLACEMENT_RANDOM)
-       {
-           candidateNode = WorkerGetRandomCandidateNode(candidateNodeList);
-       }
-       else
-       {
-           ereport(ERROR, (errmsg("unrecognized shard placement policy")));
-       }
-
+       WorkerNode *candidateNode = WorkerGetRoundRobinCandidateNode(workerNodeList,
+                                                                    shardId,
+                                                                    candidateNodeIndex);
        if (candidateNode == NULL)
        {
            ereport(ERROR, (errmsg("could only find %u of %u possible nodes",
@@ -40,12 +40,6 @@ int MaxWorkerNodesTracked = 2048;    /* determines worker node hash table size *
 
 
 /* Local functions forward declarations */
-static WorkerNode * WorkerGetNodeWithName(const char *hostname);
-static char * ClientHostAddress(StringInfo remoteHostStringInfo);
-static List * PrimaryNodesNotInList(List *currentList);
-static WorkerNode * FindRandomNodeFromList(List *candidateWorkerNodeList);
-static bool OddNumber(uint32 number);
-static bool ListMember(List *currentList, WorkerNode *workerNode);
 static bool NodeIsPrimaryWorker(WorkerNode *node);
 static bool NodeIsReadableWorker(WorkerNode *node);
@@ -55,73 +49,6 @@ static bool NodeIsReadableWorker(WorkerNode *node);
  * ------------------------------------------------------------
  */
 
-/*
- * WorkerGetRandomCandidateNode accepts a list of WorkerNode's and returns a random
- * primary node which is not in that list.
- *
- * Note that the function returns null if the worker membership list does not
- * contain enough nodes to allocate a new worker node.
- */
-WorkerNode *
-WorkerGetRandomCandidateNode(List *currentNodeList)
-{
-   WorkerNode *workerNode = NULL;
-   bool wantSameRack = false;
-   uint32 tryCount = WORKER_RACK_TRIES;
-
-   uint32 currentNodeCount = list_length(currentNodeList);
-   List *candidateWorkerNodeList = PrimaryNodesNotInList(currentNodeList);
-
-   /* we check if the shard has already been placed on all nodes known to us */
-   if (list_length(candidateWorkerNodeList) == 0)
-   {
-       return NULL;
-   }
-
-   /* if current node list is empty, randomly pick one node and return */
-   if (currentNodeCount == 0)
-   {
-       workerNode = FindRandomNodeFromList(candidateWorkerNodeList);
-       return workerNode;
-   }
-
-   /*
-    * If the current list has an odd number of nodes (1, 3, 5, etc), we want to
-    * place the shard on a different rack than the first node's rack.
-    * Otherwise, we want to place the shard on the same rack as the first node.
-    */
-   if (OddNumber(currentNodeCount))
-   {
-       wantSameRack = false;
-   }
-   else
-   {
-       wantSameRack = true;
-   }
-
-   /*
-    * We try to find a worker node that fits our rack-aware placement strategy.
-    * If after a predefined number of tries, we still cannot find such a node,
-    * we simply give up and return the last worker node we found.
-    */
-   for (uint32 tryIndex = 0; tryIndex < tryCount; tryIndex++)
-   {
-       WorkerNode *firstNode = (WorkerNode *) linitial(currentNodeList);
-       char *firstRack = firstNode->workerRack;
-
-       workerNode = FindRandomNodeFromList(candidateWorkerNodeList);
-       char *workerRack = workerNode->workerRack;
-
-       bool sameRack = (strncmp(workerRack, firstRack, WORKER_LENGTH) == 0);
-       if ((sameRack && wantSameRack) || (!sameRack && !wantSameRack))
-       {
-           break;
-       }
-   }
-
-   return workerNode;
-}
-
-
 /*
  * WorkerGetRoundRobinCandidateNode takes in a list of worker nodes and returns
@@ -152,147 +79,6 @@ WorkerGetRoundRobinCandidateNode(List *workerNodeList, uint64 shardId,
 }
 
 
-/*
- * WorkerGetLocalFirstCandidateNode takes in a list of worker nodes, and then
- * allocates a new worker node. The allocation is performed according to the
- * following policy: if the list is empty, the node where the caller is connecting
- * from is allocated; if the list is not empty, a node is allocated according
- * to random policy.
- */
-WorkerNode *
-WorkerGetLocalFirstCandidateNode(List *currentNodeList)
-{
-   WorkerNode *candidateNode = NULL;
-   uint32 currentNodeCount = list_length(currentNodeList);
-
-   /* choose first candidate node to be the client's host */
-   if (currentNodeCount == 0)
-   {
-       StringInfo clientHostStringInfo = makeStringInfo();
-       char *errorMessage = ClientHostAddress(clientHostStringInfo);
-
-       if (errorMessage != NULL)
-       {
-           ereport(ERROR, (errmsg("%s", errorMessage),
-                           errdetail("Could not find the first worker "
-                                     "node for local-node-first policy."),
-                           errhint("Make sure that you are not on the "
-                                   "master node.")));
-       }
-
-       /* if hostname is localhost.localdomain, change it to localhost */
-       char *clientHost = clientHostStringInfo->data;
-       if (strncmp(clientHost, "localhost.localdomain", WORKER_LENGTH) == 0)
-       {
-           clientHost = pstrdup("localhost");
-       }
-
-       candidateNode = WorkerGetNodeWithName(clientHost);
-       if (candidateNode == NULL)
-       {
-           ereport(ERROR, (errmsg("could not find worker node for "
-                                  "host: %s", clientHost)));
-       }
-   }
-   else
-   {
-       /* find a candidate node different from those already selected */
-       candidateNode = WorkerGetRandomCandidateNode(currentNodeList);
-   }
-
-   return candidateNode;
-}
-
-
-/*
- * ClientHostAddress appends the connecting client's fully qualified hostname
- * to the given StringInfo. If there is no such connection or the connection is
- * over Unix domain socket, the function fills the error message and returns it.
- * On success, it just returns NULL.
- */
-static char *
-ClientHostAddress(StringInfo clientHostStringInfo)
-{
-   Port *port = MyProcPort;
-   char *clientHost = NULL;
-   char *errorMessage = NULL;
-   int clientHostLength = NI_MAXHOST;
-   int flags = NI_NAMEREQD;    /* require fully qualified hostname */
-   int nameFound = 0;
-
-   if (port == NULL)
-   {
-       errorMessage = "cannot find tcp/ip connection to client";
-       return errorMessage;
-   }
-
-   switch (port->raddr.addr.ss_family)
-   {
-       case AF_INET:
-#ifdef HAVE_IPV6
-       case AF_INET6:
-#endif
-       {
-           break;
-       }
-
-       default:
-       {
-           errorMessage = "invalid address family in connection";
-           return errorMessage;
-       }
-   }
-
-   clientHost = palloc0(clientHostLength);
-
-   nameFound = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
-                                  clientHost, clientHostLength, NULL, 0, flags);
-   if (nameFound == 0)
-   {
-       appendStringInfo(clientHostStringInfo, "%s", clientHost);
-   }
-   else
-   {
-       StringInfo errorMessageStringInfo = makeStringInfo();
-       appendStringInfo(errorMessageStringInfo, "could not resolve client host: %s",
-                        gai_strerror(nameFound));
-
-       errorMessage = errorMessageStringInfo->data;
-       return errorMessage;
-   }
-
-   return errorMessage;
-}
-
-
-/*
- * WorkerGetNodeWithName finds and returns a node from the membership list that
- * has the given hostname. The function returns null if no such node exists.
- */
-static WorkerNode *
-WorkerGetNodeWithName(const char *hostname)
-{
-   WorkerNode *workerNode = NULL;
-   HASH_SEQ_STATUS status;
-   HTAB *workerNodeHash = GetWorkerNodeHash();
-
-   hash_seq_init(&status, workerNodeHash);
-
-   while ((workerNode = hash_seq_search(&status)) != NULL)
-   {
-       int nameCompare = strncmp(workerNode->workerName, hostname, WORKER_LENGTH);
-       if (nameCompare == 0)
-       {
-           /* we need to terminate the scan since we break */
-           hash_seq_term(&status);
-           break;
-       }
-   }
-
-   return workerNode;
-}
-
-
 /*
  * ActivePrimaryNonCoordinatorNodeCount returns the number of groups with a primary in the cluster.
  * This method excludes coordinator even if it is added as a worker to cluster.
@@ -542,84 +328,6 @@ NodeIsReadableWorker(WorkerNode *node)
 }
 
 
-/*
- * PrimaryNodesNotInList scans through the worker node hash and returns a list of all
- * primary nodes which are not in currentList. It runs in O(n*m) but currentList is
- * quite small.
- */
-static List *
-PrimaryNodesNotInList(List *currentList)
-{
-   List *workerNodeList = NIL;
-   HTAB *workerNodeHash = GetWorkerNodeHash();
-   WorkerNode *workerNode = NULL;
-   HASH_SEQ_STATUS status;
-
-   hash_seq_init(&status, workerNodeHash);
-
-   while ((workerNode = hash_seq_search(&status)) != NULL)
-   {
-       if (ListMember(currentList, workerNode))
-       {
-           continue;
-       }
-
-       if (NodeIsPrimary(workerNode))
-       {
-           workerNodeList = lappend(workerNodeList, workerNode);
-       }
-   }
-
-   return workerNodeList;
-}
-
-
-/* FindRandomNodeFromList picks a random node from the list provided to it. */
-static WorkerNode *
-FindRandomNodeFromList(List *candidateWorkerNodeList)
-{
-   uint32 candidateNodeCount = list_length(candidateWorkerNodeList);
-
-   /* nb, the random seed has already been set by the postmaster when starting up */
-   uint32 workerPosition = (random() % candidateNodeCount);
-
-   WorkerNode *workerNode =
-       (WorkerNode *) list_nth(candidateWorkerNodeList, workerPosition);
-
-   return workerNode;
-}
-
-
-/*
- * OddNumber function returns true if given number is odd; returns false otherwise.
- */
-static bool
-OddNumber(uint32 number)
-{
-   bool oddNumber = ((number % 2) == 1);
-   return oddNumber;
-}
-
-
-/* Checks if given worker node is a member of the current list. */
-static bool
-ListMember(List *currentList, WorkerNode *workerNode)
-{
-   Size keySize = WORKER_LENGTH + sizeof(uint32);
-
-   WorkerNode *currentNode = NULL;
-   foreach_ptr(currentNode, currentList)
-   {
-       if (WorkerNodeCompare(workerNode, currentNode, keySize) == 0)
-       {
-           return true;
-       }
-   }
-
-   return false;
-}
-
-
 /*
  * CompareWorkerNodes compares two pointers to worker nodes using the exact
  * same logic employed by WorkerNodeCompare.
@@ -260,7 +260,7 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
        ereport(DEBUG4, (errmsg("function is distributed")));
    }
 
-   if (IsCitusInitiatedRemoteBackend())
+   if (IsCitusInternalBackend())
    {
        bool isFunctionForceDelegated = procedure->forceDelegation;
 
@@ -303,17 +303,6 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
        return NULL;
    }
 
-   if (localGroupId != COORDINATOR_GROUP_ID)
-   {
-       /*
-        * We are calling a distributed function on a worker node. We currently
-        * only delegate from the coordinator.
-        *
-        * TODO: remove this restriction.
-        */
-       return NULL;
-   }
-
    /*
     * Cannot delegate functions for INSERT ... SELECT func(), since they require
     * coordinated transactions.
@@ -374,14 +363,16 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
    }
 
    /*
-    * If the expression is simple, such as, SELECT fn() in
-    * PL/PgSQL code, PL engine is doing simple expression
-    * evaluation, which can't interpret the CustomScan Node.
-    * Function from FROM clause is not simple, so it's ok.
+    * If the expression is simple, such as, SELECT function() or PEFORM function()
+    * in PL/PgSQL code, PL engine does a simple expression evaluation which can't
+    * interpret the Citus CustomScan Node.
+    * Note: Function from FROM clause is not simple, so it's ok to pushdown.
     */
-   if (MaybeExecutingUDF() && IsQuerySimple(planContext->query) && !fromFuncExpr)
+   if ((MaybeExecutingUDF() || DoBlockLevel > 0) &&
+       IsQuerySimple(planContext->query) &&
+       !fromFuncExpr)
    {
-       ereport(DEBUG1, (errmsg("Skipping delegation of function "
+       ereport(DEBUG1, (errmsg("Skipping pushdown of function "
                                "from a PL/PgSQL simple expression")));
        return NULL;
    }
@@ -1616,7 +1616,19 @@ MasterAggregateExpression(Aggref *originalAggregate,
        Expr *directarg;
        foreach_ptr(directarg, originalAggregate->aggdirectargs)
        {
-           if (!IsA(directarg, Const) && !IsA(directarg, Param))
+           /*
+            * Need to replace nodes that contain any Vars with Vars referring
+            * to the related column of the result set returned for the worker
+            * aggregation.
+            *
+            * When there are no Vars, then the expression can be fully evaluated
+            * on the coordinator, so we skip it here. This is not just an
+            * optimization, but the result of the expression might require
+            * calling the final function of the aggregate, and doing so when
+            * there are no input rows (i.e.: with an empty tuple slot) is not
+            * desirable for the node-executor methods.
+            */
+           if (pull_var_clause_default((Node *) directarg) != NIL)
            {
                Var *var = makeVar(masterTableId, walkerContext->columnId,
                                   exprType((Node *) directarg),
@@ -3070,7 +3082,13 @@ WorkerAggregateExpressionList(Aggref *originalAggregate,
        Expr *directarg;
        foreach_ptr(directarg, originalAggregate->aggdirectargs)
        {
-           if (!IsA(directarg, Const) && !IsA(directarg, Param))
+           /*
+            * The worker aggregation should execute any node that contains any
+            * Var nodes and return the result in the targetlist, so that the
+            * combine query can then fetch the result via remote scan; see
+            * MasterAggregateExpression.
+            */
+           if (pull_var_clause_default((Node *) directarg) != NIL)
            {
                workerAggregateList = lappend(workerAggregateList, directarg);
            }
@@ -108,7 +108,6 @@ static GucStringAssignHook OldApplicationNameAssignHook = NULL;
 
 
 void _PG_init(void);
-void _PG_fini(void);
 
 static void DoInitialCleanup(void);
 static void ResizeStackToMaximumDepth(void);
@@ -169,13 +168,6 @@ static const struct config_enum_entry task_executor_type_options[] = {
    { NULL, 0, false }
 };
 
-static const struct config_enum_entry shard_placement_policy_options[] = {
-   { "local-node-first", SHARD_PLACEMENT_LOCAL_NODE_FIRST, false },
-   { "round-robin", SHARD_PLACEMENT_ROUND_ROBIN, false },
-   { "random", SHARD_PLACEMENT_RANDOM, false },
-   { NULL, 0, false }
-};
-
 static const struct config_enum_entry use_secondary_nodes_options[] = {
    { "never", USE_SECONDARY_NODES_NEVER, false },
    { "always", USE_SECONDARY_NODES_ALWAYS, false },
@@ -355,10 +347,12 @@ _PG_init(void)
        DoInitialCleanup();
    }
 
+   load_file("columnar.so", false);
 }
 
 
 /*
  * DoInitialCleanup does cleanup at start time.
  * Currently it:
@@ -904,21 +898,10 @@ RegisterCitusConfigVariables(void)
        NULL, NULL, NULL);
 
    DefineCustomBoolVariable(
-       "citus.enable_metadata_sync_by_default",
-       gettext_noop("Enables MX in the new nodes by default"),
+       "citus.enable_metadata_sync",
+       gettext_noop("Enables object and metadata syncing."),
        NULL,
-       &EnableMetadataSyncByDefault,
-       true,
-       PGC_USERSET,
-       GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL,
-       NULL, NULL, NULL);
-
-   DefineCustomBoolVariable(
-       "citus.enable_object_propagation",
-       gettext_noop("Enables propagating object creation for more complex objects, "
-                    "schema's will always be created"),
-       NULL,
-       &EnableDependencyCreation,
+       &EnableMetadataSync,
        true,
        PGC_USERSET,
        GUC_NO_SHOW_ALL,
@@ -1642,22 +1625,6 @@ RegisterCitusConfigVariables(void)
        GUC_STANDARD,
        NULL, NULL, NULL);
 
-   DefineCustomEnumVariable(
-       "citus.shard_placement_policy",
-       gettext_noop("Sets the policy to use when choosing nodes for shard placement."),
-       gettext_noop("The master node chooses which worker nodes to place new shards "
-                    "on. This configuration value specifies the policy to use when "
-                    "selecting these nodes. The local-node-first policy places the "
-                    "first replica on the client node and chooses others randomly. "
-                    "The round-robin policy aims to distribute shards evenly across "
-                    "the cluster by selecting nodes in a round-robin fashion."
-                    "The random policy picks all workers randomly."),
-       &ShardPlacementPolicy,
-       SHARD_PLACEMENT_ROUND_ROBIN, shard_placement_policy_options,
-       PGC_USERSET,
-       GUC_STANDARD,
-       NULL, NULL, NULL);
-
    DefineCustomIntVariable(
        "citus.shard_replication_factor",
        gettext_noop("Sets the replication factor for shards."),
@@ -188,7 +188,7 @@ FROM \
 get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \
 ON pg_stat_activity.pid = dist_txs.process_id \
 WHERE \
-pg_stat_activity.application_name = 'citus' \
+pg_stat_activity.application_name = 'citus_internal' \
 AND \
 pg_stat_activity.query NOT ILIKE '%stat_activity%';"
 
@@ -824,7 +824,7 @@ EnsurePrepareTransactionIsAllowed(void)
        return;
    }
 
-   if (IsCitusInitiatedRemoteBackend())
+   if (IsCitusInternalBackend())
    {
        /*
         * If this is a Citus-initiated backend.
@@ -112,8 +112,7 @@ SendCommandToWorkerAsUser(const char *nodeName, int32 nodePort, const char *node
 /*
  * SendCommandToWorkers sends a command to all workers in
  * parallel. Commands are committed on the workers when the local
- * transaction commits. The connection are made as the extension
- * owner to ensure write access to the Citus metadata tables.
+ * transaction commits.
  */
 void
 SendCommandToWorkersWithMetadata(const char *command)
@@ -123,6 +122,24 @@ SendCommandToWorkersWithMetadata(const char *command)
 }
 
 
+/*
+ * SendCommandToWorkersWithMetadataViaSuperUser sends a command to all workers in
+ * parallel by opening a super user connection. Commands are committed on the workers
+ * when the local transaction commits. The connection are made as the extension
+ * owner to ensure write access to the Citus metadata tables.
+ *
+ * Since we prevent to open superuser connections for metadata tables, it is
+ * discourated to use it. Consider using it only for propagating pg_dist_object
+ * tuples for dependent objects.
+ */
+void
+SendCommandToWorkersWithMetadataViaSuperUser(const char *command)
+{
+   SendCommandToMetadataWorkersParams(command, CitusExtensionOwnerName(),
+                                      0, NULL, NULL);
+}
+
+
 /*
  * TargetWorkerSetNodeList returns a list of WorkerNode's that satisfies the
  * TargetWorkerSet.
@@ -150,7 +150,8 @@ ErrorIfRelationIsAKnownShard(Oid relationId)
 void
 ErrorIfIllegallyChangingKnownShard(Oid relationId)
 {
-   if (LocalExecutorLevel > 0 || IsCitusInitiatedRemoteBackend() ||
+   if (LocalExecutorLevel > 0 ||
+       (IsCitusInternalBackend() || IsRebalancerInternalBackend()) ||
        EnableManualChangesToShards)
    {
        return;
@@ -330,7 +331,7 @@ ResetHideShardsDecision(void)
 static bool
 ShouldHideShardsInternal(void)
 {
-   if (IsCitusInitiatedRemoteBackend())
+   if (IsCitusInternalBackend() || IsRebalancerInternalBackend())
    {
        /* we never hide shards from Citus */
        return false;
@@ -7,9 +7,9 @@
 #include "access/tableam.h"
 #include "access/skey.h"
 #include "nodes/bitmapset.h"
 
-#include "distributed/coordinator_protocol.h"
-
+#include "access/heapam.h"
+#include "catalog/indexing.h"
+#include "utils/acl.h"
 
 /*
  * Number of valid ItemPointer Offset's for "row number" <> "ItemPointer"
@@ -50,8 +50,7 @@ typedef struct ColumnarScanDescData *ColumnarScanDesc;
 
 const TableAmRoutine * GetColumnarTableAmRoutine(void);
 extern void columnar_tableam_init(void);
-extern void columnar_tableam_finish(void);
 
 extern bool CheckCitusVersion(int elevel);
 extern TableScanDesc columnar_beginscan_extended(Relation relation, Snapshot snapshot,
                                                  int nkeys, ScanKey key,
                                                  ParallelTableScanDesc parallel_scan,
@@ -32,7 +32,6 @@ typedef enum
 } PropSetCmdBehavior;
 extern PropSetCmdBehavior PropagateSetCommands;
 extern bool EnableDDLPropagation;
-extern bool EnableDependencyCreation;
 extern bool EnableCreateTypePropagation;
 extern bool EnableAlterRolePropagation;
 extern bool EnableAlterRoleSetPropagation;
@@ -29,7 +29,10 @@
 #define ERROR_BUFFER_SIZE 256
 
 /* application name used for internal connections in Citus */
-#define CITUS_APPLICATION_NAME "citus"
+#define CITUS_APPLICATION_NAME "citus_internal"
+
+/* application name used for internal connections in rebalancer */
+#define CITUS_REBALANCER_NAME "citus_rebalancer"
 
 /* forward declare, to avoid forcing large headers on everyone */
 struct pg_conn;   /* target of the PGconn typedef */
@@ -277,7 +280,8 @@ extern void FinishConnectionListEstablishment(List *multiConnectionList);
 extern void FinishConnectionEstablishment(MultiConnection *connection);
 extern void ClaimConnectionExclusively(MultiConnection *connection);
 extern void UnclaimConnection(MultiConnection *connection);
-extern bool IsCitusInitiatedRemoteBackend(void);
+extern bool IsCitusInternalBackend(void);
+extern bool IsRebalancerInternalBackend(void);
 extern void MarkConnectionConnected(MultiConnection *connection);
 
 /* time utilities */
@@ -72,15 +72,6 @@
 #define DROP_FOREIGN_TABLE_COMMAND "DROP FOREIGN TABLE IF EXISTS %s CASCADE"
 #define CREATE_SCHEMA_COMMAND "CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s"
 
-/* Enumeration that defines the shard placement policy to use while staging */
-typedef enum
-{
-   SHARD_PLACEMENT_INVALID_FIRST = 0,
-   SHARD_PLACEMENT_LOCAL_NODE_FIRST = 1,
-   SHARD_PLACEMENT_ROUND_ROBIN = 2,
-   SHARD_PLACEMENT_RANDOM = 3
-} ShardPlacementPolicyType;
-
 /*
  * TableDDLCommandType encodes the implementation used by TableDDLCommand. See comments in
  * TableDDLCpmmand for details.
@@ -212,7 +203,6 @@ extern TableDDLCommand * ColumnarGetCustomTableOptionsDDL(char *schemaName,
 /* Config variables managed via guc.c */
 extern int ShardCount;
 extern int ShardReplicationFactor;
-extern int ShardPlacementPolicy;
 extern int NextShardId;
 extern int NextPlacementId;
 
@ -23,6 +23,7 @@ extern bool CitusExtensionObject(const ObjectAddress *objectAddress);
|
|||
extern bool IsObjectDistributed(const ObjectAddress *address);
|
||||
extern bool ClusterHasDistributedFunctionWithDistArgument(void);
|
||||
extern void MarkObjectDistributed(const ObjectAddress *distAddress);
|
||||
extern void MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress);
|
||||
extern void UnmarkObjectDistributed(const ObjectAddress *address);
|
||||
extern bool IsTableOwnedByExtension(Oid relationId);
|
||||
extern bool IsObjectAddressOwnedByExtension(const ObjectAddress *target,
|
||||
|
|
|
@ -89,8 +89,8 @@ extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum);
|
|||
|
||||
#define DISABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'off'"
|
||||
#define ENABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'on'"
|
||||
#define DISABLE_OBJECT_PROPAGATION "SET citus.enable_object_propagation TO 'off'"
|
||||
#define ENABLE_OBJECT_PROPAGATION "SET citus.enable_object_propagation TO 'on'"
|
||||
#define DISABLE_METADATA_SYNC "SET citus.enable_metadata_sync TO 'off'"
|
||||
#define ENABLE_METADATA_SYNC "SET citus.enable_metadata_sync TO 'on'"
|
||||
#define WORKER_APPLY_SEQUENCE_COMMAND "SELECT worker_apply_sequence_command (%s,%s)"
|
||||
#define UPSERT_PLACEMENT \
|
||||
"INSERT INTO pg_dist_placement " \
|
||||
|
@@ -108,6 +108,6 @@ extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum);

 /* controlled via GUC */
 extern char *EnableManualMetadataChangesForUser;
-extern bool EnableMetadataSyncByDefault;
+extern bool EnableMetadataSync;

 #endif /* METADATA_SYNC_H */
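In SQL terms, the rename tracked by these defines plays out as follows (a sketch; only the GUC name changes, not its off/on semantics):

    -- old spelling: SET citus.enable_object_propagation TO 'off';
    SET citus.enable_metadata_sync TO 'off';
    -- ... run statements that should not propagate metadata to workers ...
    RESET citus.enable_metadata_sync;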
|
@ -190,7 +190,7 @@ extern List * RebalancePlacementUpdates(List *workerNodeList, List *shardPlaceme
|
|||
RebalancePlanFunctions *rebalancePlanFunctions);
|
||||
extern List * ReplicationPlacementUpdates(List *workerNodeList, List *shardPlacementList,
|
||||
int shardReplicationFactor);
|
||||
extern void ExecuteCriticalCommandInSeparateTransaction(char *command);
|
||||
extern void ExecuteRebalancerCommandInSeparateTransaction(char *command);
|
||||
|
||||
|
||||
#endif /* SHARD_REBALANCER_H */
|
||||
|
|
|
@ -70,7 +70,6 @@ extern WorkerNode * WorkerGetRandomCandidateNode(List *currentNodeList);
|
|||
extern WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList,
|
||||
uint64 shardId,
|
||||
uint32 placementIndex);
|
||||
extern WorkerNode * WorkerGetLocalFirstCandidateNode(List *currentNodeList);
|
||||
extern uint32 ActivePrimaryNonCoordinatorNodeCount(void);
|
||||
extern uint32 ActivePrimaryNodeCount(void);
|
||||
extern List * ActivePrimaryNonCoordinatorNodeList(LOCKMODE lockMode);
|
||||
|
|
|
@@ -49,6 +49,7 @@ extern bool SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(cons
                                                                             List *
                                                                             commandList);
 extern void SendCommandToWorkersWithMetadata(const char *command);
+extern void SendCommandToWorkersWithMetadataViaSuperUser(const char *command);
 extern void SendBareCommandListToMetadataWorkers(List *commandList);
 extern void EnsureNoModificationsHaveBeenDone(void);
 extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName,
@@ -712,6 +712,19 @@ select array_agg(val order by valf) from aggdata;
  {0,NULL,2,3,5,2,4,NULL,NULL,8,NULL}
 (1 row)

+-- test by using some other node types as arguments to agg
+select key, percentile_cont((key - (key > 4)::int) / 10.0) within group(order by val) from aggdata group by key;
+ key | percentile_cont
+---------------------------------------------------------------------
+   1 |               2
+   2 |             2.4
+   3 |               4
+   5 |
+   6 |
+   7 |               8
+   9 |               0
+(7 rows)
+
 -- Test TransformSubqueryNode
 select * FROM (
     SELECT key, mode() within group (order by floor(agg1.val/2)) m from aggdata agg1
@@ -932,5 +945,100 @@ SELECT square_func(5), a, count(a) FROM t1 GROUP BY a;
 ERROR: function aggregate_support.square_func(integer) does not exist
 HINT: No function matches the given name and argument types. You might need to add explicit type casts.
 CONTEXT: while executing command on localhost:xxxxx
+-- Test the cases where the worker agg exec. returns no tuples.
+CREATE TABLE dist_table (dist_col int, agg_col numeric);
+SELECT create_distributed_table('dist_table', 'dist_col');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE TABLE ref_table (int_col int);
+SELECT create_reference_table('ref_table');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ percentile_disc
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
+FROM (SELECT *, random() FROM dist_table) a;
+ percentile_disc
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col)
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ percentile_disc
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT SUM(COALESCE(agg_col, 3))
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ sum
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT AVG(COALESCE(agg_col, 10))
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ avg
+---------------------------------------------------------------------
+
+(1 row)
+
+insert into dist_table values (2, 11.2), (3, NULL), (6, 3.22), (3, 4.23), (5, 5.25), (4, 63.4), (75, NULL), (80, NULL), (96, NULL), (8, 1078), (0, 1.19);
+-- run the same queries after loading some data
+SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ percentile_disc
+---------------------------------------------------------------------
+            3.22
+(1 row)
+
+SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
+FROM (SELECT *, random() FROM dist_table) a;
+ percentile_disc
+---------------------------------------------------------------------
+            3.22
+(1 row)
+
+SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col)
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ percentile_disc
+---------------------------------------------------------------------
+            1.19
+(1 row)
+
+SELECT floor(SUM(COALESCE(agg_col, 3)))
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ floor
+---------------------------------------------------------------------
+  1178
+(1 row)
+
+SELECT floor(AVG(COALESCE(agg_col, 10)))
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ floor
+---------------------------------------------------------------------
+   109
+(1 row)
+
 set client_min_messages to error;
 drop schema aggregate_support cascade;
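The empty result rows above, before the INSERT, are ordinary aggregate semantics: with no GROUP BY, an aggregate over zero input rows still returns exactly one row containing NULL, and the worker/coordinator split has to reproduce that. A local sketch:

    SELECT percentile_disc(0.25) WITHIN GROUP (ORDER BY x) AS p
    FROM (VALUES (1.0)) v(x)
    WHERE false;  -- one row, p IS NULL, matching the pre-insert outputs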
@@ -1,5 +1,5 @@
-SHOW citus.enable_metadata_sync_by_default;
- citus.enable_metadata_sync_by_default
+SHOW citus.enable_metadata_sync;
+ citus.enable_metadata_sync
 ---------------------------------------------------------------------
  on
 (1 row)
@@ -1,5 +1,5 @@
 SET citus.next_shard_id TO 20030000;
-SET citus.enable_object_propagation TO false; -- all tests here verify old behaviour without distributing types,functions,etc automatically
+SET citus.enable_metadata_sync TO false; -- all tests here verify old behaviour without distributing types,functions,etc automatically
 CREATE USER typeowner_for_disabled_object_propagation_guc;
 NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
 HINT: Connect to worker nodes directly to manually create all necessary users and roles.
@@ -65,7 +65,7 @@ SELECT create_distributed_table('t3', 'a');
 -- verify ALTER TYPE statements are not propagated for types, even though they are marked distributed
 BEGIN;
 -- object propagation is turned off after xact finished, type is already marked distributed by then
-SET LOCAL citus.enable_object_propagation TO on;
+SET LOCAL citus.enable_metadata_sync TO on;
 CREATE TYPE tt3 AS (a int, b int);
 CREATE TABLE t4 (a int PRIMARY KEY, b tt3);
 SELECT create_distributed_table('t4','a');
@@ -120,7 +120,7 @@ $$);

 -- suppress any warnings during cleanup
 SET client_min_messages TO error;
-RESET citus.enable_object_propagation;
+RESET citus.enable_metadata_sync;
 DROP SCHEMA disabled_object_propagation CASCADE;
 DROP SCHEMA disabled_object_propagation2 CASCADE;
 DROP USER typeowner_for_disabled_object_propagation_guc;
@@ -8,6 +8,7 @@ SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$);

 \c - - - :worker_1_port
 SET search_path TO collation_conflict;
+SET citus.enable_metadata_sync TO off;
 CREATE COLLATION caseinsensitive (
 	provider = icu,
 	locale = 'und-u-ks-level2'
@@ -45,6 +46,7 @@ SET search_path TO collation_conflict;
 DROP TABLE tblcoll;
 DROP COLLATION caseinsensitive;
 \c - - - :worker_1_port
+SET citus.enable_metadata_sync TO off;
 SET search_path TO collation_conflict;
 CREATE COLLATION caseinsensitive (
 	provider = icu,
@@ -627,7 +627,7 @@ DETAIL: A distributed function is created. To make sure subsequent commands see
 (1 row)

 SELECT outer_emp();
-DEBUG: Skipping delegation of function from a PL/PgSQL simple expression
+DEBUG: Skipping pushdown of function from a PL/PgSQL simple expression
 CONTEXT: SQL statement "SELECT inner_emp('hello')"
 PL/pgSQL function outer_emp() line XX at PERFORM
  outer_emp
@@ -1388,10 +1388,47 @@ SELECT COUNT(*) FROM table_test_prepare;
  28
 (1 row)

+CREATE TABLE test_perform(i int);
+SELECT create_distributed_table('test_perform', 'i', colocate_with := 'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE OR REPLACE FUNCTION test(x int)
+RETURNS int
+AS $$
+DECLARE
+BEGIN
+	RAISE NOTICE 'INPUT %', x;
+	RETURN x;
+END;
+$$ LANGUAGE plpgsql;
+SELECT create_distributed_function('test(int)', 'x',
+	colocate_with := 'test_perform', force_delegation := true);
+DEBUG: switching to sequential query execution mode
+DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
+ create_distributed_function
+---------------------------------------------------------------------
+
+(1 row)
+
+DO $$
+BEGIN
+	PERFORM test(3);
+END;
+$$ LANGUAGE plpgsql;
+DEBUG: Skipping pushdown of function from a PL/PgSQL simple expression
+CONTEXT: SQL statement "SELECT test(3)"
+PL/pgSQL function inline_code_block line XX at PERFORM
+NOTICE: INPUT 3
+CONTEXT: PL/pgSQL function test(integer) line XX at RAISE
+SQL statement "SELECT test(3)"
+PL/pgSQL function inline_code_block line XX at PERFORM
 RESET client_min_messages;
 SET citus.log_remote_commands TO off;
 DROP SCHEMA forcepushdown_schema CASCADE;
-NOTICE: drop cascades to 36 other objects
+NOTICE: drop cascades to 38 other objects
 DETAIL: drop cascades to table test_forcepushdown
 drop cascades to table test_forcepushdown_noncolocate
 drop cascades to function insert_data(integer)

@@ -1428,3 +1465,5 @@ drop cascades to function insert_data_cte_nondist(integer)
 drop cascades to table table_test_prepare
 drop cascades to function test_prepare(integer,integer)
 drop cascades to function outer_test_prepare(integer,integer)
+drop cascades to table test_perform
+drop cascades to function test(integer)
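The DEBUG lines above come from PL/pgSQL treating PERFORM's argument as a "simple expression", which is evaluated locally and therefore skips function pushdown; issued as a top-level query, the same call remains eligible for (force-)delegation. A sketch reusing the distributed function from this test:

    DO $$ BEGIN PERFORM test(3); END; $$;  -- stays local, logs the skip
    SELECT test(3);                        -- can be delegated to the colocated worker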
@@ -2,10 +2,10 @@ Parsed test spec with 1 sessions

 starting permutation: check_mx
 step check_mx:
-    SHOW citus.enable_metadata_sync_by_default;
+    SHOW citus.enable_metadata_sync;
     SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary';

-citus.enable_metadata_sync_by_default
+citus.enable_metadata_sync
 ---------------------------------------------------------------------
 on
 (1 row)
@@ -2,295 +2,337 @@ Parsed test spec with 2 sessions

 starting permutation: s1-begin s2-begin s2-reindex s1-insert s2-commit s1-commit
 step s1-begin:
     BEGIN;
     SET ROLE test_user_1;

 step s2-begin:
     BEGIN;
     SET ROLE test_user_2;

 step s2-reindex:
     REINDEX TABLE test_table;

 ERROR: must be owner of table test_table
 step s1-insert:
     UPDATE test_table SET column2 = 1;

 step s2-commit:
     COMMIT;

 step s1-commit:
     COMMIT;


 starting permutation: s1-grant s1-begin s2-begin s2-reindex s1-insert s2-insert s2-commit s1-commit
 step s1-grant:
     SET ROLE test_user_1;
     SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');
     GRANT ALL ON test_table TO test_user_2;
+    SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$);

 bool_and
 ---------------------------------------------------------------------
 t
 (1 row)

+run_command_on_workers
+---------------------------------------------------------------------
+(localhost,57637,t,SET)
+(localhost,57638,t,SET)
+(2 rows)
+
 step s1-begin:
     BEGIN;
     SET ROLE test_user_1;

 step s2-begin:
     BEGIN;
     SET ROLE test_user_2;

 step s2-reindex:
     REINDEX TABLE test_table;

 ERROR: must be owner of table test_table
 step s1-insert:
     UPDATE test_table SET column2 = 1;

 step s2-insert:
     UPDATE test_table SET column2 = 2;

 ERROR: current transaction is aborted, commands ignored until end of transaction block
 step s2-commit:
     COMMIT;

 step s1-commit:
     COMMIT;


 starting permutation: s1-grant s1-begin s2-begin s1-reindex s2-insert s1-insert s1-commit s2-commit
 step s1-grant:
     SET ROLE test_user_1;
     SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');
     GRANT ALL ON test_table TO test_user_2;
+    SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$);

 bool_and
 ---------------------------------------------------------------------
 t
 (1 row)

+run_command_on_workers
+---------------------------------------------------------------------
+(localhost,57637,t,SET)
+(localhost,57638,t,SET)
+(2 rows)
+
 step s1-begin:
     BEGIN;
     SET ROLE test_user_1;

 step s2-begin:
     BEGIN;
     SET ROLE test_user_2;

 step s1-reindex:
     REINDEX TABLE test_table;

 step s2-insert:
     UPDATE test_table SET column2 = 2;
 <waiting ...>
 step s1-insert:
     UPDATE test_table SET column2 = 1;

 step s1-commit:
     COMMIT;

 step s2-insert: <... completed>
 step s2-commit:
     COMMIT;


 starting permutation: s1-begin s2-begin s2-index s1-insert s2-commit s1-commit s2-drop-index
 step s1-begin:
     BEGIN;
     SET ROLE test_user_1;

 step s2-begin:
     BEGIN;
     SET ROLE test_user_2;

 step s2-index:
     CREATE INDEX test_index ON test_table(column1);

 ERROR: must be owner of table test_table
 step s1-insert:
     UPDATE test_table SET column2 = 1;

 step s2-commit:
     COMMIT;

 step s1-commit:
     COMMIT;

 step s2-drop-index:
     DROP INDEX IF EXISTS test_index;


 starting permutation: s1-grant s1-begin s2-begin s2-insert s1-index s2-insert s2-commit s1-commit s1-drop-index
 step s1-grant:
     SET ROLE test_user_1;
     SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');
     GRANT ALL ON test_table TO test_user_2;
+    SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$);

 bool_and
 ---------------------------------------------------------------------
 t
 (1 row)

+run_command_on_workers
+---------------------------------------------------------------------
+(localhost,57637,t,SET)
+(localhost,57638,t,SET)
+(2 rows)
+
 step s1-begin:
     BEGIN;
     SET ROLE test_user_1;

 step s2-begin:
     BEGIN;
     SET ROLE test_user_2;

 step s2-insert:
     UPDATE test_table SET column2 = 2;

 step s1-index:
     CREATE INDEX test_index ON test_table(column1);
 <waiting ...>
 step s2-insert:
     UPDATE test_table SET column2 = 2;

 step s2-commit:
     COMMIT;

 step s1-index: <... completed>
 step s1-commit:
     COMMIT;

 step s1-drop-index:
     DROP INDEX IF EXISTS test_index;


 starting permutation: s1-grant s1-begin s2-begin s1-index s2-index s1-insert s1-commit s2-commit s1-drop-index s2-drop-index
 step s1-grant:
     SET ROLE test_user_1;
     SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');
     GRANT ALL ON test_table TO test_user_2;
+    SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$);

 bool_and
 ---------------------------------------------------------------------
 t
 (1 row)

+run_command_on_workers
+---------------------------------------------------------------------
+(localhost,57637,t,SET)
+(localhost,57638,t,SET)
+(2 rows)
+
 step s1-begin:
     BEGIN;
     SET ROLE test_user_1;

 step s2-begin:
     BEGIN;
     SET ROLE test_user_2;

 step s1-index:
     CREATE INDEX test_index ON test_table(column1);

 step s2-index:
     CREATE INDEX test_index ON test_table(column1);

 ERROR: must be owner of table test_table
 step s1-insert:
     UPDATE test_table SET column2 = 1;

 step s1-commit:
     COMMIT;

 step s2-commit:
     COMMIT;

 step s1-drop-index:
     DROP INDEX IF EXISTS test_index;

 step s2-drop-index:
     DROP INDEX IF EXISTS test_index;


 starting permutation: s1-begin s2-begin s2-truncate s1-insert s2-commit s1-commit
 step s1-begin:
     BEGIN;
     SET ROLE test_user_1;

 step s2-begin:
     BEGIN;
     SET ROLE test_user_2;

 step s2-truncate:
     TRUNCATE test_table;

 ERROR: permission denied for table test_table
 step s1-insert:
     UPDATE test_table SET column2 = 1;

 step s2-commit:
     COMMIT;

 step s1-commit:
     COMMIT;


 starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-insert s1-insert s1-commit s2-commit
 step s1-grant:
     SET ROLE test_user_1;
     SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');
     GRANT ALL ON test_table TO test_user_2;
+    SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$);

 bool_and
 ---------------------------------------------------------------------
 t
 (1 row)

+run_command_on_workers
+---------------------------------------------------------------------
+(localhost,57637,t,SET)
+(localhost,57638,t,SET)
+(2 rows)
+
 step s1-begin:
     BEGIN;
     SET ROLE test_user_1;

 step s2-begin:
     BEGIN;
     SET ROLE test_user_2;

 step s1-truncate:
     TRUNCATE test_table;

 step s2-insert:
     UPDATE test_table SET column2 = 2;
 <waiting ...>
 step s1-insert:
     UPDATE test_table SET column2 = 1;

 step s1-commit:
     COMMIT;

 step s2-insert: <... completed>
 step s2-commit:
     COMMIT;


 starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit
 step s1-grant:
     SET ROLE test_user_1;
     SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');
     GRANT ALL ON test_table TO test_user_2;
+    SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$);

 bool_and
 ---------------------------------------------------------------------
 t
 (1 row)

+run_command_on_workers
+---------------------------------------------------------------------
+(localhost,57637,t,SET)
+(localhost,57638,t,SET)
+(2 rows)
+
 step s1-begin:
     BEGIN;
     SET ROLE test_user_1;

 step s2-begin:
     BEGIN;
     SET ROLE test_user_2;

 step s1-truncate:
     TRUNCATE test_table;

 step s2-truncate:
     TRUNCATE test_table;
 <waiting ...>
 step s1-commit:
     COMMIT;

 step s2-truncate: <... completed>
 step s2-commit:
     COMMIT;
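The added s1-grant lines rely on run_command_on_workers, which executes a command on every worker and returns one (nodename, nodeport, success, result) tuple each — hence the two (localhost,...,t,SET) rows above. A minimal sketch:

    SELECT * FROM run_command_on_workers($$SELECT 1$$);
    -- localhost | 57637 | t | 1
    -- localhost | 57638 | t | 1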
@@ -2,7 +2,7 @@ Parsed test spec with 1 sessions

 starting permutation: disable-mx-by-default reload stop-metadata-sync
 step disable-mx-by-default:
-    ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF;
+    ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;

 step reload:
     SELECT pg_reload_conf();

@@ -2,7 +2,7 @@ Parsed test spec with 1 sessions

 starting permutation: disable-mx-by-default reload stop-metadata-sync
 step disable-mx-by-default:
-    ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF;
+    ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;

 step reload:
     SELECT pg_reload_conf();

@@ -2,7 +2,7 @@ Parsed test spec with 1 sessions

 starting permutation: enable-mx-by-default reload start-metadata-sync
 step enable-mx-by-default:
-    ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON;
+    ALTER SYSTEM SET citus.enable_metadata_sync TO ON;

 step reload:
     SELECT pg_reload_conf();

@@ -2,7 +2,7 @@ Parsed test spec with 1 sessions

 starting permutation: enable-mx-by-default reload start-metadata-sync
 step enable-mx-by-default:
-    ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON;
+    ALTER SYSTEM SET citus.enable_metadata_sync TO ON;

 step reload:
     SELECT pg_reload_conf();
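ALTER SYSTEM only writes postgresql.auto.conf; the reload step is what makes the new value visible, which is why each permutation pairs the two (a sketch using the renamed GUC):

    ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;
    SELECT pg_reload_conf();
    SHOW citus.enable_metadata_sync;  -- now reports 'off'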
@@ -36,7 +36,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
 ERROR: This is an internal Citus function can only be used in a distributed transaction
 ROLLBACK;
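The error above is the guard these tests probe: citus_internal_* UDFs only run when the session both advertises itself as an internal Citus connection and carries a distributed transaction id. A sketch of the handshake the later blocks perform (the timestamp argument is arbitrary):

    BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
    SELECT assign_distributed_transaction_id(0, 8, now());
    SET application_name TO 'citus_internal';
    -- citus_internal_* metadata functions are now callable, subject to ownership checks
    ROLLBACK;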
@@ -73,7 +73,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
 ERROR: must be owner of table test
 ROLLBACK;

@@ -85,7 +85,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_update_relation_colocation ('test'::regclass, 10);
 ERROR: must be owner of table test
 ROLLBACK;

@@ -99,7 +99,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
  citus_internal_add_partition_metadata
 ---------------------------------------------------------------------

@@ -121,7 +121,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
 ERROR: Metadata syncing is only allowed for hash, reference and local tables:X
 ROLLBACK;

@@ -133,7 +133,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's');
 ERROR: column "non_existing_col" of relation "test_2" does not exist
 ROLLBACK;

@@ -145,7 +145,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's');
 ERROR: relation cannot be NULL
 ROLLBACK;

@@ -157,7 +157,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's');
 ERROR: Metadata syncing is only allowed for valid colocation id values.
 ROLLBACK;

@@ -169,7 +169,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X');
 ERROR: Metadata syncing is only allowed for hash, reference and local tables:X
 ROLLBACK;

@@ -181,7 +181,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
  citus_internal_add_partition_metadata

@@ -200,7 +200,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
  citus_internal_add_partition_metadata

@@ -219,7 +219,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's');
 ERROR: Distribution column cannot be NULL for relation "test_2"

@@ -252,7 +252,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
  citus_internal_add_partition_metadata
 ---------------------------------------------------------------------

@@ -268,7 +268,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111);
 ERROR: could not find valid entry for shard xxxxx

@@ -298,7 +298,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
 ERROR: role "non_existing_user" does not exist
 ROLLBACK;

@@ -329,7 +329,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's');
 ERROR: Reference or local tables cannot have distribution columns
 ROLLBACK;

@@ -341,7 +341,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A');
 ERROR: Metadata syncing is only allowed for known replication models.
 ROLLBACK;

@@ -353,7 +353,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c');
 ERROR: Local or references tables can only have 's' or 't' as the replication model.
 ROLLBACK;

@@ -368,7 +368,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's');
  citus_internal_add_partition_metadata
 ---------------------------------------------------------------------

@@ -387,7 +387,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text))

@@ -402,7 +402,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text))

@@ -417,7 +417,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's');
  citus_internal_add_partition_metadata
 ---------------------------------------------------------------------

@@ -445,7 +445,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232);
  citus_internal_update_relation_colocation
 ---------------------------------------------------------------------

@@ -461,7 +461,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text))

@@ -476,7 +476,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text))

@@ -491,7 +491,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text))

@@ -506,7 +506,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text))

@@ -521,7 +521,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text))

@@ -536,7 +536,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text),

@@ -554,7 +554,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
 	AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))

@@ -569,7 +569,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
 	AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false))

@@ -583,7 +583,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
 	AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false))

@@ -598,7 +598,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
 	AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))

@@ -614,7 +614,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
 	AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false))

@@ -635,7 +635,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 CREATE TABLE publication_test_table(id int);
 CREATE PUBLICATION publication_test FOR TABLE publication_test_table;

@@ -653,7 +653,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 CREATE FUNCTION distribution_test_function(int) RETURNS int
     AS $$ SELECT $1 $$

@@ -664,6 +664,22 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
 ERROR: must be owner of function distribution_test_function
 ROLLBACK;
+BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
+ assign_distributed_transaction_id
+---------------------------------------------------------------------
+
+(1 row)
+
+SET application_name to 'citus_internal';
+\set VERBOSITY terse
+CREATE TYPE distributed_test_type AS (a int, b int);
+SET ROLE metadata_sync_helper_role;
+WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
+	AS (VALUES ('type', ARRAY['distributed_test_type']::text[], ARRAY[]::text[], -1, 0, false))
+SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
+ERROR: must be owner of type distributed_test_type
+ROLLBACK;
 -- we do not allow wrong partmethod
 -- so manually insert wrong partmethod for the sake of the test
 SET search_path TO metadata_sync_helpers;

@@ -674,7 +690,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 UPDATE pg_dist_partition SET partmethod = 'X';
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)

@@ -693,7 +709,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text))

@@ -720,7 +736,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '11'::text, '20'::text),

@@ -751,7 +767,7 @@ BEGIN;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
 ERROR: cannot colocate tables test_2 and test_3
 ROLLBACK;

@@ -763,7 +779,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_3'::regclass, 1420009::bigint, 't'::"char", '21'::text, '30'::text),

@@ -790,7 +806,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL))

@@ -805,7 +821,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL),

@@ -821,7 +837,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL))

@@ -842,7 +858,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
 	AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text))

@@ -864,7 +880,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
 	(VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint))

@@ -879,7 +895,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
 	(VALUES (1420000, 1, 0::bigint, 1::int, -10))

@@ -894,7 +910,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
 	(VALUES (1430100, 1, 0::bigint, 1::int, 10))

@@ -909,7 +925,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
 	(VALUES (1420000, 10, 0::bigint, 1::int, 1500000))

@@ -924,7 +940,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
 	(VALUES ( 1420000, 1, 0::bigint, 123123123::int, 1500000))

@@ -952,7 +968,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
 	(VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000),

@@ -968,7 +984,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
 	(VALUES (1420007, 1, 0::bigint, get_node_id(), 1500000))

@@ -983,7 +999,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
 	(VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000),

@@ -1024,7 +1040,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
  citus_internal_update_relation_colocation
 ---------------------------------------------------------------------

@@ -1041,7 +1057,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000);
 ERROR: Node with group id 1014 for shard placement xxxxx does not exist

@@ -1054,7 +1070,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id());
 ERROR: Active placement for shard xxxxx is not found on group:14

@@ -1067,7 +1083,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1);
 ERROR: Shard id does not exists: 0

@@ -1080,7 +1096,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1);
 ERROR: Shard id does not exists: 213123123123

@@ -1093,7 +1109,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1);
 ERROR: must be owner of table super_user_table

@@ -1106,7 +1122,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(shardid)
 	AS (VALUES (1420007))

@@ -1115,7 +1131,7 @@ ERROR: must be owner of table super_user_table
 ROLLBACK;
 -- the user only allowed to delete shards in a distributed transaction
 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(shardid)
 	AS (VALUES (1420007))

@@ -1130,7 +1146,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(shardid)
 	AS (VALUES (1420100))

@@ -1157,7 +1173,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 WITH shard_data(shardid)
 	AS (VALUES (1420000))

@@ -1191,7 +1207,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 -- with an ugly trick, update the repmodel
 -- so that making two tables colocated fails
 UPDATE pg_dist_partition SET repmodel = 't'

@@ -1206,7 +1222,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 -- with an ugly trick, update the vartype of table from int to bigint
 -- so that making two tables colocated fails
 UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}'

@@ -1221,7 +1237,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 -- with an ugly trick, update the partmethod of the table to not-valid
 -- so that making two tables colocated fails
 UPDATE pg_dist_partition SET partmethod = ''

@@ -1236,7 +1252,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 -- with an ugly trick, update the partmethod of the table to not-valid
 -- so that making two tables colocated fails
 UPDATE pg_dist_partition SET partmethod = 'a'

@@ -1254,7 +1270,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's');
  citus_internal_add_partition_metadata

@@ -1277,7 +1293,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

 (1 row)

-SET application_name to 'citus';
+SET application_name to 'citus_internal';
 \set VERBOSITY terse
 SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's');
  citus_internal_add_partition_metadata
@@ -64,14 +64,14 @@ CREATE FUNCTION add(integer, integer) RETURNS integer
-- Since deparse logic can not work on workers if the function
-- is distributed there, we are disabling object propagation
-- first. The same trick has been applied multiple times in this test.
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('add(int,int)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION add CALLED ON NULL INPUT
$cmd$);

@@ -540,7 +540,7 @@ CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() RETURNS TEXT
CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text) RETURNS TEXT
AS $$ SELECT 'Overloaded function called with param: ' || $1 $$
LANGUAGE SQL;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"()');
 create_distributed_function
---------------------------------------------------------------------

@@ -553,7 +553,7 @@ SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text)');

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2"
$cmd$);

@@ -581,14 +581,14 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE
CREATE FUNCTION func_default_param(param INT DEFAULT 0) RETURNS TEXT
AS $$ SELECT 'supplied param is : ' || param; $$
LANGUAGE SQL;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('func_default_param(INT)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_default_param RENAME TO func_with_default_param;
$cmd$);

@@ -604,14 +604,14 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE
CREATE FUNCTION func_out_param(IN param INT, OUT result TEXT)
AS $$ SELECT 'supplied param is : ' || param; $$
LANGUAGE SQL;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('func_out_param(INT)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_out_param RENAME TO func_in_and_out_param;
$cmd$);

@@ -630,14 +630,14 @@ BEGIN
    a := a * a;
END; $$
LANGUAGE plpgsql;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('square(NUMERIC)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION square SET search_path TO DEFAULT;
$cmd$);

@@ -663,14 +663,14 @@ BEGIN
    FROM generate_subscripts(list, 1) g(i);
END; $$
LANGUAGE plpgsql;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('sum_avg(NUMERIC[])');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION sum_avg COST 10000;
$cmd$);

@@ -689,14 +689,14 @@ RESET citus.enable_ddl_propagation;
CREATE FUNCTION func_custom_param(IN param intpair, OUT total INT)
AS $$ SELECT param.x + param.y $$
LANGUAGE SQL;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('func_custom_param(intpair)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param;
$cmd$);

@@ -713,14 +713,14 @@ CREATE FUNCTION func_returns_table(IN count INT)
RETURNS TABLE (x INT, y INT)
AS $$ SELECT i,i FROM generate_series(1,count) i $$
LANGUAGE SQL;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('func_returns_table(INT)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_returns_table ROWS 100;
$cmd$);
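
Every one of these deparse hunks wraps create_distributed_function in the renamed GUC, so the function is registered as distributed without first being propagated to the workers. A sketch of the wrapper, assuming a function add(int,int) already exists locally:

-- sketch: register a function as distributed without propagating it,
-- so deparse_and_run_on_workers can exercise propagation itself
SET citus.enable_metadata_sync TO OFF;  -- formerly citus.enable_object_propagation
SELECT create_distributed_function('add(int,int)');
RESET citus.enable_metadata_sync;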
@@ -49,14 +49,14 @@ BEGIN
    RAISE INFO 'information message %', $1;
END;
$proc$;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('raise_info(text)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER PROCEDURE raise_info CALLED ON NULL INPUT
$cmd$);
@@ -105,7 +105,7 @@ ORDER BY 1, 2;
DROP EXTENSION citus;
\c
-- these tests switch between citus versions and call ddl's that require pg_dist_object to be created
SET citus.enable_object_propagation TO 'false';
SET citus.enable_metadata_sync TO 'false';
SET citus.enable_version_checks TO 'false';
CREATE EXTENSION citus VERSION '8.0-1';
ALTER EXTENSION citus UPDATE TO '8.0-2';

@@ -492,7 +492,7 @@ SELECT * FROM multi_extension.print_extension_changes();
-- Test downgrade to 9.4-1 from 9.5-1
ALTER EXTENSION citus UPDATE TO '9.5-1';
BEGIN;
SET citus.enable_object_propagation TO on;
SET citus.enable_metadata_sync TO on;
SELECT master_add_node('localhost', :master_port, groupId=>0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
 master_add_node

@@ -508,7 +508,7 @@ NOTICE: create_citus_local_table is deprecated in favour of citus_add_local_tab

(1 row)

RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
-- downgrade from 9.5-1 to 9.4-1 should fail as we have a citus local table
ALTER EXTENSION citus UPDATE TO '9.4-1';
ERROR: citus local tables are introduced in Citus 9.5
@@ -283,6 +283,14 @@ SELECT * FROM columnar_table;
 1
(2 rows)

-- Fail to alter a columnar table that is created by a different user
SET ROLE full_access;
SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000);
ERROR: must be owner of table columnar_table
-- Fail to reset a columnar table value created by a different user
SELECT alter_columnar_table_reset('columnar_table', chunk_group_row_limit => true);
ERROR: must be owner of table columnar_table
SET ROLE read_access;
-- and drop it
DROP TABLE columnar_table;
-- cannot modify columnar metadata table as unprivileged user
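
The new expected errors pin down the ownership rule for the columnar option UDFs: any role other than the table's owner is rejected outright. A sketch of the scenario, with illustrative role and table names:

-- sketch: only the table owner may change columnar options
SET ROLE owner_role;                 -- hypothetical role that creates the table
CREATE TABLE events_columnar (id int) USING columnar;
SET ROLE other_role;                 -- any non-owner role
SELECT alter_columnar_table_set('events_columnar', chunk_group_row_limit => 2000);
-- expected: ERROR: must be owner of table events_columnar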
@@ -303,5 +303,126 @@ SELECT * FROM seg_test;
(1 row)

\c - - - :master_port
CREATE SCHEMA ext_owned_tables;
SELECT run_command_on_workers($$CREATE SCHEMA ext_owned_tables;$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"CREATE SCHEMA")
 (localhost,57638,t,"CREATE SCHEMA")
(2 rows)

SET search_path TO ext_owned_tables;
CREATE sequence my_seq_ext_1;
SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_1;$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"CREATE SEQUENCE")
 (localhost,57638,t,"CREATE SEQUENCE")
(2 rows)

CREATE sequence my_seq_ext_2;
SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_2;$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"CREATE SEQUENCE")
 (localhost,57638,t,"CREATE SEQUENCE")
(2 rows)

-- test distributed tables owned by extension
CREATE TABLE seg_test (x int, y bigserial, z int default nextval('my_seq_ext_1'));
SELECT run_command_on_workers($$CREATE TABLE ext_owned_tables.seg_test (x int, y bigserial, z int default nextval('ext_owned_tables.my_seq_ext_1'))$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"CREATE TABLE")
 (localhost,57638,t,"CREATE TABLE")
(2 rows)

INSERT INTO seg_test VALUES (42);
CREATE TABLE tcn_test (x int, y bigserial, z int default nextval('my_seq_ext_2'));
SELECT run_command_on_workers($$CREATE TABLE ext_owned_tables.tcn_test (x int, y bigserial, z int default nextval('ext_owned_tables.my_seq_ext_2'));$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"CREATE TABLE")
 (localhost,57638,t,"CREATE TABLE")
(2 rows)

INSERT INTO tcn_test VALUES (42);
-- pretend this table belongs to an extension
ALTER EXTENSION seg ADD TABLE ext_owned_tables.seg_test;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
ALTER EXTENSION seg ADD SEQUENCE ext_owned_tables.my_seq_ext_1;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
SELECT run_command_on_workers($$ALTER EXTENSION seg ADD TABLE ext_owned_tables.seg_test;$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"ALTER EXTENSION")
 (localhost,57638,t,"ALTER EXTENSION")
(2 rows)

SELECT run_command_on_workers($$ALTER EXTENSION seg ADD SEQUENCE ext_owned_tables.my_seq_ext_1;$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"ALTER EXTENSION")
 (localhost,57638,t,"ALTER EXTENSION")
(2 rows)

CREATE EXTENSION tcn;
ALTER EXTENSION tcn ADD TABLE ext_owned_tables.tcn_test;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
ALTER EXTENSION tcn ADD SEQUENCE ext_owned_tables.my_seq_ext_2;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
SELECT run_command_on_workers($$ALTER EXTENSION tcn ADD TABLE ext_owned_tables.tcn_test;$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"ALTER EXTENSION")
 (localhost,57638,t,"ALTER EXTENSION")
(2 rows)

SELECT run_command_on_workers($$ALTER EXTENSION tcn ADD SEQUENCE ext_owned_tables.my_seq_ext_2;$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,"ALTER EXTENSION")
 (localhost,57638,t,"ALTER EXTENSION")
(2 rows)

SELECT create_reference_table('seg_test');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$ext_owned_tables.seg_test$$)
 create_reference_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('tcn_test', 'x');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$ext_owned_tables.tcn_test$$)
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- test metadata re-sync in the presence of an extension-owned table
-- and serial/sequences
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

-- also drops table on both worker and master
DROP EXTENSION seg CASCADE;
SET client_min_messages TO ERROR;
DROP SCHEMA ext_owned_tables CASCADE;
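
The point of this block is that a table can become extension-owned after creation and still be distributed, and that dropping the extension then cascades to the table on every node. A condensed sketch, assuming the seg extension is installed and using a hypothetical table name:

-- sketch: distribute an extension-owned table, then drop it via the extension
CREATE TABLE ext_tbl (x int);            -- hypothetical table
ALTER EXTENSION seg ADD TABLE ext_tbl;   -- table now travels with the extension
SELECT create_reference_table('ext_tbl');
DROP EXTENSION seg CASCADE;              -- also drops ext_tbl on all nodes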
@@ -74,6 +74,11 @@ LANGUAGE plpgsql AS $$
BEGIN
    y := x + y * 2;
END;$$;
CREATE FUNCTION mx_call_func_bigint_force(x bigint, INOUT y bigint)
LANGUAGE plpgsql AS $$
BEGIN
    PERFORM multi_mx_function_call_delegation.mx_call_func_bigint(x, y);
END;$$;
-- create another function which verifies:
-- 1. we work fine with multiple return columns
-- 2. we work fine in combination with custom types

@@ -193,12 +198,6 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass

(1 row)

select colocate_proc_with_table('mx_call_func_bigint', 'mx_call_dist_table_bigint'::regclass, 1);
 colocate_proc_with_table
---------------------------------------------------------------------

(1 row)

select colocate_proc_with_table('mx_call_func_custom_types', 'mx_call_dist_table_enum'::regclass, 1);
 colocate_proc_with_table
---------------------------------------------------------------------

@@ -211,6 +210,26 @@ select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0);

(1 row)

select create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x',
    colocate_with := 'mx_call_dist_table_bigint');
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
 create_distributed_function
---------------------------------------------------------------------

(1 row)

-- set up a force_delegation function
select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', 'x',
    colocate_with := 'mx_call_dist_table_2',
    force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
 create_distributed_function
---------------------------------------------------------------------

(1 row)

select mx_call_func(2, 0);
DEBUG: pushing down the function call
 mx_call_func

@@ -748,6 +767,16 @@ HINT: Connect to the coordinator and run it again.
-- show that functions can be delegated from worker nodes
SET client_min_messages TO DEBUG1;
SELECT mx_call_func(2, 0);
DEBUG: pushing down the function call
 mx_call_func
---------------------------------------------------------------------
 28
(1 row)

-- not delegated in a transaction block
BEGIN;
SELECT mx_call_func(2, 0);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id)))
CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)"
PL/pgSQL function mx_call_func(integer,integer) line XX at assignment

@@ -759,9 +788,52 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment
 28
(1 row)

END;
-- not delegated in a DO block
DO $$
BEGIN
    PERFORM mx_call_func(2, 0);
END;
$$ LANGUAGE plpgsql;
DEBUG: not pushing down function calls in a multi-statement transaction
CONTEXT: SQL statement "SELECT mx_call_func(2, 0)"
PL/pgSQL function inline_code_block line XX at PERFORM
DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id)))
CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)"
PL/pgSQL function mx_call_func(integer,integer) line XX at assignment
SQL statement "SELECT mx_call_func(2, 0)"
PL/pgSQL function inline_code_block line XX at PERFORM
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((2 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer
CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)"
PL/pgSQL function mx_call_func(integer,integer) line XX at assignment
SQL statement "SELECT mx_call_func(2, 0)"
PL/pgSQL function inline_code_block line XX at PERFORM
-- forced calls are delegated in a transaction block
BEGIN;
SELECT mx_call_func_bigint_force(4, 2);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
 mx_call_func_bigint_force
---------------------------------------------------------------------
 2
(1 row)

END;
-- forced calls are delegated in a DO block
DO $$
BEGIN
    PERFORM * FROM mx_call_func_bigint_force(4, 2);
END;
$$ LANGUAGE plpgsql;
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)"
PL/pgSQL function inline_code_block line XX at PERFORM
DEBUG: pushing down the function call
CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)"
PL/pgSQL function inline_code_block line XX at PERFORM
\c - - - :master_port
SET search_path TO multi_mx_function_call_delegation, public;
RESET client_min_messages;
\set VERBOSITY terse
DROP SCHEMA multi_mx_function_call_delegation CASCADE;
NOTICE: drop cascades to 15 other objects
NOTICE: drop cascades to 16 other objects
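
The behavioural rule these hunks pin down: a plain distributed-function call is delegated only as a standalone statement, while a function created with force_delegation := true is pushed down even inside a transaction block or a DO block. A sketch of the forced variant, with hypothetical names:

-- sketch: force_delegation overrides the multi-statement-transaction rule;
-- my_forced_func and my_dist_table are illustrative
SELECT create_distributed_function('my_forced_func(bigint,bigint)', 'x',
    colocate_with := 'my_dist_table',
    force_delegation := true);
BEGIN;
SELECT my_forced_func(4, 2);  -- delegated; a non-forced call would run on the coordinator
END;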
@@ -74,6 +74,11 @@ LANGUAGE plpgsql AS $$
BEGIN
    y := x + y * 2;
END;$$;
CREATE FUNCTION mx_call_func_bigint_force(x bigint, INOUT y bigint)
LANGUAGE plpgsql AS $$
BEGIN
    PERFORM multi_mx_function_call_delegation.mx_call_func_bigint(x, y);
END;$$;
-- create another function which verifies:
-- 1. we work fine with multiple return columns
-- 2. we work fine in combination with custom types

@@ -193,12 +198,6 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass

(1 row)

select colocate_proc_with_table('mx_call_func_bigint', 'mx_call_dist_table_bigint'::regclass, 1);
 colocate_proc_with_table
---------------------------------------------------------------------

(1 row)

select colocate_proc_with_table('mx_call_func_custom_types', 'mx_call_dist_table_enum'::regclass, 1);
 colocate_proc_with_table
---------------------------------------------------------------------

@@ -211,6 +210,26 @@ select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0);

(1 row)

select create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x',
    colocate_with := 'mx_call_dist_table_bigint');
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
 create_distributed_function
---------------------------------------------------------------------

(1 row)

-- set up a force_delegation function
select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', 'x',
    colocate_with := 'mx_call_dist_table_2',
    force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
 create_distributed_function
---------------------------------------------------------------------

(1 row)

select mx_call_func(2, 0);
DEBUG: pushing down the function call
 mx_call_func

@@ -748,6 +767,16 @@ HINT: Connect to the coordinator and run it again.
-- show that functions can be delegated from worker nodes
SET client_min_messages TO DEBUG1;
SELECT mx_call_func(2, 0);
DEBUG: pushing down the function call
 mx_call_func
---------------------------------------------------------------------
 28
(1 row)

-- not delegated in a transaction block
BEGIN;
SELECT mx_call_func(2, 0);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id)))
CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)"
PL/pgSQL function mx_call_func(integer,integer) line XX at assignment

@@ -759,9 +788,52 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment
 28
(1 row)

END;
-- not delegated in a DO block
DO $$
BEGIN
    PERFORM mx_call_func(2, 0);
END;
$$ LANGUAGE plpgsql;
DEBUG: not pushing down function calls in a multi-statement transaction
CONTEXT: SQL statement "SELECT mx_call_func(2, 0)"
PL/pgSQL function inline_code_block line XX at PERFORM
DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id)))
CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)"
PL/pgSQL function mx_call_func(integer,integer) line XX at assignment
SQL statement "SELECT mx_call_func(2, 0)"
PL/pgSQL function inline_code_block line XX at PERFORM
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (2 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint)))
CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)"
PL/pgSQL function mx_call_func(integer,integer) line XX at assignment
SQL statement "SELECT mx_call_func(2, 0)"
PL/pgSQL function inline_code_block line XX at PERFORM
-- forced calls are delegated in a transaction block
BEGIN;
SELECT mx_call_func_bigint_force(4, 2);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
 mx_call_func_bigint_force
---------------------------------------------------------------------
 2
(1 row)

END;
-- forced calls are delegated in a DO block
DO $$
BEGIN
    PERFORM * FROM mx_call_func_bigint_force(4, 2);
END;
$$ LANGUAGE plpgsql;
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)"
PL/pgSQL function inline_code_block line XX at PERFORM
DEBUG: pushing down the function call
CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)"
PL/pgSQL function inline_code_block line XX at PERFORM
\c - - - :master_port
SET search_path TO multi_mx_function_call_delegation, public;
RESET client_min_messages;
\set VERBOSITY terse
DROP SCHEMA multi_mx_function_call_delegation CASCADE;
NOTICE: drop cascades to 15 other objects
NOTICE: drop cascades to 16 other objects
@@ -464,7 +464,11 @@ SELECT create_reference_table('replicate_reference_table_insert');
BEGIN;
INSERT INTO replicate_reference_table_insert VALUES(1);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
ERROR: cannot open new connections after the first modification command within a transaction
 ?column?
---------------------------------------------------------------------
 1
(1 row)

ROLLBACK;
DROP TABLE replicate_reference_table_insert;
-- test COPY then adding a new node in a transaction

@@ -479,7 +483,11 @@ SET citus.enable_local_execution = 'off';
BEGIN;
COPY replicate_reference_table_copy FROM STDIN;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
ERROR: cannot open new connections after the first modification command within a transaction
 ?column?
---------------------------------------------------------------------
 1
(1 row)

ROLLBACK;
RESET citus.enable_local_execution;
DROP TABLE replicate_reference_table_copy;

@@ -494,7 +502,11 @@ SELECT create_reference_table('replicate_reference_table_ddl');
BEGIN;
ALTER TABLE replicate_reference_table_ddl ADD column2 int;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
ERROR: cannot open new connections after the first modification command within a transaction
 ?column?
---------------------------------------------------------------------
 1
(1 row)

ROLLBACK;
DROP TABLE replicate_reference_table_ddl;
-- test DROP table after adding new node in a transaction
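
In each of these hunks the ERROR line is the old expectation and the added rows are the new one: after this change a node can be added in the same transaction that already modified a reference table via INSERT, COPY, or DDL. A sketch of the now-passing flow:

-- sketch: modify a reference table, then add a node, in one transaction
BEGIN;
INSERT INTO replicate_reference_table_insert VALUES (1);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);  -- now returns 1 instead of erroring
ROLLBACK;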
@@ -79,6 +79,21 @@ SELECT create_distributed_function('test_function(int)');

(1 row)

-- Create and distribute plpgsql extension's function
CREATE OR REPLACE FUNCTION plpgsql_dist_function(text)
RETURNS void
LANGUAGE plpgsql AS
$$
BEGIN
    RAISE NOTICE '%', $1;
END;
$$;
SELECT create_distributed_function('plpgsql_dist_function(text)');
 create_distributed_function
---------------------------------------------------------------------

(1 row)

-- show that schema, types, function and sequence have been marked as distributed
-- on the coordinator node
RESET ROLE;

@@ -124,6 +139,12 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dis
 (function,"{local_schema,test_function}",{integer})
(1 row)

SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.plpgsql_dist_function'::regproc::oid;
 pg_identify_object_as_address
---------------------------------------------------------------------
 (function,"{local_schema,plpgsql_dist_function}",{pg_catalog.text})
(1 row)

-- show those objects marked as distributed on metadata worker node as well
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;$$) ORDER BY 1,2;
 nodename | nodeport | success | result

@@ -174,6 +195,27 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas
 localhost | 57638 | t | (function,"{local_schema,test_function}",{integer})
(2 rows)

SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.plpgsql_dist_function'::regproc::oid;$$) ORDER BY 1,2;
 nodename | nodeport | success | result
---------------------------------------------------------------------
 localhost | 57637 | t | (function,"{local_schema,plpgsql_dist_function}",{pg_catalog.text})
 localhost | 57638 | t | (function,"{local_schema,plpgsql_dist_function}",{pg_catalog.text})
(2 rows)

-- Show that extension plpgsql is also marked as distributed as a dependency of plpgsql_dist_function
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{plpgsql}%';
 obj_identifier
---------------------------------------------------------------------
 (extension,{plpgsql},{})
(1 row)

SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{plpgsql}%';$$) ORDER BY 1,2;
 nodename | nodeport | success | result
---------------------------------------------------------------------
 localhost | 57637 | t | (extension,{plpgsql},{})
 localhost | 57638 | t | (extension,{plpgsql},{})
(2 rows)

-- show that schema is owned by the superuser
SELECT rolname FROM pg_roles JOIN pg_namespace ON(pg_namespace.nspowner = pg_roles.oid) WHERE nspname = 'local_schema';
 rolname

@@ -372,8 +414,9 @@ SELECT * FROM run_command_on_workers($$ SELECT distribution_argument_index FROM

-- Show that dropping schema doesn't affect the worker node
DROP SCHEMA local_schema CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table metadata_dist_test_table
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to function plpgsql_dist_function(text)
drop cascades to table metadata_dist_test_table
drop cascades to function metadata_dist_test_proc(integer,integer)
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%';
 obj_identifier
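
The recurring probe in this file is worth calling out: citus.pg_dist_object is the catalog of objects Citus considers distributed, and pg_identify_object_as_address turns its rows into readable addresses. A sketch of the probe, with hypothetical object names:

-- sketch: check whether a given function is marked as distributed
SELECT pg_identify_object_as_address(classid, objid, objsubid)
FROM citus.pg_dist_object
WHERE objid = 'my_schema.my_function'::regproc::oid;  -- names are illustrative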
@@ -2349,3 +2349,32 @@ WHERE logicalrelid = 'r1'::regclass;
(1 row)

DROP TABLE t1, r1;
-- Test rebalancer with index on a table
DROP TABLE IF EXISTS test_rebalance_with_index;
CREATE TABLE test_rebalance_with_index (measureid integer PRIMARY KEY);
SELECT create_distributed_table('test_rebalance_with_index', 'measureid');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE INDEX rebalance_with_index ON test_rebalance_with_index(measureid);
INSERT INTO test_rebalance_with_index VALUES(0);
INSERT INTO test_rebalance_with_index VALUES(1);
INSERT INTO test_rebalance_with_index VALUES(2);
SELECT * FROM master_drain_node('localhost', :worker_2_port);
 master_drain_node
---------------------------------------------------------------------

(1 row)

CALL citus_cleanup_orphaned_shards();
UPDATE pg_dist_node SET shouldhaveshards=true WHERE nodeport = :worker_2_port;
SELECT rebalance_table_shards();
 rebalance_table_shards
---------------------------------------------------------------------

(1 row)

CALL citus_cleanup_orphaned_shards();
DROP TABLE test_rebalance_with_index CASCADE;
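
The drain-then-rebalance sequence above is the standard way to empty a node and then let shards move back, and the new test simply runs it against a table that also carries an index. A condensed sketch of the sequence:

-- sketch: empty worker_2, then allow it shards again and rebalance
SELECT * FROM master_drain_node('localhost', :worker_2_port);
CALL citus_cleanup_orphaned_shards();   -- clean up moved-away placements
UPDATE pg_dist_node SET shouldhaveshards = true WHERE nodeport = :worker_2_port;
SELECT rebalance_table_shards();
CALL citus_cleanup_orphaned_shards();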
@@ -186,7 +186,7 @@ BEGIN;
-- it'd spawn a bg worker targeting this node
-- and that changes the connection count specific tests
-- here
SET LOCAL citus.enable_metadata_sync_by_default TO OFF;
SET LOCAL citus.enable_metadata_sync TO OFF;
-- cannot add workers with specific IP as long as I have a placeholder coordinator record
SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port);
ERROR: cannot add a worker node when the coordinator hostname is set to localhost

@@ -198,7 +198,7 @@ BEGIN;
-- it'd spawn a bg worker targeting this node
-- and that changes the connection count specific tests
-- here
SET LOCAL citus.enable_metadata_sync_by_default TO OFF;
SET LOCAL citus.enable_metadata_sync TO OFF;
-- adding localhost workers is ok
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
NOTICE: shards are still on the coordinator after adding the new node

@@ -228,7 +228,7 @@ BEGIN;
-- it'd spawn a bg worker targeting this node
-- and that changes the connection count specific tests
-- here
SET LOCAL citus.enable_metadata_sync_by_default TO OFF;
SET LOCAL citus.enable_metadata_sync TO OFF;
-- adding workers with specific IP is ok now
SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port);
NOTICE: shards are still on the coordinator after adding the new node
@@ -156,7 +156,7 @@ SELECT * FROM test_matview;
(1 row)

SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'events%' ORDER BY logicalrelid::text;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
 events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
 events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f

@@ -468,7 +468,9 @@ BEGIN;
(1 row)

SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
ERROR: cannot open new connections after the first modification command within a transaction
ERROR: cannot execute metadata syncing operation because there was a parallel operation on a distributed table in the transaction
DETAIL: When modifying metadata, Citus needs to perform all operations over a single connection per node to ensure consistency.
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK;
-- this is safe because start_metadata_sync_to_node already switches to
-- sequential execution

@@ -539,7 +541,11 @@ BEGIN;

-- sync at the end of the tx
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
ERROR: cannot open new connections after the first modification command within a transaction
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

ROLLBACK;
-- multi-shard commands are not allowed with start_metadata_sync
BEGIN;

@@ -576,7 +582,11 @@ BEGIN;

-- sync at the end of the tx
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
ERROR: cannot open new connections after the first modification command within a transaction
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

ROLLBACK;
-- cleanup
\c - - - :master_port
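
The new error text above also documents the workaround: if a transaction has already run a parallel operation on a distributed table, metadata syncing in that same transaction must be forced into sequential mode. A sketch following the HINT verbatim:

-- sketch: switch to sequential mode before syncing metadata in the same tx
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
-- ... multi-shard commands now run over one connection per node ...
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
COMMIT;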
@@ -1,4 +1,4 @@
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF;
ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;
SELECT pg_reload_conf();
 pg_reload_conf
---------------------------------------------------------------------

@@ -1,4 +1,4 @@
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF;
ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;
SELECT pg_reload_conf();
 pg_reload_conf
---------------------------------------------------------------------

@@ -1,4 +1,4 @@
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF;
ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;
SELECT pg_reload_conf();
 pg_reload_conf
---------------------------------------------------------------------

@@ -1,4 +1,4 @@
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON;
ALTER SYSTEM SET citus.enable_metadata_sync TO ON;
SELECT pg_reload_conf();
 pg_reload_conf
---------------------------------------------------------------------

@@ -1,4 +1,4 @@
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON;
ALTER SYSTEM SET citus.enable_metadata_sync TO ON;
SELECT pg_reload_conf();
 pg_reload_conf
---------------------------------------------------------------------

@@ -1,4 +1,4 @@
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON;
ALTER SYSTEM SET citus.enable_metadata_sync TO ON;
SELECT pg_reload_conf();
 pg_reload_conf
---------------------------------------------------------------------
@@ -582,7 +582,7 @@ SET session_replication_role = DEFAULT;

-- disable test_user on the first worker
\c - :default_user - :worker_1_port
SET citus.enable_object_propagation TO off;
SET citus.enable_metadata_sync TO off;
ALTER USER test_user WITH nologin;
\c - test_user - :master_port

@@ -616,7 +616,7 @@ SELECT shardid, shardstate, nodename, nodeport

-- re-enable test_user on the first worker
\c - :default_user - :worker_1_port
SET citus.enable_object_propagation TO off;
SET citus.enable_metadata_sync TO off;
ALTER USER test_user WITH login;

\c - test_user - :master_port
@@ -89,11 +89,11 @@ test: isolation_ref_update_delete_upsert_vs_all_on_mx
test: isolation_dis2ref_foreign_keys_on_mx
test: isolation_metadata_sync_deadlock
test: isolation_replicated_dist_on_mx
test: isolation_replicate_reference_tables_to_coordinator
test: isolation_multiuser_locking

# MXless tests
test: isolation_check_mx
test: isolation_replicate_reference_tables_to_coordinator
test: isolation_turn_mx_off
test: isolation_reference_copy_vs_all
test: isolation_ref2ref_foreign_keys
test: isolation_multiuser_locking
@@ -319,11 +319,8 @@ test: check_mx
test: distributed_functions distributed_functions_conflict
test: distributed_collations
test: distributed_procedure

# blocked on #5583
test: turn_mx_off
test: distributed_collations_conflict
test: turn_mx_on
test: check_mx

# ---------
# deparsing logic tests
@@ -745,7 +745,7 @@ UPDATE pg_dist_shard_placement SET nodeport = :worker_1_port+10 WHERE shardid =
SET session_replication_role = DEFAULT;
-- disable test_user on the first worker
\c - :default_user - :worker_1_port
SET citus.enable_object_propagation TO off;
SET citus.enable_metadata_sync TO off;
ALTER USER test_user WITH nologin;
\c - test_user - :master_port
-- reissue copy, and it should fail

@@ -804,7 +804,7 @@ SELECT shardid, shardstate, nodename, nodeport

-- re-enable test_user on the first worker
\c - :default_user - :worker_1_port
SET citus.enable_object_propagation TO off;
SET citus.enable_metadata_sync TO off;
ALTER USER test_user WITH login;
\c - test_user - :master_port
DROP TABLE numbers_hash;
@ -2,7 +2,7 @@ session "s1"
|
|||
|
||||
step "check_mx"
|
||||
{
|
||||
SHOW citus.enable_metadata_sync_by_default;
|
||||
SHOW citus.enable_metadata_sync;
|
||||
|
||||
SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary';
|
||||
}
|
||||
|
|
|
@@ -1,5 +1,8 @@
setup
{
    SELECT citus_internal.replace_isolation_tester_func();
    SELECT citus_internal.refresh_isolation_tester_prepared_statement();

    SET citus.shard_replication_factor TO 1;

    CREATE USER test_user_1;

@@ -16,6 +19,8 @@ setup

teardown
{
    SELECT citus_internal.restore_isolation_tester_func();

    BEGIN;
    DROP TABLE IF EXISTS test_table;
    DROP USER test_user_1, test_user_2;

@@ -31,6 +36,7 @@ step "s1-grant"
    SET ROLE test_user_1;
    SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');
    GRANT ALL ON test_table TO test_user_2;
    SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$);
}

step "s1-begin"
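
The reworked s1-grant step replaces the per-placement helper with a direct GRANT plus a worker-side GRANT issued through run_command_on_workers, turning metadata sync off on the workers so the worker-local GRANT is not re-propagated. In isolation, the new step body amounts to:

-- sketch of the new grant step (test_table and the users come from the spec's setup)
SET ROLE test_user_1;
GRANT ALL ON test_table TO test_user_2;
SELECT run_command_on_workers(
    $$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$);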
@ -2,7 +2,7 @@ session "s1"
|
|||
|
||||
step "disable-mx-by-default"
|
||||
{
|
||||
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF;
|
||||
ALTER SYSTEM SET citus.enable_metadata_sync TO OFF;
|
||||
}
|
||||
|
||||
step "reload"
|
||||
|
|
|
@ -2,7 +2,7 @@ session "s1"
|
|||
|
||||
step "enable-mx-by-default"
|
||||
{
|
||||
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON;
|
||||
ALTER SYSTEM SET citus.enable_metadata_sync TO ON;
|
||||
}
|
||||
|
||||
step "reload"
|
||||
|
|
|
@@ -376,6 +376,9 @@ select percentile_cont(0.5) within group(order by valf) from aggdata;
select key, percentile_cont(key/10.0) within group(order by val) from aggdata group by key;
select array_agg(val order by valf) from aggdata;

-- test by using some other node types as arguments to agg
select key, percentile_cont((key - (key > 4)::int) / 10.0) within group(order by val) from aggdata group by key;

-- Test TransformSubqueryNode

select * FROM (

@@ -479,6 +482,54 @@ SELECT square_func(5), a FROM t1 GROUP BY a;
-- the expression will be pushed down.
SELECT square_func(5), a, count(a) FROM t1 GROUP BY a;

-- Test the cases where the worker agg exec. returns no tuples.

CREATE TABLE dist_table (dist_col int, agg_col numeric);
SELECT create_distributed_table('dist_table', 'dist_col');

CREATE TABLE ref_table (int_col int);
SELECT create_reference_table('ref_table');

SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM (SELECT *, random() FROM dist_table) a;

SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT SUM(COALESCE(agg_col, 3))
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT AVG(COALESCE(agg_col, 10))
FROM dist_table
LEFT JOIN ref_table ON TRUE;

insert into dist_table values (2, 11.2), (3, NULL), (6, 3.22), (3, 4.23), (5, 5.25), (4, 63.4), (75, NULL), (80, NULL), (96, NULL), (8, 1078), (0, 1.19);

-- run the same queries after loading some data
SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
FROM (SELECT *, random() FROM dist_table) a;

SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT floor(SUM(COALESCE(agg_col, 3)))
FROM dist_table
LEFT JOIN ref_table ON TRUE;

SELECT floor(AVG(COALESCE(agg_col, 10)))
FROM dist_table
LEFT JOIN ref_table ON TRUE;

set client_min_messages to error;
drop schema aggregate_support cascade;
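
The new block targets a specific edge case: while dist_table is still empty, the worker-side step of the two-phase aggregate produces no tuples at all, and the coordinator must still combine that into a single row (NULL in each of these cases) instead of failing. A minimal reproduction, using the same tables the test creates:

-- sketch: an empty dist_table makes the per-worker aggregation return no rows
SELECT percentile_disc(.25) WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON true;   -- expected: one row containing NULL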
@@ -1,3 +1,3 @@
SHOW citus.enable_metadata_sync_by_default;
SHOW citus.enable_metadata_sync;

SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary';
@@ -1,5 +1,5 @@
SET citus.next_shard_id TO 20030000;
SET citus.enable_object_propagation TO false; -- all tests here verify old behaviour without distributing types,functions,etc automatically
SET citus.enable_metadata_sync TO false; -- all tests here verify old behaviour without distributing types,functions,etc automatically

CREATE USER typeowner_for_disabled_object_propagation_guc;
CREATE SCHEMA disabled_object_propagation;

@@ -37,7 +37,7 @@ SELECT create_distributed_table('t3', 'a');
-- verify ALTER TYPE statements are not propagated for types, even though they are marked distributed
BEGIN;
-- object propagation is turned off after xact finished, type is already marked distributed by then
SET LOCAL citus.enable_object_propagation TO on;
SET LOCAL citus.enable_metadata_sync TO on;
CREATE TYPE tt3 AS (a int, b int);
CREATE TABLE t4 (a int PRIMARY KEY, b tt3);
SELECT create_distributed_table('t4','a');

@@ -75,7 +75,7 @@ $$);

-- suppress any warnings during cleanup
SET client_min_messages TO error;
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
DROP SCHEMA disabled_object_propagation CASCADE;
DROP SCHEMA disabled_object_propagation2 CASCADE;
DROP USER typeowner_for_disabled_object_propagation_guc;
@@ -4,6 +4,8 @@ SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$);
\c - - - :worker_1_port
SET search_path TO collation_conflict;

SET citus.enable_metadata_sync TO off;

CREATE COLLATION caseinsensitive (
    provider = icu,
    locale = 'und-u-ks-level2'

@@ -36,6 +38,8 @@ DROP TABLE tblcoll;
DROP COLLATION caseinsensitive;

\c - - - :worker_1_port
SET citus.enable_metadata_sync TO off;

SET search_path TO collation_conflict;

CREATE COLLATION caseinsensitive (
@@ -661,6 +661,27 @@ SELECT outer_test_prepare(1,2);

SELECT COUNT(*) FROM table_test_prepare;

CREATE TABLE test_perform(i int);
SELECT create_distributed_table('test_perform', 'i', colocate_with := 'none');

CREATE OR REPLACE FUNCTION test(x int)
RETURNS int
AS $$
DECLARE
BEGIN
    RAISE NOTICE 'INPUT %', x;
    RETURN x;
END;
$$ LANGUAGE plpgsql;

SELECT create_distributed_function('test(int)', 'x',
    colocate_with := 'test_perform', force_delegation := true);
DO $$
BEGIN
    PERFORM test(3);
END;
$$ LANGUAGE plpgsql;

RESET client_min_messages;
SET citus.log_remote_commands TO off;
DROP SCHEMA forcepushdown_schema CASCADE;
@ -28,7 +28,7 @@ ROLLBACK;
|
|||
-- but we are on the coordinator, so still not allowed
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
|
||||
ROLLBACK;
|
||||
|
||||
|
@ -67,14 +67,14 @@ SET search_path TO metadata_sync_helpers;
|
|||
-- owner of the table test
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's');
|
||||
ROLLBACK;
|
||||
|
||||
-- we do not own the relation
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
SELECT citus_internal_update_relation_colocation ('test'::regclass, 10);
|
||||
ROLLBACK;
|
||||
|
||||
|
@ -83,7 +83,7 @@ CREATE TABLE test_2(col_1 int, col_2 int);
|
|||
CREATE TABLE test_3(col_1 int, col_2 int);
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
|
||||
SELECT count(*) FROM pg_dist_partition WHERE logicalrelid = 'metadata_sync_helpers.test_2'::regclass;
|
||||
ROLLBACK;
|
||||
|
@ -91,42 +91,42 @@ ROLLBACK;
|
|||
-- fails because there is no X distribution method
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
|
||||
ROLLBACK;
|
||||
|
||||
-- fails because there is the column does not exist
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's');
|
||||
ROLLBACK;
|
||||
|
||||
--- fails because we do not allow NULL parameters
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's');
|
||||
ROLLBACK;
|
||||
|
||||
-- fails because colocationId cannot be negative
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's');
|
||||
ROLLBACK;
|
||||
|
||||
-- fails because there is no X replication model
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X');
|
||||
ROLLBACK;
|
||||
|
||||
-- the same table cannot be added twice, that is enforced by a primary key
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
\set VERBOSITY terse
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
|
||||
|
@ -135,7 +135,7 @@ ROLLBACK;
|
|||
-- the same table cannot be added twice, that is enforced by a primary key even if distribution key changes
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
\set VERBOSITY terse
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's');
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's');
|
||||
|
@ -144,7 +144,7 @@ ROLLBACK;
|
|||
-- hash distributed table cannot have NULL distribution key
|
||||
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
|
||||
SET application_name to 'citus';
|
||||
SET application_name to 'citus_internal';
|
||||
\set VERBOSITY terse
|
||||
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's');
|
||||
ROLLBACK;
|
||||
|

@@ -165,14 +165,14 @@ SET search_path TO metadata_sync_helpers;

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
ROLLBACK;

-- should throw an error even if we skip the checks, there are no such nodes
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111);
ROLLBACK;

@@ -189,7 +189,7 @@ SET search_path TO metadata_sync_helpers;

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's');
ROLLBACK;

@@ -207,21 +207,21 @@ SET search_path TO metadata_sync_helpers;
CREATE TABLE test_ref(col_1 int, col_2 int);
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's');
ROLLBACK;

-- non-valid replication model
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A');
ROLLBACK;

-- not-matching replication model for reference table
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c');
ROLLBACK;
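
-- Aside (editor's note; the legend is recalled from the Citus catalog docs,
-- so treat it as an assumption): repmodel is a single "char", and reference
-- tables are expected to carry 't' (two-phase commit), which is why the 'A'
-- and 'c' values above are rejected. To list the models actually in use:
SELECT DISTINCT repmodel FROM pg_dist_partition;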

@@ -231,7 +231,7 @@ SET search_path TO metadata_sync_helpers;
CREATE TABLE super_user_table(col_1 int);
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's');
COMMIT;

@@ -244,7 +244,7 @@ SET search_path TO metadata_sync_helpers;

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text))
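
-- Aside (editor's sketch): the hunk is truncated right after the CTE; in the
-- test file each shard_data CTE is presumably consumed by a call along these
-- lines (argument order assumed from the column list):
-- SELECT citus_internal_add_shard_metadata(relationname, shardid,
--        storagetype, shardminvalue, shardmaxvalue) FROM shard_data;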

@@ -254,7 +254,7 @@ ROLLBACK;

-- the user is only allowed to add a shard for a table which is in pg_dist_partition
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text))

@@ -264,7 +264,7 @@ ROLLBACK;

-- ok, now add the table to the pg_dist_partition
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's');
SELECT citus_internal_add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's');
SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't');

@@ -273,14 +273,14 @@ COMMIT;

-- we can update to a non-existing colocation group (e.g., colocate_with:=none)
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232);
ROLLBACK;
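
-- Aside (editor's sketch): colocation is tracked per relation in
-- pg_dist_partition; to check the colocation ids assigned just above:
SELECT logicalrelid, colocationid
FROM pg_dist_partition
WHERE logicalrelid IN ('test_2'::regclass, 'test_3'::regclass);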

-- invalid shard ids are not allowed
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text))

@@ -290,7 +290,7 @@ ROLLBACK;

-- invalid storage types are not allowed
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text))

@@ -300,7 +300,7 @@ ROLLBACK;

-- NULL shard ranges are not allowed for hash distributed tables
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text))

@@ -310,7 +310,7 @@ ROLLBACK;

-- non-integer shard ranges are not allowed
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text))

@@ -320,7 +320,7 @@ ROLLBACK;

-- shardMinValue should be smaller than shardMaxValue
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text))
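
-- Aside (editor's sketch): for hash-distributed tables the min/max values
-- are int32 hash bounds stored as text, so the check boils down to a numeric
-- comparison after a cast, roughly:
SELECT '-1610612737'::text::int > '-2147483648'::text::int AS min_exceeds_max;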

@@ -330,7 +330,7 @@ ROLLBACK;

-- we do not allow overlapping shards for the same table
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text),

@@ -344,7 +344,7 @@ ROLLBACK;

-- check with non-existing object type
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))
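
-- Aside: each distributed_object_data CTE here is consumed the same way as
-- the complete block further below, i.e.:
-- SELECT citus_internal_add_object_metadata(typetext, objnames, objargs,
--        distargumentindex, colocationid, force_delegation)
-- FROM distributed_object_data;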

@@ -354,7 +354,7 @@ ROLLBACK;

-- check the sanity of distributionArgumentIndex and colocationId
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false))

@@ -363,7 +363,7 @@ ROLLBACK;

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false))

@@ -373,7 +373,7 @@ ROLLBACK;

-- check with non-existing object
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))

@@ -384,7 +384,7 @@ ROLLBACK;

-- if any parameter is NULL
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false))

@@ -397,7 +397,7 @@ ROLLBACK;

-- which is known how to distribute
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse

CREATE TABLE publication_test_table(id int);

@@ -412,7 +412,7 @@ ROLLBACK;

-- Show that citus_internal_add_object_metadata checks the privileges
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse

CREATE FUNCTION distribution_test_function(int) RETURNS int

@@ -425,12 +425,25 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ROLLBACK;

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus_internal';
\set VERBOSITY terse

CREATE TYPE distributed_test_type AS (a int, b int);

SET ROLE metadata_sync_helper_role;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('type', ARRAY['distributed_test_type']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ROLLBACK;

-- we do not allow wrong partmethod
-- so manually insert wrong partmethod for the sake of the test
SET search_path TO metadata_sync_helpers;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
UPDATE pg_dist_partition SET partmethod = 'X';
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)

@@ -444,7 +457,7 @@ ROLLBACK;
SET search_path TO metadata_sync_helpers;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text))

@@ -462,7 +475,7 @@ SET search_path TO metadata_sync_helpers;
-- now, add a few shards
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '11'::text, '20'::text),

@@ -478,14 +491,14 @@ COMMIT;

-- we cannot mark these two tables colocated because they are not colocated
BEGIN;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
ROLLBACK;

-- now, add a few more shards for test_3 to make it colocated with test_2
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_3'::regclass, 1420009::bigint, 't'::"char", '21'::text, '30'::text),

@@ -499,7 +512,7 @@ COMMIT;

-- shardMin/MaxValues should be NULL for reference tables
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL))

@@ -509,7 +522,7 @@ ROLLBACK;

-- reference tables cannot have multiple shards
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL),

@@ -520,7 +533,7 @@ ROLLBACK;

-- finally, add a shard for reference tables
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL))

@@ -533,7 +546,7 @@ SET search_path TO metadata_sync_helpers;

-- and a shard for the superuser table
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text))

@@ -548,7 +561,7 @@ SET search_path TO metadata_sync_helpers;
-- shard does not exist
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
(VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint))

@@ -558,7 +571,7 @@ ROLLBACK;

-- invalid placementid
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
(VALUES (1420000, 1, 0::bigint, 1::int, -10))

@@ -568,7 +581,7 @@ ROLLBACK;

-- non-existing shard
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
(VALUES (1430100, 1, 0::bigint, 1::int, 10))

@@ -578,7 +591,7 @@ ROLLBACK;

-- invalid shard state
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
(VALUES (1420000, 10, 0::bigint, 1::int, 1500000))

@@ -588,7 +601,7 @@ ROLLBACK;

-- non-existing node with non-existing node-id 123123123
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
(VALUES (1420000, 1, 0::bigint, 123123123::int, 1500000))

@@ -612,7 +625,7 @@ END; $$ language plpgsql;

-- fails because we ingest more placements for the same shards to the same worker node
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
(VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000),

@@ -623,7 +636,7 @@ ROLLBACK;

-- shard is not owned by us
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
(VALUES (1420007, 1, 0::bigint, get_node_id(), 1500000))

@@ -633,7 +646,7 @@ ROLLBACK;

-- successfully add placements
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
(VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000),

@@ -654,7 +667,7 @@ COMMIT;

-- we should be able to colocate both tables now
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
ROLLBACK;

@@ -663,7 +676,7 @@ ROLLBACK;

-- fails because we are trying to update it to non-existing node
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000);
COMMIT;

@@ -671,7 +684,7 @@ COMMIT;

-- fails because the source node doesn't contain the shard
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id());
COMMIT;

@@ -679,7 +692,7 @@ COMMIT;

-- fails because shard does not exist
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1);
COMMIT;

@@ -687,7 +700,7 @@ COMMIT;

-- fails because of a non-existing shard
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1);
COMMIT;

@@ -695,7 +708,7 @@ COMMIT;

-- fails because we do not own the shard
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1);
COMMIT;
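
-- Aside (editor's note, argument names assumed): the calls above follow
-- citus_internal_update_placement_metadata(shard_id, source_group_id,
-- target_group_id); get_node_id() is a test helper, so offsets such as
-- get_node_id()+1000 deliberately point at nodes that do not exist.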

@@ -703,7 +716,7 @@ COMMIT;

-- the user is only allowed to delete their own shards
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(shardid)
AS (VALUES (1420007))

@@ -712,7 +725,7 @@ ROLLBACK;

-- the user is only allowed to delete shards in a distributed transaction
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(shardid)
AS (VALUES (1420007))
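
-- Aside (editor's note): the block above intentionally omits the
-- assign_distributed_transaction_id(...) call that every other block starts
-- with; that call is what marks the backend as part of a distributed
-- transaction, so without it the delete is rejected.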

@@ -722,7 +735,7 @@ ROLLBACK;

-- the user cannot delete non-existing shards
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(shardid)
AS (VALUES (1420100))

@@ -737,7 +750,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT count(*) FROM pg_dist_placement WHERE shardid = 1420000;

SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
WITH shard_data(shardid)
AS (VALUES (1420000))

@@ -754,7 +767,7 @@ ROLLBACK;
SET search_path TO metadata_sync_helpers;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
-- with an ugly trick, update the repmodel
-- so that making two tables colocated fails
UPDATE pg_dist_partition SET repmodel = 't'

@@ -765,7 +778,7 @@ ROLLBACK;

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
-- with an ugly trick, update the vartype of the table from int to bigint
-- so that making two tables colocated fails
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}'

@@ -775,7 +788,7 @@ ROLLBACK;

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
-- with an ugly trick, update the partmethod of the table to not-valid
-- so that making two tables colocated fails
UPDATE pg_dist_partition SET partmethod = ''

@@ -785,7 +798,7 @@ ROLLBACK;

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
-- with an ugly trick, update the partmethod of the table to not-valid
-- so that making two tables colocated fails
UPDATE pg_dist_partition SET partmethod = 'a'

@@ -799,7 +812,7 @@ CREATE TABLE test_6(int_col int, text_col text);

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's');
SELECT citus_internal_add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's');

@@ -815,7 +828,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
CREATE TABLE test_8(int_col int, text_col text COLLATE "caseinsensitive");

SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
SET application_name to 'citus_internal';
\set VERBOSITY terse
SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's');
SELECT citus_internal_add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's');

@@ -70,9 +70,9 @@ CREATE FUNCTION add(integer, integer) RETURNS integer

-- Since deparse logic cannot work on workers if the function
-- is distributed to them, we disable object propagation
-- first. The same trick is applied multiple times in this test.
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('add(int,int)');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;
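
-- Aside (editor's note): the paired SET/RESET lines above are the diff's
-- before/after image; this change replaces the citus.enable_object_propagation
-- GUC with citus.enable_metadata_sync, and the same substitution repeats in
-- the blocks below.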

SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION add CALLED ON NULL INPUT

@@ -276,10 +276,10 @@ CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text) RETURNS TEXT
AS $$ SELECT 'Overloaded function called with param: ' || $1 $$
LANGUAGE SQL;

SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"()');
SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text)');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;

SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2"

@@ -294,9 +294,9 @@ $cmd$);
CREATE FUNCTION func_default_param(param INT DEFAULT 0) RETURNS TEXT
AS $$ SELECT 'supplied param is : ' || param; $$
LANGUAGE SQL;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('func_default_param(INT)');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;

SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_default_param RENAME TO func_with_default_param;

@@ -306,9 +306,9 @@ $cmd$);
CREATE FUNCTION func_out_param(IN param INT, OUT result TEXT)
AS $$ SELECT 'supplied param is : ' || param; $$
LANGUAGE SQL;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('func_out_param(INT)');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;

SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_out_param RENAME TO func_in_and_out_param;

@@ -321,9 +321,9 @@ BEGIN
a := a * a;
END; $$
LANGUAGE plpgsql;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('square(NUMERIC)');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;

SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION square SET search_path TO DEFAULT;

@@ -343,9 +343,9 @@ BEGIN
FROM generate_subscripts(list, 1) g(i);
END; $$
LANGUAGE plpgsql;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('sum_avg(NUMERIC[])');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;

SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION sum_avg COST 10000;

@@ -358,9 +358,9 @@ RESET citus.enable_ddl_propagation;
CREATE FUNCTION func_custom_param(IN param intpair, OUT total INT)
AS $$ SELECT param.x + param.y $$
LANGUAGE SQL;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('func_custom_param(intpair)');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;

SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param;

@@ -372,9 +372,9 @@ CREATE FUNCTION func_returns_table(IN count INT)
RETURNS TABLE (x INT, y INT)
AS $$ SELECT i,i FROM generate_series(1,count) i $$
LANGUAGE SQL;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('func_returns_table(INT)');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;

SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_returns_table ROWS 100;

@@ -56,9 +56,9 @@ BEGIN
RAISE INFO 'information message %', $1;
END;
$proc$;
SET citus.enable_object_propagation TO OFF;
SET citus.enable_metadata_sync TO OFF;
SELECT create_distributed_function('raise_info(text)');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;

SELECT deparse_and_run_on_workers($cmd$
ALTER PROCEDURE raise_info CALLED ON NULL INPUT

@@ -101,7 +101,7 @@ DROP EXTENSION citus;
\c

-- these tests switch between citus versions and call DDLs that require pg_dist_object to be created
SET citus.enable_object_propagation TO 'false';
SET citus.enable_metadata_sync TO 'false';

SET citus.enable_version_checks TO 'false';

@@ -212,11 +212,11 @@ SELECT * FROM multi_extension.print_extension_changes();
ALTER EXTENSION citus UPDATE TO '9.5-1';

BEGIN;
SET citus.enable_object_propagation TO on;
SET citus.enable_metadata_sync TO on;
SELECT master_add_node('localhost', :master_port, groupId=>0);
CREATE TABLE citus_local_table (a int);
SELECT create_citus_local_table('citus_local_table');
RESET citus.enable_object_propagation;
RESET citus.enable_metadata_sync;

-- downgrade from 9.5-1 to 9.4-1 should fail as we have a citus local table
ALTER EXTENSION citus UPDATE TO '9.4-1';

@@ -169,6 +169,12 @@ SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000)

-- insert some data and read
INSERT INTO columnar_table VALUES (1), (1);
SELECT * FROM columnar_table;

-- Fail to alter a columnar table that was created by a different user
SET ROLE full_access;
SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000);

-- Fail to reset an option on a columnar table created by a different user
SELECT alter_columnar_table_reset('columnar_table', chunk_group_row_limit => true);
SET ROLE read_access;

-- and drop it
DROP TABLE columnar_table;
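
-- Aside (editor's sketch, wording of the error assumed from PostgreSQL's
-- standard ownership check): both calls above are expected to fail with
-- something like
--   ERROR:  must be owner of table columnar_table
-- since the set/reset UDFs verify table ownership first.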

@@ -172,5 +172,48 @@ SELECT * FROM seg_test;

\c - - - :master_port

CREATE SCHEMA ext_owned_tables;
SELECT run_command_on_workers($$CREATE SCHEMA ext_owned_tables;$$);

SET search_path TO ext_owned_tables;

CREATE sequence my_seq_ext_1;
SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_1;$$);
CREATE sequence my_seq_ext_2;
SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_2;$$);

-- test distributed tables owned by extension
CREATE TABLE seg_test (x int, y bigserial, z int default nextval('my_seq_ext_1'));
SELECT run_command_on_workers($$CREATE TABLE ext_owned_tables.seg_test (x int, y bigserial, z int default nextval('ext_owned_tables.my_seq_ext_1'))$$);

INSERT INTO seg_test VALUES (42);

CREATE TABLE tcn_test (x int, y bigserial, z int default nextval('my_seq_ext_2'));
SELECT run_command_on_workers($$CREATE TABLE ext_owned_tables.tcn_test (x int, y bigserial, z int default nextval('ext_owned_tables.my_seq_ext_2'));$$);

INSERT INTO tcn_test VALUES (42);

-- pretend this table belongs to an extension
ALTER EXTENSION seg ADD TABLE ext_owned_tables.seg_test;
ALTER EXTENSION seg ADD SEQUENCE ext_owned_tables.my_seq_ext_1;
SELECT run_command_on_workers($$ALTER EXTENSION seg ADD TABLE ext_owned_tables.seg_test;$$);
SELECT run_command_on_workers($$ALTER EXTENSION seg ADD SEQUENCE ext_owned_tables.my_seq_ext_1;$$);

CREATE EXTENSION tcn;
ALTER EXTENSION tcn ADD TABLE ext_owned_tables.tcn_test;
ALTER EXTENSION tcn ADD SEQUENCE ext_owned_tables.my_seq_ext_2;
SELECT run_command_on_workers($$ALTER EXTENSION tcn ADD TABLE ext_owned_tables.tcn_test;$$);
SELECT run_command_on_workers($$ALTER EXTENSION tcn ADD SEQUENCE ext_owned_tables.my_seq_ext_2;$$);
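
-- Aside (editor's sketch): ALTER EXTENSION ... ADD records membership in
-- pg_depend with deptype 'e'; to double-check which objects the seg
-- extension now owns:
SELECT classid::regclass AS catalog, objid
FROM pg_depend
WHERE refclassid = 'pg_extension'::regclass
  AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'seg')
  AND deptype = 'e';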

SELECT create_reference_table('seg_test');
SELECT create_distributed_table('tcn_test', 'x');

-- test metadata re-sync in the presence of an extension-owned table
-- and serial/sequences
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);

-- also drops table on both worker and master
DROP EXTENSION seg CASCADE;
SET client_min_messages TO ERROR;
DROP SCHEMA ext_owned_tables CASCADE;

@@ -57,6 +57,12 @@ BEGIN
y := x + y * 2;
END;$$;

CREATE FUNCTION mx_call_func_bigint_force(x bigint, INOUT y bigint)
LANGUAGE plpgsql AS $$
BEGIN
    PERFORM multi_mx_function_call_delegation.mx_call_func_bigint(x, y);
END;$$;

-- create another function which verifies:
-- 1. we work fine with multiple return columns
-- 2. we work fine in combination with custom types

@@ -104,10 +110,17 @@ select mx_call_func_custom_types('S', 'A');

-- Mark them as colocated with a table. Now we should route them to workers.
select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 1);
select colocate_proc_with_table('mx_call_func_bigint', 'mx_call_dist_table_bigint'::regclass, 1);
select colocate_proc_with_table('mx_call_func_custom_types', 'mx_call_dist_table_enum'::regclass, 1);
select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0);

select create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x',
    colocate_with := 'mx_call_dist_table_bigint');

-- set up a force_delegation function
select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', 'x',
    colocate_with := 'mx_call_dist_table_2',
    force_delegation := true);

select mx_call_func(2, 0);
select mx_call_func_bigint(4, 2);
select mx_call_func_custom_types('S', 'A');

@@ -294,6 +307,30 @@ select create_distributed_function('mx_call_func(int,int)');
SET client_min_messages TO DEBUG1;
SELECT mx_call_func(2, 0);

-- not delegated in a transaction block
BEGIN;
SELECT mx_call_func(2, 0);
END;

-- not delegated in a DO block
DO $$
BEGIN
    PERFORM mx_call_func(2, 0);
END;
$$ LANGUAGE plpgsql;

-- forced calls are delegated in a transaction block
BEGIN;
SELECT mx_call_func_bigint_force(4, 2);
END;

-- forced calls are delegated in a DO block
DO $$
BEGIN
    PERFORM * FROM mx_call_func_bigint_force(4, 2);
END;
$$ LANGUAGE plpgsql;

\c - - - :master_port
SET search_path TO multi_mx_function_call_delegation, public;

@@ -55,6 +55,18 @@ SET search_path TO local_schema;
SELECT create_distributed_table('dist_table', 'a');
SELECT create_distributed_function('test_function(int)');

-- Create and distribute plpgsql extension's function
CREATE OR REPLACE FUNCTION plpgsql_dist_function(text)
RETURNS void
LANGUAGE plpgsql AS
$$
BEGIN
    RAISE NOTICE '%', $1;
END;
$$;

SELECT create_distributed_function('plpgsql_dist_function(text)');

-- show that the schema, types, function and sequence have been marked as distributed
-- on the coordinator node
RESET ROLE;

@@ -65,6 +77,7 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dis
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.dist_table_e_seq'::regclass::oid;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.plpgsql_dist_function'::regproc::oid;

-- show those objects marked as distributed on metadata worker node as well
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;$$) ORDER BY 1,2;

@@ -74,6 +87,11 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;$$) ORDER BY 1,2;
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.dist_table_e_seq'::regclass::oid;$$) ORDER BY 1,2;
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;$$) ORDER BY 1,2;
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.plpgsql_dist_function'::regproc::oid;$$) ORDER BY 1,2;

-- Show that extension plpgsql is also marked as distributed as a dependency of plpgsql_dist_function
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{plpgsql}%';
SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{plpgsql}%';$$) ORDER BY 1,2;

-- show that schema is owned by the superuser
SELECT rolname FROM pg_roles JOIN pg_namespace ON(pg_namespace.nspowner = pg_roles.oid) WHERE nspname = 'local_schema';

@@ -1399,3 +1399,21 @@ WHERE logicalrelid = 'r1'::regclass;

DROP TABLE t1, r1;

-- Test rebalancer with index on a table

DROP TABLE IF EXISTS test_rebalance_with_index;
CREATE TABLE test_rebalance_with_index (measureid integer PRIMARY KEY);
SELECT create_distributed_table('test_rebalance_with_index', 'measureid');
CREATE INDEX rebalance_with_index ON test_rebalance_with_index(measureid);

INSERT INTO test_rebalance_with_index VALUES(0);
INSERT INTO test_rebalance_with_index VALUES(1);
INSERT INTO test_rebalance_with_index VALUES(2);

SELECT * FROM master_drain_node('localhost', :worker_2_port);
CALL citus_cleanup_orphaned_shards();
UPDATE pg_dist_node SET shouldhaveshards=true WHERE nodeport = :worker_2_port;

SELECT rebalance_table_shards();
CALL citus_cleanup_orphaned_shards();
DROP TABLE test_rebalance_with_index CASCADE;
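
-- Aside (editor's sketch): to watch the drain and rebalance take effect, one
-- can count placements per node before and after each step:
SELECT nodename, nodeport, count(*) AS placements
FROM pg_dist_shard_placement
GROUP BY nodename, nodeport
ORDER BY nodename, nodeport;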

@@ -101,7 +101,7 @@ BEGIN;
-- it'd spawn a bg worker targeting this node,
-- and that changes the connection-count-specific tests here
SET LOCAL citus.enable_metadata_sync_by_default TO OFF;
SET LOCAL citus.enable_metadata_sync TO OFF;
-- cannot add workers with specific IP as long as I have a placeholder coordinator record
SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port);
COMMIT;

@@ -111,7 +111,7 @@ BEGIN;
-- it'd spawn a bg worker targeting this node,
-- and that changes the connection-count-specific tests here
SET LOCAL citus.enable_metadata_sync_by_default TO OFF;
SET LOCAL citus.enable_metadata_sync TO OFF;
-- adding localhost workers is ok
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
COMMIT;

@@ -127,7 +127,7 @@ BEGIN;
-- it'd spawn a bg worker targeting this node,
-- and that changes the connection-count-specific tests here
SET LOCAL citus.enable_metadata_sync_by_default TO OFF;
SET LOCAL citus.enable_metadata_sync TO OFF;
-- adding workers with specific IP is ok now
SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port);
COMMIT;
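
-- Aside (editor's note): SET LOCAL lasts only until the enclosing COMMIT,
-- which is why every block re-disables metadata sync. A minimal sketch:
BEGIN;
SET LOCAL citus.enable_metadata_sync TO OFF;
COMMIT;
SHOW citus.enable_metadata_sync;  -- back to the previous value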

@@ -1,5 +0,0 @@
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF;
SELECT pg_reload_conf();

SET client_min_messages TO ERROR;
SELECT stop_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';

@@ -1,6 +0,0 @@
ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON;
SELECT pg_reload_conf();
SELECT pg_sleep(0.1);

SET client_min_messages TO ERROR;
SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';