mirror of https://github.com/citusdata/citus.git

Remove the word 'master' from Citus UDFs (#4472)

* Replace master_add_node with citus_add_node
* Replace master_activate_node with citus_activate_node
* Replace master_add_inactive_node with citus_add_inactive_node
* Use master udfs in old scripts
* Replace master_add_secondary_node with citus_add_secondary_node
* Replace master_disable_node with citus_disable_node
* Replace master_drain_node with citus_drain_node
* Replace master_remove_node with citus_remove_node
* Replace master_set_node_property with citus_set_node_property
* Replace master_unmark_object_distributed with citus_unmark_object_distributed
* Replace master_update_node with citus_update_node
* Replace master_update_shard_statistics with citus_update_shard_statistics
* Replace master_update_table_statistics with citus_update_table_statistics
* Rename master_conninfo_cache_invalidate to citus_conninfo_cache_invalidate
  Rename master_dist_local_group_cache_invalidate to citus_dist_local_group_cache_invalidate
* Replace master_copy_shard_placement with citus_copy_shard_placement
* Replace master_move_shard_placement with citus_move_shard_placement
* Rename master_dist_node_cache_invalidate to citus_dist_node_cache_invalidate
* Rename master_dist_object_cache_invalidate to citus_dist_object_cache_invalidate
* Rename master_dist_partition_cache_invalidate to citus_dist_partition_cache_invalidate
* Rename master_dist_placement_cache_invalidate to citus_dist_placement_cache_invalidate
* Rename master_dist_shard_cache_invalidate to citus_dist_shard_cache_invalidate
* Drop master_modify_multiple_shards
* Rename master_drop_all_shards to citus_drop_all_shards
* Drop master_create_distributed_table
* Drop master_create_worker_shards
* Revert old function definitions
* Add missing revoke statement for citus_disable_node

parent 2ef5879bcc
commit 436c9d9d79
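Taken together, the hunks below follow a single pattern: each user-facing UDF gains a citus_-prefixed entry point, while the old master_-prefixed name is kept as a thin wrapper that forwards to it, so existing scripts keep working. A minimal usage sketch of that compatibility (the worker host names and ports here are hypothetical, not taken from this commit):

    SELECT citus_add_node('worker-1', 5432);   -- new name introduced by this commit
    SELECT master_add_node('worker-2', 5432);  -- old name still works through the wrapper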
@@ -89,7 +89,7 @@ citus_truncate_trigger(PG_FUNCTION_ARGS)
 char *schemaName = get_namespace_name(schemaId);
 char *relationName = get_rel_name(relationId);

-DirectFunctionCall3(master_drop_all_shards,
+DirectFunctionCall3(citus_drop_all_shards,
 ObjectIdGetDatum(relationId),
 CStringGetTextDatum(relationName),
 CStringGetTextDatum(schemaName));
@@ -272,7 +272,7 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement)
 ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 errmsg("truncating distributed foreign tables is "
 "currently unsupported"),
-errhint("Use master_drop_all_shards to remove "
+errhint("Use citus_drop_all_shards to remove "
 "foreign table's shards.")));
 }
 }

@@ -45,17 +45,18 @@
 static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes,
 Datum *paramValues);

+PG_FUNCTION_INFO_V1(citus_unmark_object_distributed);
 PG_FUNCTION_INFO_V1(master_unmark_object_distributed);


 /*
- * master_unmark_object_distributed(classid oid, objid oid, objsubid int)
+ * citus_unmark_object_distributed(classid oid, objid oid, objsubid int)
 *
 * removes the entry for an object address from pg_dist_object. Only removes the entry if
 * the object does not exist anymore.
 */
 Datum
-master_unmark_object_distributed(PG_FUNCTION_ARGS)
+citus_unmark_object_distributed(PG_FUNCTION_ARGS)
 {
 Oid classid = PG_GETARG_OID(0);
 Oid objid = PG_GETARG_OID(1);
@@ -85,6 +86,16 @@ master_unmark_object_distributed(PG_FUNCTION_ARGS)
 }


+/*
+ * master_unmark_object_distributed is a wrapper function for old UDF name.
+ */
+Datum
+master_unmark_object_distributed(PG_FUNCTION_ARGS)
+{
+ return citus_unmark_object_distributed(fcinfo);
+}
+
+
 /*
 * ObjectExists checks if an object given by its object address exists
 *

@@ -261,12 +261,18 @@ static bool RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry);


 /* exports for SQL callable functions */
+PG_FUNCTION_INFO_V1(citus_dist_partition_cache_invalidate);
 PG_FUNCTION_INFO_V1(master_dist_partition_cache_invalidate);
+PG_FUNCTION_INFO_V1(citus_dist_shard_cache_invalidate);
 PG_FUNCTION_INFO_V1(master_dist_shard_cache_invalidate);
+PG_FUNCTION_INFO_V1(citus_dist_placement_cache_invalidate);
 PG_FUNCTION_INFO_V1(master_dist_placement_cache_invalidate);
+PG_FUNCTION_INFO_V1(citus_dist_node_cache_invalidate);
 PG_FUNCTION_INFO_V1(master_dist_node_cache_invalidate);
+PG_FUNCTION_INFO_V1(citus_dist_local_group_cache_invalidate);
 PG_FUNCTION_INFO_V1(master_dist_local_group_cache_invalidate);
 PG_FUNCTION_INFO_V1(master_dist_authinfo_cache_invalidate);
+PG_FUNCTION_INFO_V1(citus_dist_object_cache_invalidate);
 PG_FUNCTION_INFO_V1(master_dist_object_cache_invalidate);
 PG_FUNCTION_INFO_V1(role_exists);
 PG_FUNCTION_INFO_V1(authinfo_valid);
@@ -2629,7 +2635,7 @@ SecondaryNodeRoleId(void)


 /*
- * master_dist_partition_cache_invalidate is a trigger function that performs
+ * citus_dist_partition_cache_invalidate is a trigger function that performs
 * relcache invalidations when the contents of pg_dist_partition are changed
 * on the SQL level.
 *
@@ -2637,7 +2643,7 @@ SecondaryNodeRoleId(void)
 * are much easier ways to waste CPU than causing cache invalidations.
 */
 Datum
-master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
+citus_dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
 {
 TriggerData *triggerData = (TriggerData *) fcinfo->context;
 Oid oldLogicalRelationId = InvalidOid;
@@ -2696,7 +2702,17 @@ master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS)


 /*
- * master_dist_shard_cache_invalidate is a trigger function that performs
+ * master_dist_partition_cache_invalidate is a wrapper function for old UDF name.
+ */
+Datum
+master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
+{
+ return citus_dist_partition_cache_invalidate(fcinfo);
+}
+
+
+/*
+ * citus_dist_shard_cache_invalidate is a trigger function that performs
 * relcache invalidations when the contents of pg_dist_shard are changed
 * on the SQL level.
 *
@@ -2704,7 +2720,7 @@ master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
 * are much easier ways to waste CPU than causing cache invalidations.
 */
 Datum
-master_dist_shard_cache_invalidate(PG_FUNCTION_ARGS)
+citus_dist_shard_cache_invalidate(PG_FUNCTION_ARGS)
 {
 TriggerData *triggerData = (TriggerData *) fcinfo->context;
 Oid oldLogicalRelationId = InvalidOid;
@@ -2763,7 +2779,17 @@ master_dist_shard_cache_invalidate(PG_FUNCTION_ARGS)


 /*
- * master_dist_placement_cache_invalidate is a trigger function that performs
+ * master_dist_shard_cache_invalidate is a wrapper function for old UDF name.
+ */
+Datum
+master_dist_shard_cache_invalidate(PG_FUNCTION_ARGS)
+{
+ return citus_dist_shard_cache_invalidate(fcinfo);
+}
+
+
+/*
+ * citus_dist_placement_cache_invalidate is a trigger function that performs
 * relcache invalidations when the contents of pg_dist_placement are
 * changed on the SQL level.
 *
@@ -2771,7 +2797,7 @@ master_dist_shard_cache_invalidate(PG_FUNCTION_ARGS)
 * are much easier ways to waste CPU than causing cache invalidations.
 */
 Datum
-master_dist_placement_cache_invalidate(PG_FUNCTION_ARGS)
+citus_dist_placement_cache_invalidate(PG_FUNCTION_ARGS)
 {
 TriggerData *triggerData = (TriggerData *) fcinfo->context;
 Oid oldShardId = InvalidOid;
@@ -2842,7 +2868,17 @@ master_dist_placement_cache_invalidate(PG_FUNCTION_ARGS)


 /*
- * master_dist_node_cache_invalidate is a trigger function that performs
+ * master_dist_placement_cache_invalidate is a wrapper function for old UDF name.
+ */
+Datum
+master_dist_placement_cache_invalidate(PG_FUNCTION_ARGS)
+{
+ return citus_dist_placement_cache_invalidate(fcinfo);
+}
+
+
+/*
+ * citus_dist_node_cache_invalidate is a trigger function that performs
 * relcache invalidations when the contents of pg_dist_node are changed
 * on the SQL level.
 *
@@ -2850,7 +2886,7 @@ master_dist_placement_cache_invalidate(PG_FUNCTION_ARGS)
 * are much easier ways to waste CPU than causing cache invalidations.
 */
 Datum
-master_dist_node_cache_invalidate(PG_FUNCTION_ARGS)
+citus_dist_node_cache_invalidate(PG_FUNCTION_ARGS)
 {
 if (!CALLED_AS_TRIGGER(fcinfo))
 {
@@ -2866,6 +2902,16 @@ master_dist_node_cache_invalidate(PG_FUNCTION_ARGS)
 }


+/*
+ * master_dist_node_cache_invalidate is a wrapper function for old UDF name.
+ */
+Datum
+master_dist_node_cache_invalidate(PG_FUNCTION_ARGS)
+{
+ return citus_dist_node_cache_invalidate(fcinfo);
+}
+
+
 /*
 * master_dist_authinfo_cache_invalidate is a trigger function that performs
 * relcache invalidations when the contents of pg_dist_authinfo are changed
@@ -2892,7 +2938,7 @@ master_dist_authinfo_cache_invalidate(PG_FUNCTION_ARGS)


 /*
- * master_dist_local_group_cache_invalidate is a trigger function that performs
+ * citus_dist_local_group_cache_invalidate is a trigger function that performs
 * relcache invalidations when the contents of pg_dist_local_group are changed
 * on the SQL level.
 *
@@ -2900,7 +2946,7 @@ master_dist_authinfo_cache_invalidate(PG_FUNCTION_ARGS)
 * are much easier ways to waste CPU than causing cache invalidations.
 */
 Datum
-master_dist_local_group_cache_invalidate(PG_FUNCTION_ARGS)
+citus_dist_local_group_cache_invalidate(PG_FUNCTION_ARGS)
 {
 if (!CALLED_AS_TRIGGER(fcinfo))
 {
@@ -2917,7 +2963,17 @@ master_dist_local_group_cache_invalidate(PG_FUNCTION_ARGS)


 /*
- * master_dist_object_cache_invalidate is a trigger function that performs relcache
+ * master_dist_local_group_cache_invalidate is a wrapper function for old UDF name.
+ */
+Datum
+master_dist_local_group_cache_invalidate(PG_FUNCTION_ARGS)
+{
+ return citus_dist_local_group_cache_invalidate(fcinfo);
+}
+
+
+/*
+ * citus_dist_object_cache_invalidate is a trigger function that performs relcache
 * invalidation when the contents of pg_dist_object are changed on the SQL
 * level.
 *
@@ -2925,7 +2981,7 @@ master_dist_local_group_cache_invalidate(PG_FUNCTION_ARGS)
 * are much easier ways to waste CPU than causing cache invalidations.
 */
 Datum
-master_dist_object_cache_invalidate(PG_FUNCTION_ARGS)
+citus_dist_object_cache_invalidate(PG_FUNCTION_ARGS)
 {
 if (!CALLED_AS_TRIGGER(fcinfo))
 {
@@ -2941,6 +2997,16 @@ master_dist_object_cache_invalidate(PG_FUNCTION_ARGS)
 }


+/*
+ * master_dist_object_cache_invalidate is a wrapper function for old UDF name.
+ */
+Datum
+master_dist_object_cache_invalidate(PG_FUNCTION_ARGS)
+{
+ return citus_dist_object_cache_invalidate(fcinfo);
+}
+
+
 /*
 * InitializeCaches() registers invalidation handlers for metadata_cache.c's
 * caches.

@@ -115,13 +115,21 @@ static WorkerNode * SetShouldHaveShards(WorkerNode *workerNode, bool shouldHaveS

 /* declarations for dynamic loading */
 PG_FUNCTION_INFO_V1(citus_set_coordinator_host);
+PG_FUNCTION_INFO_V1(citus_add_node);
 PG_FUNCTION_INFO_V1(master_add_node);
+PG_FUNCTION_INFO_V1(citus_add_inactive_node);
 PG_FUNCTION_INFO_V1(master_add_inactive_node);
+PG_FUNCTION_INFO_V1(citus_add_secondary_node);
 PG_FUNCTION_INFO_V1(master_add_secondary_node);
+PG_FUNCTION_INFO_V1(citus_set_node_property);
 PG_FUNCTION_INFO_V1(master_set_node_property);
+PG_FUNCTION_INFO_V1(citus_remove_node);
 PG_FUNCTION_INFO_V1(master_remove_node);
+PG_FUNCTION_INFO_V1(citus_disable_node);
 PG_FUNCTION_INFO_V1(master_disable_node);
+PG_FUNCTION_INFO_V1(citus_activate_node);
 PG_FUNCTION_INFO_V1(master_activate_node);
+PG_FUNCTION_INFO_V1(citus_update_node);
 PG_FUNCTION_INFO_V1(master_update_node);
 PG_FUNCTION_INFO_V1(get_shard_id_for_distribution_column);

@@ -204,11 +212,11 @@ citus_set_coordinator_host(PG_FUNCTION_ARGS)


 /*
- * master_add_node function adds a new node to the cluster and returns its id. It also
+ * citus_add_node function adds a new node to the cluster and returns its id. It also
 * replicates all reference tables to the new node.
 */
 Datum
-master_add_node(PG_FUNCTION_ARGS)
+citus_add_node(PG_FUNCTION_ARGS)
 {
 text *nodeName = PG_GETARG_TEXT_P(0);
 int32 nodePort = PG_GETARG_INT32(1);
@@ -262,12 +270,22 @@ master_add_node(PG_FUNCTION_ARGS)


 /*
- * master_add_inactive_node function adds a new node to the cluster as inactive node
+ * master_add_node is a wrapper function for old UDF name.
+ */
+Datum
+master_add_node(PG_FUNCTION_ARGS)
+{
+ return citus_add_node(fcinfo);
+}
+
+
+/*
+ * citus_add_inactive_node function adds a new node to the cluster as inactive node
 * and returns id of the newly added node. It does not replicate reference
 * tables to the new node, it only adds new node to the pg_dist_node table.
 */
 Datum
-master_add_inactive_node(PG_FUNCTION_ARGS)
+citus_add_inactive_node(PG_FUNCTION_ARGS)
 {
 text *nodeName = PG_GETARG_TEXT_P(0);
 int32 nodePort = PG_GETARG_INT32(1);
@@ -296,11 +314,21 @@ master_add_inactive_node(PG_FUNCTION_ARGS)


 /*
- * master_add_secondary_node adds a new secondary node to the cluster. It accepts as
+ * master_add_inactive_node is a wrapper function for old UDF name.
+ */
+Datum
+master_add_inactive_node(PG_FUNCTION_ARGS)
+{
+ return citus_add_inactive_node(fcinfo);
+}
+
+
+/*
+ * citus_add_secondary_node adds a new secondary node to the cluster. It accepts as
 * arguments the primary node it should share a group with.
 */
 Datum
-master_add_secondary_node(PG_FUNCTION_ARGS)
+citus_add_secondary_node(PG_FUNCTION_ARGS)
 {
 text *nodeName = PG_GETARG_TEXT_P(0);
 int32 nodePort = PG_GETARG_INT32(1);
@@ -330,16 +358,26 @@ master_add_secondary_node(PG_FUNCTION_ARGS)


 /*
- * master_remove_node function removes the provided node from the pg_dist_node table of
+ * master_add_secondary_node is a wrapper function for old UDF name.
+ */
+Datum
+master_add_secondary_node(PG_FUNCTION_ARGS)
+{
+ return citus_add_secondary_node(fcinfo);
+}
+
+
+/*
+ * citus_remove_node function removes the provided node from the pg_dist_node table of
 * the master node and all nodes with metadata.
- * The call to the master_remove_node should be done by the super user and the specified
+ * The call to the citus_remove_node should be done by the super user and the specified
 * node should not have any active placements.
 * This function also deletes all reference table placements belong to the given node from
 * pg_dist_placement, but it does not drop actual placement at the node. In the case of
 * re-adding the node, master_add_node first drops and re-creates the reference tables.
 */
 Datum
-master_remove_node(PG_FUNCTION_ARGS)
+citus_remove_node(PG_FUNCTION_ARGS)
 {
 text *nodeNameText = PG_GETARG_TEXT_P(0);
 int32 nodePort = PG_GETARG_INT32(1);
@@ -354,19 +392,29 @@ master_remove_node(PG_FUNCTION_ARGS)


 /*
- * master_disable_node function sets isactive value of the provided node as inactive at
- * master node and all nodes with metadata regardless of the node having an active shard
+ * master_remove_node is a wrapper function for old UDF name.
+ */
+Datum
+master_remove_node(PG_FUNCTION_ARGS)
+{
+ return citus_remove_node(fcinfo);
+}
+
+
+/*
+ * citus_disable_node function sets isactive value of the provided node as inactive at
+ * coordinator and all nodes with metadata regardless of the node having an active shard
 * placement.
 *
- * The call to the master_disable_node must be done by the super user.
+ * The call to the citus_disable_node must be done by the super user.
 *
 * This function also deletes all reference table placements belong to the given node
 * from pg_dist_placement, but it does not drop actual placement at the node. In the case
- * of re-activating the node, master_add_node first drops and re-creates the reference
+ * of re-activating the node, citus_add_node first drops and re-creates the reference
 * tables.
 */
 Datum
-master_disable_node(PG_FUNCTION_ARGS)
+citus_disable_node(PG_FUNCTION_ARGS)
 {
 text *nodeNameText = PG_GETARG_TEXT_P(0);
 int32 nodePort = PG_GETARG_INT32(1);
@@ -434,10 +482,20 @@ master_disable_node(PG_FUNCTION_ARGS)


 /*
- * master_set_node_property sets a property of the node
+ * master_disable_node is a wrapper function for old UDF name.
 */
 Datum
-master_set_node_property(PG_FUNCTION_ARGS)
+master_disable_node(PG_FUNCTION_ARGS)
 {
+ return citus_disable_node(fcinfo);
+}
+
+
+/*
+ * citus_set_node_property sets a property of the node
+ */
+Datum
+citus_set_node_property(PG_FUNCTION_ARGS)
+{
 text *nodeNameText = PG_GETARG_TEXT_P(0);
 int32 nodePort = PG_GETARG_INT32(1);
@@ -464,6 +522,16 @@ master_set_node_property(PG_FUNCTION_ARGS)
 }


+/*
+ * master_set_node_property is a wrapper function for old UDF name.
+ */
+Datum
+master_set_node_property(PG_FUNCTION_ARGS)
+{
+ return citus_set_node_property(fcinfo);
+}
+
+
 /*
 * SetUpDistributedTableDependencies sets up up the following on a node if it's
 * a primary node that currently stores data:
@@ -579,11 +647,11 @@ ModifiableWorkerNode(const char *nodeName, int32 nodePort)


 /*
- * master_activate_node UDF activates the given node. It sets the node's isactive
+ * citus_activate_node UDF activates the given node. It sets the node's isactive
 * value to active and replicates all reference tables to that node.
 */
 Datum
-master_activate_node(PG_FUNCTION_ARGS)
+citus_activate_node(PG_FUNCTION_ARGS)
 {
 text *nodeNameText = PG_GETARG_TEXT_P(0);
 int32 nodePort = PG_GETARG_INT32(1);
@@ -598,6 +666,16 @@ master_activate_node(PG_FUNCTION_ARGS)
 }


+/*
+ * master_activate_node is a wrapper function for old UDF name.
+ */
+Datum
+master_activate_node(PG_FUNCTION_ARGS)
+{
+ return citus_activate_node(fcinfo);
+}
+
+
 /*
 * GroupForNode returns the group which a given node belongs to.
 *
@@ -736,12 +814,12 @@ ActivateNode(char *nodeName, int nodePort)


 /*
- * master_update_node moves the requested node to a different nodename and nodeport. It
+ * citus_update_node moves the requested node to a different nodename and nodeport. It
 * locks to ensure no queries are running concurrently; and is intended for customers who
 * are running their own failover solution.
 */
 Datum
-master_update_node(PG_FUNCTION_ARGS)
+citus_update_node(PG_FUNCTION_ARGS)
 {
 int32 nodeId = PG_GETARG_INT32(0);

@@ -812,7 +890,7 @@ master_update_node(PG_FUNCTION_ARGS)
 * In case of node failure said long-running queries will fail in the end
 * anyway as they will be unable to commit successfully on the failed
 * machine. To cause quick failure of these queries use force => true
- * during the invocation of master_update_node to terminate conflicting
+ * during the invocation of citus_update_node to terminate conflicting
 * backends proactively.
 *
 * It might be worth blocking reads to a secondary for the same reasons,
@@ -833,7 +911,7 @@ master_update_node(PG_FUNCTION_ARGS)
 /*
 * We failed to start a background worker, which probably means that we exceeded
 * max_worker_processes, and this is unlikely to be resolved by retrying. We do not want
- * to repeatedly throw an error because if master_update_node is called to complete a
+ * to repeatedly throw an error because if citus_update_node is called to complete a
 * failover then finishing is the only way to bring the cluster back up. Therefore we
 * give up on killing other backends and simply wait for the lock. We do set
 * lock_timeout to lock_cooldown, because we don't want to wait forever to get a lock.
@@ -866,7 +944,7 @@ master_update_node(PG_FUNCTION_ARGS)

 /*
 * Propagate the updated pg_dist_node entry to all metadata workers.
- * citus-ha uses master_update_node() in a prepared transaction, and
+ * citus-ha uses citus_update_node() in a prepared transaction, and
 * we don't support coordinated prepared transactions, so we cannot
 * propagate the changes to the worker nodes here. Instead we mark
 * all metadata nodes as not-synced and ask maintenanced to do the
@@ -896,6 +974,16 @@ master_update_node(PG_FUNCTION_ARGS)
 }


+/*
+ * master_update_node is a wrapper function for old UDF name.
+ */
+Datum
+master_update_node(PG_FUNCTION_ARGS)
+{
+ return citus_update_node(fcinfo);
+}
+
+
 /*
 * SetLockTimeoutLocally sets the lock_timeout to the given value.
 * This setting is local.
@@ -1905,7 +1993,7 @@ UnsetMetadataSyncedForAll(void)
 bool indexOK = false;

 /*
- * Concurrent master_update_node() calls might iterate and try to update
+ * Concurrent citus_update_node() calls might iterate and try to update
 * pg_dist_node in different orders. To protect against deadlock, we
 * get an exclusive lock here.
 */

@@ -95,6 +95,7 @@ static char * CreateDropShardPlacementCommand(const char *schemaName,

 /* exports for SQL callable functions */
 PG_FUNCTION_INFO_V1(master_apply_delete_command);
+PG_FUNCTION_INFO_V1(citus_drop_all_shards);
 PG_FUNCTION_INFO_V1(master_drop_all_shards);
 PG_FUNCTION_INFO_V1(master_drop_sequences);

@@ -207,12 +208,12 @@ master_apply_delete_command(PG_FUNCTION_ARGS)


 /*
- * master_drop_all_shards attempts to drop all shards for a given relation.
+ * citus_drop_all_shards attempts to drop all shards for a given relation.
 * Unlike master_apply_delete_command, this function can be called even
 * if the table has already been dropped.
 */
 Datum
-master_drop_all_shards(PG_FUNCTION_ARGS)
+citus_drop_all_shards(PG_FUNCTION_ARGS)
 {
 Oid relationId = PG_GETARG_OID(0);
 text *schemaNameText = PG_GETARG_TEXT_P(1);
@@ -236,7 +237,7 @@ master_drop_all_shards(PG_FUNCTION_ARGS)
 CheckTableSchemaNameForDrop(relationId, &schemaName, &relationName);

 /*
- * master_drop_all_shards is typically called from the DROP TABLE trigger,
+ * citus_drop_all_shards is typically called from the DROP TABLE trigger,
 * but could be called by a user directly. Make sure we have an
 * AccessExclusiveLock to prevent any other commands from running on this table
 * concurrently.
@@ -251,6 +252,16 @@ master_drop_all_shards(PG_FUNCTION_ARGS)
 }


+/*
+ * master_drop_all_shards is a wrapper function for old UDF name.
+ */
+Datum
+master_drop_all_shards(PG_FUNCTION_ARGS)
+{
+ return citus_drop_all_shards(fcinfo);
+}
+
+
 /*
 * master_drop_sequences was previously used to drop sequences on workers
 * when using metadata syncing.

@@ -85,7 +85,9 @@ static void UpdateColocatedShardPlacementMetadataOnWorkers(int64 shardId,
 int32 targetNodePort);

 /* declarations for dynamic loading */
+PG_FUNCTION_INFO_V1(citus_copy_shard_placement);
 PG_FUNCTION_INFO_V1(master_copy_shard_placement);
+PG_FUNCTION_INFO_V1(citus_move_shard_placement);
 PG_FUNCTION_INFO_V1(master_move_shard_placement);


@@ -93,17 +95,17 @@ bool DeferShardDeleteOnMove = false;


 /*
- * master_copy_shard_placement implements a user-facing UDF to repair data from
+ * citus_copy_shard_placement implements a user-facing UDF to repair data from
 * a healthy (source) node to an inactive (target) node. To accomplish this it
 * entirely recreates the table structure before copying all data. During this
 * time all modifications are paused to the shard. After successful repair, the
 * inactive placement is marked healthy and modifications may continue. If the
 * repair fails at any point, this function throws an error, leaving the node
- * in an unhealthy state. Please note that master_copy_shard_placement copies
+ * in an unhealthy state. Please note that citus_copy_shard_placement copies
 * given shard along with its co-located shards.
 */
 Datum
-master_copy_shard_placement(PG_FUNCTION_ARGS)
+citus_copy_shard_placement(PG_FUNCTION_ARGS)
 {
 int64 shardId = PG_GETARG_INT64(0);
 text *sourceNodeNameText = PG_GETARG_TEXT_P(1);
@@ -147,7 +149,17 @@ master_copy_shard_placement(PG_FUNCTION_ARGS)


 /*
- * master_move_shard_placement moves given shard (and its co-located shards) from one
+ * master_copy_shard_placement is a wrapper function for old UDF name.
+ */
+Datum
+master_copy_shard_placement(PG_FUNCTION_ARGS)
+{
+ return citus_copy_shard_placement(fcinfo);
+}
+
+
+/*
+ * citus_move_shard_placement moves given shard (and its co-located shards) from one
 * node to the other node. To accomplish this it entirely recreates the table structure
 * before copying all data.
 *
@@ -162,7 +174,7 @@ master_copy_shard_placement(PG_FUNCTION_ARGS)
 * any changes in source node or target node.
 */
 Datum
-master_move_shard_placement(PG_FUNCTION_ARGS)
+citus_move_shard_placement(PG_FUNCTION_ARGS)
 {
 int64 shardId = PG_GETARG_INT64(0);
 char *sourceNodeName = text_to_cstring(PG_GETARG_TEXT_P(1));
@@ -198,7 +210,7 @@ master_move_shard_placement(PG_FUNCTION_ARGS)

 /*
 * Block concurrent DDL / TRUNCATE commands on the relation. Similarly,
- * block concurrent master_move_shard_placement() on any shard of
+ * block concurrent citus_move_shard_placement() on any shard of
 * the same relation. This is OK for now since we're executing shard
 * moves sequentially anyway.
 */
@@ -274,9 +286,19 @@ master_move_shard_placement(PG_FUNCTION_ARGS)
 }


+/*
+ * master_move_shard_placement is a wrapper function for old UDF name.
+ */
+Datum
+master_move_shard_placement(PG_FUNCTION_ARGS)
+{
+ return citus_move_shard_placement(fcinfo);
+}
+
+
 /*
 * ErrorIfMoveCitusLocalTable is a helper function for rebalance_table_shards
- * and master_move_shard_placement udf's to error out if relation with relationId
+ * and citus_move_shard_placement udf's to error out if relation with relationId
 * is a citus local table.
 */
 void

@@ -151,12 +151,12 @@ static void EnsureShardCostUDF(Oid functionOid);
 static void EnsureNodeCapacityUDF(Oid functionOid);
 static void EnsureShardAllowedOnNodeUDF(Oid functionOid);


 /* declarations for dynamic loading */
 PG_FUNCTION_INFO_V1(rebalance_table_shards);
 PG_FUNCTION_INFO_V1(replicate_table_shards);
 PG_FUNCTION_INFO_V1(get_rebalance_table_shards_plan);
 PG_FUNCTION_INFO_V1(get_rebalance_progress);
+PG_FUNCTION_INFO_V1(citus_drain_node);
 PG_FUNCTION_INFO_V1(master_drain_node);
 PG_FUNCTION_INFO_V1(citus_shard_cost_by_disk_size);
 PG_FUNCTION_INFO_V1(citus_validate_rebalance_strategy_functions);
@@ -788,11 +788,11 @@ GetRebalanceStrategy(Name name)


 /*
- * master_drain_node drains a node by setting shouldhaveshards to false and
+ * citus_drain_node drains a node by setting shouldhaveshards to false and
 * running the rebalancer after in drain_only mode.
 */
 Datum
-master_drain_node(PG_FUNCTION_ARGS)
+citus_drain_node(PG_FUNCTION_ARGS)
 {
 PG_ENSURE_ARGNOTNULL(0, "nodename");
 PG_ENSURE_ARGNOTNULL(1, "nodeport");
@@ -863,6 +863,16 @@ replicate_table_shards(PG_FUNCTION_ARGS)
 }


+/*
+ * master_drain_node is a wrapper function for old UDF name.
+ */
+Datum
+master_drain_node(PG_FUNCTION_ARGS)
+{
+ return citus_drain_node(fcinfo);
+}
+
+
 /*
 * get_rebalance_table_shards_plan function calculates the shard move steps
 * required for the rebalance operations including the ones for colocated
@@ -1160,7 +1170,7 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent,
 if (updateType == PLACEMENT_UPDATE_MOVE)
 {
 appendStringInfo(placementUpdateCommand,
- "SELECT master_move_shard_placement(%ld,%s,%u,%s,%u,%s)",
+ "SELECT citus_move_shard_placement(%ld,%s,%u,%s,%u,%s)",
 shardId,
 quote_literal_cstr(sourceNode->workerName),
 sourceNode->workerPort,
@@ -1171,7 +1181,7 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent,
 else if (updateType == PLACEMENT_UPDATE_COPY)
 {
 appendStringInfo(placementUpdateCommand,
- "SELECT master_copy_shard_placement(%ld,%s,%u,%s,%u,%s,%s)",
+ "SELECT citus_copy_shard_placement(%ld,%s,%u,%s,%u,%s,%s)",
 shardId,
 quote_literal_cstr(sourceNode->workerName),
 sourceNode->workerPort,

@@ -69,6 +69,7 @@ static bool WorkerShardStats(ShardPlacement *placement, Oid relationId,
 /* exports for SQL callable functions */
 PG_FUNCTION_INFO_V1(master_create_empty_shard);
 PG_FUNCTION_INFO_V1(master_append_table_to_shard);
+PG_FUNCTION_INFO_V1(citus_update_shard_statistics);
 PG_FUNCTION_INFO_V1(master_update_shard_statistics);


@@ -345,11 +346,11 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)


 /*
- * master_update_shard_statistics updates metadata (shard size and shard min/max
+ * citus_update_shard_statistics updates metadata (shard size and shard min/max
 * values) of the given shard and returns the updated shard size.
 */
 Datum
-master_update_shard_statistics(PG_FUNCTION_ARGS)
+citus_update_shard_statistics(PG_FUNCTION_ARGS)
 {
 int64 shardId = PG_GETARG_INT64(0);

@@ -361,6 +362,16 @@ master_update_shard_statistics(PG_FUNCTION_ARGS)
 }


+/*
+ * master_update_shard_statistics is a wrapper function for old UDF name.
+ */
+Datum
+master_update_shard_statistics(PG_FUNCTION_ARGS)
+{
+ return citus_update_shard_statistics(fcinfo);
+}
+
+
 /*
 * CheckDistributedTable checks if the given relationId corresponds to a
 * distributed table. If it does not, the function errors out.

@@ -9,8 +9,44 @@ DROP FUNCTION IF EXISTS pg_catalog.citus_total_relation_size(regclass);
 #include "udfs/undistribute_table/10.0-1.sql"
 #include "udfs/create_citus_local_table/10.0-1.sql"
 #include "udfs/citus_set_coordinator_host/10.0-1.sql"
+#include "udfs/citus_add_node/10.0-1.sql"
+#include "udfs/citus_activate_node/10.0-1.sql"
+#include "udfs/citus_add_inactive_node/10.0-1.sql"
+#include "udfs/citus_add_secondary_node/10.0-1.sql"
+#include "udfs/citus_disable_node/10.0-1.sql"
+#include "udfs/citus_drain_node/10.0-1.sql"
+#include "udfs/citus_remove_node/10.0-1.sql"
+#include "udfs/citus_set_node_property/10.0-1.sql"
+#include "udfs/citus_unmark_object_distributed/10.0-1.sql"
+#include "udfs/citus_update_node/10.0-1.sql"
+#include "udfs/citus_update_shard_statistics/10.0-1.sql"
+#include "udfs/citus_update_table_statistics/10.0-1.sql"
+#include "udfs/citus_copy_shard_placement/10.0-1.sql"
+#include "udfs/citus_move_shard_placement/10.0-1.sql"
+#include "udfs/citus_drop_trigger/10.0-1.sql"

 #include "../../columnar/sql/columnar--9.5-1--10.0-1.sql"

 #include "udfs/time_partition_range/10.0-1.sql"
 #include "udfs/time_partitions/10.0-1.sql"
+
+ALTER FUNCTION master_conninfo_cache_invalidate()
+RENAME TO citus_conninfo_cache_invalidate;
+ALTER FUNCTION master_dist_local_group_cache_invalidate()
+RENAME TO citus_dist_local_group_cache_invalidate;
+ALTER FUNCTION master_dist_node_cache_invalidate()
+RENAME TO citus_dist_node_cache_invalidate;
+ALTER FUNCTION master_dist_object_cache_invalidate()
+RENAME TO citus_dist_object_cache_invalidate;
+ALTER FUNCTION master_dist_partition_cache_invalidate()
+RENAME TO citus_dist_partition_cache_invalidate;
+ALTER FUNCTION master_dist_placement_cache_invalidate()
+RENAME TO citus_dist_placement_cache_invalidate;
+ALTER FUNCTION master_dist_shard_cache_invalidate()
+RENAME TO citus_dist_shard_cache_invalidate;
+ALTER FUNCTION master_drop_all_shards(regclass, text, text)
+RENAME TO citus_drop_all_shards;
+
+DROP FUNCTION pg_catalog.master_modify_multiple_shards(text);
+DROP FUNCTION master_create_distributed_table(regclass, text, citus.distribution_type);
+DROP FUNCTION master_create_worker_shards(text, integer, integer);

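After the upgrade script above has run, one way to confirm the trigger-function renames is to query the catalog. This is only a sketch and assumes the 9.5-1 to 10.0-1 extension upgrade has been applied:

    SELECT proname
    FROM pg_proc
    WHERE proname LIKE 'citus_dist_%_cache_invalidate'
    ORDER BY proname;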
@@ -9,12 +9,68 @@ DROP VIEW public.citus_tables;
 DROP FUNCTION pg_catalog.citus_total_relation_size(regclass,boolean);
 DROP FUNCTION pg_catalog.undistribute_table(regclass,boolean);
 DROP FUNCTION pg_catalog.create_citus_local_table(regclass,boolean);
+DROP FUNCTION pg_catalog.citus_add_node(text, integer, integer, noderole, name);
+DROP FUNCTION pg_catalog.citus_activate_node(text, integer);
+DROP FUNCTION pg_catalog.citus_add_inactive_node(text, integer, integer, noderole, name);
+DROP FUNCTION pg_catalog.citus_add_secondary_node(text, integer, text, integer, name);
+DROP FUNCTION pg_catalog.citus_disable_node(text, integer);
+DROP FUNCTION pg_catalog.citus_drain_node(text, integer, citus.shard_transfer_mode, name);
+DROP FUNCTION pg_catalog.citus_remove_node(text, integer);
+DROP FUNCTION pg_catalog.citus_set_node_property(text, integer, text, boolean);
+DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid, oid, int);
+DROP FUNCTION pg_catalog.citus_update_node(int, text, int, bool, int);
+DROP FUNCTION pg_catalog.citus_update_shard_statistics(bigint);
+DROP FUNCTION pg_catalog.citus_update_table_statistics(regclass);
+DROP FUNCTION pg_catalog.citus_copy_shard_placement(bigint, text, integer, text, integer, bool, citus.shard_transfer_mode);
+DROP FUNCTION pg_catalog.citus_move_shard_placement(bigint, text, integer, text, integer, citus.shard_transfer_mode);
+
+ALTER FUNCTION citus_conninfo_cache_invalidate()
+RENAME TO master_conninfo_cache_invalidate;
+ALTER FUNCTION citus_dist_local_group_cache_invalidate()
+RENAME TO master_dist_local_group_cache_invalidate;
+ALTER FUNCTION citus_dist_node_cache_invalidate()
+RENAME TO master_dist_node_cache_invalidate;
+ALTER FUNCTION citus_dist_object_cache_invalidate()
+RENAME TO master_dist_object_cache_invalidate;
+ALTER FUNCTION citus_dist_partition_cache_invalidate()
+RENAME TO master_dist_partition_cache_invalidate;
+ALTER FUNCTION citus_dist_placement_cache_invalidate()
+RENAME TO master_dist_placement_cache_invalidate;
+ALTER FUNCTION citus_dist_shard_cache_invalidate()
+RENAME TO master_dist_shard_cache_invalidate;
+ALTER FUNCTION citus_drop_all_shards(regclass, text, text)
+RENAME TO master_drop_all_shards;

 DROP VIEW pg_catalog.time_partitions;
 DROP FUNCTION pg_catalog.time_partition_range(regclass);

 DROP FUNCTION pg_catalog.citus_set_coordinator_host(text,int,noderole,name);
+
+CREATE FUNCTION pg_catalog.master_modify_multiple_shards(text)
+    RETURNS integer
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$master_modify_multiple_shards$$;
+COMMENT ON FUNCTION master_modify_multiple_shards(text)
+    IS 'push delete and update queries to shards';
+
+CREATE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
+    distribution_column text,
+    distribution_method citus.distribution_type)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$master_create_distributed_table$$;
+COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
+    distribution_column text,
+    distribution_method citus.distribution_type)
+    IS 'define the table distribution functions';
+
+CREATE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
+    replication_factor integer DEFAULT 2)
+    RETURNS void
+    AS 'MODULE_PATHNAME'
+    LANGUAGE C STRICT;
+
 #include "../udfs/citus_drop_trigger/9.5-1.sql"
 #include "../udfs/citus_total_relation_size/7.0-1.sql"
 #include "../udfs/upgrade_to_reference_table/8.0-1.sql"
 #include "../udfs/undistribute_table/9.5-1.sql"

@@ -0,0 +1,9 @@
+CREATE FUNCTION pg_catalog.citus_activate_node(nodename text,
+    nodeport integer)
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME',$$citus_activate_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_activate_node(nodename text, nodeport integer)
+    IS 'activate a node which is in the cluster';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_activate_node(text, integer) FROM PUBLIC;

@@ -0,0 +1,9 @@
+CREATE FUNCTION pg_catalog.citus_activate_node(nodename text,
+    nodeport integer)
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME',$$citus_activate_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_activate_node(nodename text, nodeport integer)
+    IS 'activate a node which is in the cluster';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_activate_node(text, integer) FROM PUBLIC;

@@ -0,0 +1,13 @@
+CREATE FUNCTION pg_catalog.citus_add_inactive_node(nodename text,
+    nodeport integer,
+    groupid integer default -1,
+    noderole noderole default 'primary',
+    nodecluster name default 'default')
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME',$$citus_add_inactive_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_add_inactive_node(nodename text,nodeport integer,
+    groupid integer, noderole noderole,
+    nodecluster name)
+    IS 'prepare node by adding it to pg_dist_node';
+REVOKE ALL ON FUNCTION pg_catalog.citus_add_inactive_node(text,int,int,noderole,name) FROM PUBLIC;

@@ -0,0 +1,13 @@
+CREATE FUNCTION pg_catalog.citus_add_inactive_node(nodename text,
+    nodeport integer,
+    groupid integer default -1,
+    noderole noderole default 'primary',
+    nodecluster name default 'default')
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME',$$citus_add_inactive_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_add_inactive_node(nodename text,nodeport integer,
+    groupid integer, noderole noderole,
+    nodecluster name)
+    IS 'prepare node by adding it to pg_dist_node';
+REVOKE ALL ON FUNCTION pg_catalog.citus_add_inactive_node(text,int,int,noderole,name) FROM PUBLIC;

@@ -0,0 +1,12 @@
+CREATE FUNCTION pg_catalog.citus_add_node(nodename text,
+    nodeport integer,
+    groupid integer default -1,
+    noderole noderole default 'primary',
+    nodecluster name default 'default')
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_add_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_add_node(nodename text, nodeport integer,
+    groupid integer, noderole noderole, nodecluster name)
+    IS 'add node to the cluster';
+REVOKE ALL ON FUNCTION pg_catalog.citus_add_node(text,int,int,noderole,name) FROM PUBLIC;

@@ -0,0 +1,12 @@
+CREATE FUNCTION pg_catalog.citus_add_node(nodename text,
+    nodeport integer,
+    groupid integer default -1,
+    noderole noderole default 'primary',
+    nodecluster name default 'default')
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_add_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_add_node(nodename text, nodeport integer,
+    groupid integer, noderole noderole, nodecluster name)
+    IS 'add node to the cluster';
+REVOKE ALL ON FUNCTION pg_catalog.citus_add_node(text,int,int,noderole,name) FROM PUBLIC;

@@ -0,0 +1,14 @@
+CREATE FUNCTION pg_catalog.citus_add_secondary_node(nodename text,
+    nodeport integer,
+    primaryname text,
+    primaryport integer,
+    nodecluster name default 'default')
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_add_secondary_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_add_secondary_node(nodename text, nodeport integer,
+    primaryname text, primaryport integer,
+    nodecluster name)
+    IS 'add a secondary node to the cluster';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_add_secondary_node(text,int,text,int,name) FROM PUBLIC;

@@ -0,0 +1,14 @@
+CREATE FUNCTION pg_catalog.citus_add_secondary_node(nodename text,
+    nodeport integer,
+    primaryname text,
+    primaryport integer,
+    nodecluster name default 'default')
+    RETURNS INTEGER
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_add_secondary_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_add_secondary_node(nodename text, nodeport integer,
+    primaryname text, primaryport integer,
+    nodecluster name)
+    IS 'add a secondary node to the cluster';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_add_secondary_node(text,int,text,int,name) FROM PUBLIC;

@@ -0,0 +1,20 @@
+CREATE FUNCTION pg_catalog.citus_copy_shard_placement(
+    shard_id bigint,
+    source_node_name text,
+    source_node_port integer,
+    target_node_name text,
+    target_node_port integer,
+    do_repair bool DEFAULT true,
+    transfer_mode citus.shard_transfer_mode default 'auto')
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_copy_shard_placement$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_copy_shard_placement(shard_id bigint,
+    source_node_name text,
+    source_node_port integer,
+    target_node_name text,
+    target_node_port integer,
+    do_repair bool,
+    shard_transfer_mode citus.shard_transfer_mode)
+IS 'copy a shard from the source node to the destination node';

@@ -0,0 +1,20 @@
+CREATE FUNCTION pg_catalog.citus_copy_shard_placement(
+    shard_id bigint,
+    source_node_name text,
+    source_node_port integer,
+    target_node_name text,
+    target_node_port integer,
+    do_repair bool DEFAULT true,
+    transfer_mode citus.shard_transfer_mode default 'auto')
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_copy_shard_placement$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_copy_shard_placement(shard_id bigint,
+    source_node_name text,
+    source_node_port integer,
+    target_node_name text,
+    target_node_port integer,
+    do_repair bool,
+    shard_transfer_mode citus.shard_transfer_mode)
+IS 'copy a shard from the source node to the destination node';

@@ -0,0 +1,8 @@
+CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_disable_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
+    IS 'removes node from the cluster temporarily';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int) FROM PUBLIC;

@@ -0,0 +1,8 @@
+CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_disable_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer)
+    IS 'removes node from the cluster temporarily';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int) FROM PUBLIC;

@@ -0,0 +1,13 @@
+CREATE FUNCTION pg_catalog.citus_drain_node(
+    nodename text,
+    nodeport integer,
+    shard_transfer_mode citus.shard_transfer_mode default 'auto',
+    rebalance_strategy name default NULL
+    )
+    RETURNS VOID
+    LANGUAGE C
+    AS 'MODULE_PATHNAME', $$citus_drain_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_drain_node(text,int,citus.shard_transfer_mode,name)
+    IS 'mark a node to be drained of data and actually drain it as well';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_drain_node(text,int,citus.shard_transfer_mode,name) FROM PUBLIC;

@@ -0,0 +1,13 @@
+CREATE FUNCTION pg_catalog.citus_drain_node(
+    nodename text,
+    nodeport integer,
+    shard_transfer_mode citus.shard_transfer_mode default 'auto',
+    rebalance_strategy name default NULL
+    )
+    RETURNS VOID
+    LANGUAGE C
+    AS 'MODULE_PATHNAME', $$citus_drain_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_drain_node(text,int,citus.shard_transfer_mode,name)
+    IS 'mark a node to be drained of data and actually drain it as well';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_drain_node(text,int,citus.shard_transfer_mode,name) FROM PUBLIC;

@@ -0,0 +1,31 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger()
+    RETURNS event_trigger
+    LANGUAGE plpgsql
+    SET search_path = pg_catalog
+    AS $cdbdt$
+DECLARE
+    v_obj record;
+    sequence_names text[] := '{}';
+    table_colocation_id integer;
+    propagate_drop boolean := false;
+BEGIN
+    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
+                 WHERE object_type IN ('table', 'foreign table')
+    LOOP
+        -- first drop the table and metadata on the workers
+        -- then drop all the shards on the workers
+        -- finally remove the pg_dist_partition entry on the coordinator
+        PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name);
+        PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name);
+        PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name);
+    END LOOP;
+
+    -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects
+    FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
+    LOOP
+        PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid);
+    END LOOP;
+END;
+$cdbdt$;
+COMMENT ON FUNCTION pg_catalog.citus_drop_trigger()
+    IS 'perform checks and actions at the end of DROP actions';

@@ -16,7 +16,7 @@ BEGIN
 -- then drop all the shards on the workers
 -- finally remove the pg_dist_partition entry on the coordinator
 PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name);
-PERFORM master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name);
+PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name);
 PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name);
 END LOOP;

@@ -29,5 +29,3 @@ END;
 $cdbdt$;
 COMMENT ON FUNCTION pg_catalog.citus_drop_trigger()
 IS 'perform checks and actions at the end of DROP actions';
-
-

@@ -0,0 +1,18 @@
+CREATE FUNCTION pg_catalog.citus_move_shard_placement(
+    shard_id bigint,
+    source_node_name text,
+    source_node_port integer,
+    target_node_name text,
+    target_node_port integer,
+    shard_transfer_mode citus.shard_transfer_mode default 'auto')
+RETURNS void LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_move_shard_placement$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_move_shard_placement(
+    shard_id bigint,
+    source_node_name text,
+    source_node_port integer,
+    target_node_name text,
+    target_node_port integer,
+    shard_transfer_mode citus.shard_transfer_mode)
+IS 'move a shard from a the source node to the destination node';

@@ -0,0 +1,18 @@
+CREATE FUNCTION pg_catalog.citus_move_shard_placement(
+    shard_id bigint,
+    source_node_name text,
+    source_node_port integer,
+    target_node_name text,
+    target_node_port integer,
+    shard_transfer_mode citus.shard_transfer_mode default 'auto')
+RETURNS void LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_move_shard_placement$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_move_shard_placement(
+    shard_id bigint,
+    source_node_name text,
+    source_node_port integer,
+    target_node_name text,
+    target_node_port integer,
+    shard_transfer_mode citus.shard_transfer_mode)
+IS 'move a shard from a the source node to the destination node';

@@ -0,0 +1,7 @@
+CREATE FUNCTION pg_catalog.citus_remove_node(nodename text, nodeport integer)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_remove_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_remove_node(nodename text, nodeport integer)
+    IS 'remove node from the cluster';
+REVOKE ALL ON FUNCTION pg_catalog.citus_remove_node(text,int) FROM PUBLIC;

@@ -0,0 +1,7 @@
+CREATE FUNCTION pg_catalog.citus_remove_node(nodename text, nodeport integer)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_remove_node$$;
+COMMENT ON FUNCTION pg_catalog.citus_remove_node(nodename text, nodeport integer)
+    IS 'remove node from the cluster';
+REVOKE ALL ON FUNCTION pg_catalog.citus_remove_node(text,int) FROM PUBLIC;

@@ -0,0 +1,21 @@
+CREATE FUNCTION pg_catalog.citus_set_node_property(
+    nodename text,
+    nodeport integer,
+    property text,
+    value boolean)
+    RETURNS VOID
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', 'citus_set_node_property';
+COMMENT ON FUNCTION pg_catalog.citus_set_node_property(
+    nodename text,
+    nodeport integer,
+    property text,
+    value boolean)
+    IS 'set a property of a node in pg_dist_node';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_set_node_property(
+    nodename text,
+    nodeport integer,
+    property text,
+    value boolean)
+FROM PUBLIC;

@@ -0,0 +1,21 @@
+CREATE FUNCTION pg_catalog.citus_set_node_property(
+    nodename text,
+    nodeport integer,
+    property text,
+    value boolean)
+    RETURNS VOID
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', 'citus_set_node_property';
+COMMENT ON FUNCTION pg_catalog.citus_set_node_property(
+    nodename text,
+    nodeport integer,
+    property text,
+    value boolean)
+    IS 'set a property of a node in pg_dist_node';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_set_node_property(
+    nodename text,
+    nodeport integer,
+    property text,
+    value boolean)
+FROM PUBLIC;

@@ -0,0 +1,6 @@
+CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_unmark_object_distributed$$;
+COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int)
+    IS 'remove an object address from citus.pg_dist_object once the object has been deleted';

@@ -0,0 +1,6 @@
+CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_unmark_object_distributed$$;
+COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int)
+    IS 'remove an object address from citus.pg_dist_object once the object has been deleted';

@@ -0,0 +1,17 @@
+CREATE FUNCTION pg_catalog.citus_update_node(node_id int,
+    new_node_name text,
+    new_node_port int,
+    force bool DEFAULT false,
+    lock_cooldown int DEFAULT 10000)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_update_node$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_update_node(node_id int,
+    new_node_name text,
+    new_node_port int,
+    force bool,
+    lock_cooldown int)
+    IS 'change the location of a node. when force => true it will wait lock_cooldown ms before killing competing locks';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_update_node(int,text,int,bool,int) FROM PUBLIC;

@@ -0,0 +1,17 @@
+CREATE FUNCTION pg_catalog.citus_update_node(node_id int,
+    new_node_name text,
+    new_node_port int,
+    force bool DEFAULT false,
+    lock_cooldown int DEFAULT 10000)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_update_node$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_update_node(node_id int,
+    new_node_name text,
+    new_node_port int,
+    force bool,
+    lock_cooldown int)
+    IS 'change the location of a node. when force => true it will wait lock_cooldown ms before killing competing locks';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_update_node(int,text,int,bool,int) FROM PUBLIC;

@@ -0,0 +1,6 @@
+CREATE FUNCTION pg_catalog.citus_update_shard_statistics(shard_id bigint)
+    RETURNS bigint
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_update_shard_statistics$$;
+COMMENT ON FUNCTION pg_catalog.citus_update_shard_statistics(bigint)
+    IS 'updates shard statistics and returns the updated shard size';

@@ -0,0 +1,6 @@
+CREATE FUNCTION pg_catalog.citus_update_shard_statistics(shard_id bigint)
+    RETURNS bigint
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_update_shard_statistics$$;
+COMMENT ON FUNCTION pg_catalog.citus_update_shard_statistics(bigint)
+    IS 'updates shard statistics and returns the updated shard size';

@ -0,0 +1,17 @@
CREATE FUNCTION pg_catalog.citus_update_table_statistics(relation regclass)
RETURNS VOID AS $$
DECLARE
    colocated_tables regclass[];
BEGIN
    SELECT get_colocated_table_array(relation) INTO colocated_tables;

    PERFORM
        master_update_shard_statistics(shardid)
    FROM
        pg_dist_shard
    WHERE
        logicalrelid = ANY (colocated_tables);
END;
$$ LANGUAGE 'plpgsql';
COMMENT ON FUNCTION pg_catalog.citus_update_table_statistics(regclass)
    IS 'updates shard statistics of the given table and its colocated tables';
@ -0,0 +1,17 @@
CREATE FUNCTION pg_catalog.citus_update_table_statistics(relation regclass)
RETURNS VOID AS $$
DECLARE
    colocated_tables regclass[];
BEGIN
    SELECT get_colocated_table_array(relation) INTO colocated_tables;

    PERFORM
        master_update_shard_statistics(shardid)
    FROM
        pg_dist_shard
    WHERE
        logicalrelid = ANY (colocated_tables);
END;
$$ LANGUAGE 'plpgsql';
COMMENT ON FUNCTION pg_catalog.citus_update_table_statistics(regclass)
    IS 'updates shard statistics of the given table and its colocated tables';
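A usage sketch; 'my_distributed_table' is a placeholder for any distributed table, and per the PL/pgSQL body above the statistics of the table and all of its colocated tables are refreshed:

SELECT citus_update_table_statistics('my_distributed_table'::regclass);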
@ -228,6 +228,7 @@ extern Datum master_apply_delete_command(PG_FUNCTION_ARGS);
extern Datum master_drop_sequences(PG_FUNCTION_ARGS);
extern Datum master_modify_multiple_shards(PG_FUNCTION_ARGS);
extern Datum lock_relation_if_exists(PG_FUNCTION_ARGS);
extern Datum citus_drop_all_shards(PG_FUNCTION_ARGS);
extern Datum master_drop_all_shards(PG_FUNCTION_ARGS);
extern int MasterDropAllShards(Oid relationId, char *schemaName, char *relationName);
@ -588,6 +588,23 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
DROP TABLE test_table;
DROP SCHEMA failure_create_table;
CREATE SCHEMA failure_create_table;
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
    replication_factor integer DEFAULT 2)
    RETURNS void
    AS 'citus', $$master_create_worker_shards$$
    LANGUAGE C STRICT;
-- Test master_create_worker_shards with 2pc
SET citus.multi_shard_commit_protocol TO "2pc";
CREATE TABLE test_table_2(id int, value_1 int);
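A sketch of how the recreated legacy helpers are then exercised in this test, based only on the signatures re-added above (the shard count and replication factor are illustrative):

SELECT master_create_distributed_table('test_table_2', 'id', 'hash');
SELECT master_create_worker_shards('test_table_2', 4, 1);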
@ -324,10 +324,10 @@ create_distributed_table
|
|||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-master-drop-all-shards: SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); <waiting ...>
|
||||
step s2-master-drop-all-shards: SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-master-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
2
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
|
@ -645,8 +645,8 @@ create_distributed_table
|
|||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-drop-all-shards: SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy');
|
||||
master_drop_all_shards
|
||||
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy');
|
||||
citus_drop_all_shards
|
||||
|
||||
1
|
||||
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
|
||||
|
|
|
@ -8,9 +8,9 @@ step s1-begin:
|
|||
BEGIN;
|
||||
|
||||
step s1-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
|
||||
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
16
|
||||
step s2-truncate:
|
||||
|
@ -29,9 +29,9 @@ step s1-begin:
|
|||
BEGIN;
|
||||
|
||||
step s1-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
|
||||
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
16
|
||||
step s2-apply-delete-command:
|
||||
|
@ -53,19 +53,19 @@ step s1-begin:
|
|||
BEGIN;
|
||||
|
||||
step s1-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
|
||||
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
16
|
||||
step s2-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
0
|
||||
|
||||
|
@ -77,9 +77,9 @@ step s1-begin:
|
|||
BEGIN;
|
||||
|
||||
step s1-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
|
||||
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
16
|
||||
step s2-select:
|
||||
|
@ -151,13 +151,13 @@ master_apply_delete_command
|
|||
|
||||
16
|
||||
step s2-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
0
|
||||
|
||||
|
@ -211,13 +211,13 @@ step s1-truncate:
|
|||
TRUNCATE append_table;
|
||||
|
||||
step s2-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
0
|
||||
|
||||
|
|
|
@ -320,10 +320,10 @@ create_distributed_table
|
|||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-master-drop-all-shards: SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); <waiting ...>
|
||||
step s2-master-drop-all-shards: SELECT citus_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-master-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
4
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
|
@ -734,8 +734,8 @@ create_distributed_table
|
|||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-drop-all-shards: SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy');
|
||||
master_drop_all_shards
|
||||
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy');
|
||||
citus_drop_all_shards
|
||||
|
||||
4
|
||||
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
|
||||
|
|
|
@ -307,10 +307,10 @@ step s1-initialize:
|
|||
|
||||
step s1-begin: BEGIN;
|
||||
step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;;
|
||||
step s2-master-drop-all-shards-on-inserted: SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); <waiting ...>
|
||||
step s2-master-drop-all-shards-on-inserted: SELECT citus_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-master-drop-all-shards-on-inserted: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
4
|
||||
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
|
||||
|
@ -623,10 +623,10 @@ step s1-initialize:
|
|||
|
||||
step s1-begin: BEGIN;
|
||||
step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;;
|
||||
step s2-master-drop-all-shards-on-selected: SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); <waiting ...>
|
||||
step s2-master-drop-all-shards-on-selected: SELECT citus_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-master-drop-all-shards-on-selected: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
4
|
||||
step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash;
|
||||
|
@ -916,8 +916,8 @@ step s1-initialize:
|
|||
COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
|
||||
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-drop-all-shards-on-inserted: SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash');
|
||||
master_drop_all_shards
|
||||
step s1-master-drop-all-shards-on-inserted: SELECT citus_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash');
|
||||
citus_drop_all_shards
|
||||
|
||||
4
|
||||
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; <waiting ...>
|
||||
|
@ -1210,8 +1210,8 @@ step s1-initialize:
|
|||
COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a && echo 6, b && echo 7, c && echo 8, d && echo 9, e' WITH CSV;
|
||||
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-drop-all-shards-on-selected: SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash');
|
||||
master_drop_all_shards
|
||||
step s1-master-drop-all-shards-on-selected: SELECT citus_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash');
|
||||
citus_drop_all_shards
|
||||
|
||||
4
|
||||
step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; <waiting ...>
|
||||
|
|
|
@ -259,10 +259,10 @@ create_distributed_table
|
|||
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-master-drop-all-shards: SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); <waiting ...>
|
||||
step s2-master-drop-all-shards: SELECT citus_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-master-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
4
|
||||
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
|
||||
|
@ -536,8 +536,8 @@ create_distributed_table
|
|||
|
||||
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-drop-all-shards: SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy');
|
||||
master_drop_all_shards
|
||||
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy');
|
||||
citus_drop_all_shards
|
||||
|
||||
4
|
||||
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
|
||||
|
|
|
@ -338,10 +338,10 @@ create_distributed_table
|
|||
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-master-drop-all-shards: SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); <waiting ...>
|
||||
step s2-master-drop-all-shards: SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-master-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
2
|
||||
step s1-select-count: SELECT COUNT(*) FROM range_copy;
|
||||
|
@ -673,8 +673,8 @@ create_distributed_table
|
|||
|
||||
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-drop-all-shards: SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy');
|
||||
master_drop_all_shards
|
||||
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy');
|
||||
citus_drop_all_shards
|
||||
|
||||
1
|
||||
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
|
||||
|
|
|
@ -621,8 +621,8 @@ create_distributed_table
|
|||
|
||||
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-master-drop-all-shards: SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy');
|
||||
master_drop_all_shards
|
||||
step s2-master-drop-all-shards: SELECT citus_drop_all_shards('select_append'::regclass, 'public', 'append_copy');
|
||||
citus_drop_all_shards
|
||||
|
||||
1
|
||||
step s1-commit: COMMIT;
|
||||
|
@ -973,8 +973,8 @@ create_distributed_table
|
|||
|
||||
step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-drop-all-shards: SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy');
|
||||
master_drop_all_shards
|
||||
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('select_append'::regclass, 'public', 'append_copy');
|
||||
citus_drop_all_shards
|
||||
|
||||
1
|
||||
step s1-commit: COMMIT;
|
||||
|
|
|
@ -282,10 +282,10 @@ step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b &&
|
|||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-truncate: TRUNCATE truncate_append;
|
||||
step s2-master-drop-all-shards: SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); <waiting ...>
|
||||
step s2-master-drop-all-shards: SELECT citus_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-master-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
citus_drop_all_shards
|
||||
|
||||
0
|
||||
step s2-commit: COMMIT;
|
||||
|
@ -559,8 +559,8 @@ create_distributed_table
|
|||
step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-master-drop-all-shards: SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append');
|
||||
master_drop_all_shards
|
||||
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append');
|
||||
citus_drop_all_shards
|
||||
|
||||
1
|
||||
step s2-truncate: TRUNCATE truncate_append; <waiting ...>
|
||||
|
|
|
@ -81,10 +81,10 @@ SELECT master_get_active_worker_nodes();
-- add some shard placements to the cluster
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
SELECT * FROM master_activate_node('localhost', :worker_2_port);
SELECT * FROM citus_activate_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
master_activate_node
citus_activate_node
---------------------------------------------------------------------
3
(1 row)
@ -15,9 +15,16 @@ SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
-- change this test every time the previous tests change the set of tables they leave
-- around.
SET client_min_messages TO 'WARNING';
DROP FUNCTION pg_catalog.master_create_worker_shards;
DROP EXTENSION citus CASCADE;
RESET client_min_messages;
CREATE EXTENSION citus;
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
    replication_factor integer DEFAULT 2)
    RETURNS void
    AS 'citus', $$master_create_worker_shards$$
    LANGUAGE C STRICT;
-- re-add the nodes to the cluster
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column?
@ -443,30 +443,63 @@ SELECT * FROM print_extension_changes();
|
|||
-- Snapshot of state at 10.0-1
|
||||
ALTER EXTENSION citus UPDATE TO '10.0-1';
|
||||
SELECT * FROM print_extension_changes();
|
||||
previous_object | current_object
|
||||
previous_object | current_object
|
||||
---------------------------------------------------------------------
|
||||
function citus_total_relation_size(regclass) |
|
||||
function create_citus_local_table(regclass) |
|
||||
function undistribute_table(regclass) |
|
||||
function upgrade_to_reference_table(regclass) |
|
||||
| access method columnar
|
||||
| function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean)
|
||||
| function alter_columnar_table_set(regclass,integer,integer,name,integer)
|
||||
| function citus_internal.columnar_ensure_objects_exist()
|
||||
| function citus_set_coordinator_host(text,integer,noderole,name)
|
||||
| function citus_total_relation_size(regclass,boolean)
|
||||
| function columnar.columnar_handler(internal)
|
||||
| function create_citus_local_table(regclass,boolean)
|
||||
| function time_partition_range(regclass)
|
||||
| function undistribute_table(regclass,boolean)
|
||||
| schema columnar
|
||||
| sequence columnar.storageid_seq
|
||||
| table columnar.columnar_skipnodes
|
||||
| table columnar.columnar_stripes
|
||||
| table columnar.options
|
||||
| view citus_tables
|
||||
| view time_partitions
|
||||
(21 rows)
|
||||
function citus_total_relation_size(regclass) |
|
||||
function create_citus_local_table(regclass) |
|
||||
function master_conninfo_cache_invalidate() |
|
||||
function master_create_distributed_table(regclass,text,citus.distribution_type) |
|
||||
function master_create_worker_shards(text,integer,integer) |
|
||||
function master_dist_local_group_cache_invalidate() |
|
||||
function master_dist_node_cache_invalidate() |
|
||||
function master_dist_object_cache_invalidate() |
|
||||
function master_dist_partition_cache_invalidate() |
|
||||
function master_dist_placement_cache_invalidate() |
|
||||
function master_dist_shard_cache_invalidate() |
|
||||
function master_drop_all_shards(regclass,text,text) |
|
||||
function master_modify_multiple_shards(text) |
|
||||
function undistribute_table(regclass) |
|
||||
function upgrade_to_reference_table(regclass) |
|
||||
| access method columnar
|
||||
| function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean)
|
||||
| function alter_columnar_table_set(regclass,integer,integer,name,integer)
|
||||
| function citus_activate_node(text,integer)
|
||||
| function citus_add_inactive_node(text,integer,integer,noderole,name)
|
||||
| function citus_add_node(text,integer,integer,noderole,name)
|
||||
| function citus_add_secondary_node(text,integer,text,integer,name)
|
||||
| function citus_conninfo_cache_invalidate()
|
||||
| function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
|
||||
| function citus_disable_node(text,integer)
|
||||
| function citus_dist_local_group_cache_invalidate()
|
||||
| function citus_dist_node_cache_invalidate()
|
||||
| function citus_dist_object_cache_invalidate()
|
||||
| function citus_dist_partition_cache_invalidate()
|
||||
| function citus_dist_placement_cache_invalidate()
|
||||
| function citus_dist_shard_cache_invalidate()
|
||||
| function citus_drain_node(text,integer,citus.shard_transfer_mode,name)
|
||||
| function citus_drop_all_shards(regclass,text,text)
|
||||
| function citus_internal.columnar_ensure_objects_exist()
|
||||
| function citus_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
|
||||
| function citus_remove_node(text,integer)
|
||||
| function citus_set_coordinator_host(text,integer,noderole,name)
|
||||
| function citus_set_node_property(text,integer,text,boolean)
|
||||
| function citus_total_relation_size(regclass,boolean)
|
||||
| function citus_unmark_object_distributed(oid,oid,integer)
|
||||
| function citus_update_node(integer,text,integer,boolean,integer)
|
||||
| function citus_update_shard_statistics(bigint)
|
||||
| function citus_update_table_statistics(regclass)
|
||||
| function columnar.columnar_handler(internal)
|
||||
| function create_citus_local_table(regclass,boolean)
|
||||
| function time_partition_range(regclass)
|
||||
| function undistribute_table(regclass,boolean)
|
||||
| schema columnar
|
||||
| sequence columnar.storageid_seq
|
||||
| table columnar.columnar_skipnodes
|
||||
| table columnar.columnar_stripes
|
||||
| table columnar.options
|
||||
| view citus_tables
|
||||
| view time_partitions
|
||||
(54 rows)
|
||||
|
||||
DROP TABLE prev_objects, extension_diff;
|
||||
-- show running version
|
||||
|
|
|
@ -443,26 +443,59 @@ SELECT * FROM print_extension_changes();
|
|||
-- Snapshot of state at 10.0-1
|
||||
ALTER EXTENSION citus UPDATE TO '10.0-1';
|
||||
SELECT * FROM print_extension_changes();
|
||||
previous_object | current_object
|
||||
previous_object | current_object
|
||||
---------------------------------------------------------------------
|
||||
function citus_total_relation_size(regclass) |
|
||||
function create_citus_local_table(regclass) |
|
||||
function undistribute_table(regclass) |
|
||||
function upgrade_to_reference_table(regclass) |
|
||||
| function citus_internal.columnar_ensure_objects_exist()
|
||||
| function citus_set_coordinator_host(text,integer,noderole,name)
|
||||
| function citus_total_relation_size(regclass,boolean)
|
||||
| function create_citus_local_table(regclass,boolean)
|
||||
| function time_partition_range(regclass)
|
||||
| function undistribute_table(regclass,boolean)
|
||||
| schema columnar
|
||||
| sequence columnar.storageid_seq
|
||||
| table columnar.columnar_skipnodes
|
||||
| table columnar.columnar_stripes
|
||||
| table columnar.options
|
||||
| view citus_tables
|
||||
| view time_partitions
|
||||
(17 rows)
|
||||
function citus_total_relation_size(regclass) |
|
||||
function create_citus_local_table(regclass) |
|
||||
function master_conninfo_cache_invalidate() |
|
||||
function master_create_distributed_table(regclass,text,citus.distribution_type) |
|
||||
function master_create_worker_shards(text,integer,integer) |
|
||||
function master_dist_local_group_cache_invalidate() |
|
||||
function master_dist_node_cache_invalidate() |
|
||||
function master_dist_object_cache_invalidate() |
|
||||
function master_dist_partition_cache_invalidate() |
|
||||
function master_dist_placement_cache_invalidate() |
|
||||
function master_dist_shard_cache_invalidate() |
|
||||
function master_drop_all_shards(regclass,text,text) |
|
||||
function master_modify_multiple_shards(text) |
|
||||
function undistribute_table(regclass) |
|
||||
function upgrade_to_reference_table(regclass) |
|
||||
| function citus_activate_node(text,integer)
|
||||
| function citus_add_inactive_node(text,integer,integer,noderole,name)
|
||||
| function citus_add_node(text,integer,integer,noderole,name)
|
||||
| function citus_add_secondary_node(text,integer,text,integer,name)
|
||||
| function citus_conninfo_cache_invalidate()
|
||||
| function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
|
||||
| function citus_disable_node(text, integer)
|
||||
| function citus_dist_local_group_cache_invalidate()
|
||||
| function citus_dist_node_cache_invalidate()
|
||||
| function citus_dist_object_cache_invalidate()
|
||||
| function citus_dist_partition_cache_invalidate()
|
||||
| function citus_dist_placement_cache_invalidate()
|
||||
| function citus_dist_shard_cache_invalidate()
|
||||
| function citus_drain_node(text,integer,citus.shard_transfer_mode,name)
|
||||
| function citus_drop_all_shards(regclass,text,text)
|
||||
| function citus_internal.columnar_ensure_objects_exist()
|
||||
| function citus_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
|
||||
| function citus_remove_node(text,integer)
|
||||
| function citus_set_coordinator_host(text,integer,noderole,name)
|
||||
| function citus_set_node_property(text,integer,text,boolean)
|
||||
| function citus_total_relation_size(regclass,boolean)
|
||||
| function citus_unmark_object_distributed(oid,oid,integer)
|
||||
| function citus_update_node(integer,text,integer,boolean,integer)
|
||||
| function citus_update_shard_statistics(bigint)
|
||||
| function citus_update_table_statistics(regclass)
|
||||
| function create_citus_local_table(regclass,boolean)
|
||||
| function time_partition_range(regclass)
|
||||
| function undistribute_table(regclass,boolean)
|
||||
| schema columnar
|
||||
| sequence columnar.storageid_seq
|
||||
| table columnar.columnar_skipnodes
|
||||
| table columnar.columnar_stripes
|
||||
| table columnar.options
|
||||
| view citus_tables
|
||||
| view time_partitions
|
||||
(50 rows)
|
||||
|
||||
DROP TABLE prev_objects, extension_diff;
|
||||
-- show running version
|
||||
|
|
|
@ -33,6 +33,23 @@ SELECT unnest(master_metadata_snapshot()) order by 1;
TRUNCATE pg_dist_node CASCADE
(3 rows)

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
    replication_factor integer DEFAULT 2)
    RETURNS void
    AS 'citus', $$master_create_worker_shards$$
    LANGUAGE C STRICT;
-- Create a test table with constraints and SERIAL
CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL);
SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash');
@ -249,8 +249,8 @@ SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and s
0
(1 row)

-- make sure that master_drop_all_shards does not work from the worker nodes
SELECT master_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts');
-- make sure that citus_drop_all_shards does not work from the worker nodes
SELECT citus_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts');
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
-- Ensure pg_dist_transaction is empty for test
@ -325,7 +325,7 @@ SELECT raise_failed_aclcheck($$
ERROR: must be owner of the object
CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE
SELECT raise_failed_aclcheck($$
SELECT master_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
SELECT citus_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
$$);
ERROR: must be owner of the object
CONTEXT: PL/pgSQL function raise_failed_aclcheck(text) line 6 at RAISE
@ -376,7 +376,7 @@ $$);

(1 row)

SELECT master_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
SELECT citus_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
SELECT master_remove_partition_metadata('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
@ -4,6 +4,23 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000;
SET citus.multi_shard_commit_protocol = '2pc';
SET citus.shard_count TO 2;
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
    replication_factor integer DEFAULT 2)
    RETURNS void
    AS 'citus', $$master_create_worker_shards$$
    LANGUAGE C STRICT;
-- Verify that a table name > 56 characters gets hashed properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
    col1 integer not null,
@ -1,357 +0,0 @@
|
|||
--
|
||||
-- MULTI_SHARD_MODIFY
|
||||
--
|
||||
SET citus.next_shard_id TO 350000;
|
||||
-- Create a new hash partitioned multi_shard_modify_test table and load data into it.
|
||||
CREATE TABLE multi_shard_modify_test (
|
||||
t_key integer not null,
|
||||
t_name varchar(25) not null,
|
||||
t_value integer not null);
|
||||
SELECT create_distributed_table('multi_shard_modify_test', 't_key', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY multi_shard_modify_test (t_key, t_name, t_value) FROM STDIN WITH (FORMAT 'csv');
|
||||
-- Testing master_modify_multiple_shards
|
||||
-- Verify that master_modify_multiple_shards can be rolled back
|
||||
BEGIN;
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 10 AND t_key <= 13');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 202');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
ROLLBACK;
|
||||
SELECT count(*) FROM multi_shard_modify_test;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
27
|
||||
(1 row)
|
||||
|
||||
-- commands with volatile functions in their quals
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (random() * 1000)');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_value = (random() * 1000)');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE
|
||||
-- commands with immutable functions in their quals
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = abs(-3)');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- DELETE with expression in WHERE clause
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (3*18-40)');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- commands with a USING a non distributed table error out
|
||||
CREATE TABLE temp_nations(name text, key integer);
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' ');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- commands with a USING clause are unsupported
|
||||
SELECT create_distributed_table('temp_nations', 'name', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' ');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
|
||||
-- commands with a RETURNING clause are unsupported
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 3 RETURNING *');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- commands containing a CTE are unsupported
|
||||
SELECT master_modify_multiple_shards('WITH deleted_stuff AS (INSERT INTO multi_shard_modify_test DEFAULT VALUES RETURNING *) DELETE FROM multi_shard_modify_test');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
ERROR: cannot perform an INSERT without a partition column value
|
||||
-- Check that we can successfully delete from multiple shards with 1PC
|
||||
SET citus.multi_shard_commit_protocol TO '1pc';
|
||||
SELECT count(*) FROM multi_shard_modify_test;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
25
|
||||
(1 row)
|
||||
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 200');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM multi_shard_modify_test;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
23
|
||||
(1 row)
|
||||
|
||||
-- Check that we can successfully delete from multiple shards with 2PC
|
||||
SET citus.multi_shard_commit_protocol TO '2pc';
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 100');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM multi_shard_modify_test;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
21
|
||||
(1 row)
|
||||
|
||||
-- Check that shard pruning works
|
||||
SET client_min_messages TO DEBUG2;
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 15');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
DEBUG: Distributed planning for a fast-path router query
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: query has a single distribution column value: 15
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SET client_min_messages TO NOTICE;
|
||||
-- Check that master_modify_multiple_shards works without partition keys
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_name LIKE ''barce%'' ');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Simple, Single Shard Update
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''warsaw'' WHERE t_key=17');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT t_name FROM multi_shard_modify_test WHERE t_key=17;
|
||||
t_name
|
||||
---------------------------------------------------------------------
|
||||
warsaw
|
||||
(1 row)
|
||||
|
||||
-- Simple, Multi Shard Update
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''???'' WHERE t_key>30 AND t_key<35');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT t_name FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35;
|
||||
t_name
|
||||
---------------------------------------------------------------------
|
||||
???
|
||||
???
|
||||
???
|
||||
???
|
||||
(4 rows)
|
||||
|
||||
-- expression UPDATE
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=8*37 WHERE t_key>30 AND t_key<35');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35;
|
||||
t_value
|
||||
---------------------------------------------------------------------
|
||||
296
|
||||
296
|
||||
296
|
||||
296
|
||||
(4 rows)
|
||||
|
||||
-- multi-column UPDATE
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''somename'', t_value=333 WHERE t_key>30 AND t_key<35');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT t_name, t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35;
|
||||
t_name | t_value
|
||||
---------------------------------------------------------------------
|
||||
somename | 333
|
||||
somename | 333
|
||||
somename | 333
|
||||
somename | 333
|
||||
(4 rows)
|
||||
|
||||
-- commands with no constraints on the partition key are supported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''nice city'' WHERE t_value < 0');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT t_name FROM multi_shard_modify_test WHERE t_value < 0;
|
||||
t_name
|
||||
---------------------------------------------------------------------
|
||||
nice city
|
||||
nice city
|
||||
(2 rows)
|
||||
|
||||
-- attempting to change the partition key is unsupported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_key=3000 WHERE t_key < 10 ');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
ERROR: modifying the partition value of rows is not allowed
|
||||
-- UPDATEs with a FROM clause are supported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL'' FROM temp_nations WHERE multi_shard_modify_test.t_key = 3 AND multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''dummy'' ');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- commands with a RETURNING clause are unsupported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''FAIL'' WHERE t_key=4 RETURNING *');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- commands containing a CTE are unsupported
|
||||
SELECT master_modify_multiple_shards('WITH t AS (INSERT INTO multi_shard_modify_test DEFAULT VALUES RETURNING *) UPDATE multi_shard_modify_test SET t_name = ''FAIL'' ');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
ERROR: cannot perform an INSERT without a partition column value
|
||||
-- updates referencing just a var are supported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=t_key WHERE t_key = 10');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT t_value FROM multi_shard_modify_test WHERE t_key=10;
|
||||
t_value
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
-- updates referencing a column are supported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = t_value + 37 WHERE t_key = 10');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT t_value FROM multi_shard_modify_test WHERE t_key=10;
|
||||
t_value
|
||||
---------------------------------------------------------------------
|
||||
47
|
||||
(1 row)
|
||||
|
||||
CREATE FUNCTION temp_stable_func() RETURNS integer AS 'SELECT 10;' LANGUAGE SQL STABLE;
|
||||
-- updates referencing non-IMMUTABLE functions are unsupported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL!'' WHERE t_key = temp_stable_func()');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- updates referencing IMMUTABLE functions in SET section are supported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = abs(-78) WHERE t_key = 10');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT t_value FROM multi_shard_modify_test WHERE t_key=10;
|
||||
t_value
|
||||
---------------------------------------------------------------------
|
||||
78
|
||||
(1 row)
|
||||
|
||||
-- updates referencing STABLE functions in SET section are supported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = temp_stable_func() * 2 WHERE t_key = 10');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- updates referencing VOLATILE functions in SET section are not supported
|
||||
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = random() WHERE t_key = 10');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
ERROR: functions used in UPDATE queries on distributed tables must not be VOLATILE
|
||||
-- commands with stable functions in their quals are allowed
|
||||
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = temp_stable_func()');
|
||||
WARNING: master_modify_multiple_shards is deprecated and will be removed in a future release.
|
||||
HINT: Run the command directly
|
||||
master_modify_multiple_shards
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SET citus.next_shard_id TO 102046;
|
|
@ -5,7 +5,7 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1410000;
SET citus.shard_count TO 4;
SET client_min_messages TO WARNING;
-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK
-- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then ROLLBACK
CREATE TABLE transactional_drop_shards(column1 int);
SELECT create_distributed_table('transactional_drop_shards', 'column1');
create_distributed_table
@ -67,7 +67,7 @@ ORDER BY
(4 rows)

\c - - - :master_port
-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT
-- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then COMMIT
BEGIN;
DROP TABLE transactional_drop_shards;
COMMIT;
@ -432,7 +432,7 @@ SELECT create_distributed_table('transactional_drop_serial', 'column1');

(1 row)

-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK
-- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then ROLLBACK
BEGIN;
DROP TABLE transactional_drop_serial;
ROLLBACK;
@ -509,7 +509,7 @@ ORDER BY
(0 rows)

\c - - - :master_port
-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT
-- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then COMMIT
BEGIN;
DROP TABLE transactional_drop_serial;
COMMIT;
@ -327,8 +327,8 @@ SELECT * FROM test_local_truncate;
(1 row)

-- Undistribute table
SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate');
master_drop_all_shards
SELECT citus_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate');
citus_drop_all_shards
---------------------------------------------------------------------
4
(1 row)
@ -366,8 +366,8 @@ SELECT * FROM test_local_truncate;
(1 row)

-- Undistribute table
SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate');
master_drop_all_shards
SELECT citus_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate');
citus_drop_all_shards
---------------------------------------------------------------------
4
(1 row)
@ -68,6 +68,23 @@ SELECT * FROM mx_table ORDER BY col_1;

-- Try commands from metadata worker
\c - - - :worker_1_port
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
    replication_factor integer DEFAULT 2)
    RETURNS void
    AS 'citus', $$master_create_worker_shards$$
    LANGUAGE C STRICT;
CREATE TABLE mx_table_worker(col_1 text);
-- master_create_distributed_table
SELECT master_create_distributed_table('mx_table_worker', 'col_1', 'hash');
@ -146,8 +163,8 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_tabl
(3 rows)

\d mx_test_index
-- master_drop_all_shards
SELECT master_drop_all_shards('mx_table'::regclass, 'public', 'mx_table');
-- citus_drop_all_shards
SELECT citus_drop_all_shards('mx_table'::regclass, 'public', 'mx_table');
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass;
@ -147,6 +147,23 @@ CREATE FUNCTION worker_node_responsive(worker_node_name text, worker_node_port i
    RETURNS boolean
    AS 'citus'
    LANGUAGE C STRICT VOLATILE;
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
    replication_factor integer DEFAULT 2)
    RETURNS void
    AS 'citus', $$master_create_worker_shards$$
    LANGUAGE C STRICT;
SET citus.next_shard_id TO 123000;
SELECT worker_node_responsive(node_name, node_port::int)
    FROM master_get_active_worker_nodes()
@ -52,6 +52,23 @@ SELECT create_distributed_table('tr', 'pk');
(1 row)

INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c;
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
    distribution_column text,
    distribution_method citus.distribution_type)
    IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
    replication_factor integer DEFAULT 2)
    RETURNS void
    AS 'citus', $$master_create_worker_shards$$
    LANGUAGE C STRICT;
CREATE TABLE t_append(id int, value_1 int);
SELECT master_create_distributed_table('t_append', 'id', 'append');
master_create_distributed_table
@ -28,10 +28,25 @@ ORDER BY 1;
function authinfo_valid(text)
function broadcast_intermediate_result(text,text)
function check_distributed_deadlocks()
function citus_activate_node(text,integer)
function citus_add_inactive_node(text,integer,integer,noderole,name)
function citus_add_node(text,integer,integer,noderole,name)
function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real)
function citus_add_secondary_node(text,integer,text,integer,name)
function citus_blocking_pids(integer)
function citus_conninfo_cache_invalidate()
function citus_copy_shard_placement(bigint, text, integer, text, integer, boolean, citus.shard_transfer_mode)
function citus_create_restore_point(text)
function citus_disable_node(text, integer)
function citus_dist_local_group_cache_invalidate()
function citus_dist_node_cache_invalidate()
function citus_dist_object_cache_invalidate()
function citus_dist_partition_cache_invalidate()
function citus_dist_placement_cache_invalidate()
function citus_dist_shard_cache_invalidate()
function citus_dist_stat_activity()
function citus_drain_node(text, integer, citus.shard_transfer_mode, name)
function citus_drop_all_shards(regclass,text,text)
function citus_drop_trigger()
function citus_executor_name(integer)
function citus_extradata_container(internal)
@ -50,14 +65,17 @@ ORDER BY 1;
function citus_json_concatenate_final(json)
function citus_jsonb_concatenate(jsonb,jsonb)
function citus_jsonb_concatenate_final(jsonb)
function citus_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
function citus_node_capacity_1(integer)
function citus_prepare_pg_upgrade()
function citus_query_stats()
function citus_relation_size(regclass)
function citus_remote_connection_stats()
function citus_remove_node(text, integer)
function citus_server_id()
function citus_set_coordinator_host(text,integer,noderole,name)
function citus_set_default_rebalance_strategy(text)
function citus_set_node_property(text, integer, text, boolean)
function citus_shard_allowed_on_node_true(bigint,integer)
function citus_shard_cost_1(bigint)
function citus_shard_cost_by_disk_size(bigint)
@ -68,6 +86,10 @@ ORDER BY 1;
function citus_text_send_as_jsonb(text)
function citus_total_relation_size(regclass,boolean)
function citus_truncate_trigger()
function citus_unmark_object_distributed(oid, oid, integer)
function citus_update_node(integer,text,integer,boolean,integer)
function citus_update_shard_statistics(bigint)
function citus_update_table_statistics(regclass)
function citus_validate_rebalance_strategy_functions(regproc,regproc,regproc)
function citus_version()
function citus_worker_stat_activity()
@ -107,26 +129,15 @@ ORDER BY 1;
function master_add_secondary_node(text,integer,text,integer,name)
function master_append_table_to_shard(bigint,text,text,integer)
function master_apply_delete_command(text)
function master_conninfo_cache_invalidate()
function master_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
function master_create_distributed_table(regclass,text,citus.distribution_type)
function master_create_empty_shard(text)
function master_create_worker_shards(text,integer,integer)
function master_disable_node(text,integer)
function master_dist_local_group_cache_invalidate()
function master_dist_node_cache_invalidate()
function master_dist_object_cache_invalidate()
function master_dist_partition_cache_invalidate()
function master_dist_placement_cache_invalidate()
function master_dist_shard_cache_invalidate()
function master_drain_node(text,integer,citus.shard_transfer_mode,name)
function master_drop_all_shards(regclass,text,text)
function master_get_active_worker_nodes()
function master_get_new_placementid()
function master_get_new_shardid()
function master_get_table_ddl_events(text)
function master_get_table_metadata(text)
function master_modify_multiple_shards(text)
function master_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
function master_remove_distributed_table_metadata_from_workers(regclass,text,text)
function master_remove_node(text,integer)
@ -220,5 +231,5 @@ ORDER BY 1;
view citus_worker_stat_activity
view pg_dist_shard_placement
view time_partitions
(204 rows)
(215 rows)
@ -25,10 +25,25 @@ ORDER BY 1;
function authinfo_valid(text)
function broadcast_intermediate_result(text,text)
function check_distributed_deadlocks()
function citus_activate_node(text,integer)
function citus_add_inactive_node(text,integer,integer,noderole,name)
function citus_add_node(text,integer,integer,noderole,name)
function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real)
function citus_add_secondary_node(text,integer,text,integer,name)
function citus_blocking_pids(integer)
function citus_conninfo_cache_invalidate()
function citus_copy_shard_placement(bigint, text, integer, text, integer, boolean, citus.shard_transfer_mode)
function citus_create_restore_point(text)
function citus_disable_node(text, integer)
function citus_dist_local_group_cache_invalidate()
function citus_dist_node_cache_invalidate()
function citus_dist_object_cache_invalidate()
function citus_dist_partition_cache_invalidate()
function citus_dist_placement_cache_invalidate()
function citus_dist_shard_cache_invalidate()
function citus_dist_stat_activity()
function citus_drain_node(text, integer, citus.shard_transfer_mode, name)
function citus_drop_all_shards(regclass,text,text)
function citus_drop_trigger()
function citus_executor_name(integer)
function citus_extradata_container(internal)
@ -47,14 +62,17 @@ ORDER BY 1;
function citus_json_concatenate_final(json)
function citus_jsonb_concatenate(jsonb,jsonb)
function citus_jsonb_concatenate_final(jsonb)
function citus_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
function citus_node_capacity_1(integer)
function citus_prepare_pg_upgrade()
function citus_query_stats()
function citus_relation_size(regclass)
function citus_remote_connection_stats()
function citus_remove_node(text, integer)
function citus_server_id()
function citus_set_coordinator_host(text,integer,noderole,name)
function citus_set_default_rebalance_strategy(text)
function citus_set_node_property(text, integer, text, boolean)
function citus_shard_allowed_on_node_true(bigint,integer)
function citus_shard_cost_1(bigint)
function citus_shard_cost_by_disk_size(bigint)
@ -65,6 +83,10 @@ ORDER BY 1;
function citus_text_send_as_jsonb(text)
function citus_total_relation_size(regclass,boolean)
function citus_truncate_trigger()
function citus_unmark_object_distributed(oid, oid, integer)
function citus_update_node(integer,text,integer,boolean,integer)
function citus_update_shard_statistics(bigint)
function citus_update_table_statistics(regclass)
function citus_validate_rebalance_strategy_functions(regproc,regproc,regproc)
function citus_version()
function citus_worker_stat_activity()
@ -103,26 +125,15 @@ ORDER BY 1;
function master_add_secondary_node(text,integer,text,integer,name)
function master_append_table_to_shard(bigint,text,text,integer)
function master_apply_delete_command(text)
function master_conninfo_cache_invalidate()
function master_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
function master_create_distributed_table(regclass,text,citus.distribution_type)
function master_create_empty_shard(text)
function master_create_worker_shards(text,integer,integer)
function master_disable_node(text,integer)
function master_dist_local_group_cache_invalidate()
function master_dist_node_cache_invalidate()
function master_dist_object_cache_invalidate()
function master_dist_partition_cache_invalidate()
function master_dist_placement_cache_invalidate()
function master_dist_shard_cache_invalidate()
function master_drain_node(text,integer,citus.shard_transfer_mode,name)
function master_drop_all_shards(regclass,text,text)
function master_get_active_worker_nodes()
function master_get_new_placementid()
function master_get_new_shardid()
function master_get_table_ddl_events(text)
function master_get_table_metadata(text)
function master_modify_multiple_shards(text)
function master_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
function master_remove_distributed_table_metadata_from_workers(regclass,text,text)
function master_remove_node(text,integer)
@ -216,5 +227,5 @@ ORDER BY 1;
view citus_worker_stat_activity
view pg_dist_shard_placement
view time_partitions
(200 rows)
(211 rows)
@ -167,7 +167,6 @@ test: with_executors with_join with_partitioning with_transactions with_dml
# ----------
test: multi_load_large_records
test: multi_master_delete_protocol
test: multi_shard_modify

# ----------
# Tests around DDL statements run on distributed tables
@ -42,7 +42,7 @@ step "s1-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; }
step "s1-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; }
step "s1-table-size" { SELECT citus_total_relation_size('append_copy'); }
step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM append_copy WHERE id <= 4;'); }
step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
step "s1-master-drop-all-shards" { SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
step "s1-create-non-distributed-table" { CREATE TABLE append_copy(id integer, data text, int_data int); }
step "s1-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); }
step "s1-select-count" { SELECT COUNT(*) FROM append_copy; }
@ -74,7 +74,7 @@ step "s2-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; }
step "s2-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; }
step "s2-table-size" { SELECT citus_total_relation_size('append_copy'); }
step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM append_copy WHERE id <= 4;'); }
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); }

// permutations - COPY vs COPY
@ -1,5 +1,21 @@
setup
{
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
CREATE TABLE test_dml_vs_repair (test_id integer NOT NULL, data int);
SELECT master_create_distributed_table('test_dml_vs_repair', 'test_id', 'hash');
SELECT master_create_worker_shards('test_dml_vs_repair', 1, 2);
@ -32,7 +32,7 @@ step "s1-apply-delete-command"

step "s1-drop-all-shards"
{
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
}

step "s1-commit"
@ -54,7 +54,7 @@ step "s2-apply-delete-command"

step "s2-drop-all-shards"
{
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
SELECT citus_drop_all_shards('append_table', 'public', 'append_table');
}

step "s2-select"
@ -42,7 +42,7 @@ step "s1-ddl-drop-column" { ALTER TABLE hash_copy DROP new_column; }
step "s1-ddl-rename-column" { ALTER TABLE hash_copy RENAME data TO new_column; }
step "s1-table-size" { SELECT citus_total_relation_size('hash_copy'); }
step "s1-master-modify-multiple-shards" { DELETE FROM hash_copy; }
step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); }
step "s1-master-drop-all-shards" { SELECT citus_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); }
step "s1-create-non-distributed-table" { CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-distribute-table" { SELECT create_distributed_table('hash_copy', 'id'); }
step "s1-select-count" { SELECT COUNT(*) FROM hash_copy; }
@ -81,7 +81,7 @@ step "s2-ddl-drop-column" { ALTER TABLE hash_copy DROP new_column; }
step "s2-ddl-rename-column" { ALTER TABLE hash_copy RENAME data TO new_column; }
step "s2-table-size" { SELECT citus_total_relation_size('hash_copy'); }
step "s2-master-modify-multiple-shards" { DELETE FROM hash_copy; }
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); }
step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('hash_copy', 'id'); }

// permutations - COPY vs COPY
@ -41,7 +41,7 @@ step "s1-ddl-drop-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash
step "s1-ddl-rename-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash RENAME data TO new_column; }
step "s1-table-size-on-inserted" { SELECT citus_total_relation_size('insert_of_insert_select_hash'); }
step "s1-master-modify-multiple-shards-on-inserted" { DELETE FROM insert_of_insert_select_hash; }
step "s1-master-drop-all-shards-on-inserted" { SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); }
step "s1-master-drop-all-shards-on-inserted" { SELECT citus_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); }
step "s1-create-non-distributed-table-on-inserted" { CREATE TABLE insert_of_insert_select_hash(id integer, data text); }
step "s1-distribute-table-on-inserted" { SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); }
step "s1-show-indexes-inserted" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); }
@ -57,7 +57,7 @@ step "s1-ddl-drop-column-on-selected" { ALTER TABLE select_of_insert_select_hash
step "s1-ddl-rename-column-on-selected" { ALTER TABLE select_of_insert_select_hash RENAME data TO new_column; }
step "s1-table-size-on-selected" { SELECT citus_total_relation_size('select_of_insert_select_hash'); }
step "s1-master-modify-multiple-shards-on-selected" { DELETE FROM select_of_insert_select_hash; }
step "s1-master-drop-all-shards-on-selected" { SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); }
step "s1-master-drop-all-shards-on-selected" { SELECT citus_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); }
step "s1-create-non-distributed-table-on-selected" { CREATE TABLE select_of_insert_select_hash(id integer, data text); }
step "s1-distribute-table-on-selected" { SELECT create_distributed_table('select_of_insert_select_hash', 'id'); }
step "s1-show-indexes-selected" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); }
@ -80,7 +80,7 @@ step "s2-ddl-drop-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash
step "s2-ddl-rename-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash RENAME data TO new_column; }
step "s2-table-size-on-inserted" { SELECT citus_total_relation_size('insert_of_insert_select_hash'); }
step "s2-master-modify-multiple-shards-on-inserted" { DELETE FROM insert_of_insert_select_hash; }
step "s2-master-drop-all-shards-on-inserted" { SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); }
step "s2-master-drop-all-shards-on-inserted" { SELECT citus_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); }
step "s2-distribute-table-on-inserted" { SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); }
step "s2-update-on-selected" { UPDATE select_of_insert_select_hash SET data = 'l' WHERE id = 4; }
step "s2-delete-on-selected" { DELETE FROM select_of_insert_select_hash WHERE id = 4; }
@ -94,7 +94,7 @@ step "s2-ddl-drop-column-on-selected" { ALTER TABLE select_of_insert_select_hash
step "s2-ddl-rename-column-on-selected" { ALTER TABLE select_of_insert_select_hash RENAME data TO new_column; }
step "s2-table-size-on-selected" { SELECT citus_total_relation_size('select_of_insert_select_hash'); }
step "s2-master-modify-multiple-shards-on-selected" { DELETE FROM select_of_insert_select_hash; }
step "s2-master-drop-all-shards-on-selected" { SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); }
step "s2-master-drop-all-shards-on-selected" { SELECT citus_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); }
step "s2-distribute-table-on-selected" { SELECT create_distributed_table('select_of_insert_select_hash', 'id'); }

// permutations - INSERT/SELECT vs INSERT/SELECT
@ -43,7 +43,7 @@ step "s1-ddl-drop-column" { ALTER TABLE partitioned_copy DROP new_column; }
step "s1-ddl-rename-column" { ALTER TABLE partitioned_copy RENAME data TO new_column; }
step "s1-table-size" { SELECT citus_total_relation_size('partitioned_copy'); }
step "s1-master-modify-multiple-shards" { DELETE FROM partitioned_copy; }
step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); }
step "s1-master-drop-all-shards" { SELECT citus_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); }
step "s1-create-non-distributed-table" { CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-distribute-table" { SELECT create_distributed_table('partitioned_copy', 'id'); }
step "s1-select-count" { SELECT COUNT(*) FROM partitioned_copy; }
@ -71,7 +71,7 @@ step "s2-ddl-drop-column" { ALTER TABLE partitioned_copy DROP new_column; }
step "s2-ddl-rename-column" { ALTER TABLE partitioned_copy RENAME data TO new_column; }
step "s2-table-size" { SELECT citus_total_relation_size('partitioned_copy'); }
step "s2-master-modify-multiple-shards" { DELETE FROM partitioned_copy; }
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); }
step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('partitioned_copy', 'id'); }

// permutations - COPY vs COPY
@ -43,7 +43,7 @@ step "s1-ddl-rename-column" { ALTER TABLE range_copy RENAME data TO new_column;
step "s1-table-size" { SELECT citus_total_relation_size('range_copy'); }
step "s1-master-modify-multiple-shards" { DELETE FROM range_copy; }
step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM range_copy WHERE id <= 4;'); }
step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); }
step "s1-master-drop-all-shards" { SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); }
step "s1-create-non-distributed-table" { CREATE TABLE range_copy(id integer, data text, int_data int); }
step "s1-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); }
step "s1-select-count" { SELECT COUNT(*) FROM range_copy; }
@ -77,7 +77,7 @@ step "s2-ddl-rename-column" { ALTER TABLE range_copy RENAME data TO new_column;
step "s2-table-size" { SELECT citus_total_relation_size('range_copy'); }
step "s2-master-modify-multiple-shards" { DELETE FROM range_copy; }
step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM range_copy WHERE id <= 4;'); }
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); }
step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); }

// permutations - COPY vs COPY
@ -50,7 +50,7 @@ step "s1-ddl-rename-column" { ALTER TABLE select_append RENAME data TO new_colum
step "s1-table-size" { SELECT citus_total_relation_size('select_append'); }
step "s1-master-modify-multiple-shards" { DELETE FROM select_append; }
step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;'); }
step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); }
step "s1-master-drop-all-shards" { SELECT citus_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); }
step "s1-create-non-distributed-table" { CREATE TABLE select_append(id integer, data text, int_data int); }
step "s1-distribute-table" { SELECT create_distributed_table('select_append', 'id', 'append'); }
step "s1-select-count" { SELECT COUNT(*) FROM select_append; }
@ -82,7 +82,7 @@ step "s2-ddl-rename-column" { ALTER TABLE select_append RENAME data TO new_colum
step "s2-table-size" { SELECT citus_total_relation_size('select_append'); }
step "s2-master-modify-multiple-shards" { DELETE FROM select_append; }
step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;'); }
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); }
step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('select_append', 'id', 'append'); }

// permutations - SELECT vs SELECT
@ -35,7 +35,7 @@ step "s1-ddl-rename-column" { ALTER TABLE truncate_append RENAME data TO new_col
step "s1-table-size" { SELECT citus_total_relation_size('truncate_append'); }
step "s1-master-modify-multiple-shards" { DELETE FROM truncate_append; }
step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM truncate_append WHERE id <= 4;'); }
step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); }
step "s1-master-drop-all-shards" { SELECT citus_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); }
step "s1-create-non-distributed-table" { CREATE TABLE truncate_append(id integer, data text); }
step "s1-distribute-table" { SELECT create_distributed_table('truncate_append', 'id', 'append'); }
step "s1-select-count" { SELECT COUNT(*) FROM truncate_append; }
@ -57,7 +57,7 @@ step "s2-ddl-rename-column" { ALTER TABLE truncate_append RENAME data TO new_col
step "s2-table-size" { SELECT citus_total_relation_size('truncate_append'); }
step "s2-master-modify-multiple-shards" { DELETE FROM truncate_append; }
step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM truncate_append WHERE id <= 4;'); }
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); }
step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); }
step "s2-distribute-table" { SELECT create_distributed_table('truncate_append', 'id', 'append'); }
step "s2-commit" { COMMIT; }
@ -222,6 +222,25 @@ DROP TABLE test_table;
DROP SCHEMA failure_create_table;
CREATE SCHEMA failure_create_table;

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;

-- Test master_create_worker_shards with 2pc
SET citus.multi_shard_commit_protocol TO "2pc";
CREATE TABLE test_table_2(id int, value_1 int);
@ -33,7 +33,7 @@ SELECT master_get_active_worker_nodes();
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;

SELECT * FROM master_activate_node('localhost', :worker_2_port);
SELECT * FROM citus_activate_node('localhost', :worker_2_port);
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
@ -15,11 +15,17 @@ SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
-- change this test every time the previous tests change the set of tables they leave
-- around.
SET client_min_messages TO 'WARNING';
DROP FUNCTION pg_catalog.master_create_worker_shards;
DROP EXTENSION citus CASCADE;
RESET client_min_messages;

CREATE EXTENSION citus;

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
-- re-add the nodes to the cluster
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
@ -32,6 +32,25 @@ SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s';
-- pg_dist_node entries and reference tables
SELECT unnest(master_metadata_snapshot()) order by 1;

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;

-- Create a test table with constraints and SERIAL
CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL);
SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash');
@ -154,8 +154,8 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schema
-- shard also does not exist since we create shards in a transaction
SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and schemaname = 'citus_mx_schema_for_xacts';

-- make sure that master_drop_all_shards does not work from the worker nodes
SELECT master_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts');
-- make sure that citus_drop_all_shards does not work from the worker nodes
SELECT citus_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts');

-- Ensure pg_dist_transaction is empty for test
SELECT recover_prepared_transactions();
@ -205,7 +205,7 @@ SELECT raise_failed_aclcheck($$
$$);

SELECT raise_failed_aclcheck($$
SELECT master_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
SELECT citus_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
$$);
SELECT raise_failed_aclcheck($$
SELECT master_remove_partition_metadata('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
@ -245,7 +245,7 @@ SELECT raise_failed_aclcheck($$
SELECT master_drop_sequences(ARRAY['public.distributed_mx_table_some_val_seq']);
$$);

SELECT master_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
SELECT citus_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');
SELECT master_remove_partition_metadata('distributed_mx_table'::regclass, 'public', 'distributed_mx_table');

-- make sure that we can drop unrelated tables/sequences
@ -7,6 +7,25 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000;
SET citus.multi_shard_commit_protocol = '2pc';
SET citus.shard_count TO 2;

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;

-- Verify that a table name > 56 characters gets hashed properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
@ -1,158 +0,0 @@
--
-- MULTI_SHARD_MODIFY
--

SET citus.next_shard_id TO 350000;

-- Create a new hash partitioned multi_shard_modify_test table and load data into it.
CREATE TABLE multi_shard_modify_test (
t_key integer not null,
t_name varchar(25) not null,
t_value integer not null);
SELECT create_distributed_table('multi_shard_modify_test', 't_key', 'hash');

COPY multi_shard_modify_test (t_key, t_name, t_value) FROM STDIN WITH (FORMAT 'csv');
1,san francisco,99
2,istanbul,34
3,paris,46
4,london,91
5,toronto,98
6,london,44
7,stockholm,21
8,tallinn,33
9,helsinki,21
10,ankara,6
11,karabuk,78
12,kastamonu,37
13,samsun,55
14,rome,13
15,madrid,1
16,barcelona,8
17,poznan,12
31,kabul,4
32,dhaka,62
33,iamey,121
34,muscat,77
41,uppsala,-1
42,malmo,-2
101,tokyo,106
102,new delhi,978
201,taipei,556
202,beijing,754
\.

-- Testing master_modify_multiple_shards

-- Verify that master_modify_multiple_shards can be rolled back
BEGIN;
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 10 AND t_key <= 13');
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 202');
ROLLBACK;

SELECT count(*) FROM multi_shard_modify_test;

-- commands with volatile functions in their quals
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (random() * 1000)');
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_value = (random() * 1000)');

-- commands with immutable functions in their quals
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = abs(-3)');

-- DELETE with expression in WHERE clause
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (3*18-40)');

-- commands with a USING a non distributed table error out
CREATE TABLE temp_nations(name text, key integer);
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' ');

-- commands with a USING clause are unsupported
SELECT create_distributed_table('temp_nations', 'name', 'hash');
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' ');

-- commands with a RETURNING clause are unsupported
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 3 RETURNING *');

-- commands containing a CTE are unsupported
SELECT master_modify_multiple_shards('WITH deleted_stuff AS (INSERT INTO multi_shard_modify_test DEFAULT VALUES RETURNING *) DELETE FROM multi_shard_modify_test');

-- Check that we can successfully delete from multiple shards with 1PC
SET citus.multi_shard_commit_protocol TO '1pc';
SELECT count(*) FROM multi_shard_modify_test;
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 200');
SELECT count(*) FROM multi_shard_modify_test;

-- Check that we can successfully delete from multiple shards with 2PC
SET citus.multi_shard_commit_protocol TO '2pc';
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 100');
SELECT count(*) FROM multi_shard_modify_test;

-- Check that shard pruning works
SET client_min_messages TO DEBUG2;
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 15');
SET client_min_messages TO NOTICE;

-- Check that master_modify_multiple_shards works without partition keys
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_name LIKE ''barce%'' ');

-- Simple, Single Shard Update
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''warsaw'' WHERE t_key=17');
SELECT t_name FROM multi_shard_modify_test WHERE t_key=17;

-- Simple, Multi Shard Update
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''???'' WHERE t_key>30 AND t_key<35');
SELECT t_name FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35;

-- expression UPDATE
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=8*37 WHERE t_key>30 AND t_key<35');
SELECT t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35;

-- multi-column UPDATE
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''somename'', t_value=333 WHERE t_key>30 AND t_key<35');
SELECT t_name, t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35;

-- commands with no constraints on the partition key are supported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''nice city'' WHERE t_value < 0');
SELECT t_name FROM multi_shard_modify_test WHERE t_value < 0;

-- attempting to change the partition key is unsupported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_key=3000 WHERE t_key < 10 ');

-- UPDATEs with a FROM clause are supported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL'' FROM temp_nations WHERE multi_shard_modify_test.t_key = 3 AND multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''dummy'' ');

-- commands with a RETURNING clause are unsupported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''FAIL'' WHERE t_key=4 RETURNING *');

-- commands containing a CTE are unsupported
SELECT master_modify_multiple_shards('WITH t AS (INSERT INTO multi_shard_modify_test DEFAULT VALUES RETURNING *) UPDATE multi_shard_modify_test SET t_name = ''FAIL'' ');

-- updates referencing just a var are supported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=t_key WHERE t_key = 10');
SELECT t_value FROM multi_shard_modify_test WHERE t_key=10;

-- updates referencing a column are supported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = t_value + 37 WHERE t_key = 10');
SELECT t_value FROM multi_shard_modify_test WHERE t_key=10;

CREATE FUNCTION temp_stable_func() RETURNS integer AS 'SELECT 10;' LANGUAGE SQL STABLE;

-- updates referencing non-IMMUTABLE functions are unsupported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL!'' WHERE t_key = temp_stable_func()');

-- updates referencing IMMUTABLE functions in SET section are supported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = abs(-78) WHERE t_key = 10');
SELECT t_value FROM multi_shard_modify_test WHERE t_key=10;

-- updates referencing STABLE functions in SET section are supported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = temp_stable_func() * 2 WHERE t_key = 10');

-- updates referencing VOLATILE functions in SET section are not supported
SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = random() WHERE t_key = 10');

-- commands with stable functions in their quals are allowed
SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = temp_stable_func()');

SET citus.next_shard_id TO 102046;
@ -9,7 +9,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1410000;
SET citus.shard_count TO 4;
SET client_min_messages TO WARNING;

-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK
-- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then ROLLBACK
CREATE TABLE transactional_drop_shards(column1 int);
SELECT create_distributed_table('transactional_drop_shards', 'column1');
@ -37,7 +37,7 @@ ORDER BY
\c - - - :master_port

-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT
-- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then COMMIT
BEGIN;
DROP TABLE transactional_drop_shards;
COMMIT;
@ -251,7 +251,7 @@ SET citus.shard_count TO 8;
CREATE TABLE transactional_drop_serial(column1 int, column2 SERIAL);
SELECT create_distributed_table('transactional_drop_serial', 'column1');

-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK
-- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then ROLLBACK
BEGIN;
DROP TABLE transactional_drop_serial;
ROLLBACK;
@ -277,7 +277,7 @@ ORDER BY
\c - - - :master_port

-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT
-- test DROP TABLE(ergo citus_drop_all_shards) in transaction, then COMMIT
BEGIN;
DROP TABLE transactional_drop_serial;
COMMIT;
@ -205,7 +205,7 @@ COMMIT;
SELECT * FROM test_local_truncate;

-- Undistribute table
SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate');
SELECT citus_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate');
DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass;

-- Ensure local data is truncated
@ -227,7 +227,7 @@ ROLLBACK;
SELECT * FROM test_local_truncate;

-- Undistribute table
SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate');
SELECT citus_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate');
DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass;

-- Ensure local data is not truncated
@ -53,6 +53,25 @@ SELECT * FROM mx_table ORDER BY col_1;
-- Try commands from metadata worker
\c - - - :worker_1_port

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;

CREATE TABLE mx_table_worker(col_1 text);

-- master_create_distributed_table
@ -96,8 +115,8 @@ ALTER TABLE mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col_1) REFERE
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass;
\d mx_test_index

-- master_drop_all_shards
SELECT master_drop_all_shards('mx_table'::regclass, 'public', 'mx_table');
-- citus_drop_all_shards
SELECT citus_drop_all_shards('mx_table'::regclass, 'public', 'mx_table');
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass;

-- master_apply_delete_command
@ -84,6 +84,25 @@ RETURNS boolean
AS 'citus'
LANGUAGE C STRICT VOLATILE;

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;

SET citus.next_shard_id TO 123000;

SELECT worker_node_responsive(node_name, node_port::int)
@ -33,7 +33,23 @@ INSERT INTO r SELECT * FROM generate_series(1, 5);
CREATE TABLE tr(pk int, a int REFERENCES r(a) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_distributed_table('tr', 'pk');
INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c;

-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
CREATE TABLE t_append(id int, value_1 int);
SELECT master_create_distributed_table('t_append', 'id', 'append');