mirror of https://github.com/citusdata/citus.git
Hide shard names on MX worker nodes
This commit enables hiding shard names on MX worker nodes by default, simply by replacing pg_table_is_visible() calls with citus_table_is_visible() calls on the MX worker nodes. The latter function filters out tables that are known to be shards. The main motivation for this change is a better user experience; the behavior can be disabled via a GUC. We also added two views, citus_shards_on_worker and citus_shard_indexes_on_worker, so that users can query them to see the shards and their corresponding indexes. Finally, we added debug messages so that the filtered tables can be seen interactively by setting the level to DEBUG1.
pull/2323/head
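For illustration only, a minimal sketch of the intended behavior when connected to an MX worker that holds shards of a distributed table (the shard name test_table_1130000 is taken from the regression test added below; substitute your own shard ids):

-- psql's \d no longer lists shards, only the distributed tables themselves
\d
-- the hidden shards and their indexes can still be inspected explicitly
SELECT * FROM citus_shards_on_worker;
SELECT * FROM citus_shard_indexes_on_worker;
-- shards remain directly queryable when addressed by name
SELECT count(*) FROM test_table_1130000;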
parent e13da6a343
commit 974cbf11a5

Makefile
@@ -106,6 +106,7 @@ OBJS = src/backend/distributed/shared_library_init.o \
	src/backend/distributed/utils/ruleutils_96.o \
	src/backend/distributed/utils/shardinterval_utils.o \
	src/backend/distributed/utils/statistics_collection.o \
	src/backend/distributed/worker/worker_shard_visibility.o \
	src/backend/distributed/worker/task_tracker.o \
	src/backend/distributed/worker/task_tracker_protocol.o \
	src/backend/distributed/worker/worker_data_fetch_protocol.o \
@@ -1,6 +1,6 @@
# Citus extension
comment = 'Citus distributed database'
default_version = '8.0-1'
default_version = '8.0-2'
module_pathname = '$libdir/citus'
relocatable = false
schema = pg_catalog
@@ -17,7 +17,7 @@ EXTVERSIONS = 5.0 5.0-1 5.0-2 \
	7.3-1 7.3-2 7.3-3 \
	7.4-1 7.4-2 7.4-3 \
	7.5-1 7.5-2 7.5-3 7.5-4 7.5-5 7.5-6 7.5-7 \
	8.0-1
	8.0-1 8.0-2

# All citus--*.sql files in the source directory
DATA = $(patsubst $(citus_abs_srcdir)/%.sql,%.sql,$(wildcard $(citus_abs_srcdir)/$(EXTENSION)--*--*.sql))
@@ -217,6 +217,8 @@ $(EXTENSION)--7.5-7.sql: $(EXTENSION)--7.5-6.sql $(EXTENSION)--7.5-6--7.5-7.sql
	cat $^ > $@
$(EXTENSION)--8.0-1.sql: $(EXTENSION)--7.5-7.sql $(EXTENSION)--7.5-7--8.0-1.sql
	cat $^ > $@
$(EXTENSION)--8.0-2.sql: $(EXTENSION)--8.0-1.sql $(EXTENSION)--8.0-1--8.0-2.sql
	cat $^ > $@

NO_PGXS = 1

@@ -0,0 +1,62 @@
/* citus--7.5-7--8.0-1 */
SET search_path = 'pg_catalog';

CREATE OR REPLACE FUNCTION pg_catalog.relation_is_a_known_shard(regclass)
    RETURNS bool
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$relation_is_a_known_shard$$;
COMMENT ON FUNCTION relation_is_a_known_shard(regclass)
    IS 'returns true if the given relation is a known shard';

CREATE OR REPLACE FUNCTION pg_catalog.citus_table_is_visible(oid)
    RETURNS bool
    LANGUAGE C STRICT
    STABLE
    PARALLEL SAFE
    AS 'MODULE_PATHNAME', $$citus_table_is_visible$$;
COMMENT ON FUNCTION citus_table_is_visible(oid)
    IS 'wrapper on pg_table_is_visible, filtering out tables (and indexes) that are known to be shards';

-- this is the exact same query with what \d
-- command produces, except pg_table_is_visible
-- is replaced with pg_catalog.relation_is_a_known_shard(c.oid)
CREATE VIEW citus.citus_shards_on_worker AS
	SELECT n.nspname as "Schema",
	  c.relname as "Name",
	  CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
	  pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
	FROM pg_catalog.pg_class c
	     LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
	WHERE c.relkind IN ('r','p','v','m','S','f','')
	      AND n.nspname <> 'pg_catalog'
	      AND n.nspname <> 'information_schema'
	      AND n.nspname !~ '^pg_toast'
	      AND pg_catalog.relation_is_a_known_shard(c.oid)
	ORDER BY 1,2;
ALTER VIEW citus.citus_shards_on_worker SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_shards_on_worker TO public;

-- this is the exact same query with what \di
-- command produces, except pg_table_is_visible
-- is replaced with pg_catalog.relation_is_a_known_shard(c.oid)
CREATE VIEW citus.citus_shard_indexes_on_worker AS
	SELECT n.nspname as "Schema",
	  c.relname as "Name",
	  CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
	  pg_catalog.pg_get_userbyid(c.relowner) as "Owner",
	  c2.relname as "Table"
	FROM pg_catalog.pg_class c
	     LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
	     LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid
	     LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid
	WHERE c.relkind IN ('i','')
	      AND n.nspname <> 'pg_catalog'
	      AND n.nspname <> 'information_schema'
	      AND n.nspname !~ '^pg_toast'
	      AND pg_catalog.relation_is_a_known_shard(c.oid)
	ORDER BY 1,2;

ALTER VIEW citus.citus_shard_indexes_on_worker SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_shard_indexes_on_worker TO public;

RESET search_path;
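As a quick sanity check of the objects created above (a sketch, not part of the commit; it assumes a worker node where a shard named test_table_1130000 is in the search_path):

SELECT pg_catalog.relation_is_a_known_shard('test_table_1130000'::regclass);  -- expected: t
SELECT pg_catalog.citus_table_is_visible('test_table_1130000'::regclass);     -- expected: f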
@@ -1,6 +1,6 @@
# Citus extension
comment = 'Citus distributed database'
default_version = '8.0-1'
default_version = '8.0-2'
module_pathname = '$libdir/citus'
relocatable = false
schema = pg_catalog
@@ -28,6 +28,7 @@
#include "distributed/multi_master_planner.h"
#include "distributed/multi_router_planner.h"
#include "distributed/recursive_planning.h"
#include "distributed/worker_shard_visibility.h"
#include "executor/executor.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
@@ -129,6 +130,12 @@ distributed_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
		AdjustPartitioningForDistributedPlanning(parse, setPartitionedTablesInherited);
	}

	/*
	 * Make sure that we hide shard names on the Citus MX worker nodes. See comments in
	 * ReplaceTableVisibleFunction() for the details.
	 */
	parse = (Query *) ReplaceTableVisibleFunction((Node *) parse);

	/* create a restriction context and put it at the end if context list */
	plannerRestrictionContext = CreateAndPushPlannerRestrictionContext();

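The effect of this hook is easiest to describe at the SQL level. Conceptually, on a worker with citus.override_table_visibility enabled, a catalog query such as the one psql's \d issues:

SELECT c.relname FROM pg_catalog.pg_class c WHERE pg_catalog.pg_table_is_visible(c.oid);
-- is planned as if it had been written as
SELECT c.relname FROM pg_catalog.pg_class c WHERE pg_catalog.citus_table_is_visible(c.oid);

so shards drop out of the result without psql itself changing.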
@@ -50,6 +50,7 @@
#include "distributed/transaction_recovery.h"
#include "distributed/worker_manager.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_shard_visibility.h"
#include "postmaster/postmaster.h"
#include "optimizer/planner.h"
#include "optimizer/paths.h"
@@ -379,6 +380,19 @@ RegisterCitusConfigVariables(void)
		GUC_NO_SHOW_ALL,
		NULL, NULL, NULL);

	DefineCustomBoolVariable(
		"citus.override_table_visibility",
		gettext_noop("Enables replacing occurencens of pg_catalog.pg_table_visible() "
					 "with pg_catalog.citus_table_visible()"),
		gettext_noop("When enabled, shards on the Citus MX worker (data) nodes would be "
					 "filtered out by many psql commands to provide better user "
					 "experience."),
		&OverrideTableVisibility,
		true,
		PGC_USERSET,
		GUC_NO_SHOW_ALL,
		NULL, NULL, NULL);

	DefineCustomBoolVariable(
		"citus.enforce_foreign_key_restrictions",
		gettext_noop("Enforce restrictions while querying distributed/reference "
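A short sketch of the opt-out, mirroring the regression test added below (the shard name is only an example):

SELECT pg_table_is_visible('test_table_1130000'::regclass);  -- f: the call is rewritten to citus_table_is_visible
SET citus.override_table_visibility TO false;
SELECT pg_table_is_visible('test_table_1130000'::regclass);  -- t: the rewrite is skipped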
@@ -134,6 +134,8 @@ typedef struct MetadataCacheData
	Oid primaryNodeRoleId;
	Oid secondaryNodeRoleId;
	Oid unavailableNodeRoleId;
	Oid pgTableIsVisibleFuncId;
	Oid citusTableIsVisibleFuncId;
} MetadataCacheData;


@@ -198,7 +200,6 @@ static void InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId);
static void InvalidateLocalGroupIdRelationCacheCallback(Datum argument, Oid relationId);
static HeapTuple LookupDistPartitionTuple(Relation pgDistPartition, Oid relationId);
static List * LookupDistShardTuples(Oid relationId);
static Oid LookupShardRelation(int64 shardId);
static void GetPartitionTypeInputInfo(char *partitionKeyString, char partitionMethod,
									  Oid *columnTypeId, int32 *columnTypeMod,
									  Oid *intervalTypeId, int32 *intervalTypeMod);
@@ -706,7 +707,7 @@ LookupShardCacheEntry(int64 shardId)
		 * know that the shard has to be in the cache if it exists. If the
		 * shard does *not* exist LookupShardRelation() will error out.
		 */
		Oid relationId = LookupShardRelation(shardId);
		Oid relationId = LookupShardRelation(shardId, false);

		/* trigger building the cache for the shard id */
		LookupDistTableCacheEntry(relationId);
@@ -724,7 +725,7 @@ LookupShardCacheEntry(int64 shardId)
	if (!shardEntry->tableEntry->isValid)
	{
		Oid oldRelationId = shardEntry->tableEntry->relationId;
		Oid currentRelationId = LookupShardRelation(shardId);
		Oid currentRelationId = LookupShardRelation(shardId, false);

		/*
		 * The relation OID to which the shard belongs could have changed,
@@ -2051,6 +2052,42 @@ CitusTextSendAsJsonbFunctionId(void)
}


/*
 * PgTableVisibleFuncId returns oid of the pg_table_is_visible function.
 */
Oid
PgTableVisibleFuncId(void)
{
	if (MetadataCache.pgTableIsVisibleFuncId == InvalidOid)
	{
		const int argCount = 1;

		MetadataCache.pgTableIsVisibleFuncId =
			FunctionOid("pg_catalog", "pg_table_is_visible", argCount);
	}

	return MetadataCache.pgTableIsVisibleFuncId;
}


/*
 * CitusTableVisibleFuncId returns oid of the citus_table_is_visible function.
 */
Oid
CitusTableVisibleFuncId(void)
{
	if (MetadataCache.citusTableIsVisibleFuncId == InvalidOid)
	{
		const int argCount = 1;

		MetadataCache.citusTableIsVisibleFuncId =
			FunctionOid("pg_catalog", "citus_table_is_visible", argCount);
	}

	return MetadataCache.citusTableIsVisibleFuncId;
}


/*
 * CitusExtensionOwner() returns the owner of the 'citus' extension. That user
 * is, amongst others, used to perform actions a normal user might not be
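For reference, a rough SQL-level equivalent of what these cached lookups resolve; the C code goes through FunctionOid() and caches the result in MetadataCache:

SELECT 'pg_catalog.pg_table_is_visible(oid)'::regprocedure::oid;
SELECT 'pg_catalog.citus_table_is_visible(oid)'::regprocedure::oid;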
@@ -3263,10 +3300,11 @@ LookupDistShardTuples(Oid relationId)
/*
 * LookupShardRelation returns the logical relation oid a shard belongs to.
 *
 * Errors out if the shardId does not exist.
 * Errors out if the shardId does not exist and missingOk is false. Returns
 * InvalidOid if the shardId does not exist and missingOk is true.
 */
static Oid
LookupShardRelation(int64 shardId)
Oid
LookupShardRelation(int64 shardId, bool missingOk)
{
	SysScanDesc scanDescriptor = NULL;
	ScanKeyData scanKey[1];
@@ -3284,14 +3322,21 @@ LookupShardRelation(int64 shardId)
									NULL, scanKeyCount, scanKey);

	heapTuple = systable_getnext(scanDescriptor);
	if (!HeapTupleIsValid(heapTuple))
	if (!HeapTupleIsValid(heapTuple) && !missingOk)
	{
		ereport(ERROR, (errmsg("could not find valid entry for shard "
							   UINT64_FORMAT, shardId)));
	}

	shardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple);
	relationId = shardForm->logicalrelid;
	if (!HeapTupleIsValid(heapTuple))
	{
		relationId = InvalidOid;
	}
	else
	{
		shardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple);
		relationId = shardForm->logicalrelid;
	}

	systable_endscan(scanDescriptor);
	heap_close(pgDistShard, NoLock);
@@ -526,12 +526,12 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS)


/*
 * ExtractShardId tries to extract shard id from the given table name,
 * ExtractShardIdFromTableName tries to extract shard id from the given table name,
 * and returns the shard id if table name is formatted as shard name.
 * Else, the function returns INVALID_SHARD_ID.
 */
uint64
ExtractShardId(const char *tableName, bool missingOk)
ExtractShardIdFromTableName(const char *tableName, bool missingOk)
{
	uint64 shardId = 0;
	char *shardIdString = NULL;
@@ -776,7 +776,7 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS)
	 * the transaction for this function commits, this lock will automatically
	 * be released. This ensures appends to a shard happen in a serial manner.
	 */
	shardId = ExtractShardId(shardTableName, false);
	shardId = ExtractShardIdFromTableName(shardTableName, false);
	LockShardResource(shardId, AccessExclusiveLock);

	/* copy remote table's data to this node */
@@ -1,57 +1,117 @@
/*
 * worker_shard_visibility.c
 *
 * TODO: Write some meaningful comment
 * Implements the functions for hiding shards on the Citus MX
 * worker (data) nodes.
 *
 * Copyright (c) 2012-2018, Citus Data, Inc.
 */

#include "postgres.h"

#include "catalog/index.h"
#include "catalog/namespace.h"
#include "catalog/pg_class.h"
#include "distributed/metadata_cache.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_shard_visibility.h"
#include "nodes/nodeFuncs.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"


/* Config variable managed via guc.c */
bool OverrideTableVisibility = true;

static bool RelationIsAKnownShard(Oid shardRelationId);
static Node * ReplaceTableVisibleFunctionWalker(Node *inputNode);

PG_FUNCTION_INFO_V1(citus_table_is_visible);
PG_FUNCTION_INFO_V1(relation_is_a_known_shard);


static bool RelationIsAKnownShard(Oid shardRelationId);


/*
 * relation_is_a_known_shard a wrapper around RelationIsAKnownShard(), so
 * see the details there. The function also treats the indexes on shards
 * as if they were shards.
 */
Datum
relation_is_a_known_shard(PG_FUNCTION_ARGS)
{
	Oid relationId = PG_GETARG_OID(0);

	CheckCitusVersion(ERROR);

	PG_RETURN_BOOL(RelationIsAKnownShard(relationId));
}


/*
 * Given a relationId, check whether it's a shard of any distributed table.
 * We can only do that in MX since we've metadata there. We can actually
 * implement for non-mx as well, but, there is currently no need for that.
 *
 * TODO: improve the comment
 * TODO: Make sure that we're not missing any edge cases with our
 * implementation
 * citus_table_is_visible aims to behave exactly the same with
 * pg_table_is_visible with only one exception. The former one
 * returns false for the relations that are known to be shards.
 */
bool
Datum
citus_table_is_visible(PG_FUNCTION_ARGS)
{
	Oid relationId = PG_GETARG_OID(0);
	char relKind = '\0';

	CheckCitusVersion(ERROR);

	/*
	 * We don't want to deal with not valid/existing relations
	 * as pg_table_is_visible does.
	 */
	if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relationId)))
	{
		PG_RETURN_NULL();
	}

	if (RelationIsAKnownShard(relationId))
	{
		/*
		 * If the input relation is an index we simply replace the
		 * relationId with the corresponding relation to hide indexes
		 * as well. See RelationIsAKnownShard() for the details and give
		 * more meaningful debug message here.
		 */
		relKind = get_rel_relkind(relationId);
		if (relKind == RELKIND_INDEX)
		{
			ereport(DEBUG2, (errmsg("skipping index \"%s\" since it belongs to a shard",
									get_rel_name(relationId))));
		}
		else
		{
			ereport(DEBUG2, (errmsg("skipping relation \"%s\" since it is a shard",
									get_rel_name(relationId))));
		}

		PG_RETURN_BOOL(false);
	}

	PG_RETURN_BOOL(RelationIsVisible(relationId));
}


/*
 * RelationIsAKnownShard gets a relationId, check whether it's a shard of
 * any distributed table in the current search path.
 *
 * We can only do that in MX since both the metadata and tables are only
 * present there.
 */
static bool
RelationIsAKnownShard(Oid shardRelationId)
{
	int localGroupId = -1;
	char *shardRelationName = NULL;
	char *relationName = NULL;
	char *generatedRelationName = NULL;
	bool missingOk = true;
	uint64 shardId = INVALID_SHARD_ID;
	ShardInterval *shardInterval = NULL;
	Oid relationId = InvalidOid;
	char *shardIdString = NULL;
	int relationNameLength = 0;


	/*
	 * TODO: version check
	 */
	char relKind = '\0';

	if (!OidIsValid(shardRelationId))
	{
@@ -65,36 +125,31 @@ RelationIsAKnownShard(Oid shardRelationId)
		/*
		 * We're not interested in shards in the coordinator
		 * or non-mx worker nodes.
		 */
		 */
		return false;
	}

	/* we're not interested in the relations that are not in the search path */
	if (!RelationIsVisible(shardRelationId))
	{
		return false;
	}

	/*
	 * If the input relation is an index we simply replace the
	 * relationId with the corresponding relation to hide indexes
	 * as well.
	 */
	relKind = get_rel_relkind(shardRelationId);
	if (relKind == RELKIND_INDEX)
	{
		shardRelationId = IndexGetRelation(shardRelationId, false);
	}

	/* get the shard's relation name */
	shardRelationName = get_rel_name(shardRelationId);

	/* find the last underscore and increment for shardId string */
	shardIdString = strrchr(shardRelationName, SHARD_NAME_SEPARATOR);
	if (shardIdString == NULL)
	{
		return false;
	}

	relationNameLength = shardIdString - shardRelationName;
	relationName = strndup(shardRelationName, relationNameLength);

	relationId = RelnameGetRelid(relationName);
	if (!OidIsValid(relationId))
	{
		/* there is no such relation */
		return false;
	}

	if (!IsDistributedTable(relationId))
	{
		/* we're obviously only interested in distributed tables */
		return false;
	}

	shardId = ExtractShardId(shardRelationName, missingOk);
	shardId = ExtractShardIdFromTableName(shardRelationName, missingOk);
	if (shardId == INVALID_SHARD_ID)
	{
		/*
@@ -104,15 +159,90 @@ RelationIsAKnownShard(Oid shardRelationId)
		return false;
	}

	/*
	 * At this point we're sure that this is a shard of a
	 * distributed table.
	 */
	shardInterval = LoadShardInterval(shardId);
	if (shardInterval->relationId == relationId)
	/* try to get the relation id */
	relationId = LookupShardRelation(shardId, true);
	if (!OidIsValid(relationId))
	{
		/* there is no such relation */
		return false;
	}

	/*
	 * Now get the relation name and append the shardId to it. We need
	 * to do that because otherwise a local table with a valid shardId
	 * appended to its name could be misleading.
	 */
	generatedRelationName = get_rel_name(relationId);
	AppendShardIdToName(&generatedRelationName, shardId);
	if (strncmp(shardRelationName, generatedRelationName, NAMEDATALEN) == 0)
	{
		/* we found the distributed table that the input shard belongs to */
		return true;
	}

	return false;
}


/*
 * ReplaceTableVisibleFunction is a wrapper around ReplaceTableVisibleFunctionWalker.
 * The replace functionality can be enabled/disable via a GUC. This function also
 * ensures that the extension is loaded and the version is compatible.
 */
Node *
ReplaceTableVisibleFunction(Node *inputNode)
{
	if (!OverrideTableVisibility ||
		!CitusHasBeenLoaded() || !CheckCitusVersion(DEBUG2))
	{
		return inputNode;
	}

	return ReplaceTableVisibleFunctionWalker(inputNode);
}


/*
 * ReplaceTableVisibleFunction replaces all occurences of
 * pg_catalog.pg_table_visible() to
 * pg_catalog.citus_table_visible() in the given input node.
 *
 * Note that the only difference between the functions is that
 * the latter filters the tables that are known to be shards on
 * Citus MX worker (data) nodes.
 */
static Node *
ReplaceTableVisibleFunctionWalker(Node *inputNode)
{
	if (inputNode == NULL)
	{
		return NULL;
	}

	if (IsA(inputNode, FuncExpr))
	{
		FuncExpr *functionToProcess = (FuncExpr *) inputNode;
		Oid functionId = functionToProcess->funcid;

		if (functionId == PgTableVisibleFuncId())
		{
			/*
			 * We simply update the function id of the FuncExpr for
			 * two reasons: (i) We don't want to interfere with the
			 * memory contexts so don't want to deal with allocating
			 * a new functionExpr (ii) We already know that both
			 * functions have the exact same signature.
			 */
			functionToProcess->funcid = CitusTableVisibleFuncId();

			return (Node *) functionToProcess;
		}
	}
	else if (IsA(inputNode, Query))
	{
		return (Node *) query_tree_mutator((Query *) inputNode,
										   ReplaceTableVisibleFunctionWalker, NULL, 0);
	}

	return expression_tree_mutator(inputNode, ReplaceTableVisibleFunctionWalker, NULL);
}
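The name-matching step above (regenerate the shard name from the base relation and compare) is what keeps a local table that merely carries a shard-like suffix visible. A sketch of the expected behavior on a worker, using the names from the regression test below:

-- a plain local table whose name ends in an existing shard id is not hidden:
-- no distributed table named test_table_2 exists, so the lookup falls through
-- and the table is treated as a regular local table
CREATE TABLE test_table_2_1130000(id int, time date);
\d   -- still lists test_table_2_1130000
-- whereas genuine shards such as test_table_102008_102012 remain hidden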
@@ -99,6 +99,7 @@ extern ShardPlacement * LoadShardPlacement(uint64 shardId, uint64 placementId);
extern DistTableCacheEntry * DistributedTableCacheEntry(Oid distributedRelationId);
extern int GetLocalGroupId(void);
extern List * DistTableOidList(void);
extern Oid LookupShardRelation(int64 shardId, bool missing_ok);
extern List * ShardPlacementList(uint64 shardId);
extern void CitusInvalidateRelcacheByRelid(Oid relationId);
extern void CitusInvalidateRelcacheByShardId(int64 shardId);
@@ -151,6 +152,8 @@ extern Oid CitusReadIntermediateResultFuncId(void);
extern Oid CitusExtraDataContainerFuncId(void);
extern Oid CitusWorkerHashFunctionId(void);
extern Oid CitusTextSendAsJsonbFunctionId(void);
extern Oid PgTableVisibleFuncId(void);
extern Oid CitusTableVisibleFuncId(void);

/* enum oids */
extern Oid PrimaryNodeRoleId(void);
@@ -118,9 +118,9 @@ extern void RemoveJobSchema(StringInfo schemaName);
extern Datum * DeconstructArrayObject(ArrayType *arrayObject);
extern int32 ArrayObjectCount(ArrayType *arrayObject);
extern FmgrInfo * GetFunctionInfo(Oid typeId, Oid accessMethodId, int16 procedureId);
extern uint64 ExtractShardIdFromTableName(const char *tableName, bool missingOk);
extern List * TableDDLCommandList(const char *nodeName, uint32 nodePort,
								  const char *tableName);
extern uint64 ExtractShardId(const char *tableName, bool missingOk);


/* Function declarations shared with the master planner */
@@ -0,0 +1,22 @@
/*-------------------------------------------------------------------------
 *
 * worker_shard_visibility.h
 *   Hide shard names on MX worker nodes.
 *
 * Copyright (c) 2018, Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */

#ifndef WORKER_SHARD_VISIBILITY_H
#define WORKER_SHARD_VISIBILITY_H

#include "nodes/nodes.h"

extern bool OverrideTableVisibility;


extern Node * ReplaceTableVisibleFunction(Node *inputNode);


#endif /* WORKER_SHARD_VISIBILITY_H */
@@ -144,6 +144,7 @@ ALTER EXTENSION citus UPDATE TO '7.5-5';
ALTER EXTENSION citus UPDATE TO '7.5-6';
ALTER EXTENSION citus UPDATE TO '7.5-7';
ALTER EXTENSION citus UPDATE TO '8.0-1';
ALTER EXTENSION citus UPDATE TO '8.0-2';
-- show running version
SHOW citus.version;
 citus.version
@@ -50,6 +50,8 @@ btree, for table "public.mx_ddl_table"
btree, for table "public.mx_ddl_table"

\c - - - :worker_1_port
-- make sure we don't break the following tests by hiding the shard names
SET citus.override_table_visibility TO FALSE;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
 Column | Type | Modifiers
---------+---------+--------------------
@@ -93,6 +95,8 @@ Index "public.ddl_test_index_1220088"
btree, for table "public.mx_ddl_table_1220088"

\c - - - :worker_2_port
-- make sure we don't break the following tests by hiding the shard names
SET citus.override_table_visibility TO FALSE;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
 Column | Type | Modifiers
---------+---------+--------------------
@@ -0,0 +1,326 @@
--
-- Hide shard names on MX worker nodes
--
SET citus.next_shard_id TO 1130000;
-- make sure that the signature of the citus_table_is_visible
-- and pg_table_is_visible are the same since the logic
-- relies on that
SELECT
	proname, proisstrict, proretset, provolatile,
	proparallel, pronargs, pronargdefaults ,prorettype,
	proargtypes, proacl
FROM
	pg_proc
WHERE
	proname LIKE '%table_is_visible%'
ORDER BY 1;
 proname | proisstrict | proretset | provolatile | proparallel | pronargs | pronargdefaults | prorettype | proargtypes | proacl
------------------------+-------------+-----------+-------------+-------------+----------+-----------------+------------+-------------+--------
 citus_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 |
 pg_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 |
(2 rows)

CREATE SCHEMA mx_hide_shard_names;
SET search_path TO 'mx_hide_shard_names';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
-----------------------------

(1 row)

SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
 start_metadata_sync_to_node
-----------------------------

(1 row)

CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');
 create_distributed_table
--------------------------

(1 row)

-- first show that the views does not show
-- any shards on the coordinator as expected
SELECT * FROM citus_shards_on_worker;
 Schema | Name | Type | Owner
--------+------+------+-------
(0 rows)

SELECT * FROM citus_shard_indexes_on_worker;
 Schema | Name | Type | Owner | Table
--------+------+------+-------+-------
(0 rows)

-- now show that we see the shards, but not the
-- indexes as there are no indexes
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
 Schema | Name | Type | Owner
---------------------+--------------------+-------+----------
 mx_hide_shard_names | test_table_1130000 | table | postgres
 mx_hide_shard_names | test_table_1130002 | table | postgres
(2 rows)

SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
 Schema | Name | Type | Owner | Table
--------+------+------+-------+-------
(0 rows)

-- now create an index
\c - - - :master_port
SET search_path TO 'mx_hide_shard_names';
CREATE INDEX test_index ON mx_hide_shard_names.test_table(id);
-- now show that we see the shards, and the
-- indexes as well
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
 Schema | Name | Type | Owner
---------------------+--------------------+-------+----------
 mx_hide_shard_names | test_table_1130000 | table | postgres
 mx_hide_shard_names | test_table_1130002 | table | postgres
(2 rows)

SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
 Schema | Name | Type | Owner | Table
---------------------+--------------------+-------+----------+--------------------
 mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000
 mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002
(2 rows)

-- we should be able to select from the shards directly if we
-- know the name of the tables
SELECT count(*) FROM test_table_1130000;
 count
-------
 0
(1 row)

-- disable the config so that table becomes visible
SELECT pg_table_is_visible('test_table_1130000'::regclass);
 pg_table_is_visible
---------------------
 f
(1 row)

SET citus.override_table_visibility TO FALSE;
SELECT pg_table_is_visible('test_table_1130000'::regclass);
 pg_table_is_visible
---------------------
 t
(1 row)

\c - - - :master_port
-- make sure that we're resilient to the edge cases
-- such that the table name includes the shard number
SET search_path TO 'mx_hide_shard_names';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- not existing shard ids appended to the distributed table name
CREATE TABLE test_table_102008(id int, time date);
SELECT create_distributed_table('test_table_102008', 'id');
 create_distributed_table
--------------------------

(1 row)

\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
-- existing shard ids appended to a local table name
-- note that we cannot create a distributed or local table
-- with the same name since a table with the same
-- name already exists :)
CREATE TABLE test_table_2_1130000(id int, time date);
SELECT * FROM citus_shards_on_worker ORDER BY 2;
 Schema | Name | Type | Owner
---------------------+--------------------------+-------+----------
 mx_hide_shard_names | test_table_102008_102012 | table | postgres
 mx_hide_shard_names | test_table_102008_102014 | table | postgres
 mx_hide_shard_names | test_table_1130000 | table | postgres
 mx_hide_shard_names | test_table_1130002 | table | postgres
(4 rows)

\d
 List of relations
 Schema | Name | Type | Owner
---------------------+----------------------+-------+----------
 mx_hide_shard_names | test_table | table | postgres
 mx_hide_shard_names | test_table_102008 | table | postgres
 mx_hide_shard_names | test_table_2_1130000 | table | postgres
(3 rows)

\c - - - :master_port
-- make sure that don't mess up with schemas
CREATE SCHEMA mx_hide_shard_names_2;
SET search_path TO 'mx_hide_shard_names_2';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');
 create_distributed_table
--------------------------

(1 row)

CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id);
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
 Schema | Name | Type | Owner
---------------------+--------------------------+-------+----------
 mx_hide_shard_names | test_table_102008_102012 | table | postgres
 mx_hide_shard_names | test_table_102008_102014 | table | postgres
 mx_hide_shard_names | test_table_1130000 | table | postgres
 mx_hide_shard_names | test_table_1130002 | table | postgres
(4 rows)

SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
 Schema | Name | Type | Owner | Table
---------------------+--------------------+-------+----------+--------------------
 mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000
 mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002
(2 rows)

SET search_path TO 'mx_hide_shard_names_2';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
 Schema | Name | Type | Owner
-----------------------+-------------------+-------+----------
 mx_hide_shard_names_2 | test_table_102016 | table | postgres
 mx_hide_shard_names_2 | test_table_102018 | table | postgres
(2 rows)

SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
 Schema | Name | Type | Owner | Table
-----------------------+-------------------+-------+----------+-------------------
 mx_hide_shard_names_2 | test_index_102016 | index | postgres | test_table_102016
 mx_hide_shard_names_2 | test_index_102018 | index | postgres | test_table_102018
(2 rows)

SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
 Schema | Name | Type | Owner
--------+------+------+-------
(0 rows)

SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
 Schema | Name | Type | Owner | Table
--------+------+------+-------+-------
(0 rows)

-- now try very long table names
\c - - - :master_port
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE SCHEMA mx_hide_shard_names_3;
SET search_path TO 'mx_hide_shard_names_3';
-- Verify that a table name > 56 characters handled properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
	col1 integer not null,
	col2 integer not null);
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1');
 create_distributed_table
--------------------------

(1 row)

\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names_3';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
 Schema | Name | Type | Owner
-----------------------+-----------------------------------------------------------------+-------+----------
 mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678_e0119164_102020 | table | postgres
 mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678_e0119164_102022 | table | postgres
(2 rows)

\d
 List of relations
 Schema | Name | Type | Owner
-----------------------+-------------------------------------------------------------+-------+----------
 mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678901234567890 | table | postgres
(1 row)

-- now try weird schema names
\c - - - :master_port
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE SCHEMA "CiTuS.TeeN";
SET search_path TO "CiTuS.TeeN";
CREATE TABLE "TeeNTabLE.1!?!"(id int, "TeNANt_Id" int);
CREATE INDEX "MyTenantIndex" ON "CiTuS.TeeN"."TeeNTabLE.1!?!"("TeNANt_Id");
-- create distributed table with weird names
SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id');
 create_distributed_table
--------------------------

(1 row)

\c - - - :worker_1_port
SET search_path TO "CiTuS.TeeN";
SELECT * FROM citus_shards_on_worker ORDER BY 2;
 Schema | Name | Type | Owner
------------+-----------------------+-------+----------
 CiTuS.TeeN | TeeNTabLE.1!?!_102024 | table | postgres
 CiTuS.TeeN | TeeNTabLE.1!?!_102026 | table | postgres
(2 rows)

SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
 Schema | Name | Type | Owner | Table
------------+----------------------+-------+----------+-----------------------
 CiTuS.TeeN | MyTenantIndex_102024 | index | postgres | TeeNTabLE.1!?!_102024
 CiTuS.TeeN | MyTenantIndex_102026 | index | postgres | TeeNTabLE.1!?!_102026
(2 rows)

\d
 List of relations
 Schema | Name | Type | Owner
------------+----------------+-------+----------
 CiTuS.TeeN | TeeNTabLE.1!?! | table | postgres
(1 row)

\di
 List of relations
 Schema | Name | Type | Owner | Table
------------+---------------+-------+----------+----------------
 CiTuS.TeeN | MyTenantIndex | index | postgres | TeeNTabLE.1!?!
(1 row)

-- clean-up
\c - - - :master_port
-- show that common psql functions do not show shards
-- including the ones that are not in the current schema
SET search_path TO 'mx_hide_shard_names';
\d
 List of relations
 Schema | Name | Type | Owner
---------------------+-------------------+-------+----------
 mx_hide_shard_names | test_table | table | postgres
 mx_hide_shard_names | test_table_102008 | table | postgres
(2 rows)

\di
 List of relations
 Schema | Name | Type | Owner | Table
---------------------+------------+-------+----------+------------
 mx_hide_shard_names | test_index | index | postgres | test_table
(1 row)

DROP SCHEMA mx_hide_shard_names CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table test_table
drop cascades to table test_table_102008
DROP SCHEMA mx_hide_shard_names_2 CASCADE;
NOTICE: drop cascades to table mx_hide_shard_names_2.test_table
DROP SCHEMA mx_hide_shard_names_3 CASCADE;
NOTICE: drop cascades to table mx_hide_shard_names_3.too_long_12345678901234567890123456789012345678901234567890
DROP SCHEMA "CiTuS.TeeN" CASCADE;
NOTICE: drop cascades to table "CiTuS.TeeN"."TeeNTabLE.1!?!"
@@ -17,7 +17,9 @@ test: multi_extension
test: multi_cluster_management
test: multi_test_helpers

test: multi_mx_partitioning
# the following test has to be run sequentially
test: multi_mx_hide_shard_names
test: multi_mx_partitioning
test: multi_mx_create_table
test: multi_mx_copy_data multi_mx_router_planner
test: multi_mx_schema_support multi_mx_tpch_query1 multi_mx_tpch_query10
@@ -144,6 +144,7 @@ ALTER EXTENSION citus UPDATE TO '7.5-5';
ALTER EXTENSION citus UPDATE TO '7.5-6';
ALTER EXTENSION citus UPDATE TO '7.5-7';
ALTER EXTENSION citus UPDATE TO '8.0-1';
ALTER EXTENSION citus UPDATE TO '8.0-2';

-- show running version
SHOW citus.version;
@@ -25,6 +25,9 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table':

\c - - - :worker_1_port

-- make sure we don't break the following tests by hiding the shard names
SET citus.override_table_visibility TO FALSE;

SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
\d ddl_test*_index
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass;
@@ -32,6 +35,9 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1

\c - - - :worker_2_port

-- make sure we don't break the following tests by hiding the shard names
SET citus.override_table_visibility TO FALSE;

SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
\d ddl_test*_index
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass;
@@ -0,0 +1,177 @@
--
-- Hide shard names on MX worker nodes
--

SET citus.next_shard_id TO 1130000;


-- make sure that the signature of the citus_table_is_visible
-- and pg_table_is_visible are the same since the logic
-- relies on that
SELECT
	proname, proisstrict, proretset, provolatile,
	proparallel, pronargs, pronargdefaults ,prorettype,
	proargtypes, proacl
FROM
	pg_proc
WHERE
	proname LIKE '%table_is_visible%'
ORDER BY 1;

CREATE SCHEMA mx_hide_shard_names;
SET search_path TO 'mx_hide_shard_names';

SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;

SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);

CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');

-- first show that the views does not show
-- any shards on the coordinator as expected
SELECT * FROM citus_shards_on_worker;
SELECT * FROM citus_shard_indexes_on_worker;

-- now show that we see the shards, but not the
-- indexes as there are no indexes
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;

-- now create an index
\c - - - :master_port
SET search_path TO 'mx_hide_shard_names';
CREATE INDEX test_index ON mx_hide_shard_names.test_table(id);

-- now show that we see the shards, and the
-- indexes as well
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;

-- we should be able to select from the shards directly if we
-- know the name of the tables
SELECT count(*) FROM test_table_1130000;

-- disable the config so that table becomes visible
SELECT pg_table_is_visible('test_table_1130000'::regclass);
SET citus.override_table_visibility TO FALSE;
SELECT pg_table_is_visible('test_table_1130000'::regclass);

\c - - - :master_port
-- make sure that we're resilient to the edge cases
-- such that the table name includes the shard number
SET search_path TO 'mx_hide_shard_names';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;

SET citus.replication_model TO 'streaming';

-- not existing shard ids appended to the distributed table name
CREATE TABLE test_table_102008(id int, time date);
SELECT create_distributed_table('test_table_102008', 'id');

\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';

-- existing shard ids appended to a local table name
-- note that we cannot create a distributed or local table
-- with the same name since a table with the same
-- name already exists :)
CREATE TABLE test_table_2_1130000(id int, time date);

SELECT * FROM citus_shards_on_worker ORDER BY 2;

\d

\c - - - :master_port
-- make sure that don't mess up with schemas
CREATE SCHEMA mx_hide_shard_names_2;
SET search_path TO 'mx_hide_shard_names_2';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;

SET citus.replication_model TO 'streaming';
CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');
CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id);

\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SET search_path TO 'mx_hide_shard_names_2';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;

-- now try very long table names
\c - - - :master_port

SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;

SET citus.replication_model TO 'streaming';

CREATE SCHEMA mx_hide_shard_names_3;
SET search_path TO 'mx_hide_shard_names_3';

-- Verify that a table name > 56 characters handled properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
	col1 integer not null,
	col2 integer not null);
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1');

\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names_3';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
\d



-- now try weird schema names
\c - - - :master_port

SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;

SET citus.replication_model TO 'streaming';

CREATE SCHEMA "CiTuS.TeeN";
SET search_path TO "CiTuS.TeeN";

CREATE TABLE "TeeNTabLE.1!?!"(id int, "TeNANt_Id" int);

CREATE INDEX "MyTenantIndex" ON "CiTuS.TeeN"."TeeNTabLE.1!?!"("TeNANt_Id");
-- create distributed table with weird names
SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id');

\c - - - :worker_1_port
SET search_path TO "CiTuS.TeeN";
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;

\d
\di

-- clean-up
\c - - - :master_port

-- show that common psql functions do not show shards
-- including the ones that are not in the current schema
SET search_path TO 'mx_hide_shard_names';
\d
\di

DROP SCHEMA mx_hide_shard_names CASCADE;
DROP SCHEMA mx_hide_shard_names_2 CASCADE;
DROP SCHEMA mx_hide_shard_names_3 CASCADE;
DROP SCHEMA "CiTuS.TeeN" CASCADE;