Merge pull request #2323 from citusdata/shards_go_behind_schema

Alternative approach for hiding shards on the MX workers for better UX
Önder Kalacı 2018-08-07 16:24:36 +03:00 committed by GitHub
commit 3fa04d8f2c
20 changed files with 959 additions and 21 deletions
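The user-visible effect, sketched below with the shard name used by the multi_mx_hide_shard_names regression test added in this PR (this assumes an MX worker node with metadata synced, as in that test): shards stop showing up in \d and in pg_table_is_visible()-based catalog queries, while they remain directly queryable and can still be listed on demand.

-- on an MX worker node, after metadata sync
SELECT pg_table_is_visible('test_table_1130000'::regclass);  -- f: the shard is filtered out
SELECT * FROM citus_shards_on_worker;                         -- lists the hidden shards explicitly
SELECT count(*) FROM test_table_1130000;                      -- direct access to the shard still works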


@@ -106,6 +106,7 @@ OBJS = src/backend/distributed/shared_library_init.o \
 	src/backend/distributed/utils/ruleutils_96.o \
 	src/backend/distributed/utils/shardinterval_utils.o \
 	src/backend/distributed/utils/statistics_collection.o \
+	src/backend/distributed/worker/worker_shard_visibility.o \
 	src/backend/distributed/worker/task_tracker.o \
 	src/backend/distributed/worker/task_tracker_protocol.o \
 	src/backend/distributed/worker/worker_data_fetch_protocol.o \


@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '8.0-1'
+default_version = '8.0-2'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog


@@ -17,7 +17,7 @@ EXTVERSIONS = 5.0 5.0-1 5.0-2 \
 	7.3-1 7.3-2 7.3-3 \
 	7.4-1 7.4-2 7.4-3 \
 	7.5-1 7.5-2 7.5-3 7.5-4 7.5-5 7.5-6 7.5-7 \
-	8.0-1
+	8.0-1 8.0-2
 # All citus--*.sql files in the source directory
 DATA = $(patsubst $(citus_abs_srcdir)/%.sql,%.sql,$(wildcard $(citus_abs_srcdir)/$(EXTENSION)--*--*.sql))
@@ -217,6 +217,8 @@ $(EXTENSION)--7.5-7.sql: $(EXTENSION)--7.5-6.sql $(EXTENSION)--7.5-6--7.5-7.sql
 	cat $^ > $@
 $(EXTENSION)--8.0-1.sql: $(EXTENSION)--7.5-7.sql $(EXTENSION)--7.5-7--8.0-1.sql
 	cat $^ > $@
+$(EXTENSION)--8.0-2.sql: $(EXTENSION)--8.0-1.sql $(EXTENSION)--8.0-1--8.0-2.sql
+	cat $^ > $@
 NO_PGXS = 1


@@ -0,0 +1,62 @@
/* citus--8.0-1--8.0-2 */
SET search_path = 'pg_catalog';
CREATE OR REPLACE FUNCTION pg_catalog.relation_is_a_known_shard(regclass)
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$relation_is_a_known_shard$$;
COMMENT ON FUNCTION relation_is_a_known_shard(regclass)
IS 'returns true if the given relation is a known shard';
CREATE OR REPLACE FUNCTION pg_catalog.citus_table_is_visible(oid)
RETURNS bool
LANGUAGE C STRICT
STABLE
PARALLEL SAFE
AS 'MODULE_PATHNAME', $$citus_table_is_visible$$;
COMMENT ON FUNCTION citus_table_is_visible(oid)
IS 'wrapper on pg_table_is_visible, filtering out tables (and indexes) that are known to be shards';
-- this is the exact same query as the one the \d
-- command produces, except that pg_table_is_visible
-- is replaced with pg_catalog.relation_is_a_known_shard(c.oid)
CREATE VIEW citus.citus_shards_on_worker AS
SELECT n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','p','v','m','S','f','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.relation_is_a_known_shard(c.oid)
ORDER BY 1,2;
ALTER VIEW citus.citus_shards_on_worker SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_shards_on_worker TO public;
-- this is the exact same query as the one the \di
-- command produces, except that pg_table_is_visible
-- is replaced with pg_catalog.relation_is_a_known_shard(c.oid)
CREATE VIEW citus.citus_shard_indexes_on_worker AS
SELECT n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner",
c2.relname as "Table"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid
LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid
WHERE c.relkind IN ('i','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.relation_is_a_known_shard(c.oid)
ORDER BY 1,2;
ALTER VIEW citus.citus_shard_indexes_on_worker SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_shard_indexes_on_worker TO public;
RESET search_path;
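The helper functions added above can also be called directly. A quick sanity check on an MX worker might look like the following (a sketch; the shard name is borrowed from the regression test later in this PR, not part of this script):

SELECT relation_is_a_known_shard('test_table_1130000'::regclass);  -- t: recognized as a shard
SELECT citus_table_is_visible('test_table_1130000'::regclass);     -- f: hidden from listings
SELECT * FROM citus_shard_indexes_on_worker;                        -- shard indexes plus their owning shard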


@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '8.0-1'
+default_version = '8.0-2'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog


@@ -28,6 +28,7 @@
 #include "distributed/multi_master_planner.h"
 #include "distributed/multi_router_planner.h"
 #include "distributed/recursive_planning.h"
+#include "distributed/worker_shard_visibility.h"
 #include "executor/executor.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
@@ -129,6 +130,12 @@ distributed_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
 		AdjustPartitioningForDistributedPlanning(parse, setPartitionedTablesInherited);
 	}

+	/*
+	 * Make sure that we hide shard names on the Citus MX worker nodes. See comments in
+	 * ReplaceTableVisibleFunction() for the details.
+	 */
+	parse = (Query *) ReplaceTableVisibleFunction((Node *) parse);
+
 	/* create a restriction context and put it at the end if context list */
 	plannerRestrictionContext = CreateAndPushPlannerRestrictionContext();


@@ -50,6 +50,7 @@
 #include "distributed/transaction_recovery.h"
 #include "distributed/worker_manager.h"
 #include "distributed/worker_protocol.h"
+#include "distributed/worker_shard_visibility.h"
 #include "postmaster/postmaster.h"
 #include "optimizer/planner.h"
 #include "optimizer/paths.h"
@@ -379,6 +380,19 @@ RegisterCitusConfigVariables(void)
 		GUC_NO_SHOW_ALL,
 		NULL, NULL, NULL);

+	DefineCustomBoolVariable(
+		"citus.override_table_visibility",
gettext_noop("Enables replacing occurencens of pg_catalog.pg_table_visible() "
"with pg_catalog.citus_table_visible()"),
gettext_noop("When enabled, shards on the Citus MX worker (data) nodes would be "
"filtered out by many psql commands to provide better user "
"experience."),
&OverrideTableVisibility,
true,
PGC_USERSET,
GUC_NO_SHOW_ALL,
NULL, NULL, NULL);
DefineCustomBoolVariable( DefineCustomBoolVariable(
"citus.enforce_foreign_key_restrictions", "citus.enforce_foreign_key_restrictions",
gettext_noop("Enforce restrictions while querying distributed/reference " gettext_noop("Enforce restrictions while querying distributed/reference "


@@ -134,6 +134,8 @@ typedef struct MetadataCacheData
 	Oid primaryNodeRoleId;
 	Oid secondaryNodeRoleId;
 	Oid unavailableNodeRoleId;
+	Oid pgTableIsVisibleFuncId;
+	Oid citusTableIsVisibleFuncId;
 } MetadataCacheData;
@@ -198,7 +200,6 @@ static void InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId);
 static void InvalidateLocalGroupIdRelationCacheCallback(Datum argument, Oid relationId);
 static HeapTuple LookupDistPartitionTuple(Relation pgDistPartition, Oid relationId);
 static List * LookupDistShardTuples(Oid relationId);
-static Oid LookupShardRelation(int64 shardId);
 static void GetPartitionTypeInputInfo(char *partitionKeyString, char partitionMethod,
 									  Oid *columnTypeId, int32 *columnTypeMod,
 									  Oid *intervalTypeId, int32 *intervalTypeMod);
@@ -706,7 +707,7 @@ LookupShardCacheEntry(int64 shardId)
 	 * know that the shard has to be in the cache if it exists. If the
 	 * shard does *not* exist LookupShardRelation() will error out.
 	 */
-	Oid relationId = LookupShardRelation(shardId);
+	Oid relationId = LookupShardRelation(shardId, false);

 	/* trigger building the cache for the shard id */
 	LookupDistTableCacheEntry(relationId);
@@ -724,7 +725,7 @@ LookupShardCacheEntry(int64 shardId)
 	if (!shardEntry->tableEntry->isValid)
 	{
 		Oid oldRelationId = shardEntry->tableEntry->relationId;
-		Oid currentRelationId = LookupShardRelation(shardId);
+		Oid currentRelationId = LookupShardRelation(shardId, false);

 		/*
 		 * The relation OID to which the shard belongs could have changed,
@@ -2051,6 +2052,42 @@ CitusTextSendAsJsonbFunctionId(void)
 }

+
+/*
+ * PgTableVisibleFuncId returns oid of the pg_table_is_visible function.
+ */
+Oid
+PgTableVisibleFuncId(void)
+{
+	if (MetadataCache.pgTableIsVisibleFuncId == InvalidOid)
+	{
+		const int argCount = 1;
+
+		MetadataCache.pgTableIsVisibleFuncId =
+			FunctionOid("pg_catalog", "pg_table_is_visible", argCount);
+	}
+
+	return MetadataCache.pgTableIsVisibleFuncId;
+}
+
+
+/*
+ * CitusTableVisibleFuncId returns oid of the citus_table_is_visible function.
+ */
+Oid
+CitusTableVisibleFuncId(void)
+{
+	if (MetadataCache.citusTableIsVisibleFuncId == InvalidOid)
+	{
+		const int argCount = 1;
+
+		MetadataCache.citusTableIsVisibleFuncId =
+			FunctionOid("pg_catalog", "citus_table_is_visible", argCount);
+	}
+
+	return MetadataCache.citusTableIsVisibleFuncId;
+}
+
+
 /*
  * CitusExtensionOwner() returns the owner of the 'citus' extension. That user
  * is, amongst others, used to perform actions a normal user might not be
@@ -3263,10 +3300,11 @@ LookupDistShardTuples(Oid relationId)
 /*
  * LookupShardRelation returns the logical relation oid a shard belongs to.
  *
- * Errors out if the shardId does not exist.
+ * Errors out if the shardId does not exist and missingOk is false. Returns
+ * InvalidOid if the shardId does not exist and missingOk is true.
  */
-static Oid
-LookupShardRelation(int64 shardId)
+Oid
+LookupShardRelation(int64 shardId, bool missingOk)
 {
 	SysScanDesc scanDescriptor = NULL;
 	ScanKeyData scanKey[1];
@@ -3284,14 +3322,21 @@ LookupShardRelation(int64 shardId)
 							NULL, scanKeyCount, scanKey);

 	heapTuple = systable_getnext(scanDescriptor);
-	if (!HeapTupleIsValid(heapTuple))
+	if (!HeapTupleIsValid(heapTuple) && !missingOk)
 	{
 		ereport(ERROR, (errmsg("could not find valid entry for shard "
 							   UINT64_FORMAT, shardId)));
 	}

-	shardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple);
-	relationId = shardForm->logicalrelid;
+	if (!HeapTupleIsValid(heapTuple))
+	{
+		relationId = InvalidOid;
+	}
+	else
+	{
+		shardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple);
+		relationId = shardForm->logicalrelid;
+	}

 	systable_endscan(scanDescriptor);
 	heap_close(pgDistShard, NoLock);


@@ -62,7 +62,6 @@ static bool ReceiveRegularFile(const char *nodeName, uint32 nodePort,
 static void ReceiveResourceCleanup(int32 connectionId, const char *filename,
 								   int32 fileDescriptor);
 static void CitusDeleteFile(const char *filename);
-static uint64 ExtractShardId(const char *tableName);
 static bool check_log_statement(List *stmt_list);
 static void AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName);
 static void SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg);
@@ -526,9 +525,13 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS)
 }

-/* Extracts shard id from the given table name, and returns it. */
-static uint64
-ExtractShardId(const char *tableName)
+/*
+ * ExtractShardIdFromTableName tries to extract shard id from the given table name,
+ * and returns the shard id if table name is formatted as shard name.
+ * Else, the function returns INVALID_SHARD_ID.
+ */
+uint64
+ExtractShardIdFromTableName(const char *tableName, bool missingOk)
 {
 	uint64 shardId = 0;
 	char *shardIdString = NULL;
@@ -536,11 +539,16 @@ ExtractShardId(const char *tableName)
 	/* find the last underscore and increment for shardId string */
 	shardIdString = strrchr(tableName, SHARD_NAME_SEPARATOR);
-	if (shardIdString == NULL)
+	if (shardIdString == NULL && !missingOk)
 	{
 		ereport(ERROR, (errmsg("could not extract shardId from table name \"%s\"",
 							   tableName)));
 	}
+	else if (shardIdString == NULL && missingOk)
+	{
+		return INVALID_SHARD_ID;
+	}

 	shardIdString++;

 	errno = 0;
@@ -548,8 +556,15 @@ ExtractShardId(const char *tableName)
 	if (errno != 0 || (*shardIdStringEnd != '\0'))
 	{
-		ereport(ERROR, (errmsg("could not extract shardId from table name \"%s\"",
-							   tableName)));
+		if (!missingOk)
+		{
+			ereport(ERROR, (errmsg("could not extract shardId from table name \"%s\"",
+								   tableName)));
+		}
+		else
+		{
+			return INVALID_SHARD_ID;
+		}
 	}

 	return shardId;
@@ -761,7 +776,7 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS)
 	 * the transaction for this function commits, this lock will automatically
 	 * be released. This ensures appends to a shard happen in a serial manner.
 	 */
-	shardId = ExtractShardId(shardTableName);
+	shardId = ExtractShardIdFromTableName(shardTableName, false);
 	LockShardResource(shardId, AccessExclusiveLock);

 	/* copy remote table's data to this node */


@@ -0,0 +1,248 @@
/*
* worker_shard_visibility.c
*
* Implements the functions for hiding shards on the Citus MX
* worker (data) nodes.
*
* Copyright (c) 2012-2018, Citus Data, Inc.
*/
#include "postgres.h"
#include "catalog/index.h"
#include "catalog/namespace.h"
#include "catalog/pg_class.h"
#include "distributed/metadata_cache.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_shard_visibility.h"
#include "nodes/nodeFuncs.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
/* Config variable managed via guc.c */
bool OverrideTableVisibility = true;
static bool RelationIsAKnownShard(Oid shardRelationId);
static Node * ReplaceTableVisibleFunctionWalker(Node *inputNode);
PG_FUNCTION_INFO_V1(citus_table_is_visible);
PG_FUNCTION_INFO_V1(relation_is_a_known_shard);
/*
* relation_is_a_known_shard is a wrapper around RelationIsAKnownShard(), so
* see the details there. The function also treats the indexes on shards
* as if they were shards.
*/
Datum
relation_is_a_known_shard(PG_FUNCTION_ARGS)
{
Oid relationId = PG_GETARG_OID(0);
CheckCitusVersion(ERROR);
PG_RETURN_BOOL(RelationIsAKnownShard(relationId));
}
/*
* citus_table_is_visible aims to behave exactly like
* pg_table_is_visible, with one exception: it
* returns false for the relations that are known to be shards.
*/
Datum
citus_table_is_visible(PG_FUNCTION_ARGS)
{
Oid relationId = PG_GETARG_OID(0);
char relKind = '\0';
CheckCitusVersion(ERROR);
/*
* We don't want to deal with invalid or non-existing relations,
* just like pg_table_is_visible does.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relationId)))
{
PG_RETURN_NULL();
}
if (RelationIsAKnownShard(relationId))
{
/*
* If the input relation is an index we simply replace the
* relationId with the corresponding relation to hide indexes
* as well. See RelationIsAKnownShard() for the details; here we only
* give a more meaningful debug message.
*/
relKind = get_rel_relkind(relationId);
if (relKind == RELKIND_INDEX)
{
ereport(DEBUG2, (errmsg("skipping index \"%s\" since it belongs to a shard",
get_rel_name(relationId))));
}
else
{
ereport(DEBUG2, (errmsg("skipping relation \"%s\" since it is a shard",
get_rel_name(relationId))));
}
PG_RETURN_BOOL(false);
}
PG_RETURN_BOOL(RelationIsVisible(relationId));
}
/*
* RelationIsAKnownShard gets a relationId and checks whether it's a shard of
* any distributed table in the current search path.
*
* We can only do that in MX since both the metadata and tables are only
* present there.
*/
static bool
RelationIsAKnownShard(Oid shardRelationId)
{
int localGroupId = -1;
char *shardRelationName = NULL;
char *generatedRelationName = NULL;
bool missingOk = true;
uint64 shardId = INVALID_SHARD_ID;
Oid relationId = InvalidOid;
char relKind = '\0';
if (!OidIsValid(shardRelationId))
{
/* we cannot continue without a valid Oid */
return false;
}
localGroupId = GetLocalGroupId();
if (localGroupId == 0)
{
/*
* We're not interested in shards on the coordinator
* or on non-MX worker nodes.
*/
return false;
}
/* we're not interested in the relations that are not in the search path */
if (!RelationIsVisible(shardRelationId))
{
return false;
}
/*
* If the input relation is an index we simply replace the
* relationId with the corresponding relation to hide indexes
* as well.
*/
relKind = get_rel_relkind(shardRelationId);
if (relKind == RELKIND_INDEX)
{
shardRelationId = IndexGetRelation(shardRelationId, false);
}
/* get the shard's relation name */
shardRelationName = get_rel_name(shardRelationId);
shardId = ExtractShardIdFromTableName(shardRelationName, missingOk);
if (shardId == INVALID_SHARD_ID)
{
/*
* The format of the table name does not align with
* our shard name definition.
*/
return false;
}
/* try to get the relation id */
relationId = LookupShardRelation(shardId, true);
if (!OidIsValid(relationId))
{
/* there is no such relation */
return false;
}
/*
* Now get the relation name and append the shardId to it. We need
* to do that because otherwise a local table with a valid shardId
* appended to its name could be misleading.
*/
generatedRelationName = get_rel_name(relationId);
AppendShardIdToName(&generatedRelationName, shardId);
if (strncmp(shardRelationName, generatedRelationName, NAMEDATALEN) == 0)
{
/* we found the distributed table that the input shard belongs to */
return true;
}
return false;
}
/*
* ReplaceTableVisibleFunction is a wrapper around ReplaceTableVisibleFunctionWalker.
* The replace functionality can be enabled/disabled via a GUC. This function also
* ensures that the extension is loaded and the version is compatible.
*/
Node *
ReplaceTableVisibleFunction(Node *inputNode)
{
if (!OverrideTableVisibility ||
!CitusHasBeenLoaded() || !CheckCitusVersion(DEBUG2))
{
return inputNode;
}
return ReplaceTableVisibleFunctionWalker(inputNode);
}
/*
* ReplaceTableVisibleFunctionWalker replaces all occurrences of
* pg_catalog.pg_table_is_visible() with
* pg_catalog.citus_table_is_visible() in the given input node.
*
* Note that the only difference between the functions is that
* the latter filters the tables that are known to be shards on
* Citus MX worker (data) nodes.
*/
static Node *
ReplaceTableVisibleFunctionWalker(Node *inputNode)
{
if (inputNode == NULL)
{
return NULL;
}
if (IsA(inputNode, FuncExpr))
{
FuncExpr *functionToProcess = (FuncExpr *) inputNode;
Oid functionId = functionToProcess->funcid;
if (functionId == PgTableVisibleFuncId())
{
/*
* We simply update the function id of the FuncExpr for
* two reasons: (i) We don't want to interfere with the
* memory contexts so don't want to deal with allocating
* a new functionExpr (ii) We already know that both
* functions have the exact same signature.
*/
functionToProcess->funcid = CitusTableVisibleFuncId();
return (Node *) functionToProcess;
}
}
else if (IsA(inputNode, Query))
{
return (Node *) query_tree_mutator((Query *) inputNode,
ReplaceTableVisibleFunctionWalker, NULL, 0);
}
return expression_tree_mutator(inputNode, ReplaceTableVisibleFunctionWalker, NULL);
}
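Taken together with the planner hook in distributed_planner.c, this walker means a catalog query arriving at an MX worker is rewritten before planning. Roughly, and simplified from the \d query embedded in the 8.0-1--8.0-2 script above:

-- what psql sends:
SELECT c.relname FROM pg_catalog.pg_class c WHERE pg_catalog.pg_table_is_visible(c.oid);
-- what effectively runs on an MX worker while citus.override_table_visibility is enabled:
SELECT c.relname FROM pg_catalog.pg_class c WHERE pg_catalog.citus_table_is_visible(c.oid);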


@@ -99,6 +99,7 @@ extern ShardPlacement * LoadShardPlacement(uint64 shardId, uint64 placementId);
 extern DistTableCacheEntry * DistributedTableCacheEntry(Oid distributedRelationId);
 extern int GetLocalGroupId(void);
 extern List * DistTableOidList(void);
+extern Oid LookupShardRelation(int64 shardId, bool missing_ok);
 extern List * ShardPlacementList(uint64 shardId);
 extern void CitusInvalidateRelcacheByRelid(Oid relationId);
 extern void CitusInvalidateRelcacheByShardId(int64 shardId);
@@ -151,6 +152,8 @@ extern Oid CitusReadIntermediateResultFuncId(void);
 extern Oid CitusExtraDataContainerFuncId(void);
 extern Oid CitusWorkerHashFunctionId(void);
 extern Oid CitusTextSendAsJsonbFunctionId(void);
+extern Oid PgTableVisibleFuncId(void);
+extern Oid CitusTableVisibleFuncId(void);

 /* enum oids */
 extern Oid PrimaryNodeRoleId(void);


@@ -118,9 +118,11 @@ extern void RemoveJobSchema(StringInfo schemaName);
 extern Datum * DeconstructArrayObject(ArrayType *arrayObject);
 extern int32 ArrayObjectCount(ArrayType *arrayObject);
 extern FmgrInfo * GetFunctionInfo(Oid typeId, Oid accessMethodId, int16 procedureId);
+extern uint64 ExtractShardIdFromTableName(const char *tableName, bool missingOk);
 extern List * TableDDLCommandList(const char *nodeName, uint32 nodePort,
 								  const char *tableName);

 /* Function declarations shared with the master planner */
 extern StringInfo TaskFilename(StringInfo directoryName, uint32 taskId);
 extern List * ExecuteRemoteQuery(const char *nodeName, uint32 nodePort, char *runAsUser,


@@ -0,0 +1,22 @@
/*-------------------------------------------------------------------------
*
* worker_shard_visibility.h
* Hide shard names on MX worker nodes.
*
* Copyright (c) 2018, Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#ifndef WORKER_SHARD_VISIBILITY_H
#define WORKER_SHARD_VISIBILITY_H
#include "nodes/nodes.h"
extern bool OverrideTableVisibility;
extern Node * ReplaceTableVisibleFunction(Node *inputNode);
#endif /* WORKER_SHARD_VISIBILITY_H */


@@ -144,6 +144,7 @@ ALTER EXTENSION citus UPDATE TO '7.5-5';
 ALTER EXTENSION citus UPDATE TO '7.5-6';
 ALTER EXTENSION citus UPDATE TO '7.5-7';
 ALTER EXTENSION citus UPDATE TO '8.0-1';
+ALTER EXTENSION citus UPDATE TO '8.0-2';
 -- show running version
 SHOW citus.version;
  citus.version


@@ -50,6 +50,8 @@ btree, for table "public.mx_ddl_table"
 btree, for table "public.mx_ddl_table"

 \c - - - :worker_1_port
+-- make sure we don't break the following tests by hiding the shard names
+SET citus.override_table_visibility TO FALSE;
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
  Column | Type | Modifiers
 ---------+---------+--------------------
@@ -93,6 +95,8 @@ Index "public.ddl_test_index_1220088"
 btree, for table "public.mx_ddl_table_1220088"

 \c - - - :worker_2_port
+-- make sure we don't break the following tests by hiding the shard names
+SET citus.override_table_visibility TO FALSE;
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
  Column | Type | Modifiers
 ---------+---------+--------------------


@@ -0,0 +1,326 @@
--
-- Hide shard names on MX worker nodes
--
SET citus.next_shard_id TO 1130000;
-- make sure that the signatures of citus_table_is_visible
-- and pg_table_is_visible are the same since the logic
-- relies on that
SELECT
proname, proisstrict, proretset, provolatile,
proparallel, pronargs, pronargdefaults ,prorettype,
proargtypes, proacl
FROM
pg_proc
WHERE
proname LIKE '%table_is_visible%'
ORDER BY 1;
proname | proisstrict | proretset | provolatile | proparallel | pronargs | pronargdefaults | prorettype | proargtypes | proacl
------------------------+-------------+-----------+-------------+-------------+----------+-----------------+------------+-------------+--------
citus_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 |
pg_table_is_visible | t | f | s | s | 1 | 0 | 16 | 26 |
(2 rows)
CREATE SCHEMA mx_hide_shard_names;
SET search_path TO 'mx_hide_shard_names';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
-----------------------------
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
start_metadata_sync_to_node
-----------------------------
(1 row)
CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');
create_distributed_table
--------------------------
(1 row)
-- first show that the views do not show
-- any shards on the coordinator as expected
SELECT * FROM citus_shards_on_worker;
Schema | Name | Type | Owner
--------+------+------+-------
(0 rows)
SELECT * FROM citus_shard_indexes_on_worker;
Schema | Name | Type | Owner | Table
--------+------+------+-------+-------
(0 rows)
-- now show that we see the shards, but not the
-- indexes as there are no indexes
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
Schema | Name | Type | Owner
---------------------+--------------------+-------+----------
mx_hide_shard_names | test_table_1130000 | table | postgres
mx_hide_shard_names | test_table_1130002 | table | postgres
(2 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
Schema | Name | Type | Owner | Table
--------+------+------+-------+-------
(0 rows)
-- now create an index
\c - - - :master_port
SET search_path TO 'mx_hide_shard_names';
CREATE INDEX test_index ON mx_hide_shard_names.test_table(id);
-- now show that we see the shards, and the
-- indexes as well
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
Schema | Name | Type | Owner
---------------------+--------------------+-------+----------
mx_hide_shard_names | test_table_1130000 | table | postgres
mx_hide_shard_names | test_table_1130002 | table | postgres
(2 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
Schema | Name | Type | Owner | Table
---------------------+--------------------+-------+----------+--------------------
mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000
mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002
(2 rows)
-- we should be able to select from the shards directly if we
-- know the name of the tables
SELECT count(*) FROM test_table_1130000;
count
-------
0
(1 row)
-- disable the config so that table becomes visible
SELECT pg_table_is_visible('test_table_1130000'::regclass);
pg_table_is_visible
---------------------
f
(1 row)
SET citus.override_table_visibility TO FALSE;
SELECT pg_table_is_visible('test_table_1130000'::regclass);
pg_table_is_visible
---------------------
t
(1 row)
\c - - - :master_port
-- make sure that we're resilient to the edge cases
-- such that the table name includes the shard number
SET search_path TO 'mx_hide_shard_names';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- non-existent shard ids appended to the distributed table name
CREATE TABLE test_table_102008(id int, time date);
SELECT create_distributed_table('test_table_102008', 'id');
create_distributed_table
--------------------------
(1 row)
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
-- existing shard ids appended to a local table name
-- note that we cannot create a distributed or local table
-- with the same name since a table with the same
-- name already exists :)
CREATE TABLE test_table_2_1130000(id int, time date);
SELECT * FROM citus_shards_on_worker ORDER BY 2;
Schema | Name | Type | Owner
---------------------+--------------------------+-------+----------
mx_hide_shard_names | test_table_102008_102012 | table | postgres
mx_hide_shard_names | test_table_102008_102014 | table | postgres
mx_hide_shard_names | test_table_1130000 | table | postgres
mx_hide_shard_names | test_table_1130002 | table | postgres
(4 rows)
\d
List of relations
Schema | Name | Type | Owner
---------------------+----------------------+-------+----------
mx_hide_shard_names | test_table | table | postgres
mx_hide_shard_names | test_table_102008 | table | postgres
mx_hide_shard_names | test_table_2_1130000 | table | postgres
(3 rows)
\c - - - :master_port
-- make sure that we don't mess up the schemas
CREATE SCHEMA mx_hide_shard_names_2;
SET search_path TO 'mx_hide_shard_names_2';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');
create_distributed_table
--------------------------
(1 row)
CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id);
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
Schema | Name | Type | Owner
---------------------+--------------------------+-------+----------
mx_hide_shard_names | test_table_102008_102012 | table | postgres
mx_hide_shard_names | test_table_102008_102014 | table | postgres
mx_hide_shard_names | test_table_1130000 | table | postgres
mx_hide_shard_names | test_table_1130002 | table | postgres
(4 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
Schema | Name | Type | Owner | Table
---------------------+--------------------+-------+----------+--------------------
mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000
mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002
(2 rows)
SET search_path TO 'mx_hide_shard_names_2';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
Schema | Name | Type | Owner
-----------------------+-------------------+-------+----------
mx_hide_shard_names_2 | test_table_102016 | table | postgres
mx_hide_shard_names_2 | test_table_102018 | table | postgres
(2 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
Schema | Name | Type | Owner | Table
-----------------------+-------------------+-------+----------+-------------------
mx_hide_shard_names_2 | test_index_102016 | index | postgres | test_table_102016
mx_hide_shard_names_2 | test_index_102018 | index | postgres | test_table_102018
(2 rows)
SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
Schema | Name | Type | Owner
--------+------+------+-------
(0 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
Schema | Name | Type | Owner | Table
--------+------+------+-------+-------
(0 rows)
-- now try very long table names
\c - - - :master_port
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE SCHEMA mx_hide_shard_names_3;
SET search_path TO 'mx_hide_shard_names_3';
-- Verify that a table name > 56 characters is handled properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
col2 integer not null);
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1');
create_distributed_table
--------------------------
(1 row)
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names_3';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
Schema | Name | Type | Owner
-----------------------+-----------------------------------------------------------------+-------+----------
mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678_e0119164_102020 | table | postgres
mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678_e0119164_102022 | table | postgres
(2 rows)
\d
List of relations
Schema | Name | Type | Owner
-----------------------+-------------------------------------------------------------+-------+----------
mx_hide_shard_names_3 | too_long_12345678901234567890123456789012345678901234567890 | table | postgres
(1 row)
-- now try weird schema names
\c - - - :master_port
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE SCHEMA "CiTuS.TeeN";
SET search_path TO "CiTuS.TeeN";
CREATE TABLE "TeeNTabLE.1!?!"(id int, "TeNANt_Id" int);
CREATE INDEX "MyTenantIndex" ON "CiTuS.TeeN"."TeeNTabLE.1!?!"("TeNANt_Id");
-- create distributed table with weird names
SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id');
create_distributed_table
--------------------------
(1 row)
\c - - - :worker_1_port
SET search_path TO "CiTuS.TeeN";
SELECT * FROM citus_shards_on_worker ORDER BY 2;
Schema | Name | Type | Owner
------------+-----------------------+-------+----------
CiTuS.TeeN | TeeNTabLE.1!?!_102024 | table | postgres
CiTuS.TeeN | TeeNTabLE.1!?!_102026 | table | postgres
(2 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
Schema | Name | Type | Owner | Table
------------+----------------------+-------+----------+-----------------------
CiTuS.TeeN | MyTenantIndex_102024 | index | postgres | TeeNTabLE.1!?!_102024
CiTuS.TeeN | MyTenantIndex_102026 | index | postgres | TeeNTabLE.1!?!_102026
(2 rows)
\d
List of relations
Schema | Name | Type | Owner
------------+----------------+-------+----------
CiTuS.TeeN | TeeNTabLE.1!?! | table | postgres
(1 row)
\di
List of relations
Schema | Name | Type | Owner | Table
------------+---------------+-------+----------+----------------
CiTuS.TeeN | MyTenantIndex | index | postgres | TeeNTabLE.1!?!
(1 row)
-- clean-up
\c - - - :master_port
-- show that common psql functions do not show shards
-- including the ones that are not in the current schema
SET search_path TO 'mx_hide_shard_names';
\d
List of relations
Schema | Name | Type | Owner
---------------------+-------------------+-------+----------
mx_hide_shard_names | test_table | table | postgres
mx_hide_shard_names | test_table_102008 | table | postgres
(2 rows)
\di
List of relations
Schema | Name | Type | Owner | Table
---------------------+------------+-------+----------+------------
mx_hide_shard_names | test_index | index | postgres | test_table
(1 row)
DROP SCHEMA mx_hide_shard_names CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table test_table
drop cascades to table test_table_102008
DROP SCHEMA mx_hide_shard_names_2 CASCADE;
NOTICE: drop cascades to table mx_hide_shard_names_2.test_table
DROP SCHEMA mx_hide_shard_names_3 CASCADE;
NOTICE: drop cascades to table mx_hide_shard_names_3.too_long_12345678901234567890123456789012345678901234567890
DROP SCHEMA "CiTuS.TeeN" CASCADE;
NOTICE: drop cascades to table "CiTuS.TeeN"."TeeNTabLE.1!?!"


@@ -17,6 +17,8 @@ test: multi_extension
 test: multi_cluster_management
 test: multi_test_helpers
+# the following test has to be run sequentially
+test: multi_mx_hide_shard_names
 test: multi_mx_partitioning
 test: multi_mx_create_table
 test: multi_mx_copy_data multi_mx_router_planner


@@ -144,6 +144,7 @@ ALTER EXTENSION citus UPDATE TO '7.5-5';
 ALTER EXTENSION citus UPDATE TO '7.5-6';
 ALTER EXTENSION citus UPDATE TO '7.5-7';
 ALTER EXTENSION citus UPDATE TO '8.0-1';
+ALTER EXTENSION citus UPDATE TO '8.0-2';

 -- show running version
 SHOW citus.version;


@@ -25,6 +25,9 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table':
 \c - - - :worker_1_port

+-- make sure we don't break the following tests by hiding the shard names
+SET citus.override_table_visibility TO FALSE;
+
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
 \d ddl_test*_index
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass;
@@ -32,6 +35,9 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1
 \c - - - :worker_2_port

+-- make sure we don't break the following tests by hiding the shard names
+SET citus.override_table_visibility TO FALSE;
+
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
 \d ddl_test*_index
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass;


@@ -0,0 +1,177 @@
--
-- Hide shard names on MX worker nodes
--
SET citus.next_shard_id TO 1130000;
-- make sure that the signatures of citus_table_is_visible
-- and pg_table_is_visible are the same since the logic
-- relies on that
SELECT
proname, proisstrict, proretset, provolatile,
proparallel, pronargs, pronargdefaults ,prorettype,
proargtypes, proacl
FROM
pg_proc
WHERE
proname LIKE '%table_is_visible%'
ORDER BY 1;
CREATE SCHEMA mx_hide_shard_names;
SET search_path TO 'mx_hide_shard_names';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');
-- first show that the views do not show
-- any shards on the coordinator as expected
SELECT * FROM citus_shards_on_worker;
SELECT * FROM citus_shard_indexes_on_worker;
-- now show that we see the shards, but not the
-- indexes as there are no indexes
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
-- now create an index
\c - - - :master_port
SET search_path TO 'mx_hide_shard_names';
CREATE INDEX test_index ON mx_hide_shard_names.test_table(id);
-- now show that we see the shards, and the
-- indexes as well
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
-- we should be able to select from the shards directly if we
-- know the name of the tables
SELECT count(*) FROM test_table_1130000;
-- disable the config so that table becomes visible
SELECT pg_table_is_visible('test_table_1130000'::regclass);
SET citus.override_table_visibility TO FALSE;
SELECT pg_table_is_visible('test_table_1130000'::regclass);
\c - - - :master_port
-- make sure that we're resilient to the edge cases
-- such that the table name includes the shard number
SET search_path TO 'mx_hide_shard_names';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- non-existent shard ids appended to the distributed table name
CREATE TABLE test_table_102008(id int, time date);
SELECT create_distributed_table('test_table_102008', 'id');
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
-- existing shard ids appended to a local table name
-- note that we cannot create a distributed or local table
-- with the same name since a table with the same
-- name already exists :)
CREATE TABLE test_table_2_1130000(id int, time date);
SELECT * FROM citus_shards_on_worker ORDER BY 2;
\d
\c - - - :master_port
-- make sure that we don't mess up the schemas
CREATE SCHEMA mx_hide_shard_names_2;
SET search_path TO 'mx_hide_shard_names_2';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');
CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id);
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SET search_path TO 'mx_hide_shard_names_2';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
-- now try very long table names
\c - - - :master_port
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE SCHEMA mx_hide_shard_names_3;
SET search_path TO 'mx_hide_shard_names_3';
-- Verify that a table name > 56 characters is handled properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
col2 integer not null);
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1');
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names_3';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
\d
-- now try weird schema names
\c - - - :master_port
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE SCHEMA "CiTuS.TeeN";
SET search_path TO "CiTuS.TeeN";
CREATE TABLE "TeeNTabLE.1!?!"(id int, "TeNANt_Id" int);
CREATE INDEX "MyTenantIndex" ON "CiTuS.TeeN"."TeeNTabLE.1!?!"("TeNANt_Id");
-- create distributed table with weird names
SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id');
\c - - - :worker_1_port
SET search_path TO "CiTuS.TeeN";
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
\d
\di
-- clean-up
\c - - - :master_port
-- show that common psql functions do not show shards
-- including the ones that are not in the current schema
SET search_path TO 'mx_hide_shard_names';
\d
\di
DROP SCHEMA mx_hide_shard_names CASCADE;
DROP SCHEMA mx_hide_shard_names_2 CASCADE;
DROP SCHEMA mx_hide_shard_names_3 CASCADE;
DROP SCHEMA "CiTuS.TeeN" CASCADE;