Merge branch 'master' into velioglu/table_wo_seq_prototype

velioglu/wo_seq_test_1
Burak Velioglu 2022-01-21 00:56:12 +03:00
commit 3bb1a1fe6c
No known key found for this signature in database
GPG Key ID: F6827E620F6549C6
63 changed files with 3778 additions and 1784 deletions

View File

@ -91,6 +91,7 @@ static void DistributeFunctionWithDistributionArgument(RegProcedure funcOid,
char *distributionArgumentName,
Oid distributionArgumentOid,
char *colocateWithTableName,
bool *forceDelegationAddress,
const ObjectAddress *
functionAddress);
static void DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid,
@ -124,6 +125,8 @@ create_distributed_function(PG_FUNCTION_ARGS)
char *distributionArgumentName = NULL;
char *colocateWithTableName = NULL;
bool *forceDelegationAddress = NULL;
bool forceDelegation = false;
/* if called on NULL input, error out */
if (funcOid == InvalidOid)
@ -169,6 +172,17 @@ create_distributed_function(PG_FUNCTION_ARGS)
}
}
/* check if the force_delegation flag is explicitly set (default is NULL) */
if (PG_ARGISNULL(3))
{
forceDelegationAddress = NULL;
}
else
{
forceDelegation = PG_GETARG_BOOL(3);
forceDelegationAddress = &forceDelegation;
}
EnsureCoordinator();
EnsureFunctionOwner(funcOid);
@ -204,6 +218,7 @@ create_distributed_function(PG_FUNCTION_ARGS)
DistributeFunctionWithDistributionArgument(funcOid, distributionArgumentName,
distributionArgumentOid,
colocateWithTableName,
forceDelegationAddress,
&functionAddress);
}
else if (!colocatedWithReferenceTable)
@ -265,6 +280,7 @@ DistributeFunctionWithDistributionArgument(RegProcedure funcOid,
char *distributionArgumentName,
Oid distributionArgumentOid,
char *colocateWithTableName,
bool *forceDelegationAddress,
const ObjectAddress *functionAddress)
{
/* get the argument index, or error out if we cannot find a valid index */
@ -279,7 +295,8 @@ DistributeFunctionWithDistributionArgument(RegProcedure funcOid,
/* record the distribution argument and colocationId */
UpdateFunctionDistributionInfo(functionAddress, &distributionArgumentIndex,
&colocationId);
&colocationId,
forceDelegationAddress);
}
@ -310,7 +327,7 @@ DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid,
}
/* set distribution argument and colocationId to NULL */
UpdateFunctionDistributionInfo(functionAddress, NULL, NULL);
UpdateFunctionDistributionInfo(functionAddress, NULL, NULL, NULL);
}
@ -327,7 +344,8 @@ DistributeFunctionColocatedWithReferenceTable(const ObjectAddress *functionAddre
/* set distribution argument to NULL and colocationId to the reference table colocation id */
int *distributionArgumentIndex = NULL;
UpdateFunctionDistributionInfo(functionAddress, distributionArgumentIndex,
&colocationId);
&colocationId,
NULL);
}
@ -596,7 +614,8 @@ EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid distributionColumnTyp
void
UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
int *distribution_argument_index,
int *colocationId)
int *colocationId,
bool *forceDelegation)
{
const bool indexOK = true;
@ -655,6 +674,18 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
isnull[Anum_pg_dist_object_colocationid - 1] = true;
}
replace[Anum_pg_dist_object_force_delegation - 1] = true;
if (forceDelegation != NULL)
{
values[Anum_pg_dist_object_force_delegation - 1] = BoolGetDatum(
*forceDelegation);
isnull[Anum_pg_dist_object_force_delegation - 1] = false;
}
else
{
isnull[Anum_pg_dist_object_force_delegation - 1] = true;
}
heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace);
CatalogTupleUpdate(pgDistObjectRel, &heapTuple->t_self, heapTuple);
@ -672,6 +703,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
List *objectAddressList = list_make1((ObjectAddress *) distAddress);
List *distArgumentIndexList = NIL;
List *colocationIdList = NIL;
List *forceDelegationList = NIL;
if (distribution_argument_index == NULL)
{
@ -691,10 +723,20 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
colocationIdList = list_make1_int(*colocationId);
}
if (forceDelegation == NULL)
{
forceDelegationList = list_make1_int(NO_FORCE_PUSHDOWN);
}
else
{
forceDelegationList = list_make1_int(*forceDelegation);
}
char *workerPgDistObjectUpdateCommand =
MarkObjectsDistributedCreateCommand(objectAddressList,
distArgumentIndexList,
colocationIdList);
colocationIdList,
forceDelegationList);
SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand);
}
}

View File

@ -40,6 +40,7 @@
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/fmgrprotos.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/ruleutils.h"
@ -447,8 +448,20 @@ GetExplicitStatisticsCommandList(Oid relationId)
foreach_oid(statisticsId, statisticsIdList)
{
/* we need create commands for already created stats before distribution */
char *createStatisticsCommand = pg_get_statisticsobj_worker_compat(statisticsId,
false, false);
Datum commandText = DirectFunctionCall1(pg_get_statisticsobjdef,
ObjectIdGetDatum(statisticsId));
/*
* pg_get_statisticsobjdef doesn't throw an error if there is no such
* statistics object, be on the safe side.
*/
if (DatumGetPointer(commandText) == NULL)
{
ereport(ERROR, (errmsg("statistics with oid %u does not exist",
statisticsId)));
}
char *createStatisticsCommand = TextDatumGetCString(commandText);
explicitStatisticsCommandList =
lappend(explicitStatisticsCommandList,

View File

@ -17,7 +17,6 @@
#include "catalog/namespace.h"
#include "catalog/pg_trigger.h"
#include "commands/trigger.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/coordinator_protocol.h"
@ -27,6 +26,8 @@
#include "distributed/namespace_utils.h"
#include "distributed/shard_utils.h"
#include "distributed/worker_protocol.h"
#include "utils/builtins.h"
#include "utils/fmgrprotos.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
@ -68,7 +69,22 @@ GetExplicitTriggerCommandList(Oid relationId)
Oid triggerId = InvalidOid;
foreach_oid(triggerId, triggerIdList)
{
char *createTriggerCommand = pg_get_triggerdef_command(triggerId);
bool prettyOutput = false;
Datum commandText = DirectFunctionCall2(pg_get_triggerdef_ext,
ObjectIdGetDatum(triggerId),
BoolGetDatum(prettyOutput));
/*
* pg_get_triggerdef_ext doesn't throw an error if there is no such
* trigger, be on the safe side.
*/
if (DatumGetPointer(commandText) == NULL)
{
ereport(ERROR, (errmsg("trigger with oid %u does not exist",
triggerId)));
}
char *createTriggerCommand = TextDatumGetCString(commandText);
createTriggerCommandList = lappend(
createTriggerCommandList,

View File

@ -66,6 +66,7 @@
#include "distributed/resource_lock.h"
#include "distributed/transmit.h"
#include "distributed/version_compat.h"
#include "distributed/worker_shard_visibility.h"
#include "distributed/worker_transaction.h"
#include "foreign/foreign.h"
#include "lib/stringinfo.h"

View File

@ -92,6 +92,7 @@ IsSettingSafeToPropagate(char *name)
{
/* if this list grows considerably we should switch to bsearch */
const char *skipSettings[] = {
"application_name",
"citus.propagate_set_commands",
"client_encoding",
"exit_on_error",

View File

@ -103,7 +103,6 @@
/* Pretty flags */
#define PRETTYFLAG_PAREN 0x0001
#define PRETTYFLAG_INDENT 0x0002
#define PRETTYFLAG_SCHEMA 0x0004
/* Default line length for pretty-print wrapping: 0 means wrap always */
#define WRAP_COLUMN_DEFAULT 0
@ -111,7 +110,6 @@
/* macros to test if pretty action needed */
#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN)
#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT)
#define PRETTY_SCHEMA(context) ((context)->prettyFlags & PRETTYFLAG_SCHEMA)
/* ----------
@ -429,9 +427,6 @@ static void get_from_clause_coldeflist(RangeTblFunction *rtfunc,
deparse_context *context);
static void get_tablesample_def(TableSampleClause *tablesample,
deparse_context *context);
char *pg_get_statisticsobj_worker(Oid statextid, bool missing_ok);
static char *pg_get_triggerdef_worker(Oid trigid, bool pretty);
static void set_simple_column_names(deparse_namespace *dpns);
static void get_opclass_name(Oid opclass, Oid actual_datatype,
StringInfo buf);
static Node *processIndirection(Node *node, deparse_context *context);
@ -7519,428 +7514,6 @@ get_tablesample_def(TableSampleClause *tablesample, deparse_context *context)
}
}
/*
 * pg_get_triggerdef_command returns the CREATE TRIGGER command for the
 * trigger with the given OID, without pretty-printing (so the target
 * table is always schema-qualified). Returns a palloc'd string, or NULL
 * if no such trigger exists (see pg_get_triggerdef_worker).
 */
char *
pg_get_triggerdef_command(Oid triggerId)
{
Assert(OidIsValid(triggerId));
/* no need to have pretty SQL command */
bool prettyOutput = false;
return pg_get_triggerdef_worker(triggerId, prettyOutput);
}
/*
 * pg_get_statisticsobj_worker
 *		Deparse the extended-statistics object with OID statextid into an
 *		equivalent CREATE STATISTICS command.
 *
 * Returns a palloc'd string, or NULL when the object does not exist and
 * missing_ok is true; otherwise a missing object raises an error.
 */
char *
pg_get_statisticsobj_worker(Oid statextid, bool missing_ok)
{
StringInfoData buf;
int colno;
bool isnull;
int i;
HeapTuple statexttup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statextid));
if (!HeapTupleIsValid(statexttup))
{
if (missing_ok)
{
return NULL;
}
elog(ERROR, "cache lookup failed for statistics object %u", statextid);
}
Form_pg_statistic_ext statextrec = (Form_pg_statistic_ext) GETSTRUCT(statexttup);
initStringInfo(&buf);
/* statistics object name is emitted schema-qualified */
char *nsp = get_namespace_name(statextrec->stxnamespace);
appendStringInfo(&buf, "CREATE STATISTICS %s",
quote_qualified_identifier(nsp,
NameStr(statextrec->stxname)));
/*
 * Decode the stxkind column so that we know which stats types to print.
 */
Datum datum = SysCacheGetAttr(STATEXTOID, statexttup,
Anum_pg_statistic_ext_stxkind, &isnull);
Assert(!isnull);
ArrayType *arr = DatumGetArrayTypeP(datum);
if (ARR_NDIM(arr) != 1 ||
ARR_HASNULL(arr) ||
ARR_ELEMTYPE(arr) != CHAROID)
{
elog(ERROR, "stxkind is not a 1-D char array");
}
char *enabled = (char *) ARR_DATA_PTR(arr);
bool ndistinct_enabled = false;
bool dependencies_enabled = false;
bool mcv_enabled = false;
for (i = 0; i < ARR_DIMS(arr)[0]; i++)
{
if (enabled[i] == STATS_EXT_NDISTINCT)
{
ndistinct_enabled = true;
}
if (enabled[i] == STATS_EXT_DEPENDENCIES)
{
dependencies_enabled = true;
}
if (enabled[i] == STATS_EXT_MCV)
{
mcv_enabled = true;
}
}
/*
 * If any option is disabled, then we'll need to append the types clause
 * to show which options are enabled. We omit the types clause on purpose
 * when all options are enabled, so a pg_dump/pg_restore will create all
 * statistics types on a newer postgres version, if the statistics had all
 * options enabled on the original version.
 */
if (!ndistinct_enabled || !dependencies_enabled || !mcv_enabled)
{
bool gotone = false;
appendStringInfoString(&buf, " (");
if (ndistinct_enabled)
{
appendStringInfoString(&buf, "ndistinct");
gotone = true;
}
if (dependencies_enabled)
{
appendStringInfo(&buf, "%sdependencies", gotone ? ", " : "");
gotone = true;
}
if (mcv_enabled)
{
appendStringInfo(&buf, "%smcv", gotone ? ", " : "");
}
appendStringInfoChar(&buf, ')');
}
/* list the statistics columns by name, comma-separated */
appendStringInfoString(&buf, " ON ");
for (colno = 0; colno < statextrec->stxkeys.dim1; colno++)
{
AttrNumber attnum = statextrec->stxkeys.values[colno];
if (colno > 0)
{
appendStringInfoString(&buf, ", ");
}
char *attname = get_attname(statextrec->stxrelid, attnum, false);
appendStringInfoString(&buf, quote_identifier(attname));
}
appendStringInfo(&buf, " FROM %s",
generate_relation_name(statextrec->stxrelid, NIL));
ReleaseSysCache(statexttup);
return buf.data;
}
/*
 * pg_get_triggerdef_worker
 *		Deparse the pg_trigger row with OID trigid into an equivalent
 *		CREATE [CONSTRAINT] TRIGGER command.
 *
 * Returns a palloc'd string, or NULL if no such trigger exists. When
 * pretty is false the target table name is always schema-qualified, so
 * the resulting command is safe regardless of search_path.
 */
static char *
pg_get_triggerdef_worker(Oid trigid, bool pretty)
{
HeapTuple ht_trig;
Form_pg_trigger trigrec;
StringInfoData buf;
Relation tgrel;
ScanKeyData skey[1];
SysScanDesc tgscan;
int findx = 0;
char *tgname;
char *tgoldtable;
char *tgnewtable;
Oid argtypes[1]; /* dummy */
Datum value;
bool isnull;
/*
 * Fetch the pg_trigger tuple by the Oid of the trigger
 */
tgrel = table_open(TriggerRelationId, AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_trigger_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(trigid));
tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
NULL, 1, skey);
ht_trig = systable_getnext(tgscan);
if (!HeapTupleIsValid(ht_trig))
{
systable_endscan(tgscan);
table_close(tgrel, AccessShareLock);
return NULL;
}
trigrec = (Form_pg_trigger) GETSTRUCT(ht_trig);
/*
 * Start the trigger definition. Note that the trigger's name should never
 * be schema-qualified, but the trigger rel's name may be.
 */
initStringInfo(&buf);
tgname = NameStr(trigrec->tgname);
appendStringInfo(&buf, "CREATE %sTRIGGER %s ",
OidIsValid(trigrec->tgconstraint) ? "CONSTRAINT " : "",
quote_identifier(tgname));
if (TRIGGER_FOR_BEFORE(trigrec->tgtype))
appendStringInfoString(&buf, "BEFORE");
else if (TRIGGER_FOR_AFTER(trigrec->tgtype))
appendStringInfoString(&buf, "AFTER");
else if (TRIGGER_FOR_INSTEAD(trigrec->tgtype))
appendStringInfoString(&buf, "INSTEAD OF");
else
elog(ERROR, "unexpected tgtype value: %d", trigrec->tgtype);
/*
 * Emit the OR-separated event list (INSERT / DELETE / UPDATE [OF cols] /
 * TRUNCATE); findx counts events already emitted.
 */
if (TRIGGER_FOR_INSERT(trigrec->tgtype))
{
appendStringInfoString(&buf, " INSERT");
findx++;
}
if (TRIGGER_FOR_DELETE(trigrec->tgtype))
{
if (findx > 0)
appendStringInfoString(&buf, " OR DELETE");
else
appendStringInfoString(&buf, " DELETE");
findx++;
}
if (TRIGGER_FOR_UPDATE(trigrec->tgtype))
{
if (findx > 0)
appendStringInfoString(&buf, " OR UPDATE");
else
appendStringInfoString(&buf, " UPDATE");
findx++;
/* tgattr is first var-width field, so OK to access directly */
if (trigrec->tgattr.dim1 > 0)
{
int i;
appendStringInfoString(&buf, " OF ");
for (i = 0; i < trigrec->tgattr.dim1; i++)
{
char *attname;
if (i > 0)
appendStringInfoString(&buf, ", ");
attname = get_attname(trigrec->tgrelid,
trigrec->tgattr.values[i], false);
appendStringInfoString(&buf, quote_identifier(attname));
}
}
}
if (TRIGGER_FOR_TRUNCATE(trigrec->tgtype))
{
if (findx > 0)
appendStringInfoString(&buf, " OR TRUNCATE");
else
appendStringInfoString(&buf, " TRUNCATE");
findx++;
}
/*
 * In non-pretty mode, always schema-qualify the target table name for
 * safety. In pretty mode, schema-qualify only if not visible.
 */
appendStringInfo(&buf, " ON %s ",
pretty ?
generate_relation_name(trigrec->tgrelid, NIL) :
generate_qualified_relation_name(trigrec->tgrelid));
/* constraint triggers carry FROM / DEFERRABLE options */
if (OidIsValid(trigrec->tgconstraint))
{
if (OidIsValid(trigrec->tgconstrrelid))
appendStringInfo(&buf, "FROM %s ",
generate_relation_name(trigrec->tgconstrrelid, NIL));
if (!trigrec->tgdeferrable)
appendStringInfoString(&buf, "NOT ");
appendStringInfoString(&buf, "DEFERRABLE INITIALLY ");
if (trigrec->tginitdeferred)
appendStringInfoString(&buf, "DEFERRED ");
else
appendStringInfoString(&buf, "IMMEDIATE ");
}
/* transition table names (NULL when not used by this trigger) */
value = fastgetattr(ht_trig, Anum_pg_trigger_tgoldtable,
tgrel->rd_att, &isnull);
if (!isnull)
tgoldtable = NameStr(*DatumGetName(value));
else
tgoldtable = NULL;
value = fastgetattr(ht_trig, Anum_pg_trigger_tgnewtable,
tgrel->rd_att, &isnull);
if (!isnull)
tgnewtable = NameStr(*DatumGetName(value));
else
tgnewtable = NULL;
if (tgoldtable != NULL || tgnewtable != NULL)
{
appendStringInfoString(&buf, "REFERENCING ");
if (tgoldtable != NULL)
appendStringInfo(&buf, "OLD TABLE AS %s ",
quote_identifier(tgoldtable));
if (tgnewtable != NULL)
appendStringInfo(&buf, "NEW TABLE AS %s ",
quote_identifier(tgnewtable));
}
if (TRIGGER_FOR_ROW(trigrec->tgtype))
appendStringInfoString(&buf, "FOR EACH ROW ");
else
appendStringInfoString(&buf, "FOR EACH STATEMENT ");
/* If the trigger has a WHEN qualification, add that */
value = fastgetattr(ht_trig, Anum_pg_trigger_tgqual,
tgrel->rd_att, &isnull);
if (!isnull)
{
Node *qual;
char relkind;
deparse_context context;
deparse_namespace dpns;
RangeTblEntry *oldrte;
RangeTblEntry *newrte;
appendStringInfoString(&buf, "WHEN (");
qual = stringToNode(TextDatumGetCString(value));
relkind = get_rel_relkind(trigrec->tgrelid);
/* Build minimal OLD and NEW RTEs for the rel */
oldrte = makeNode(RangeTblEntry);
oldrte->rtekind = RTE_RELATION;
oldrte->relid = trigrec->tgrelid;
oldrte->relkind = relkind;
oldrte->rellockmode = AccessShareLock;
oldrte->alias = makeAlias("old", NIL);
oldrte->eref = oldrte->alias;
oldrte->lateral = false;
oldrte->inh = false;
oldrte->inFromCl = true;
newrte = makeNode(RangeTblEntry);
newrte->rtekind = RTE_RELATION;
newrte->relid = trigrec->tgrelid;
newrte->relkind = relkind;
newrte->rellockmode = AccessShareLock;
newrte->alias = makeAlias("new", NIL);
newrte->eref = newrte->alias;
newrte->lateral = false;
newrte->inh = false;
newrte->inFromCl = true;
/* Build two-element rtable */
memset(&dpns, 0, sizeof(dpns));
dpns.rtable = list_make2(oldrte, newrte);
dpns.ctes = NIL;
set_rtable_names(&dpns, NIL, NULL);
set_simple_column_names(&dpns);
/* Set up context with one-deep namespace stack */
context.buf = &buf;
context.namespaces = list_make1(&dpns);
context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = true;
context.prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = PRETTYINDENT_STD;
context.special_exprkind = EXPR_KIND_NONE;
get_rule_expr(qual, &context, false);
appendStringInfoString(&buf, ") ");
}
appendStringInfo(&buf, "EXECUTE FUNCTION %s(",
generate_function_name(trigrec->tgfoid, 0,
NIL, argtypes,
false, NULL, EXPR_KIND_NONE));
if (trigrec->tgnargs > 0)
{
char *p;
int i;
value = fastgetattr(ht_trig, Anum_pg_trigger_tgargs,
tgrel->rd_att, &isnull);
if (isnull)
elog(ERROR, "tgargs is null for trigger %u", trigid);
/* tgargs is a bytea holding NUL-separated argument strings */
p = (char *) VARDATA_ANY(DatumGetByteaPP(value));
for (i = 0; i < trigrec->tgnargs; i++)
{
if (i > 0)
appendStringInfoString(&buf, ", ");
simple_quote_literal(&buf, p);
/* advance p to next string embedded in tgargs */
while (*p)
p++;
p++;
}
}
/* We deliberately do not put semi-colon at end */
appendStringInfoChar(&buf, ')');
/* Clean up */
systable_endscan(tgscan);
table_close(tgrel, AccessShareLock);
return buf.data;
}
/*
 * set_simple_column_names: fill in column aliases for non-query situations
 *
 * This handles EXPLAIN and cases where we only have relation RTEs. Without
 * a join tree, we can't do anything smart about join RTEs, but we don't
 * need to (note that EXPLAIN should never see join alias Vars anyway).
 * If we do hit a join RTE we'll just process it like a non-table base RTE.
 */
static void
set_simple_column_names(deparse_namespace *dpns)
{
ListCell *lc;
ListCell *lc2;
/* Initialize dpns->rtable_columns to contain zeroed structs */
dpns->rtable_columns = NIL;
/* one deparse_columns entry per RTE, allocated zeroed */
while (list_length(dpns->rtable_columns) < list_length(dpns->rtable))
dpns->rtable_columns = lappend(dpns->rtable_columns,
palloc0(sizeof(deparse_columns)));
/* Assign unique column aliases within each RTE */
forboth(lc, dpns->rtable, lc2, dpns->rtable_columns)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
deparse_columns *colinfo = (deparse_columns *) lfirst(lc2);
set_relation_column_names(dpns, rte, colinfo);
}
}
/*
* get_opclass_name - fetch name of an index operator class

View File

@ -103,7 +103,6 @@
/* Pretty flags */
#define PRETTYFLAG_PAREN 0x0001
#define PRETTYFLAG_INDENT 0x0002
#define PRETTYFLAG_SCHEMA 0x0004
/* Default line length for pretty-print wrapping: 0 means wrap always */
#define WRAP_COLUMN_DEFAULT 0
@ -111,7 +110,6 @@
/* macros to test if pretty action needed */
#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN)
#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT)
#define PRETTY_SCHEMA(context) ((context)->prettyFlags & PRETTYFLAG_SCHEMA)
/* ----------
@ -434,9 +432,6 @@ static void get_from_clause_coldeflist(RangeTblFunction *rtfunc,
deparse_context *context);
static void get_tablesample_def(TableSampleClause *tablesample,
deparse_context *context);
char *pg_get_statisticsobj_worker(Oid statextid, bool missing_ok);
static char *pg_get_triggerdef_worker(Oid trigid, bool pretty);
static void set_simple_column_names(deparse_namespace *dpns);
static void get_opclass_name(Oid opclass, Oid actual_datatype,
StringInfo buf);
static Node *processIndirection(Node *node, deparse_context *context);
@ -7577,429 +7572,6 @@ get_tablesample_def(TableSampleClause *tablesample, deparse_context *context)
}
}
/*
 * pg_get_triggerdef_command returns the CREATE TRIGGER command for the
 * trigger with the given OID, without pretty-printing (so the target
 * table is always schema-qualified). Returns a palloc'd string, or NULL
 * if no such trigger exists (see pg_get_triggerdef_worker).
 */
char *
pg_get_triggerdef_command(Oid triggerId)
{
Assert(OidIsValid(triggerId));
/* no need to have pretty SQL command */
bool prettyOutput = false;
return pg_get_triggerdef_worker(triggerId, prettyOutput);
}
/*
 * pg_get_statisticsobj_worker
 *		Deparse the extended-statistics object with OID statextid into an
 *		equivalent CREATE STATISTICS command.
 *
 * Returns a palloc'd string, or NULL when the object does not exist and
 * missing_ok is true; otherwise a missing object raises an error.
 */
char *
pg_get_statisticsobj_worker(Oid statextid, bool missing_ok)
{
StringInfoData buf;
int colno;
bool isnull;
int i;
HeapTuple statexttup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statextid));
if (!HeapTupleIsValid(statexttup))
{
if (missing_ok)
{
return NULL;
}
elog(ERROR, "cache lookup failed for statistics object %u", statextid);
}
Form_pg_statistic_ext statextrec = (Form_pg_statistic_ext) GETSTRUCT(statexttup);
initStringInfo(&buf);
/* statistics object name is emitted schema-qualified */
char *nsp = get_namespace_name(statextrec->stxnamespace);
appendStringInfo(&buf, "CREATE STATISTICS %s",
quote_qualified_identifier(nsp,
NameStr(statextrec->stxname)));
/*
 * Decode the stxkind column so that we know which stats types to print.
 */
Datum datum = SysCacheGetAttr(STATEXTOID, statexttup,
Anum_pg_statistic_ext_stxkind, &isnull);
Assert(!isnull);
ArrayType *arr = DatumGetArrayTypeP(datum);
if (ARR_NDIM(arr) != 1 ||
ARR_HASNULL(arr) ||
ARR_ELEMTYPE(arr) != CHAROID)
{
elog(ERROR, "stxkind is not a 1-D char array");
}
char *enabled = (char *) ARR_DATA_PTR(arr);
bool ndistinct_enabled = false;
bool dependencies_enabled = false;
bool mcv_enabled = false;
for (i = 0; i < ARR_DIMS(arr)[0]; i++)
{
if (enabled[i] == STATS_EXT_NDISTINCT)
{
ndistinct_enabled = true;
}
if (enabled[i] == STATS_EXT_DEPENDENCIES)
{
dependencies_enabled = true;
}
if (enabled[i] == STATS_EXT_MCV)
{
mcv_enabled = true;
}
}
/*
 * If any option is disabled, then we'll need to append the types clause
 * to show which options are enabled. We omit the types clause on purpose
 * when all options are enabled, so a pg_dump/pg_restore will create all
 * statistics types on a newer postgres version, if the statistics had all
 * options enabled on the original version.
 */
if (!ndistinct_enabled || !dependencies_enabled || !mcv_enabled)
{
bool gotone = false;
appendStringInfoString(&buf, " (");
if (ndistinct_enabled)
{
appendStringInfoString(&buf, "ndistinct");
gotone = true;
}
if (dependencies_enabled)
{
appendStringInfo(&buf, "%sdependencies", gotone ? ", " : "");
gotone = true;
}
if (mcv_enabled)
{
appendStringInfo(&buf, "%smcv", gotone ? ", " : "");
}
appendStringInfoChar(&buf, ')');
}
/* list the statistics columns by name, comma-separated */
appendStringInfoString(&buf, " ON ");
for (colno = 0; colno < statextrec->stxkeys.dim1; colno++)
{
AttrNumber attnum = statextrec->stxkeys.values[colno];
if (colno > 0)
{
appendStringInfoString(&buf, ", ");
}
char *attname = get_attname(statextrec->stxrelid, attnum, false);
appendStringInfoString(&buf, quote_identifier(attname));
}
appendStringInfo(&buf, " FROM %s",
generate_relation_name(statextrec->stxrelid, NIL));
ReleaseSysCache(statexttup);
return buf.data;
}
/*
 * pg_get_triggerdef_worker
 *		Deparse the pg_trigger row with OID trigid into an equivalent
 *		CREATE [CONSTRAINT] TRIGGER command.
 *
 * Returns a palloc'd string, or NULL if no such trigger exists. When
 * pretty is false the target table name is always schema-qualified, so
 * the resulting command is safe regardless of search_path.
 */
static char *
pg_get_triggerdef_worker(Oid trigid, bool pretty)
{
HeapTuple ht_trig;
Form_pg_trigger trigrec;
StringInfoData buf;
Relation tgrel;
ScanKeyData skey[1];
SysScanDesc tgscan;
int findx = 0;
char *tgname;
char *tgoldtable;
char *tgnewtable;
Oid argtypes[1]; /* dummy */
Datum value;
bool isnull;
/*
 * Fetch the pg_trigger tuple by the Oid of the trigger
 */
tgrel = table_open(TriggerRelationId, AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_trigger_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(trigid));
tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
NULL, 1, skey);
ht_trig = systable_getnext(tgscan);
if (!HeapTupleIsValid(ht_trig))
{
systable_endscan(tgscan);
table_close(tgrel, AccessShareLock);
return NULL;
}
trigrec = (Form_pg_trigger) GETSTRUCT(ht_trig);
/*
 * Start the trigger definition. Note that the trigger's name should never
 * be schema-qualified, but the trigger rel's name may be.
 */
initStringInfo(&buf);
tgname = NameStr(trigrec->tgname);
appendStringInfo(&buf, "CREATE %sTRIGGER %s ",
OidIsValid(trigrec->tgconstraint) ? "CONSTRAINT " : "",
quote_identifier(tgname));
if (TRIGGER_FOR_BEFORE(trigrec->tgtype))
appendStringInfoString(&buf, "BEFORE");
else if (TRIGGER_FOR_AFTER(trigrec->tgtype))
appendStringInfoString(&buf, "AFTER");
else if (TRIGGER_FOR_INSTEAD(trigrec->tgtype))
appendStringInfoString(&buf, "INSTEAD OF");
else
elog(ERROR, "unexpected tgtype value: %d", trigrec->tgtype);
/*
 * Emit the OR-separated event list (INSERT / DELETE / UPDATE [OF cols] /
 * TRUNCATE); findx counts events already emitted.
 */
if (TRIGGER_FOR_INSERT(trigrec->tgtype))
{
appendStringInfoString(&buf, " INSERT");
findx++;
}
if (TRIGGER_FOR_DELETE(trigrec->tgtype))
{
if (findx > 0)
appendStringInfoString(&buf, " OR DELETE");
else
appendStringInfoString(&buf, " DELETE");
findx++;
}
if (TRIGGER_FOR_UPDATE(trigrec->tgtype))
{
if (findx > 0)
appendStringInfoString(&buf, " OR UPDATE");
else
appendStringInfoString(&buf, " UPDATE");
findx++;
/* tgattr is first var-width field, so OK to access directly */
if (trigrec->tgattr.dim1 > 0)
{
int i;
appendStringInfoString(&buf, " OF ");
for (i = 0; i < trigrec->tgattr.dim1; i++)
{
char *attname;
if (i > 0)
appendStringInfoString(&buf, ", ");
attname = get_attname(trigrec->tgrelid,
trigrec->tgattr.values[i], false);
appendStringInfoString(&buf, quote_identifier(attname));
}
}
}
if (TRIGGER_FOR_TRUNCATE(trigrec->tgtype))
{
if (findx > 0)
appendStringInfoString(&buf, " OR TRUNCATE");
else
appendStringInfoString(&buf, " TRUNCATE");
findx++;
}
/*
 * In non-pretty mode, always schema-qualify the target table name for
 * safety. In pretty mode, schema-qualify only if not visible.
 */
appendStringInfo(&buf, " ON %s ",
pretty ?
generate_relation_name(trigrec->tgrelid, NIL) :
generate_qualified_relation_name(trigrec->tgrelid));
/* constraint triggers carry FROM / DEFERRABLE options */
if (OidIsValid(trigrec->tgconstraint))
{
if (OidIsValid(trigrec->tgconstrrelid))
appendStringInfo(&buf, "FROM %s ",
generate_relation_name(trigrec->tgconstrrelid, NIL));
if (!trigrec->tgdeferrable)
appendStringInfoString(&buf, "NOT ");
appendStringInfoString(&buf, "DEFERRABLE INITIALLY ");
if (trigrec->tginitdeferred)
appendStringInfoString(&buf, "DEFERRED ");
else
appendStringInfoString(&buf, "IMMEDIATE ");
}
/* transition table names (NULL when not used by this trigger) */
value = fastgetattr(ht_trig, Anum_pg_trigger_tgoldtable,
tgrel->rd_att, &isnull);
if (!isnull)
tgoldtable = NameStr(*DatumGetName(value));
else
tgoldtable = NULL;
value = fastgetattr(ht_trig, Anum_pg_trigger_tgnewtable,
tgrel->rd_att, &isnull);
if (!isnull)
tgnewtable = NameStr(*DatumGetName(value));
else
tgnewtable = NULL;
if (tgoldtable != NULL || tgnewtable != NULL)
{
appendStringInfoString(&buf, "REFERENCING ");
if (tgoldtable != NULL)
appendStringInfo(&buf, "OLD TABLE AS %s ",
quote_identifier(tgoldtable));
if (tgnewtable != NULL)
appendStringInfo(&buf, "NEW TABLE AS %s ",
quote_identifier(tgnewtable));
}
if (TRIGGER_FOR_ROW(trigrec->tgtype))
appendStringInfoString(&buf, "FOR EACH ROW ");
else
appendStringInfoString(&buf, "FOR EACH STATEMENT ");
/* If the trigger has a WHEN qualification, add that */
value = fastgetattr(ht_trig, Anum_pg_trigger_tgqual,
tgrel->rd_att, &isnull);
if (!isnull)
{
Node *qual;
char relkind;
deparse_context context;
deparse_namespace dpns;
RangeTblEntry *oldrte;
RangeTblEntry *newrte;
appendStringInfoString(&buf, "WHEN (");
qual = stringToNode(TextDatumGetCString(value));
relkind = get_rel_relkind(trigrec->tgrelid);
/* Build minimal OLD and NEW RTEs for the rel */
oldrte = makeNode(RangeTblEntry);
oldrte->rtekind = RTE_RELATION;
oldrte->relid = trigrec->tgrelid;
oldrte->relkind = relkind;
oldrte->rellockmode = AccessShareLock;
oldrte->alias = makeAlias("old", NIL);
oldrte->eref = oldrte->alias;
oldrte->lateral = false;
oldrte->inh = false;
oldrte->inFromCl = true;
newrte = makeNode(RangeTblEntry);
newrte->rtekind = RTE_RELATION;
newrte->relid = trigrec->tgrelid;
newrte->relkind = relkind;
newrte->rellockmode = AccessShareLock;
newrte->alias = makeAlias("new", NIL);
newrte->eref = newrte->alias;
newrte->lateral = false;
newrte->inh = false;
newrte->inFromCl = true;
/* Build two-element rtable */
memset(&dpns, 0, sizeof(dpns));
dpns.rtable = list_make2(oldrte, newrte);
dpns.ctes = NIL;
set_rtable_names(&dpns, NIL, NULL);
set_simple_column_names(&dpns);
/* Set up context with one-deep namespace stack */
context.buf = &buf;
context.namespaces = list_make1(&dpns);
context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = true;
context.prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = PRETTYINDENT_STD;
context.special_exprkind = EXPR_KIND_NONE;
context.appendparents = NULL;
get_rule_expr(qual, &context, false);
appendStringInfoString(&buf, ") ");
}
appendStringInfo(&buf, "EXECUTE FUNCTION %s(",
generate_function_name(trigrec->tgfoid, 0,
NIL, argtypes,
false, NULL, EXPR_KIND_NONE));
if (trigrec->tgnargs > 0)
{
char *p;
int i;
value = fastgetattr(ht_trig, Anum_pg_trigger_tgargs,
tgrel->rd_att, &isnull);
if (isnull)
elog(ERROR, "tgargs is null for trigger %u", trigid);
/* tgargs is a bytea holding NUL-separated argument strings */
p = (char *) VARDATA_ANY(DatumGetByteaPP(value));
for (i = 0; i < trigrec->tgnargs; i++)
{
if (i > 0)
appendStringInfoString(&buf, ", ");
simple_quote_literal(&buf, p);
/* advance p to next string embedded in tgargs */
while (*p)
p++;
p++;
}
}
/* We deliberately do not put semi-colon at end */
appendStringInfoChar(&buf, ')');
/* Clean up */
systable_endscan(tgscan);
table_close(tgrel, AccessShareLock);
return buf.data;
}
/*
 * set_simple_column_names: fill in column aliases for non-query situations
 *
 * This handles EXPLAIN and cases where we only have relation RTEs. Without
 * a join tree, we can't do anything smart about join RTEs, but we don't
 * need to (note that EXPLAIN should never see join alias Vars anyway).
 * If we do hit a join RTE we'll just process it like a non-table base RTE.
 */
static void
set_simple_column_names(deparse_namespace *dpns)
{
ListCell *lc;
ListCell *lc2;
/* Initialize dpns->rtable_columns to contain zeroed structs */
dpns->rtable_columns = NIL;
/* one deparse_columns entry per RTE, allocated zeroed */
while (list_length(dpns->rtable_columns) < list_length(dpns->rtable))
dpns->rtable_columns = lappend(dpns->rtable_columns,
palloc0(sizeof(deparse_columns)));
/* Assign unique column aliases within each RTE */
forboth(lc, dpns->rtable, lc2, dpns->rtable_columns)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
deparse_columns *colinfo = (deparse_columns *) lfirst(lc2);
set_relation_column_names(dpns, rte, colinfo);
}
}
/*
* get_opclass_name - fetch name of an index operator class

View File

@ -101,7 +101,6 @@
/* Pretty flags */
#define PRETTYFLAG_PAREN 0x0001
#define PRETTYFLAG_INDENT 0x0002
#define PRETTYFLAG_SCHEMA 0x0004
/* Default line length for pretty-print wrapping: 0 means wrap always */
#define WRAP_COLUMN_DEFAULT 0
@ -109,7 +108,6 @@
/* macros to test if pretty action needed */
#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN)
#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT)
#define PRETTY_SCHEMA(context) ((context)->prettyFlags & PRETTYFLAG_SCHEMA)
/* ----------
@ -322,9 +320,6 @@ typedef struct
* as a parameter, and append their text output to its contents.
* ----------
*/
static char *deparse_expression_pretty(Node *expr, List *dpcontext,
bool forceprefix, bool showimplicit,
int prettyFlags, int startIndent);
static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
Bitmapset *rels_used);
static void set_deparse_for_query(deparse_namespace *dpns, Query *query,
@ -442,10 +437,6 @@ static void get_from_clause_coldeflist(RangeTblFunction *rtfunc,
deparse_context *context);
static void get_tablesample_def(TableSampleClause *tablesample,
deparse_context *context);
char *pg_get_statisticsobj_worker(Oid statextid, bool columns_only,
bool missing_ok);
static char *pg_get_triggerdef_worker(Oid trigid, bool pretty);
static void set_simple_column_names(deparse_namespace *dpns);
static void get_opclass_name(Oid opclass, Oid actual_datatype,
StringInfo buf);
static Node *processIndirection(Node *node, deparse_context *context);
@ -652,51 +643,6 @@ pg_get_rule_expr(Node *expression)
}
/* ----------
 * deparse_expression_pretty - General utility for deparsing expressions
 *
 * expr must be a transformed expression tree (not the raw output of gram.y).
 *
 * dpcontext supplies deparse_namespace nodes used to resolve any Vars in
 * the tree; pass NIL when no Vars can appear.
 *
 * forceprefix requests table-name prefixes on every Var.
 *
 * showimplicit requests that implicit casts be printed explicitly.
 *
 * prettyFlags and startIndent control pretty-printing of the result.
 *
 * Returns a palloc'd string.
 * ----------
 */
static char *
deparse_expression_pretty(Node *expr, List *dpcontext,
						  bool forceprefix, bool showimplicit,
						  int prettyFlags, int startIndent)
{
	StringInfoData output;
	deparse_context dcontext;

	initStringInfo(&output);

	/* set up a deparse context targeting our local buffer */
	dcontext.buf = &output;
	dcontext.namespaces = dpcontext;
	dcontext.windowClause = NIL;
	dcontext.windowTList = NIL;
	dcontext.varprefix = forceprefix;
	dcontext.prettyFlags = prettyFlags;
	dcontext.wrapColumn = WRAP_COLUMN_DEFAULT;
	dcontext.indentLevel = startIndent;
	dcontext.special_exprkind = EXPR_KIND_NONE;
	dcontext.appendparents = NULL;

	get_rule_expr(expr, &dcontext, showimplicit);

	return output.data;
}
/*
* set_rtable_names: select RTE aliases to be used in printing a query
*
@ -8133,497 +8079,6 @@ get_tablesample_def(TableSampleClause *tablesample, deparse_context *context)
}
}
/*
 * pg_get_triggerdef_command returns the deparsed CREATE TRIGGER command
 * for the trigger with the given OID.  Pretty-printing is deliberately
 * skipped: the text is meant for machine consumption, not display.
 */
char *
pg_get_triggerdef_command(Oid triggerId)
{
	Assert(OidIsValid(triggerId));

	/* non-pretty mode keeps the target table name fully schema-qualified */
	return pg_get_triggerdef_worker(triggerId, false);
}
/*
 * pg_get_statisticsobj_worker - deparse a CREATE STATISTICS command
 *
 * Reconstructs the SQL definition of the extended-statistics object with
 * OID statextid.  When columns_only is true, only the column/expression
 * list is emitted (no CREATE STATISTICS prefix, kinds clause, or FROM
 * clause).  When missing_ok is true and the object does not exist, NULL
 * is returned instead of raising an error.
 *
 * The result is a palloc'd string.
 */
char *
pg_get_statisticsobj_worker(Oid statextid, bool columns_only, bool missing_ok)
{
	StringInfoData buf;
	int colno;
	char *nsp;
	ArrayType *arr;
	char *enabled;
	Datum datum;
	bool isnull;
	bool ndistinct_enabled;
	bool dependencies_enabled;
	bool mcv_enabled;
	int i;
	List *context;
	ListCell *lc;
	List *exprs = NIL;
	bool has_exprs;
	int ncolumns;

	HeapTuple statexttup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statextid));

	if (!HeapTupleIsValid(statexttup))
	{
		if (missing_ok)
		{
			return NULL;
		}
		elog(ERROR, "cache lookup failed for statistics object %u", statextid);
	}

	/* has the statistics expressions? */
	has_exprs = !heap_attisnull(statexttup, Anum_pg_statistic_ext_stxexprs, NULL);

	Form_pg_statistic_ext statextrec = (Form_pg_statistic_ext) GETSTRUCT(statexttup);

	/*
	 * Get the statistics expressions, if any.  (NOTE: we do not use the
	 * relcache versions of the expressions, because we want to display
	 * non-const-folded expressions.)
	 */
	if (has_exprs)
	{
		Datum exprsDatum;
		bool isNull;
		char *exprsString;

		exprsDatum = SysCacheGetAttr(STATEXTOID, statexttup,
									 Anum_pg_statistic_ext_stxexprs, &isNull);
		Assert(!isNull);
		exprsString = TextDatumGetCString(exprsDatum);
		exprs = (List *) stringToNode(exprsString);
		pfree(exprsString);
	}
	else
	{
		exprs = NIL;
	}

	/* count the number of columns (attributes and expressions) */
	ncolumns = statextrec->stxkeys.dim1 + list_length(exprs);

	initStringInfo(&buf);

	if (!columns_only)
	{
		nsp = get_namespace_name(statextrec->stxnamespace);
		appendStringInfo(&buf, "CREATE STATISTICS %s",
						 quote_qualified_identifier(nsp,
													NameStr(statextrec->stxname)));

		/*
		 * Decode the stxkind column so that we know which stats types to
		 * print.
		 */
		datum = SysCacheGetAttr(STATEXTOID, statexttup,
								Anum_pg_statistic_ext_stxkind, &isnull);
		Assert(!isnull);
		arr = DatumGetArrayTypeP(datum);
		if (ARR_NDIM(arr) != 1 ||
			ARR_HASNULL(arr) ||
			ARR_ELEMTYPE(arr) != CHAROID)
			elog(ERROR, "stxkind is not a 1-D char array");
		enabled = (char *) ARR_DATA_PTR(arr);

		ndistinct_enabled = false;
		dependencies_enabled = false;
		mcv_enabled = false;

		/* walk the stxkind flags to see which statistics kinds are enabled */
		for (i = 0; i < ARR_DIMS(arr)[0]; i++)
		{
			if (enabled[i] == STATS_EXT_NDISTINCT)
				ndistinct_enabled = true;
			else if (enabled[i] == STATS_EXT_DEPENDENCIES)
				dependencies_enabled = true;
			else if (enabled[i] == STATS_EXT_MCV)
				mcv_enabled = true;

			/* ignore STATS_EXT_EXPRESSIONS (it's built automatically) */
		}

		/*
		 * If any option is disabled, then we'll need to append the types
		 * clause to show which options are enabled.  We omit the types clause
		 * on purpose when all options are enabled, so a pg_dump/pg_restore
		 * will create all statistics types on a newer postgres version, if
		 * the statistics had all options enabled on the original version.
		 *
		 * But if the statistics is defined on just a single column, it has to
		 * be an expression statistics.  In that case we don't need to specify
		 * kinds.
		 */
		if ((!ndistinct_enabled || !dependencies_enabled || !mcv_enabled) &&
			(ncolumns > 1))
		{
			bool gotone = false;

			appendStringInfoString(&buf, " (");

			if (ndistinct_enabled)
			{
				appendStringInfoString(&buf, "ndistinct");
				gotone = true;
			}

			if (dependencies_enabled)
			{
				/* gotone tracks whether a comma separator is needed */
				appendStringInfo(&buf, "%sdependencies", gotone ? ", " : "");
				gotone = true;
			}

			if (mcv_enabled)
				appendStringInfo(&buf, "%smcv", gotone ? ", " : "");

			appendStringInfoChar(&buf, ')');
		}

		appendStringInfoString(&buf, " ON ");
	}

	/* decode simple column references */
	for (colno = 0; colno < statextrec->stxkeys.dim1; colno++)
	{
		AttrNumber attnum = statextrec->stxkeys.values[colno];

		if (colno > 0)
		{
			appendStringInfoString(&buf, ", ");
		}

		char *attname = get_attname(statextrec->stxrelid, attnum, false);

		appendStringInfoString(&buf, quote_identifier(attname));
	}

	context = deparse_context_for(get_relation_name(statextrec->stxrelid),
								  statextrec->stxrelid);

	/*
	 * Deparse the statistics expressions, if any.  Note that colno carries
	 * over from the loop above, so the comma separation continues seamlessly
	 * after the simple column references.
	 */
	foreach(lc, exprs)
	{
		Node *expr = (Node *) lfirst(lc);
		char *str;
		int prettyFlags = PRETTYFLAG_INDENT;

		str = deparse_expression_pretty(expr, context, false, false,
										prettyFlags, 0);

		if (colno > 0)
			appendStringInfoString(&buf, ", ");

		/* Need parens if it's not a bare function call */
		if (looks_like_function(expr))
			appendStringInfoString(&buf, str);
		else
			appendStringInfo(&buf, "(%s)", str);

		colno++;
	}

	if (!columns_only)
		appendStringInfo(&buf, " FROM %s",
						 generate_relation_name(statextrec->stxrelid, NIL));

	ReleaseSysCache(statexttup);

	return buf.data;
}
/*
 * pg_get_triggerdef_worker - deparse a CREATE TRIGGER command
 *
 * Reconstructs the SQL definition of the trigger with OID trigid.  When
 * pretty is true, the target table name is schema-qualified only when it
 * is not visible; otherwise the name is always schema-qualified for
 * safety.  Returns NULL if no such trigger exists; otherwise a palloc'd
 * string, deliberately without a trailing semicolon.
 */
static char *
pg_get_triggerdef_worker(Oid trigid, bool pretty)
{
	HeapTuple ht_trig;
	Form_pg_trigger trigrec;
	StringInfoData buf;
	Relation tgrel;
	ScanKeyData skey[1];
	SysScanDesc tgscan;
	int findx = 0;			/* counts events emitted so far (for " OR ") */
	char *tgname;
	char *tgoldtable;
	char *tgnewtable;
	Oid argtypes[1];		/* dummy */
	Datum value;
	bool isnull;

	/*
	 * Fetch the pg_trigger tuple by the Oid of the trigger
	 */
	tgrel = table_open(TriggerRelationId, AccessShareLock);

	ScanKeyInit(&skey[0],
				Anum_pg_trigger_oid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(trigid));

	tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
								NULL, 1, skey);

	ht_trig = systable_getnext(tgscan);

	if (!HeapTupleIsValid(ht_trig))
	{
		/* trigger does not exist: clean up and report by returning NULL */
		systable_endscan(tgscan);
		table_close(tgrel, AccessShareLock);
		return NULL;
	}

	trigrec = (Form_pg_trigger) GETSTRUCT(ht_trig);

	/*
	 * Start the trigger definition.  Note that the trigger's name should never
	 * be schema-qualified, but the trigger rel's name may be.
	 */
	initStringInfo(&buf);

	tgname = NameStr(trigrec->tgname);
	appendStringInfo(&buf, "CREATE %sTRIGGER %s ",
					 OidIsValid(trigrec->tgconstraint) ? "CONSTRAINT " : "",
					 quote_identifier(tgname));

	/* timing: BEFORE / AFTER / INSTEAD OF */
	if (TRIGGER_FOR_BEFORE(trigrec->tgtype))
		appendStringInfoString(&buf, "BEFORE");
	else if (TRIGGER_FOR_AFTER(trigrec->tgtype))
		appendStringInfoString(&buf, "AFTER");
	else if (TRIGGER_FOR_INSTEAD(trigrec->tgtype))
		appendStringInfoString(&buf, "INSTEAD OF");
	else
		elog(ERROR, "unexpected tgtype value: %d", trigrec->tgtype);

	/* events: INSERT / DELETE / UPDATE [OF cols] / TRUNCATE, OR-separated */
	if (TRIGGER_FOR_INSERT(trigrec->tgtype))
	{
		appendStringInfoString(&buf, " INSERT");
		findx++;
	}
	if (TRIGGER_FOR_DELETE(trigrec->tgtype))
	{
		if (findx > 0)
			appendStringInfoString(&buf, " OR DELETE");
		else
			appendStringInfoString(&buf, " DELETE");
		findx++;
	}
	if (TRIGGER_FOR_UPDATE(trigrec->tgtype))
	{
		if (findx > 0)
			appendStringInfoString(&buf, " OR UPDATE");
		else
			appendStringInfoString(&buf, " UPDATE");
		findx++;

		/* tgattr is first var-width field, so OK to access directly */
		if (trigrec->tgattr.dim1 > 0)
		{
			int i;

			appendStringInfoString(&buf, " OF ");
			for (i = 0; i < trigrec->tgattr.dim1; i++)
			{
				char *attname;

				if (i > 0)
					appendStringInfoString(&buf, ", ");
				attname = get_attname(trigrec->tgrelid,
									  trigrec->tgattr.values[i], false);
				appendStringInfoString(&buf, quote_identifier(attname));
			}
		}
	}
	if (TRIGGER_FOR_TRUNCATE(trigrec->tgtype))
	{
		if (findx > 0)
			appendStringInfoString(&buf, " OR TRUNCATE");
		else
			appendStringInfoString(&buf, " TRUNCATE");
		findx++;
	}

	/*
	 * In non-pretty mode, always schema-qualify the target table name for
	 * safety.  In pretty mode, schema-qualify only if not visible.
	 */
	appendStringInfo(&buf, " ON %s ",
					 pretty ?
					 generate_relation_name(trigrec->tgrelid, NIL) :
					 generate_qualified_relation_name(trigrec->tgrelid));

	/* constraint triggers carry FROM / DEFERRABLE decoration */
	if (OidIsValid(trigrec->tgconstraint))
	{
		if (OidIsValid(trigrec->tgconstrrelid))
			appendStringInfo(&buf, "FROM %s ",
							 generate_relation_name(trigrec->tgconstrrelid, NIL));
		if (!trigrec->tgdeferrable)
			appendStringInfoString(&buf, "NOT ");
		appendStringInfoString(&buf, "DEFERRABLE INITIALLY ");
		if (trigrec->tginitdeferred)
			appendStringInfoString(&buf, "DEFERRED ");
		else
			appendStringInfoString(&buf, "IMMEDIATE ");
	}

	/* transition-table names (REFERENCING clause), NULL when absent */
	value = fastgetattr(ht_trig, Anum_pg_trigger_tgoldtable,
						tgrel->rd_att, &isnull);
	if (!isnull)
		tgoldtable = NameStr(*DatumGetName(value));
	else
		tgoldtable = NULL;
	value = fastgetattr(ht_trig, Anum_pg_trigger_tgnewtable,
						tgrel->rd_att, &isnull);
	if (!isnull)
		tgnewtable = NameStr(*DatumGetName(value));
	else
		tgnewtable = NULL;
	if (tgoldtable != NULL || tgnewtable != NULL)
	{
		appendStringInfoString(&buf, "REFERENCING ");
		if (tgoldtable != NULL)
			appendStringInfo(&buf, "OLD TABLE AS %s ",
							 quote_identifier(tgoldtable));
		if (tgnewtable != NULL)
			appendStringInfo(&buf, "NEW TABLE AS %s ",
							 quote_identifier(tgnewtable));
	}

	if (TRIGGER_FOR_ROW(trigrec->tgtype))
		appendStringInfoString(&buf, "FOR EACH ROW ");
	else
		appendStringInfoString(&buf, "FOR EACH STATEMENT ");

	/* If the trigger has a WHEN qualification, add that */
	value = fastgetattr(ht_trig, Anum_pg_trigger_tgqual,
						tgrel->rd_att, &isnull);
	if (!isnull)
	{
		Node *qual;
		char relkind;
		deparse_context context;
		deparse_namespace dpns;
		RangeTblEntry *oldrte;
		RangeTblEntry *newrte;

		appendStringInfoString(&buf, "WHEN (");

		qual = stringToNode(TextDatumGetCString(value));

		relkind = get_rel_relkind(trigrec->tgrelid);

		/* Build minimal OLD and NEW RTEs for the rel */
		oldrte = makeNode(RangeTblEntry);
		oldrte->rtekind = RTE_RELATION;
		oldrte->relid = trigrec->tgrelid;
		oldrte->relkind = relkind;
		oldrte->rellockmode = AccessShareLock;
		oldrte->alias = makeAlias("old", NIL);
		oldrte->eref = oldrte->alias;
		oldrte->lateral = false;
		oldrte->inh = false;
		oldrte->inFromCl = true;

		newrte = makeNode(RangeTblEntry);
		newrte->rtekind = RTE_RELATION;
		newrte->relid = trigrec->tgrelid;
		newrte->relkind = relkind;
		newrte->rellockmode = AccessShareLock;
		newrte->alias = makeAlias("new", NIL);
		newrte->eref = newrte->alias;
		newrte->lateral = false;
		newrte->inh = false;
		newrte->inFromCl = true;

		/* Build two-element rtable */
		memset(&dpns, 0, sizeof(dpns));
		dpns.rtable = list_make2(oldrte, newrte);
		dpns.ctes = NIL;
		set_rtable_names(&dpns, NIL, NULL);
		set_simple_column_names(&dpns);

		/* Set up context with one-deep namespace stack */
		context.buf = &buf;
		context.namespaces = list_make1(&dpns);
		context.windowClause = NIL;
		context.windowTList = NIL;
		context.varprefix = true;
		context.prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
		context.wrapColumn = WRAP_COLUMN_DEFAULT;
		context.indentLevel = PRETTYINDENT_STD;
		context.special_exprkind = EXPR_KIND_NONE;
		context.appendparents = NULL;

		get_rule_expr(qual, &context, false);

		appendStringInfoString(&buf, ") ");
	}

	appendStringInfo(&buf, "EXECUTE FUNCTION %s(",
					 generate_function_name(trigrec->tgfoid, 0,
											NIL, argtypes,
											false, NULL, EXPR_KIND_NONE));

	/* append the trigger's arguments, stored as NUL-separated strings */
	if (trigrec->tgnargs > 0)
	{
		char *p;
		int i;

		value = fastgetattr(ht_trig, Anum_pg_trigger_tgargs,
							tgrel->rd_att, &isnull);
		if (isnull)
			elog(ERROR, "tgargs is null for trigger %u", trigid);
		p = (char *) VARDATA_ANY(DatumGetByteaPP(value));
		for (i = 0; i < trigrec->tgnargs; i++)
		{
			if (i > 0)
				appendStringInfoString(&buf, ", ");
			simple_quote_literal(&buf, p);
			/* advance p to next string embedded in tgargs */
			while (*p)
				p++;
			p++;
		}
	}

	/* We deliberately do not put semi-colon at end */
	appendStringInfoChar(&buf, ')');

	/* Clean up */
	systable_endscan(tgscan);
	table_close(tgrel, AccessShareLock);

	return buf.data;
}
/*
 * set_simple_column_names: fill in column aliases for non-query situations
 *
 * Used for EXPLAIN-like cases where only relation RTEs are expected.  With
 * no join tree available we cannot treat join RTEs specially, but we also
 * do not need to (join alias Vars should never reach EXPLAIN anyway).
 * Should a join RTE appear, it is handled like an ordinary base RTE.
 */
static void
set_simple_column_names(deparse_namespace *dpns)
{
	ListCell *rteCell;
	ListCell *colCell;
	int rteCount = list_length(dpns->rtable);

	/* build one zero-filled deparse_columns struct per range table entry */
	dpns->rtable_columns = NIL;
	for (int rteIndex = 0; rteIndex < rteCount; rteIndex++)
		dpns->rtable_columns = lappend(dpns->rtable_columns,
									   palloc0(sizeof(deparse_columns)));

	/* pick unique column aliases for every RTE */
	forboth(rteCell, dpns->rtable, colCell, dpns->rtable_columns)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(rteCell);
		deparse_columns *colinfo = (deparse_columns *) lfirst(colCell);

		set_relation_column_names(dpns, rte, colinfo);
	}
}
/*
* get_opclass_name - fetch name of an index operator class

View File

@ -161,9 +161,11 @@
#include "distributed/shared_connection_stats.h"
#include "distributed/subplan_execution.h"
#include "distributed/transaction_management.h"
#include "distributed/transaction_identifier.h"
#include "distributed/tuple_destination.h"
#include "distributed/version_compat.h"
#include "distributed/worker_protocol.h"
#include "distributed/backend_data.h"
#include "lib/ilist.h"
#include "portability/instr_time.h"
#include "storage/fd.h"

View File

@ -34,14 +34,19 @@
#include "distributed/subplan_execution.h"
#include "distributed/worker_log_messages.h"
#include "distributed/worker_protocol.h"
#include "distributed/colocation_utils.h"
#include "distributed/function_call_delegation.h"
#include "executor/executor.h"
#include "nodes/makefuncs.h"
#include "optimizer/optimizer.h"
#include "optimizer/clauses.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/datum.h"
extern AllowedDistributionColumn AllowedDistributionColumnValue;
/* functions for creating custom scan nodes */
static Node * AdaptiveExecutorCreateScan(CustomScan *scan);
static Node * NonPushableInsertSelectCreateScan(CustomScan *scan);
@ -59,6 +64,8 @@ static DistributedPlan * CopyDistributedPlanWithoutCache(
DistributedPlan *originalDistributedPlan);
static void CitusEndScan(CustomScanState *node);
static void CitusReScan(CustomScanState *node);
static void SetJobColocationId(Job *job);
static void EnsureForceDelegationDistributionKey(Job *job);
/* create custom scan methods for all executors */
@ -190,6 +197,17 @@ CitusBeginScan(CustomScanState *node, EState *estate, int eflags)
CitusBeginModifyScan(node, estate, eflags);
}
/*
* If a force_delegation function's distribution argument has been pinned,
* enforce that this query filters by it
*/
if (AllowedDistributionColumnValue.isActive)
{
Job *workerJob = scanState->distributedPlan->workerJob;
EnsureForceDelegationDistributionKey(workerJob);
}
/*
* In case of a prepared statement, we will see this distributed plan again
* on the next execution with a higher usage counter.
@ -801,3 +819,96 @@ IsCitusCustomScan(Plan *plan)
return true;
}
/*
 * SetJobColocationId inspects the relations referenced by the Job's query.
 * If every Citus-managed relation among them belongs to one colocation
 * group, the Job's colocationId is set to that group's ID; otherwise it is
 * set to INVALID_COLOCATION_ID.
 */
static void
SetJobColocationId(Job *job)
{
	uint32 resolvedColocationId = INVALID_COLOCATION_ID;

	/* a Job without a shard key has no colocation group to resolve */
	if (!job->partitionKeyValue)
	{
		return;
	}

	List *rangeTableEntries = ExtractRangeTableEntryList(job->jobQuery);
	ListCell *entryCell = NULL;
	foreach(entryCell, rangeTableEntries)
	{
		RangeTblEntry *entry = (RangeTblEntry *) lfirst(entryCell);
		Oid relationOid = entry->relid;

		/* relations that Citus does not manage are irrelevant here */
		if (!IsCitusTable(relationOid))
		{
			continue;
		}

		uint32 currentColocationId = TableColocationId(relationOid);
		if (resolvedColocationId == INVALID_COLOCATION_ID)
		{
			/* first distributed table seen: adopt its group */
			resolvedColocationId = currentColocationId;
		}
		else if (resolvedColocationId != currentColocationId)
		{
			/* tables span different colocation groups: give up */
			resolvedColocationId = INVALID_COLOCATION_ID;
			break;
		}
	}

	job->colocationId = resolvedColocationId;
}
/*
 * EnsureForceDelegationDistributionKey verifies that a Job executing inside
 * a force-delegated function block filters by the same shard key (and
 * colocation group) as the function's pinned distribution argument.  Any
 * function distributed with the force_delegation flag set to true must
 * satisfy this; otherwise the query could reach shards on other nodes and
 * violate the transactional integrity of the 2PC.  Errors out on violation.
 */
static void
EnsureForceDelegationDistributionKey(Job *job)
{
	/* If the Job has the subquery, punt the shard-key-check to the subquery */
	if (job->subqueryPushdown)
	{
		return;
	}

	/*
	 * If the query doesn't have shard key, nothing to check, only exception is when
	 * the query doesn't have distributed tables but an RTE with intermediate_results
	 * function (a subquery plan).
	 */
	if (!job->partitionKeyValue)
	{
		bool queryContainsDistributedTable =
			FindNodeMatchingCheckFunction((Node *) job->jobQuery, IsDistributedTableRTE);

		if (!queryContainsDistributedTable)
		{
			return;
		}
	}

	/* We should match both the key and the colocation ID */
	SetJobColocationId(job);

	if (!IsShardKeyValueAllowed(job->partitionKeyValue, job->colocationId))
	{
		/*
		 * Hint the correct API: force_delegation is a parameter of
		 * create_distributed_function(), not create_distributed_table().
		 */
		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						errmsg(
							"queries must filter by the distribution argument in the same "
							"colocation group when using the forced function pushdown"),
						errhint(
							"consider disabling forced delegation through "
							"create_distributed_function(..., force_delegation := false)")));
	}
}

View File

@ -37,6 +37,7 @@
#include "distributed/version_compat.h"
#include "distributed/worker_shard_visibility.h"
#include "distributed/worker_protocol.h"
#include "distributed/function_call_delegation.h"
#include "executor/execdebug.h"
#include "commands/copy.h"
#include "nodes/execnodes.h"
@ -234,6 +235,15 @@ CitusExecutorRun(QueryDesc *queryDesc,
* transactions.
*/
CitusTableCacheFlushInvalidatedEntries();
/*
* Within a 2PC, when a function is delegated to a remote node, we pin
* the distribution argument as the shard key for all the SQL in the
* function's block. The restriction is imposed to not to access other
* nodes from the current node and violate the transactional integrity
* of the 2PC. Now that the query is ending, reset the shard key to NULL.
*/
ResetAllowedShardKeyValue();
}
}
PG_CATCH();
@ -246,6 +256,15 @@ CitusExecutorRun(QueryDesc *queryDesc,
executorBoundParams = savedBoundParams;
ExecutorLevel--;
if (ExecutorLevel == 0 && PlannerLevel == 0)
{
/*
* In case of an exception, reset the pinned shard-key, for more
* details see the function header.
*/
ResetAllowedShardKeyValue();
}
PG_RE_THROW();
}
PG_END_TRY();
@ -761,6 +780,6 @@ InTaskExecution(void)
* are in a delegated function/procedure call.
*/
return IsCitusInitiatedRemoteBackend() &&
!InDelegatedFunctionCall &&
!InTopLevelDelegatedFunctionCall &&
!InDelegatedProcedureCall;
}

View File

@ -178,11 +178,13 @@ MarkObjectDistributed(const ObjectAddress *distAddress)
List *objectAddressList = list_make1((ObjectAddress *) distAddress);
List *distArgumetIndexList = list_make1_int(INVALID_DISTRIBUTION_ARGUMENT_INDEX);
List *colocationIdList = list_make1_int(INVALID_COLOCATION_ID);
List *forceDelegationList = list_make1_int(NO_FORCE_PUSHDOWN);
char *workerPgDistObjectUpdateCommand =
MarkObjectsDistributedCreateCommand(objectAddressList,
distArgumetIndexList,
colocationIdList);
colocationIdList,
forceDelegationList);
SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand);
}
}

View File

@ -166,6 +166,7 @@ typedef struct MetadataCacheData
Oid secondaryNodeRoleId;
Oid pgTableIsVisibleFuncId;
Oid citusTableIsVisibleFuncId;
Oid relationIsAKnownShardFuncId;
Oid jsonbExtractPathFuncId;
bool databaseNameValid;
char databaseName[NAMEDATALEN];
@ -1362,6 +1363,9 @@ LookupDistObjectCacheEntry(Oid classid, Oid objid, int32 objsubid)
1]);
cacheEntry->colocationId =
DatumGetInt32(datumArray[Anum_pg_dist_object_colocationid - 1]);
cacheEntry->forceDelegation =
DatumGetBool(datumArray[Anum_pg_dist_object_force_delegation - 1]);
}
else
{
@ -2619,6 +2623,24 @@ CitusTableVisibleFuncId(void)
}
/*
* RelationIsAKnownShardFuncId returns oid of the relation_is_a_known_shard function.
*/
Oid
RelationIsAKnownShardFuncId(void)
{
if (MetadataCache.relationIsAKnownShardFuncId == InvalidOid)
{
const int argCount = 1;
MetadataCache.relationIsAKnownShardFuncId =
FunctionOid("pg_catalog", "relation_is_a_known_shard", argCount);
}
return MetadataCache.relationIsAKnownShardFuncId;
}
/*
* JsonbExtractPathFuncId returns oid of the jsonb_extract_path function.
*/

View File

@ -569,6 +569,7 @@ DistributedObjectMetadataSyncCommandList(void)
List *objectAddressList = NIL;
List *distArgumentIndexList = NIL;
List *colocationIdList = NIL;
List *forceDelegationList = NIL;
/* It is not strictly necessary to read the tuples in order.
* However, it is useful to get consistent behavior, both for regression
@ -604,6 +605,14 @@ DistributedObjectMetadataSyncCommandList(void)
&colocationIdIsNull);
int32 colocationId = DatumGetInt32(colocationIdDatum);
bool forceDelegationIsNull = false;
Datum forceDelegationDatum =
heap_getattr(pgDistObjectTup,
Anum_pg_dist_object_force_delegation,
pgDistObjectDesc,
&forceDelegationIsNull);
bool forceDelegation = DatumGetBool(forceDelegationDatum);
objectAddressList = lappend(objectAddressList, address);
if (distributionArgumentIndexIsNull)
@ -626,6 +635,15 @@ DistributedObjectMetadataSyncCommandList(void)
{
colocationIdList = lappend_int(colocationIdList, colocationId);
}
if (forceDelegationIsNull)
{
forceDelegationList = lappend_int(forceDelegationList, NO_FORCE_PUSHDOWN);
}
else
{
forceDelegationList = lappend_int(forceDelegationList, forceDelegation);
}
}
systable_endscan_ordered(pgDistObjectScan);
@ -635,7 +653,8 @@ DistributedObjectMetadataSyncCommandList(void)
char *workerMetadataUpdateCommand =
MarkObjectsDistributedCreateCommand(objectAddressList,
distArgumentIndexList,
colocationIdList);
colocationIdList,
forceDelegationList);
List *commandList = list_make1(workerMetadataUpdateCommand);
return commandList;
@ -763,7 +782,8 @@ NodeListInsertCommand(List *workerNodeList)
char *
MarkObjectsDistributedCreateCommand(List *addresses,
List *distributionArgumentIndexes,
List *colocationIds)
List *colocationIds,
List *forceDelegations)
{
StringInfo insertDistributedObjectsCommand = makeStringInfo();
@ -772,7 +792,7 @@ MarkObjectsDistributedCreateCommand(List *addresses,
appendStringInfo(insertDistributedObjectsCommand,
"WITH distributed_object_data(typetext, objnames, "
"objargs, distargumentindex, colocationid) AS (VALUES ");
"objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ");
bool isFirstObject = true;
for (int currentObjectCounter = 0; currentObjectCounter < list_length(addresses);
@ -782,6 +802,7 @@ MarkObjectsDistributedCreateCommand(List *addresses,
int distributionArgumentIndex = list_nth_int(distributionArgumentIndexes,
currentObjectCounter);
int colocationId = list_nth_int(colocationIds, currentObjectCounter);
int forceDelegation = list_nth_int(forceDelegations, currentObjectCounter);
List *names = NIL;
List *args = NIL;
char *objectType = NULL;
@ -837,15 +858,18 @@ MarkObjectsDistributedCreateCommand(List *addresses,
appendStringInfo(insertDistributedObjectsCommand, "%d, ",
distributionArgumentIndex);
appendStringInfo(insertDistributedObjectsCommand, "%d)",
appendStringInfo(insertDistributedObjectsCommand, "%d, ",
colocationId);
appendStringInfo(insertDistributedObjectsCommand, "%s)",
forceDelegation ? "true" : "false");
}
appendStringInfo(insertDistributedObjectsCommand, ") ");
appendStringInfo(insertDistributedObjectsCommand,
"SELECT citus_internal_add_object_metadata("
"typetext, objnames, objargs, distargumentindex::int, colocationid::int) "
"typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) "
"FROM distributed_object_data;");
return insertDistributedObjectsCommand->data;
@ -864,6 +888,7 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS)
ArrayType *argsArray = PG_GETARG_ARRAYTYPE_P(2);
int distributionArgumentIndex = PG_GETARG_INT32(3);
int colocationId = PG_GETARG_INT32(4);
bool forceDelegation = PG_GETARG_INT32(5);
if (!ShouldSkipMetadataChecks())
{
@ -905,9 +930,14 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS)
NULL :
&colocationId;
bool *forceDelegationAddress =
forceDelegation == false ?
NULL :
&forceDelegation;
UpdateFunctionDistributionInfo(&objectAddress,
distributionArgumentIndexAddress,
colocationIdAddress);
colocationIdAddress,
forceDelegationAddress);
}
SetLocalEnableDependencyCreation(prevDependencyCreationValue);

View File

@ -1114,7 +1114,8 @@ ActivateNode(char *nodeName, int nodePort)
/*
* Sync node metadata. We must sync node metadata before syncing table
* related pg_dist_xxx metadata.
* related pg_dist_xxx metadata, since table-related metadata requires
* the correct pg_dist_node entries to be in place.
*/
SyncNodeMetadataToNode(nodeName, nodePort);

View File

@ -202,9 +202,9 @@ distributed_planner(Query *parse,
/*
* Make sure that we hide shard names on the Citus MX worker nodes. See comments in
* ReplaceTableVisibleFunction() for the details.
* HideShardsFromSomeApplications() for the details.
*/
ReplaceTableVisibleFunction((Node *) parse);
HideShardsFromSomeApplications(parse);
/* create a restriction context and put it at the end if context list */
planContext.plannerRestrictionContext = CreateAndPushPlannerRestrictionContext();

View File

@ -17,7 +17,7 @@
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "distributed/citus_custom_scan.h"
#include "distributed/metadata_utility.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/colocation_utils.h"
#include "distributed/commands.h"
@ -26,7 +26,7 @@
#include "distributed/deparse_shard_query.h"
#include "distributed/function_call_delegation.h"
#include "distributed/insert_select_planner.h"
#include "distributed/metadata_utility.h"
#include "distributed/citus_custom_scan.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/listutils.h"
#include "distributed/metadata_cache.h"
@ -41,6 +41,7 @@
#include "nodes/nodeFuncs.h"
#include "nodes/parsenodes.h"
#include "nodes/primnodes.h"
#include "nodes/print.h"
#include "optimizer/clauses.h"
#include "parser/parse_coerce.h"
#include "parser/parsetree.h"
@ -55,7 +56,18 @@ struct ParamWalkerContext
ParamKind paramKind;
};
extern AllowedDistributionColumn AllowedDistributionColumnValue;
static bool contain_param_walker(Node *node, void *context);
static void CheckDelegatedFunctionExecution(DistObjectCacheEntry *procedure,
FuncExpr *funcExpr);
static bool IsQuerySimple(Query *query);
static FuncExpr * FunctionInFromClause(List *fromlist, Query *query);
static void EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId);
/* global variable keeping track of whether we are in a delegated function call */
bool InTopLevelDelegatedFunctionCall = false;
/* global variable keeping track of whether we are in a delegated function call */
@ -84,15 +96,14 @@ contain_param_walker(Node *node, void *context)
pwcontext->hasParam = true;
pwcontext->paramKind = paramNode->paramkind;
if (paramNode->paramkind == PARAM_EXEC)
return paramNode->paramkind == PARAM_EXEC;
}
else
{
return true;
return expression_tree_walker((Node *) node, contain_param_walker, context);
}
}
return false;
}
/*
* TryToDelegateFunctionCall calls a function on the worker if possible.
@ -106,8 +117,8 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
{
bool colocatedWithReferenceTable = false;
ShardPlacement *placement = NULL;
DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);
struct ParamWalkerContext walkerParamContext = { 0 };
bool inTransactionBlock = false;
if (!CitusHasBeenLoaded() || !CheckCitusVersion(DEBUG4))
{
@ -147,25 +158,41 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
return NULL;
}
FuncExpr *fromFuncExpr = NULL;
if (joinTree->fromlist != NIL)
{
if (list_length(joinTree->fromlist) != 1)
{
/* e.g. SELECT ... FROM rel1, rel2. */
Assert(list_length(joinTree->fromlist) > 1);
return NULL;
}
/*
* In pg12's planning phase empty FROMs are represented with an RTE_RESULT.
* When we arrive here, standard_planner has already been called which calls
* replace_empty_jointree() which replaces empty fromlist with a list of
* single RTE_RESULT RangleTableRef node.
*/
if (list_length(joinTree->fromlist) == 1)
{
RangeTblRef *reference = linitial(joinTree->fromlist);
if (IsA(reference, RangeTblRef))
{
RangeTblEntry *rtentry = rt_fetch(reference->rtindex,
planContext->query->rtable);
if (rtentry->rtekind != RTE_RESULT)
if (rtentry->rtekind == RTE_FUNCTION)
{
/*
* Look for a function in the FROM clause.
*/
fromFuncExpr = FunctionInFromClause(joinTree->fromlist,
planContext->query);
}
else if (rtentry->rtekind != RTE_RESULT)
{
/* e.g. SELECT f() FROM rel */
ereport(DEBUG4, (errmsg("FromList item is not empty")));
return NULL;
}
}
@ -178,29 +205,49 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
return NULL;
}
}
FuncExpr *targetFuncExpr = NULL;
List *targetList = planContext->query->targetList;
int targetListLen = list_length(targetList);
if (targetListLen == 1)
{
TargetEntry *targetEntry = (TargetEntry *) linitial(targetList);
if (IsA(targetEntry->expr, FuncExpr))
{
/* function from the SELECT clause e.g. SELECT fn() FROM */
targetFuncExpr = (FuncExpr *) targetEntry->expr;
}
}
/*
* Look for one of:
* SELECT fn(...);
* SELECT ... FROM fn(...);
*/
FuncExpr *funcExpr = NULL;
if (targetFuncExpr != NULL)
{
if (fromFuncExpr != NULL)
{
/* query is of the form: SELECT fn() FROM fn() */
return NULL;
}
/* query is of the form: SELECT fn(); */
funcExpr = targetFuncExpr;
}
else if (fromFuncExpr != NULL)
{
/* query is of the form: SELECT ... FROM fn(); */
funcExpr = fromFuncExpr;
}
else
{
/* e.g. SELECT ... FROM rel1, rel2. */
Assert(list_length(joinTree->fromlist) > 1);
return NULL;
}
}
List *targetList = planContext->query->targetList;
if (list_length(planContext->query->targetList) != 1)
{
/* multiple target list items */
/* query does not have a function call in SELECT or FROM */
return NULL;
}
TargetEntry *targetEntry = (TargetEntry *) linitial(targetList);
if (!IsA(targetEntry->expr, FuncExpr))
{
/* target list item is not a function call */
return NULL;
}
FuncExpr *funcExpr = (FuncExpr *) targetEntry->expr;
DistObjectCacheEntry *procedure = LookupDistObjectCacheEntry(ProcedureRelationId,
funcExpr->funcid, 0);
if (procedure == NULL || !procedure->isDistributed)
@ -215,11 +262,44 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
if (IsCitusInitiatedRemoteBackend())
{
bool isFunctionForceDelegated = procedure->forceDelegation;
/*
* We are planning a call to a distributed function within a Citus backend,
* that means that this is the delegated call.
* that means that this is the delegated call. If the function is forcefully
* delegated, capture the distribution argument.
*/
InDelegatedFunctionCall = true;
if (isFunctionForceDelegated)
{
CheckDelegatedFunctionExecution(procedure, funcExpr);
}
/* Are we planning the top function call? */
if (ExecutorLevel == 0 && PlannerLevel == 1)
{
/*
* InTopLevelDelegatedFunctionCall flag grants the levy
* to do remote tasks from a delegated function.
*/
if (!isFunctionForceDelegated)
{
/*
* we are planning a regular delegated call, we
* are allowed to do remote execution.
*/
InTopLevelDelegatedFunctionCall = true;
}
else if (!IsMultiStatementTransaction())
{
/*
* we are planning a force-delegated call, we
* are allowed to do remote execution if there
* is no explicit BEGIN-END transaction.
*/
InTopLevelDelegatedFunctionCall = true;
}
}
return NULL;
}
@ -244,13 +324,77 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
return NULL;
}
if (fromFuncExpr && !IsMultiStatementTransaction())
{
/*
* For now, let's not push the function from the FROM clause unless it's in a
* multistatement transaction with the forceDelegation flag ON.
*/
ereport(DEBUG2, (errmsg("function from the FROM clause is not pushed")));
return NULL;
}
/* dissuade the planner from trying a generic plan with parameters */
(void) expression_tree_walker((Node *) funcExpr->args, contain_param_walker,
&walkerParamContext);
if (walkerParamContext.hasParam)
{
if (walkerParamContext.paramKind == PARAM_EXTERN)
{
/* Don't log a message, we should end up here again without a parameter */
DissuadePlannerFromUsingPlan(planContext->plan);
}
else
{
ereport(DEBUG1, (errmsg("arguments in a distributed function must "
"not contain subqueries")));
}
return NULL;
}
if (IsMultiStatementTransaction())
{
if (!procedure->forceDelegation)
{
/* cannot delegate function calls in a multi-statement transaction */
ereport(DEBUG1, (errmsg("not pushing down function calls in "
"a multi-statement transaction")));
return NULL;
}
else
{
Node *partitionValueNode = (Node *) list_nth(funcExpr->args,
procedure->distributionArgIndex);
if (!IsA(partitionValueNode, Const))
{
ereport(DEBUG1, (errmsg("distribution argument value must be a "
"constant when using force_delegation flag")));
return NULL;
}
/*
* If the expression is simple, such as, SELECT fn() in
* PL/PgSQL code, PL engine is doing simple expression
* evaluation, which can't interpret the CustomScan Node.
* Function from FROM clause is not simple, so it's ok.
*/
if (MaybeExecutingUDF() && IsQuerySimple(planContext->query) && !fromFuncExpr)
{
ereport(DEBUG1, (errmsg("Skipping delegation of function "
"from a PL/PgSQL simple expression")));
return NULL;
}
/*
* When this flag is on, delegate the function call in a multi-statement
* transaction but with restrictions.
*/
ereport(DEBUG1, (errmsg("pushing down function call in "
"a multi-statement transaction")));
inTransactionBlock = true;
}
}
if (contain_volatile_functions((Node *) funcExpr->args))
{
@ -300,7 +444,7 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
/* return if we could not find a placement */
if (placement == NULL)
{
return false;
return NULL;
}
WorkerNode *workerNode = FindWorkerNode(placement->nodeName, placement->nodePort);
@ -312,6 +456,12 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
}
else if (workerNode->groupId == GetLocalGroupId())
{
/* If the force_pushdown flag is set, capture the distribution argument */
if (procedure->forceDelegation)
{
CheckDelegatedFunctionExecution(procedure, funcExpr);
}
/*
* Two reasons for this:
* (a) It would lead to infinite recursion as the node would
@ -323,27 +473,28 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
return NULL;
}
(void) expression_tree_walker((Node *) funcExpr->args, contain_param_walker,
&walkerParamContext);
if (walkerParamContext.hasParam)
{
if (walkerParamContext.paramKind == PARAM_EXTERN)
{
/* Don't log a message, we should end up here again without a parameter */
DissuadePlannerFromUsingPlan(planContext->plan);
}
else
{
ereport(DEBUG1, (errmsg("arguments in a distributed function must "
"not contain subqueries")));
}
return NULL;
}
ereport(DEBUG1, (errmsg("pushing down the function call")));
Task *task = CitusMakeNode(Task);
/*
* In a multi-statement block the function should be part of the surrounding
* transaction, at this time, not knowing the operations in the function, it
* is safe to assume that it's a write task.
*
* TODO: We should compile the function to see the internals of the function
* and find if this has read-only tasks, does it involve doing a remote task
* or queries involving non-distribution column, etc.
*/
if (inTransactionBlock)
{
task->taskType = MODIFY_TASK;
}
else
{
task->taskType = READ_TASK;
}
task->taskPlacementList = list_make1(placement);
SetTaskQueryIfShouldLazyDeparse(task, planContext->query);
task->anchorShardId = placement->shardId;
@ -354,7 +505,7 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
job->jobQuery = planContext->query;
job->taskList = list_make1(task);
distributedPlan = CitusMakeNode(DistributedPlan);
DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);
distributedPlan->workerJob = job;
distributedPlan->combineQuery = NULL;
distributedPlan->expectResults = true;
@ -465,3 +616,184 @@ ShardPlacementForFunctionColocatedWithReferenceTable(CitusTableCacheEntry *cache
return (ShardPlacement *) linitial(placementList);
}
/*
 * CheckDelegatedFunctionExecution checks to see if the procedure is being
 * executed on a worker after being delegated by the coordinator. If the flag
 * forceDelegation is set, capture the distribution argument value, to be used
 * by the planner to make sure that the function uses only the colocated
 * shards of the distribution argument.
 */
void
CheckDelegatedFunctionExecution(DistObjectCacheEntry *procedure, FuncExpr *funcExpr)
{
	/* only meaningful for functions distributed with force_delegation */
	Assert(procedure->forceDelegation);

	/*
	 * On the coordinator PartiallyEvaluateExpression() descends into an
	 * expression tree to evaluate expressions that can be resolved to a
	 * constant. Expressions containing a Var are skipped, since the value
	 * of the Var is not known on the coordinator.
	 */
	Node *partitionValueNode = (Node *) list_nth(funcExpr->args,
												 procedure->distributionArgIndex);
	Assert(partitionValueNode);

	/* peel off any implicit casts wrapping the distribution argument */
	partitionValueNode = strip_implicit_coercions(partitionValueNode);

	if (IsA(partitionValueNode, Param))
	{
		Param *partitionParam = (Param *) partitionValueNode;

		if (partitionParam->paramkind == PARAM_EXTERN)
		{
			/* we should end up here again without a parameter */
			return;
		}
	}

	if (IsA(partitionValueNode, Const))
	{
		Const *partitionValueConst = (Const *) partitionValueNode;

		ereport(DEBUG1, (errmsg("Pushdown argument: %s", pretty_format_node_dump(
									nodeToString(partitionValueNode)))));

		/* pin the distribution argument for the remainder of the transaction */
		EnableInForceDelegatedFuncExecution(partitionValueConst, procedure->colocationId);
	}
}
/*
 * IsQuerySimple returns true if the query is simple enough to skip the full
 * executor. It checks only for expressions in the query clauses, and not
 * WHERE and FROM lists.
 */
static bool
IsQuerySimple(Query *query)
{
	/* any of these clause types makes the query non-simple */
	bool hasComplexClause = (query->hasAggs ||
							 query->hasWindowFuncs ||
							 query->hasTargetSRFs ||
							 query->hasSubLinks ||
							 query->cteList ||
							 query->groupClause ||
							 query->groupingSets ||
							 query->havingQual ||
							 query->windowClause ||
							 query->distinctClause ||
							 query->sortClause ||
							 query->limitOffset ||
							 query->limitCount ||
							 query->setOperations);

	return !hasComplexClause;
}
/*
 * FunctionInFromClause looks for a function in the FROM clause. It returns
 * the FuncExpr when the FROM clause consists of exactly one simple function
 * RTE, and NULL otherwise.
 */
static FuncExpr *
FunctionInFromClause(List *fromlist, Query *query)
{
	/* only a lone FROM-list entry can be a bare function call */
	if (list_length(fromlist) != 1)
	{
		return NULL;
	}

	Node *fromNode = (Node *) linitial(fromlist);
	if (!IsA(fromNode, RangeTblRef))
	{
		/* no range table entry to inspect */
		return NULL;
	}

	RangeTblEntry *rangeEntry = rt_fetch(((RangeTblRef *) fromNode)->rtindex,
										 query->rtable);
	if (rangeEntry->rtekind != RTE_FUNCTION ||
		list_length(rangeEntry->functions) != 1)
	{
		/* not a function RTE, or not a single function */
		return NULL;
	}

	RangeTblFunction *rangeFunction =
		(RangeTblFunction *) linitial(rangeEntry->functions);
	if (!IsA(rangeFunction->funcexpr, FuncExpr))
	{
		/* RTE isn't a simple FuncExpr */
		return NULL;
	}

	return (FuncExpr *) rangeFunction->funcexpr;
}
/*
 * EnableInForceDelegatedFuncExecution sets a flag to true indicating that the
 * current node is executing a delegated function call, using forceDelegation,
 * within a distributed transaction issued by the coordinator. Also, saves the
 * distribution argument.
 */
static void
EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId)
{
	/*
	 * The saved distribution argument needs to persist through the life
	 * of the query, both during the planning (where we save) and execution
	 * (where we compare), hence the copy is made in TopTransactionContext.
	 */
	MemoryContext oldcontext = MemoryContextSwitchTo(TopTransactionContext);
	ereport(DEBUG1, errmsg("Saving Distribution Argument: %s:%d",
						   pretty_format_node_dump(nodeToString(distArgument)),
						   colocationId));

	/* cleared again by ResetAllowedShardKeyValue() when the call finishes */
	AllowedDistributionColumnValue.distributionColumnValue = copyObject(distArgument);
	AllowedDistributionColumnValue.colocationId = colocationId;
	AllowedDistributionColumnValue.isActive = true;
	MemoryContextSwitchTo(oldcontext);
}
/*
* Within a 2PC, when a function is delegated to a remote node, we pin
* the distribution argument as the shard key for all the SQL in the
* function's block. The restriction is imposed to not to access other
* nodes from the current node and violate the transactional integrity of
* the 2PC. Reset the distribution argument value once the function ends.
*/
void
ResetAllowedShardKeyValue(void)
{
if (AllowedDistributionColumnValue.isActive)
{
pfree(AllowedDistributionColumnValue.distributionColumnValue);
AllowedDistributionColumnValue.isActive = false;
}
InTopLevelDelegatedFunctionCall = false;
}
/*
 * IsShardKeyValueAllowed returns true if the current shard key in the
 * adaptive executor matches the saved distribution argument of a
 * force_delegation function (both the Const value and the colocation id
 * must match).
 */
bool
IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId)
{
	/* only called while a force-delegated argument is pinned */
	Assert(AllowedDistributionColumnValue.isActive);

	/* use %u: colocation ids are uint32 */
	ereport(DEBUG4, errmsg("Comparing saved:%s with Shard key: %s colocationid:%u:%u",
						   pretty_format_node_dump(
							   nodeToString(
								   AllowedDistributionColumnValue.distributionColumnValue)),
						   pretty_format_node_dump(nodeToString(shardKey)),
						   AllowedDistributionColumnValue.colocationId, colocationId));

	return (equal(AllowedDistributionColumnValue.distributionColumnValue, shardKey) &&
			(AllowedDistributionColumnValue.colocationId == colocationId));
}

View File

@ -25,6 +25,7 @@
#include "citus_version.h"
#include "commands/explain.h"
#include "common/string.h"
#include "executor/executor.h"
#include "distributed/backend_data.h"
#include "distributed/citus_nodefuncs.h"
@ -102,6 +103,9 @@ static char *CitusVersion = CITUS_VERSION;
/* deprecated GUC value that should not be used anywhere outside this file */
static int ReplicationModel = REPLICATION_MODEL_STREAMING;
/* we override the application_name assign_hook and keep a pointer to the old one */
static GucStringAssignHook OldApplicationNameAssignHook = NULL;
void _PG_init(void);
void _PG_fini(void);
@ -115,11 +119,16 @@ static void CitusCleanupConnectionsAtExit(int code, Datum arg);
static void DecrementClientBackendCounterAtExit(int code, Datum arg);
static void CreateRequiredDirectories(void);
static void RegisterCitusConfigVariables(void);
static void OverridePostgresConfigAssignHooks(void);
static bool ErrorIfNotASuitableDeadlockFactor(double *newval, void **extra,
GucSource source);
static bool WarnIfDeprecatedExecutorUsed(int *newval, void **extra, GucSource source);
static bool WarnIfReplicationModelIsSet(int *newval, void **extra, GucSource source);
static bool NoticeIfSubqueryPushdownEnabled(bool *newval, void **extra, GucSource source);
static bool HideShardsFromAppNamePrefixesCheckHook(char **newval, void **extra,
GucSource source);
static void HideShardsFromAppNamePrefixesAssignHook(const char *newval, void *extra);
static void ApplicationNameAssignHook(const char *newval, void *extra);
static bool NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source);
static void NodeConninfoGucAssignHook(const char *newval, void *extra);
static const char * MaxSharedPoolSizeGucShowHook(void);
@ -1106,6 +1115,24 @@ RegisterCitusConfigVariables(void)
GUC_NO_SHOW_ALL,
NULL, NULL, NULL);
DefineCustomStringVariable(
"citus.hide_shards_from_app_name_prefixes",
gettext_noop("If application_name starts with one of these values, hide shards"),
gettext_noop("Citus places distributed tables and shards in the same schema. "
"That can cause confusion when inspecting the list of tables on "
"a node with shards. This GUC can be used to hide the shards from "
"pg_class for certain applications based on the application_name "
"of the connection. The default is *, which hides shards from all "
"applications. This behaviour can be overridden using the "
"citus.override_table_visibility setting"),
&HideShardsFromAppNamePrefixes,
"*",
PGC_USERSET,
GUC_STANDARD,
HideShardsFromAppNamePrefixesCheckHook,
HideShardsFromAppNamePrefixesAssignHook,
NULL);
DefineCustomIntVariable(
"citus.isolation_test_session_process_id",
NULL,
@ -1782,6 +1809,33 @@ RegisterCitusConfigVariables(void)
/* warn about config items in the citus namespace that are not registered above */
EmitWarningsOnPlaceholders("citus");
OverridePostgresConfigAssignHooks();
}
/*
 * OverridePostgresConfigAssignHooks overrides GUC assign hooks where we want
 * custom behaviour. Currently only application_name is overridden, so that
 * Citus can reset its hide-shards decision when the name changes.
 */
static void
OverridePostgresConfigAssignHooks(void)
{
	struct config_generic **guc_vars = get_guc_variables();
	int gucCount = GetNumConfigOptions();

	for (int gucIndex = 0; gucIndex < gucCount; gucIndex++)
	{
		/* guc_vars entries are already struct config_generic pointers */
		struct config_generic *var = guc_vars[gucIndex];

		if (strcmp(var->name, "application_name") == 0)
		{
			struct config_string *stringVar = (struct config_string *) var;

			/* remember the original hook so ApplicationNameAssignHook can chain to it */
			OldApplicationNameAssignHook = stringVar->assign_hook;
			stringVar->assign_hook = ApplicationNameAssignHook;

			/* there is only one application_name GUC, stop scanning */
			break;
		}
	}
}
@ -1884,6 +1938,76 @@ WarnIfReplicationModelIsSet(int *newval, void **extra, GucSource source)
}
/*
 * HideShardsFromAppNamePrefixesCheckHook ensures that the
 * citus.hide_shards_from_app_name_prefixes holds a valid list of application_name
 * values.
 */
static bool
HideShardsFromAppNamePrefixesCheckHook(char **newval, void **extra, GucSource source)
{
	List *prefixList = NIL;

	/* SplitGUCList scribbles on the input */
	char *splitCopy = pstrdup(*newval);

	/* check whether we can split into a list of identifiers */
	if (!SplitGUCList(splitCopy, ',', &prefixList))
	{
		GUC_check_errdetail("not a valid list of identifiers");
		return false;
	}

	char *appNamePrefix = NULL;
	foreach_ptr(appNamePrefix, prefixList)
	{
		/* prefixes are matched against application_name, which is bounded by NAMEDATALEN */
		int prefixLength = strlen(appNamePrefix);
		if (prefixLength >= NAMEDATALEN)
		{
			GUC_check_errdetail("prefix %s is more than %d characters", appNamePrefix,
								NAMEDATALEN);
			return false;
		}

		/*
		 * Clean a copy in place and compare against the original to detect
		 * any non-ascii bytes in the prefix.
		 */
		char *prefixAscii = pstrdup(appNamePrefix);
		pg_clean_ascii(prefixAscii);

		if (strcmp(prefixAscii, appNamePrefix) != 0)
		{
			GUC_check_errdetail("prefix %s in citus.hide_shards_from_app_name_prefixes "
								"contains non-ascii characters", appNamePrefix);
			return false;
		}
	}

	return true;
}
/*
 * HideShardsFromAppNamePrefixesAssignHook ensures changes to
 * citus.hide_shards_from_app_name_prefixes are reflected in the decision
 * whether or not to show shards.
 */
static void
HideShardsFromAppNamePrefixesAssignHook(const char *newval, void *extra)
{
	/* invalidate the cached decision so it is recomputed on next use */
	ResetHideShardsDecision();
}
/*
* ApplicationNameAssignHook is called whenever application_name changes
* to allow us to reset our hide shards decision.
*/
static void
ApplicationNameAssignHook(const char *newval, void *extra)
{
ResetHideShardsDecision();
OldApplicationNameAssignHook(newval, extra);
}
/*
* NodeConninfoGucCheckHook ensures conninfo settings are in the expected form
* and that the keywords of all non-null settings are on a allowlist devised to

View File

@ -2,9 +2,12 @@
-- bump version to 11.0-1
#include "udfs/citus_disable_node/11.0-1.sql"
#include "udfs/create_distributed_function/11.0-1.sql"
#include "udfs/citus_check_connection_to_node/11.0-1.sql"
#include "udfs/citus_check_cluster_node_health/11.0-1.sql"
#include "udfs/citus_shards_on_worker/11.0-1.sql"
#include "udfs/citus_shard_indexes_on_worker/11.0-1.sql"
#include "udfs/citus_internal_add_object_metadata/11.0-1.sql"
#include "udfs/citus_run_local_command/11.0-1.sql"
@ -19,6 +22,7 @@ DROP FUNCTION pg_catalog.master_append_table_to_shard(bigint, text, text, intege
-- all existing citus local tables are auto converted
-- none of the other tables can have auto-converted as true
ALTER TABLE pg_catalog.pg_dist_partition ADD COLUMN autoconverted boolean DEFAULT false;
ALTER TABLE citus.pg_dist_object ADD COLUMN force_delegation bool DEFAULT NULL;
UPDATE pg_catalog.pg_dist_partition SET autoconverted = TRUE WHERE partmethod = 'n' AND repmodel = 's';
REVOKE ALL ON FUNCTION start_metadata_sync_to_node(text, integer) FROM PUBLIC;

View File

@ -1,5 +1,6 @@
-- citus--11.0-1--10.2-4
DROP FUNCTION pg_catalog.create_distributed_function(regprocedure, text, text, bool);
CREATE FUNCTION pg_catalog.master_apply_delete_command(text)
RETURNS integer
LANGUAGE C STRICT
@ -43,7 +44,43 @@ COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport intege
DROP FUNCTION pg_catalog.citus_check_connection_to_node (text, integer);
DROP FUNCTION pg_catalog.citus_check_cluster_node_health ();
DROP FUNCTION pg_catalog.citus_internal_add_object_metadata(text, text[], text[], integer, integer);
DROP FUNCTION pg_catalog.citus_internal_add_object_metadata(text, text[], text[], integer, integer, boolean);
DROP FUNCTION pg_catalog.citus_run_local_command(text);
DROP FUNCTION pg_catalog.worker_drop_sequence_dependency(text);
DROP FUNCTION pg_catalog.worker_drop_distributed_table_only(table_name text);
CREATE OR REPLACE VIEW pg_catalog.citus_shards_on_worker AS
SELECT n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','p','v','m','S','f','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.relation_is_a_known_shard(c.oid)
ORDER BY 1,2;
CREATE OR REPLACE VIEW pg_catalog.citus_shard_indexes_on_worker AS
SELECT n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner",
c2.relname as "Table"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid
LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid
WHERE c.relkind IN ('i','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.relation_is_a_known_shard(c.oid)
ORDER BY 1,2;
DROP FUNCTION pg_catalog.citus_shards_on_worker();
DROP FUNCTION pg_catalog.citus_shard_indexes_on_worker();
#include "../udfs/create_distributed_function/9.0-1.sql"
ALTER TABLE citus.pg_dist_object DROP COLUMN force_delegation;

View File

@ -3,11 +3,12 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata(
objNames text[],
objArgs text[],
distribution_argument_index int,
colocationid int)
colocationid int,
force_delegation bool)
RETURNS void
LANGUAGE C
STRICT
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_add_object_metadata(text,text[],text[],int,int) IS
COMMENT ON FUNCTION pg_catalog.citus_internal_add_object_metadata(text,text[],text[],int,int,bool) IS
'Inserts distributed object into pg_dist_object';

View File

@ -3,11 +3,12 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata(
objNames text[],
objArgs text[],
distribution_argument_index int,
colocationid int)
colocationid int,
force_delegation bool)
RETURNS void
LANGUAGE C
STRICT
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_add_object_metadata(text,text[],text[],int,int) IS
COMMENT ON FUNCTION pg_catalog.citus_internal_add_object_metadata(text,text[],text[],int,int,bool) IS
'Inserts distributed object into pg_dist_object';

View File

@ -0,0 +1,39 @@
-- citus_shard_indexes_on_worker lists indexes on shard relations on this
-- node, in the shape \di produces. It is defined with an empty
-- citus.hide_shards_from_app_name_prefixes so that shards remain visible to
-- the function itself even when they are hidden from the application.
CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_indexes_on_worker(
     OUT schema_name name,
     OUT index_name name,
     OUT table_type text,
     OUT owner_name name,
     OUT shard_name name)
RETURNS SETOF record
LANGUAGE plpgsql
SET citus.hide_shards_from_app_name_prefixes = ''
AS $$
BEGIN
  -- this is the query that \di produces, except pg_table_is_visible
  -- is replaced with pg_catalog.relation_is_a_known_shard(c.oid)
  RETURN QUERY
    SELECT n.nspname as "Schema",
      c.relname as "Name",
      CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
      pg_catalog.pg_get_userbyid(c.relowner) as "Owner",
      c2.relname as "Table"
    FROM pg_catalog.pg_class c
         LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
         LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid
         LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid
    WHERE c.relkind IN ('i','')
          AND n.nspname <> 'pg_catalog'
          AND n.nspname <> 'information_schema'
          AND n.nspname !~ '^pg_toast'
          AND pg_catalog.relation_is_a_known_shard(c.oid)
    ORDER BY 1,2;
END;
$$;

-- expose the function through a view whose columns match the \di headers
CREATE OR REPLACE VIEW pg_catalog.citus_shard_indexes_on_worker AS
  SELECT schema_name as "Schema",
         index_name as "Name",
         table_type as "Type",
         owner_name as "Owner",
         shard_name as "Table"
  FROM pg_catalog.citus_shard_indexes_on_worker() s;

View File

@ -0,0 +1,39 @@
-- citus_shard_indexes_on_worker lists indexes on shard relations on this
-- node, in the shape \di produces. It is defined with an empty
-- citus.hide_shards_from_app_name_prefixes so that shards remain visible to
-- the function itself even when they are hidden from the application.
CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_indexes_on_worker(
     OUT schema_name name,
     OUT index_name name,
     OUT table_type text,
     OUT owner_name name,
     OUT shard_name name)
RETURNS SETOF record
LANGUAGE plpgsql
SET citus.hide_shards_from_app_name_prefixes = ''
AS $$
BEGIN
  -- this is the query that \di produces, except pg_table_is_visible
  -- is replaced with pg_catalog.relation_is_a_known_shard(c.oid)
  RETURN QUERY
    SELECT n.nspname as "Schema",
      c.relname as "Name",
      CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
      pg_catalog.pg_get_userbyid(c.relowner) as "Owner",
      c2.relname as "Table"
    FROM pg_catalog.pg_class c
         LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
         LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid
         LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid
    WHERE c.relkind IN ('i','')
          AND n.nspname <> 'pg_catalog'
          AND n.nspname <> 'information_schema'
          AND n.nspname !~ '^pg_toast'
          AND pg_catalog.relation_is_a_known_shard(c.oid)
    ORDER BY 1,2;
END;
$$;

-- expose the function through a view whose columns match the \di headers
CREATE OR REPLACE VIEW pg_catalog.citus_shard_indexes_on_worker AS
  SELECT schema_name as "Schema",
         index_name as "Name",
         table_type as "Type",
         owner_name as "Owner",
         shard_name as "Table"
  FROM pg_catalog.citus_shard_indexes_on_worker() s;

View File

@ -0,0 +1,34 @@
-- citus_shards_on_worker lists the shard relations on this node, in the
-- shape \d produces. It is defined with an empty
-- citus.hide_shards_from_app_name_prefixes so that shards remain visible to
-- the function itself even when they are hidden from the application.
CREATE OR REPLACE FUNCTION pg_catalog.citus_shards_on_worker(
     OUT schema_name name,
     OUT shard_name name,
     OUT table_type text,
     OUT owner_name name)
RETURNS SETOF record
LANGUAGE plpgsql
SET citus.hide_shards_from_app_name_prefixes = ''
AS $$
BEGIN
  -- this is the query that \d produces, except pg_table_is_visible
  -- is replaced with pg_catalog.relation_is_a_known_shard(c.oid)
  RETURN QUERY
    SELECT n.nspname as "Schema",
      c.relname as "Name",
      CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
      pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
    FROM pg_catalog.pg_class c
         LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
    WHERE c.relkind IN ('r','p','v','m','S','f','')
          AND n.nspname <> 'pg_catalog'
          AND n.nspname <> 'information_schema'
          AND n.nspname !~ '^pg_toast'
          AND pg_catalog.relation_is_a_known_shard(c.oid)
    ORDER BY 1,2;
END;
$$;

-- expose the function through a view whose columns match the \d headers
CREATE OR REPLACE VIEW pg_catalog.citus_shards_on_worker AS
  SELECT schema_name as "Schema",
         shard_name as "Name",
         table_type as "Type",
         owner_name as "Owner"
  FROM pg_catalog.citus_shards_on_worker() s;

View File

@ -0,0 +1,34 @@
-- citus_shards_on_worker lists the shard relations on this node, in the
-- shape \d produces. It is defined with an empty
-- citus.hide_shards_from_app_name_prefixes so that shards remain visible to
-- the function itself even when they are hidden from the application.
CREATE OR REPLACE FUNCTION pg_catalog.citus_shards_on_worker(
     OUT schema_name name,
     OUT shard_name name,
     OUT table_type text,
     OUT owner_name name)
RETURNS SETOF record
LANGUAGE plpgsql
SET citus.hide_shards_from_app_name_prefixes = ''
AS $$
BEGIN
  -- this is the query that \d produces, except pg_table_is_visible
  -- is replaced with pg_catalog.relation_is_a_known_shard(c.oid)
  RETURN QUERY
    SELECT n.nspname as "Schema",
      c.relname as "Name",
      CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type",
      pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
    FROM pg_catalog.pg_class c
         LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
    WHERE c.relkind IN ('r','p','v','m','S','f','')
          AND n.nspname <> 'pg_catalog'
          AND n.nspname <> 'information_schema'
          AND n.nspname !~ '^pg_toast'
          AND pg_catalog.relation_is_a_known_shard(c.oid)
    ORDER BY 1,2;
END;
$$;

-- expose the function through a view whose columns match the \d headers
CREATE OR REPLACE VIEW pg_catalog.citus_shards_on_worker AS
  SELECT schema_name as "Schema",
         shard_name as "Name",
         table_type as "Type",
         owner_name as "Owner"
  FROM pg_catalog.citus_shards_on_worker() s;

View File

@ -0,0 +1,15 @@
-- recreate create_distributed_function with the new force_delegation
-- argument; the old three-argument signature must be dropped first since
-- CREATE OR REPLACE cannot change a function's argument list
DROP FUNCTION pg_catalog.create_distributed_function(regprocedure, text, text);
CREATE OR REPLACE FUNCTION pg_catalog.create_distributed_function(function_name regprocedure,
                                                                  distribution_arg_name text DEFAULT NULL,
                                                                  colocate_with text DEFAULT 'default',
                                                                  force_delegation bool DEFAULT NULL)
  RETURNS void
  LANGUAGE C CALLED ON NULL INPUT
  AS 'MODULE_PATHNAME', $$create_distributed_function$$;
COMMENT ON FUNCTION pg_catalog.create_distributed_function(function_name regprocedure,
                                                           distribution_arg_name text,
                                                           colocate_with text,
                                                           force_delegation bool)
  IS 'creates a distributed function';

View File

@ -1,11 +1,15 @@
CREATE OR REPLACE FUNCTION create_distributed_function(function_name regprocedure,
DROP FUNCTION pg_catalog.create_distributed_function(regprocedure, text, text);
CREATE OR REPLACE FUNCTION pg_catalog.create_distributed_function(function_name regprocedure,
distribution_arg_name text DEFAULT NULL,
colocate_with text DEFAULT 'default')
colocate_with text DEFAULT 'default',
force_delegation bool DEFAULT NULL)
RETURNS void
LANGUAGE C CALLED ON NULL INPUT
AS 'MODULE_PATHNAME', $$create_distributed_function$$;
COMMENT ON FUNCTION create_distributed_function(function_name regprocedure,
COMMENT ON FUNCTION pg_catalog.create_distributed_function(function_name regprocedure,
distribution_arg_name text,
colocate_with text)
colocate_with text,
force_delegation bool)
IS 'creates a distributed function';

View File

@ -39,10 +39,13 @@
#include "distributed/subplan_execution.h"
#include "distributed/version_compat.h"
#include "distributed/worker_log_messages.h"
#include "distributed/commands.h"
#include "utils/hsearch.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/datum.h"
#include "storage/fd.h"
#include "nodes/print.h"
CoordinatedTransactionState CurrentCoordinatedTransactionState = COORD_TRANS_NONE;
@ -103,6 +106,12 @@ MemoryContext CommitContext = NULL;
*/
bool ShouldCoordinatedTransactionUse2PC = false;
/*
* Distribution function argument (along with colocationId) when delegated
* using forceDelegation flag.
*/
AllowedDistributionColumn AllowedDistributionColumnValue;
/* if disabled, distributed statements in a function may run as separate transactions */
bool FunctionOpensTransactionBlock = true;
@ -119,10 +128,10 @@ static void CoordinatedSubTransactionCallback(SubXactEvent event, SubTransaction
static void AdjustMaxPreparedTransactions(void);
static void PushSubXact(SubTransactionId subId);
static void PopSubXact(SubTransactionId subId);
static bool MaybeExecutingUDF(void);
static void ResetGlobalVariables(void);
static bool SwallowErrors(void (*func)(void));
static void ForceAllInProgressConnectionsToClose(void);
static void EnsurePrepareTransactionIsAllowed(void);
/*
@ -460,12 +469,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
case XACT_EVENT_PARALLEL_PRE_COMMIT:
case XACT_EVENT_PRE_PREPARE:
{
if (InCoordinatedTransaction())
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use 2PC in transactions involving "
"multiple servers")));
}
EnsurePrepareTransactionIsAllowed();
break;
}
}
@ -551,8 +555,9 @@ ResetGlobalVariables()
ShouldCoordinatedTransactionUse2PC = false;
TransactionModifiedNodeMetadata = false;
MetadataSyncOnCommit = false;
InDelegatedFunctionCall = false;
InTopLevelDelegatedFunctionCall = false;
ResetWorkerErrorIndication();
AllowedDistributionColumnValue.isActive = false;
}
@ -786,7 +791,7 @@ IsMultiStatementTransaction(void)
* If the planner is being called from the executor, then we may also be in
* a UDF.
*/
static bool
bool
MaybeExecutingUDF(void)
{
return ExecutorLevel > 1 || (ExecutorLevel == 1 && PlannerLevel > 0);
@ -803,3 +808,31 @@ TriggerMetadataSyncOnCommit(void)
{
MetadataSyncOnCommit = true;
}
/*
 * EnsurePrepareTransactionIsAllowed raises an exception if the current
 * backend started a coordinated transaction and got a PREPARE event to
 * become a participant in a 2PC transaction coordinated by another node.
 */
static void
EnsurePrepareTransactionIsAllowed(void)
{
	/*
	 * PREPARE is fine when this backend did not start a coordinated
	 * transaction, or when it is a Citus-initiated backend.
	 */
	if (!InCoordinatedTransaction() || IsCitusInitiatedRemoteBackend())
	{
		return;
	}

	ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					errmsg("cannot use 2PC in transactions involving "
						   "multiple servers")));
}

View File

@ -12,21 +12,43 @@
#include "catalog/index.h"
#include "catalog/namespace.h"
#include "catalog/pg_class.h"
#include "catalog/pg_type.h"
#include "distributed/metadata_cache.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/listutils.h"
#include "distributed/local_executor.h"
#include "distributed/query_colocation_checker.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_shard_visibility.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "utils/varlena.h"
/* HideShardsMode is used to determine whether to hide shards */
typedef enum HideShardsMode
{
CHECK_APPLICATION_NAME,
HIDE_SHARDS_FROM_APPLICATION,
DO_NOT_HIDE_SHARDS
} HideShardsMode;
/* Config variable managed via guc.c */
bool OverrideTableVisibility = true;
bool EnableManualChangesToShards = false;
static bool ReplaceTableVisibleFunctionWalker(Node *inputNode);
/* hide shards when the application_name starts with one of: */
char *HideShardsFromAppNamePrefixes = "*";
/* cache of whether or not to hide shards */
static HideShardsMode HideShards = CHECK_APPLICATION_NAME;
static bool ShouldHideShards(void);
static bool ShouldHideShardsInternal(void);
static bool FilterShardsFromPgclass(Node *node, void *context);
static Node * CreateRelationIsAKnownShardFilter(int pgClassVarno);
PG_FUNCTION_INFO_V1(citus_table_is_visible);
PG_FUNCTION_INFO_V1(relation_is_a_known_shard);
@ -43,18 +65,6 @@ relation_is_a_known_shard(PG_FUNCTION_ARGS)
CheckCitusVersion(ERROR);
Oid relationId = PG_GETARG_OID(0);
if (!RelationIsVisible(relationId))
{
/*
* Relation is not on the search path.
*
* TODO: it might be nicer to add a separate check in the
* citus_shards_on_worker views where this UDF is used.
*/
PG_RETURN_BOOL(false);
}
PG_RETURN_BOOL(RelationIsAKnownShard(relationId));
}
@ -162,9 +172,6 @@ ErrorIfIllegallyChangingKnownShard(Oid relationId)
/*
* RelationIsAKnownShard gets a relationId, check whether it's a shard of
* any distributed table.
*
* We can only do that in MX since both the metadata and tables are only
* present there.
*/
bool
RelationIsAKnownShard(Oid shardRelationId)
@ -257,72 +264,190 @@ RelationIsAKnownShard(Oid shardRelationId)
/*
* ReplaceTableVisibleFunction is a wrapper around ReplaceTableVisibleFunctionWalker.
* The replace functionality can be enabled/disable via a GUC. This function also
* ensures that the extension is loaded and the version is compatible.
* HideShardsFromSomeApplications transforms queries to pg_class to
* filter out known shards if the application_name matches any of
* the prefixes in citus.hide_shards_from_app_name_prefixes.
*/
void
ReplaceTableVisibleFunction(Node *inputNode)
HideShardsFromSomeApplications(Query *query)
{
if (!OverrideTableVisibility ||
if (!OverrideTableVisibility || HideShards == DO_NOT_HIDE_SHARDS ||
!CitusHasBeenLoaded() || !CheckCitusVersion(DEBUG2))
{
return;
}
ReplaceTableVisibleFunctionWalker(inputNode);
if (ShouldHideShards())
{
FilterShardsFromPgclass((Node *) query, NULL);
}
}
/*
* ReplaceTableVisibleFunction replaces all occurrences of
* pg_catalog.pg_table_visible() to
* pg_catalog.citus_table_visible() in the given input node.
*
* Note that the only difference between the functions is that
* the latter filters the tables that are known to be shards on
* Citus MX worker (data) nodes.
*
* Note that although the function mutates the input node, we
* prefer to use query_tree_walker/expression_tree_walker over
* their mutator equivalents. This is safe because we ensure that
* the replaced function has the exact same input/output values with
* its precedent.
* ShouldHideShards returns whether we should hide shards in the current
* session. It only checks the application_name once and then uses a
* cached response unless either the application_name or
* citus.hide_shards_from_app_name_prefixes changes.
*/
static bool
ReplaceTableVisibleFunctionWalker(Node *inputNode)
ShouldHideShards(void)
{
if (inputNode == NULL)
if (HideShards == CHECK_APPLICATION_NAME)
{
if (ShouldHideShardsInternal())
{
HideShards = HIDE_SHARDS_FROM_APPLICATION;
return true;
}
else
{
HideShards = DO_NOT_HIDE_SHARDS;
return false;
}
}
else
{
return HideShards == HIDE_SHARDS_FROM_APPLICATION;
}
}
/*
 * ResetHideShardsDecision resets the cached decision whether to hide
 * shards, so that the application_name is re-checked on the next call
 * to ShouldHideShards.
 */
void
ResetHideShardsDecision(void)
{
	HideShards = CHECK_APPLICATION_NAME;
}
/*
 * ShouldHideShardsInternal determines whether we should hide shards based on
 * the current application name: it hides shards when the application_name
 * starts with one of the prefixes in the GUC, or when a prefix is "*".
 */
static bool
ShouldHideShardsInternal(void)
{
	if (IsCitusInitiatedRemoteBackend())
	{
		/* we never hide shards from Citus */
		return false;
	}

	/* SplitGUCList scribbles on the input, so work on a copy */
	char *gucCopy = pstrdup(HideShardsFromAppNamePrefixes);

	List *prefixList = NIL;
	if (!SplitGUCList(gucCopy, ',', &prefixList))
	{
		/* invalid GUC value, ignore */
		return false;
	}

	bool hideShards = false;

	char *prefix = NULL;
	foreach_ptr(prefix, prefixList)
	{
		if (strcmp(prefix, "*") == 0)
		{
			/* a "*" prefix always hides shards */
			hideShards = true;
			break;
		}

		/* compare only the first strlen(prefix) characters */
		if (strncmp(application_name, prefix, strlen(prefix)) == 0)
		{
			hideShards = true;
			break;
		}
	}

	return hideShards;
}
/*
 * FilterShardsFromPgclass adds a NOT relation_is_a_known_shard(oid) filter
 * to the security quals of pg_class RTEs, so shard relations disappear from
 * catalog queries. It is a query/expression tree walker that recurses into
 * subqueries.
 */
static bool
FilterShardsFromPgclass(Node *node, void *context)
{
	if (node == NULL)
	{
		return false;
	}

	if (IsA(node, Query))
	{
		Query *query = (Query *) node;
		MemoryContext queryContext = GetMemoryChunkContext(query);

		/*
		 * We process the whole rtable rather than visiting individual
		 * RangeTblEntry's in the walker, since we need to know the varno
		 * to generate the right filter.
		 */
		int varno = 0;
		RangeTblEntry *rangeTableEntry = NULL;

		foreach_ptr(rangeTableEntry, query->rtable)
		{
			varno++;

			if (rangeTableEntry->rtekind != RTE_RELATION ||
				rangeTableEntry->relid != RelationRelationId)
			{
				/* not pg_class */
				continue;
			}

			/* make sure the expression is in the right memory context */
			MemoryContext originalContext = MemoryContextSwitchTo(queryContext);

			/* add NOT relation_is_a_known_shard(oid) to the security quals of the RTE */
			rangeTableEntry->securityQuals =
				list_make1(CreateRelationIsAKnownShardFilter(varno));

			MemoryContextSwitchTo(originalContext);
		}

		return query_tree_walker((Query *) node, FilterShardsFromPgclass, context, 0);
	}

	return expression_tree_walker(node, FilterShardsFromPgclass, context);
}
/*
* CreateRelationIsAKnownShardFilter constructs an expression of the form:
* NOT pg_catalog.relation_is_a_known_shard(oid)
*/
static Node *
CreateRelationIsAKnownShardFilter(int pgClassVarno)
{
/* oid is always the first column */
AttrNumber oidAttNum = 1;
Var *oidVar = makeVar(pgClassVarno, oidAttNum, OIDOID, -1, InvalidOid, 0);
/* build the call to read_intermediate_result */
FuncExpr *funcExpr = makeNode(FuncExpr);
funcExpr->funcid = RelationIsAKnownShardFuncId();
funcExpr->funcretset = false;
funcExpr->funcvariadic = false;
funcExpr->funcformat = 0;
funcExpr->funccollid = 0;
funcExpr->inputcollid = 0;
funcExpr->location = -1;
funcExpr->args = list_make1(oidVar);
BoolExpr *notExpr = makeNode(BoolExpr);
notExpr->boolop = NOT_EXPR;
notExpr->args = list_make1(funcExpr);
notExpr->location = -1;
return (Node *) notExpr;
}

View File

@ -54,13 +54,6 @@ bool get_merged_argument_list(CallStmt *stmt, List **mergedNamedArgList,
char * pg_get_rule_expr(Node *expression);
extern void deparse_shard_query(Query *query, Oid distrelid, int64 shardid,
StringInfo buffer);
extern char * pg_get_triggerdef_command(Oid triggerId);
#if PG_VERSION_NUM >= PG_VERSION_14
extern char * pg_get_statisticsobj_worker(Oid statextid, bool columns_only,
bool missing_ok);
#else
extern char * pg_get_statisticsobj_worker(Oid statextid, bool missing_ok);
#endif
extern char * generate_relation_name(Oid relid, List *namespaces);
extern char * generate_qualified_relation_name(Oid relid);
extern char * generate_operator_name(Oid operid, Oid arg1, Oid arg2);

View File

@ -506,7 +506,8 @@ extern char * GenerateBackupNameForProcCollision(const ObjectAddress *address);
extern ObjectWithArgs * ObjectWithArgsFromOid(Oid funcOid);
extern void UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
int *distribution_argument_index,
int *colocationId);
int *colocationId,
bool *forceDelegation);
/* vacuum.c - forward declarations */
extern void PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand);

View File

@ -19,11 +19,11 @@
* These flags keep track of whether the process is currently in a delegated
* function or procedure call.
*/
extern bool InDelegatedFunctionCall;
extern bool InTopLevelDelegatedFunctionCall;
extern bool InDelegatedProcedureCall;
PlannedStmt * TryToDelegateFunctionCall(DistributedPlanningContext *planContext);
extern void ResetAllowedShardKeyValue(void);
extern bool IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId);
#endif /* FUNCTION_CALL_DELEGATION_H */

View File

@ -16,6 +16,7 @@
#include "catalog/objectaddress.h"
#define INVALID_DISTRIBUTION_ARGUMENT_INDEX -1
#define NO_FORCE_PUSHDOWN 0
extern bool ObjectExists(const ObjectAddress *address);
extern bool CitusExtensionObject(const ObjectAddress *objectAddress);

View File

@ -35,6 +35,7 @@ typedef struct FormData_pg_dist_object
uint32 distribution_argument_index; /* only valid for distributed functions/procedures */
uint32 colocationid; /* only valid for distributed functions/procedures */
boolean forced_pushdown; /* only valid for distributed functions */
#endif
} FormData_pg_dist_object;
@ -49,7 +50,7 @@ typedef FormData_pg_dist_object *Form_pg_dist_object;
* compiler constants for pg_dist_object
* ----------------
*/
#define Natts_pg_dist_object 8
#define Natts_pg_dist_object 9
#define Anum_pg_dist_object_classid 1
#define Anum_pg_dist_object_objid 2
#define Anum_pg_dist_object_objsubid 3
@ -58,5 +59,6 @@ typedef FormData_pg_dist_object *Form_pg_dist_object;
#define Anum_pg_dist_object_object_args 6
#define Anum_pg_dist_object_distribution_argument_index 7
#define Anum_pg_dist_object_colocationid 8
#define Anum_pg_dist_object_force_delegation 9
#endif /* PG_DIST_OBJECT_H */

View File

@ -115,6 +115,7 @@ typedef struct DistObjectCacheEntry
int distributionArgIndex;
int colocationId;
bool forceDelegation;
} DistObjectCacheEntry;
typedef enum
@ -252,6 +253,7 @@ extern Oid CitusExtraDataContainerFuncId(void);
extern Oid CitusAnyValueFunctionId(void);
extern Oid PgTableVisibleFuncId(void);
extern Oid CitusTableVisibleFuncId(void);
extern Oid RelationIsAKnownShardFuncId(void);
extern Oid JsonbExtractPathFuncId(void);
/* enum oids */

View File

@ -39,7 +39,8 @@ extern List * DistributedObjectMetadataSyncCommandList(void);
extern List * NodeMetadataDropCommands(void);
extern char * MarkObjectsDistributedCreateCommand(List *addresses,
List *distributionArgumentIndexes,
List *colocationIds);
List *colocationIds,
List *forceDelegations);
extern char * DistributionCreateCommand(CitusTableCacheEntry *cacheEntry);
extern char * DistributionDeleteCommand(const char *schemaName,
const char *tableName);

View File

@ -199,7 +199,6 @@ extern bool IsCitusTableRTE(Node *node);
extern bool IsDistributedOrReferenceTableRTE(Node *node);
extern bool IsDistributedTableRTE(Node *node);
extern bool IsReferenceTableRTE(Node *node);
extern bool QueryContainsDistributedTableRTE(Query *query);
extern bool IsCitusExtraDataContainerRelation(RangeTblEntry *rte);
extern bool ContainsReadIntermediateResultFunction(Node *node);
extern bool ContainsReadIntermediateResultArrayFunction(Node *node);

View File

@ -161,6 +161,7 @@ typedef struct Job
* query.
*/
bool parametersInJobQueryResolved;
uint32 colocationId; /* common colocation group ID of the relations */
} Job;

View File

@ -12,6 +12,12 @@
#include "lib/ilist.h"
#include "lib/stringinfo.h"
#include "nodes/pg_list.h"
#include "lib/stringinfo.h"
#include "nodes/primnodes.h"
/* forward declare, to avoid recursive includes */
struct DistObjectCacheEntry;
/* describes what kind of modifications have occurred in the current transaction */
typedef enum
@ -53,6 +59,19 @@ typedef struct SubXactContext
StringInfo setLocalCmds;
} SubXactContext;
/*
* Function delegated with force_delegation call enforces the distribution argument
* along with the colocationId. The latter one is equally important to not allow
* the same partition key value into another distributed table which is not co-located
* and therefore might be on a different node.
*/
typedef struct AllowedDistributionColumn
{
Const *distributionColumnValue;
uint32 colocationId;
bool isActive;
} AllowedDistributionColumn;
/*
* GUC that determines whether a SELECT in a transaction block should also run in
* a transaction block on the worker.
@ -100,6 +119,8 @@ extern void Use2PCForCoordinatedTransaction(void);
extern bool GetCoordinatedTransactionShouldUse2PC(void);
extern bool IsMultiStatementTransaction(void);
extern void EnsureDistributedTransactionId(void);
extern bool MaybeExecutingUDF(void);
/* initialization function(s) */
extern void InitializeTransactionManagement(void);

View File

@ -15,9 +15,11 @@
extern bool OverrideTableVisibility;
extern bool EnableManualChangesToShards;
extern char *HideShardsFromAppNamePrefixes;
extern void ReplaceTableVisibleFunction(Node *inputNode);
extern void HideShardsFromSomeApplications(Query *query);
extern void ResetHideShardsDecision(void);
extern void ErrorIfRelationIsAKnownShard(Oid relationId);
extern void ErrorIfIllegallyChangingKnownShard(Oid relationId);
extern bool RelationIsAKnownShard(Oid shardRelationId);

View File

@ -35,7 +35,6 @@
#define RelationGetPartitionDesc_compat(a, b) RelationGetPartitionDesc(a, b)
#define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(a, b)
#define pull_varnos_compat(a, b) pull_varnos(a, b)
#define pg_get_statisticsobj_worker_compat(a, b, c) pg_get_statisticsobj_worker(a, b, c)
#else
#define AlterTableStmtObjType_compat(a) ((a)->relkind)
#define F_NEXTVAL F_NEXTVAL_OID
@ -68,7 +67,6 @@
#define PQ_LARGE_MESSAGE_LIMIT 0
#define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(b)
#define pull_varnos_compat(a, b) pull_varnos(b)
#define pg_get_statisticsobj_worker_compat(a, b, c) pg_get_statisticsobj_worker(a, c)
#endif
#if PG_VERSION_NUM >= PG_VERSION_13

View File

@ -844,21 +844,28 @@ SELECT relname FROM pg_class
ORDER BY relname;
relname
---------------------------------------------------------------------
partitioned_distributed
partitioned_distributed_1
partitioned_distributed_1504038
partitioned_distributed_1504040
partitioned_distributed_1_1504042
partitioned_distributed_1_1504044
partitioned_distributed_1_a_key
partitioned_distributed_1_a_key_1504042
partitioned_distributed_1_a_key_1504044
partitioned_distributed_2
partitioned_distributed_2_1504046
partitioned_distributed_2_1504048
partitioned_distributed_2_a_key
partitioned_distributed_2_a_key_1504046
partitioned_distributed_2_a_key_1504048
partitioned_distributed_a_key
partitioned_distributed_a_key_1504038
partitioned_distributed_a_key_1504040
(12 rows)
(18 rows)
\c - - - :master_port
SET citus.next_shard_id TO 1904000;
SET search_path TO citus_local_tables_test_schema;
-- error out if converting multi-level partitioned table
CREATE TABLE multi_par (id text, country text) PARTITION BY RANGE (id);
@ -945,7 +952,7 @@ select count(*) from pg_constraint where conname = 'fkey_test_drop';
select inhrelid::regclass from pg_inherits where (select inhparent::regclass::text) ~ '^parent_1_\d{7}$' order by 1;
inhrelid
---------------------------------------------------------------------
parent_1_child_1_1190046
parent_1_child_1_1904006
(1 row)
-- check the shell partition

View File

@ -146,7 +146,7 @@ SELECT master_get_table_ddl_events('test_table');
CREATE TABLE table_triggers_schema.test_table (id integer, text_number text, text_col text)
ALTER TABLE table_triggers_schema.test_table OWNER TO postgres
CREATE TRIGGER test_table_delete AFTER DELETE ON table_triggers_schema.test_table FOR EACH STATEMENT EXECUTE FUNCTION table_triggers_schema.test_table_trigger_function()
CREATE CONSTRAINT TRIGGER test_table_insert AFTER INSERT ON table_triggers_schema.test_table DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW WHEN (((new.id OPERATOR(pg_catalog.>) 5) OR ((new.text_col IS NOT NULL) AND ((new.id)::numeric OPERATOR(pg_catalog.<) to_number(new.text_number, '9999'::text))))) EXECUTE FUNCTION table_triggers_schema.test_table_trigger_function()
CREATE CONSTRAINT TRIGGER test_table_insert AFTER INSERT ON table_triggers_schema.test_table DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW WHEN (((new.id > 5) OR ((new.text_col IS NOT NULL) AND ((new.id)::numeric < to_number(new.text_number, '9999'::text))))) EXECUTE FUNCTION table_triggers_schema.test_table_trigger_function()
CREATE CONSTRAINT TRIGGER test_table_update AFTER UPDATE OF id ON table_triggers_schema.test_table NOT DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW WHEN (((NOT (old.* IS DISTINCT FROM new.*)) AND (old.text_number IS NOT NULL))) EXECUTE FUNCTION table_triggers_schema.test_table_trigger_function()
(5 rows)

File diff suppressed because it is too large Load Diff

View File

@ -556,9 +556,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: unrecognized object type "non_existing_type"
ROLLBACK;
-- check the sanity of distributionArgumentIndex and colocationId
@ -571,9 +571,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: distribution_argument_index must be between 0 and 100
ROLLBACK;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@ -585,9 +585,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: colocationId must be a positive number
ROLLBACK;
-- check with non-existing object
@ -600,9 +600,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: role "non_existing_user" does not exist
ROLLBACK;
-- since citus_internal_add_object_metadata is strict function returns NULL
@ -616,9 +616,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
citus_internal_add_object_metadata
---------------------------------------------------------------------
@ -640,9 +640,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
CREATE TABLE publication_test_table(id int);
CREATE PUBLICATION publication_test FOR TABLE publication_test_table;
SET ROLE metadata_sync_helper_role;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('publication', ARRAY['publication_test']::text[], ARRAY[]::text[], -1, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('publication', ARRAY['publication_test']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: Object type 29 can not be distributed by Citus
ROLLBACK;
-- Show that citus_internal_add_object_metadata checks the priviliges
@ -659,9 +659,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
AS $$ SELECT $1 $$
LANGUAGE SQL;
SET ROLE metadata_sync_helper_role;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ERROR: must be owner of function distribution_test_function
ROLLBACK;
-- we do not allow wrong partmethod

View File

@ -999,17 +999,21 @@ SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
function citus_disable_node(text,integer) void |
function create_distributed_function(regprocedure,text,text) void |
function master_append_table_to_shard(bigint,text,text,integer) real |
function master_apply_delete_command(text) integer |
function master_get_table_metadata(text) record |
| function citus_check_cluster_node_health() SETOF record
| function citus_check_connection_to_node(text,integer) boolean
| function citus_disable_node(text,integer,boolean) void
| function citus_internal_add_object_metadata(text,text[],text[],integer,integer) void
| function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) void
| function citus_run_local_command(text) void
| function citus_shard_indexes_on_worker() SETOF record
| function citus_shards_on_worker() SETOF record
| function create_distributed_function(regprocedure,text,text,boolean) void
| function worker_drop_distributed_table_only(text) void
| function worker_drop_sequence_dependency(text) void
(11 rows)
(15 rows)
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version

View File

@ -674,9 +674,9 @@ NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2')
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2')
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx

View File

@ -8,7 +8,8 @@ SELECT attrelid::regclass, attname, atthasmissing, attmissingval
FROM pg_attribute
WHERE atthasmissing AND attrelid NOT IN ('pg_dist_node'::regclass,
'pg_dist_rebalance_strategy'::regclass,
'pg_dist_partition'::regclass)
'pg_dist_partition'::regclass,
'citus.pg_dist_object'::regclass)
ORDER BY attrelid, attname;
attrelid | attname | atthasmissing | attmissingval
---------------------------------------------------------------------

View File

@ -84,7 +84,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
TRUNCATE citus.pg_dist_object
TRUNCATE pg_dist_node CASCADE
UPDATE pg_dist_local_group SET groupid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
(26 rows)
-- this function is dropped in Citus10, added here for tests
@ -153,7 +153,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
TRUNCATE citus.pg_dist_object
TRUNCATE pg_dist_node CASCADE
UPDATE pg_dist_local_group SET groupid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(38 rows)
@ -199,7 +199,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
TRUNCATE citus.pg_dist_object
TRUNCATE pg_dist_node CASCADE
UPDATE pg_dist_local_group SET groupid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(39 rows)
@ -247,7 +247,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
TRUNCATE citus.pg_dist_object
TRUNCATE pg_dist_node CASCADE
UPDATE pg_dist_local_group SET groupid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(40 rows)
@ -301,7 +301,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
TRUNCATE citus.pg_dist_object
TRUNCATE pg_dist_node CASCADE
UPDATE pg_dist_local_group SET groupid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(40 rows)
@ -348,7 +348,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
TRUNCATE citus.pg_dist_object
TRUNCATE pg_dist_node CASCADE
UPDATE pg_dist_local_group SET groupid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(40 rows)
@ -1885,7 +1885,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
TRUNCATE citus.pg_dist_object
TRUNCATE pg_dist_node CASCADE
UPDATE pg_dist_local_group SET groupid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0), ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0), ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 5, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 5, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 5, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310020, 1, 0, 1, 100020), (1310021, 1, 0, 5, 100021), (1310022, 1, 0, 1, 100022), (1310023, 1, 0, 5, 100023), (1310024, 1, 0, 1, 100024)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310025, 1, 0, 1, 100025), (1310026, 1, 0, 5, 100026), (1310027, 1, 0, 1, 100027), (1310028, 1, 0, 5, 100028), (1310029, 1, 0, 1, 100029)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;

View File

@ -45,47 +45,37 @@ SELECT create_distributed_table('test_table', 'id');
-- first show that the views does not show
-- any shards on the coordinator as expected
SELECT * FROM citus_shards_on_worker;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names';
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
SELECT * FROM citus_shard_indexes_on_worker;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names';
Schema | Name | Type | Owner | Table
---------------------------------------------------------------------
(0 rows)
-- now show that we see the shards, but not the
-- indexes as there are no indexes
\c - - - :worker_1_port
\c postgresql://postgres@localhost::worker_1_port/regression?application_name=psql
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
Schema | Name | Type | Owner
---------------------------------------------------------------------
mx_hide_shard_names | test_table_1130000 | table | postgres
mx_hide_shard_names | test_table_1130002 | table | postgres
(2 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
Schema | Name | Type | Owner | Table
---------------------------------------------------------------------
(0 rows)
-- also show that nested calls to pg_table_is_visible works fine
-- if both of the calls to the pg_table_is_visible haven't been
-- replaced, we would get 0 rows in the output
SELECT
pg_table_is_visible((SELECT
"t1"."Name"::regclass
FROM
citus_shards_on_worker as t1
WHERE
NOT pg_table_is_visible("t1"."Name"::regclass)
LIMIT
1));
pg_table_is_visible
-- shards are hidden when using psql as application_name
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
f
test_table
(1 row)
-- now create an index
@ -94,22 +84,136 @@ SET search_path TO 'mx_hide_shard_names';
CREATE INDEX test_index ON mx_hide_shard_names.test_table(id);
-- now show that we see the shards, and the
-- indexes as well
\c - - - :worker_1_port
\c postgresql://postgres@localhost::worker_1_port/regression?application_name=psql
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
Schema | Name | Type | Owner
---------------------------------------------------------------------
mx_hide_shard_names | test_table_1130000 | table | postgres
mx_hide_shard_names | test_table_1130002 | table | postgres
(2 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
Schema | Name | Type | Owner | Table
---------------------------------------------------------------------
mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000
mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002
(2 rows)
-- shards are hidden when using psql as application_name
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_table
(2 rows)
-- changing application_name reveals the shards
SET application_name TO '';
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_index_1130000
test_index_1130002
test_table
test_table_1130000
test_table_1130002
(6 rows)
RESET application_name;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_table
(2 rows)
-- changing application_name in transaction reveals the shards
BEGIN;
SET LOCAL application_name TO '';
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_index_1130000
test_index_1130002
test_table
test_table_1130000
test_table_1130002
(6 rows)
ROLLBACK;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_table
(2 rows)
-- now with session-level GUC, but ROLLBACK;
BEGIN;
SET application_name TO '';
ROLLBACK;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_table
(2 rows)
-- we should hide correctly based on application_name with savepoints
BEGIN;
SAVEPOINT s1;
SET application_name TO '';
-- changing application_name reveals the shards
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_index_1130000
test_index_1130002
test_table
test_table_1130000
test_table_1130002
(6 rows)
ROLLBACK TO SAVEPOINT s1;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_table
(2 rows)
ROLLBACK;
-- changing citus.hide_shards_from_app_name_prefixes reveals the shards
BEGIN;
SET LOCAL citus.hide_shards_from_app_name_prefixes TO 'notpsql';
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_index_1130000
test_index_1130002
test_table
test_table_1130000
test_table_1130002
(6 rows)
ROLLBACK;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
relname
---------------------------------------------------------------------
test_index
test_table
(2 rows)
-- we should be able to select from the shards directly if we
-- know the name of the tables
SELECT count(*) FROM test_table_1130000;
@ -118,20 +222,20 @@ SELECT count(*) FROM test_table_1130000;
0
(1 row)
-- disable the config so that table becomes visible
SELECT pg_table_is_visible('test_table_1130000'::regclass);
pg_table_is_visible
---------------------------------------------------------------------
f
(1 row)
SET citus.override_table_visibility TO FALSE;
-- shards on the search_path still match pg_table_is_visible
SELECT pg_table_is_visible('test_table_1130000'::regclass);
pg_table_is_visible
---------------------------------------------------------------------
t
(1 row)
-- shards on the search_path do not match citus_table_is_visible
SELECT citus_table_is_visible('test_table_1130000'::regclass);
citus_table_is_visible
---------------------------------------------------------------------
f
(1 row)
\c - - - :master_port
-- make sure that we're resilient to the edge cases
-- such that the table name includes the shard number
@ -153,7 +257,7 @@ SET search_path TO 'mx_hide_shard_names';
-- with the same name since a table with the same
-- name already exists :)
CREATE TABLE test_table_2_1130000(id int, time date);
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
Schema | Name | Type | Owner
---------------------------------------------------------------------
mx_hide_shard_names | test_table_102008_1130004 | table | postgres
@ -187,7 +291,7 @@ SELECT create_distributed_table('test_table', 'id');
CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id);
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
Schema | Name | Type | Owner
---------------------------------------------------------------------
mx_hide_shard_names | test_table_102008_1130004 | table | postgres
@ -196,39 +300,27 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2;
mx_hide_shard_names | test_table_1130002 | table | postgres
(4 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
Schema | Name | Type | Owner | Table
---------------------------------------------------------------------
mx_hide_shard_names | test_index_1130000 | index | postgres | test_table_1130000
mx_hide_shard_names | test_index_1130002 | index | postgres | test_table_1130002
(2 rows)
SET search_path TO 'mx_hide_shard_names_2';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names_2' ORDER BY 2;
Schema | Name | Type | Owner
---------------------------------------------------------------------
mx_hide_shard_names_2 | test_table_1130008 | table | postgres
mx_hide_shard_names_2 | test_table_1130010 | table | postgres
(2 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names_2' ORDER BY 2;
Schema | Name | Type | Owner | Table
---------------------------------------------------------------------
mx_hide_shard_names_2 | test_index_1130008 | index | postgres | test_table_1130008
mx_hide_shard_names_2 | test_index_1130010 | index | postgres | test_table_1130010
(2 rows)
SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
Schema | Name | Type | Owner
---------------------------------------------------------------------
(0 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
Schema | Name | Type | Owner | Table
---------------------------------------------------------------------
(0 rows)
-- now try very long table names
\c - - - :master_port
SET citus.shard_count TO 4;
@ -247,7 +339,7 @@ SELECT create_distributed_table('too_long_12345678901234567890123456789012345678
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names_3';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names_3' ORDER BY 2;
Schema | Name | Type | Owner
---------------------------------------------------------------------
mx_hide_shard_names_3 | too_long_1234567890123456789012345678901234567_e0119164_1130012 | table | postgres
@ -278,14 +370,14 @@ SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id');
\c - - - :worker_1_port
SET search_path TO "CiTuS.TeeN";
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'CiTuS.TeeN' ORDER BY 2;
Schema | Name | Type | Owner
---------------------------------------------------------------------
CiTuS.TeeN | TeeNTabLE.1!?!_1130016 | table | postgres
CiTuS.TeeN | TeeNTabLE.1!?!_1130018 | table | postgres
(2 rows)
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'CiTuS.TeeN' ORDER BY 2;
Schema | Name | Type | Owner | Table
---------------------------------------------------------------------
CiTuS.TeeN | MyTenantIndex_1130016 | index | postgres | TeeNTabLE.1!?!_1130016

View File

@ -161,3 +161,31 @@ DETAIL: drop cascades to table data
drop cascades to table dist_columnar
drop cascades to table simple_columnar
drop cascades to table "weird.table"
CREATE SCHEMA dumper;
CREATE TABLE data (
key int,
value text
);
SELECT create_distributed_table('data', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
COPY data FROM STDIN WITH (format csv, delimiter '|', escape '\');
-- run pg_dump on worker (which has shards)
\COPY output FROM PROGRAM 'PGAPPNAME=pg_dump pg_dump -f results/pg_dump.tmp -h localhost -p 57637 -U postgres -d regression -n dumper --quote-all-identifiers'
-- restore pg_dump from worker via coordinator
DROP SCHEMA dumper CASCADE;
NOTICE: drop cascades to table data
\COPY (SELECT line FROM output WHERE line IS NOT NULL) TO PROGRAM 'psql -qtAX -h localhost -p 57636 -U postgres -d regression -f results/pg_dump.tmp'
-- check the tables (should not include shards)
SELECT tablename FROM pg_tables WHERE schemaname = 'dumper' ORDER BY 1;
tablename
---------------------------------------------------------------------
data
(1 row)
DROP SCHEMA dumper CASCADE;
NOTICE: drop cascades to table data

View File

@ -69,7 +69,7 @@ ORDER BY 1;
function citus_internal.replace_isolation_tester_func()
function citus_internal.restore_isolation_tester_func()
function citus_internal.upgrade_columnar_storage(regclass)
function citus_internal_add_object_metadata(text,text[],text[],integer,integer)
function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean)
function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char")
function citus_internal_add_placement_metadata(bigint,integer,bigint,integer,bigint)
function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text)
@ -97,7 +97,9 @@ ORDER BY 1;
function citus_shard_allowed_on_node_true(bigint,integer)
function citus_shard_cost_1(bigint)
function citus_shard_cost_by_disk_size(bigint)
function citus_shard_indexes_on_worker()
function citus_shard_sizes()
function citus_shards_on_worker()
function citus_stat_statements()
function citus_stat_statements_reset()
function citus_table_is_visible(oid)
@ -118,7 +120,7 @@ ORDER BY 1;
function coord_combine_agg(oid,cstring,anyelement)
function coord_combine_agg_ffunc(internal,oid,cstring,anyelement)
function coord_combine_agg_sfunc(internal,oid,cstring,anyelement)
function create_distributed_function(regprocedure,text,text)
function create_distributed_function(regprocedure,text,text,boolean)
function create_distributed_table(regclass,text,citus.distribution_type,text,integer)
function create_intermediate_result(text,text)
function create_reference_table(regclass)
@ -265,5 +267,5 @@ ORDER BY 1;
view citus_worker_stat_activity
view pg_dist_shard_placement
view time_partitions
(249 rows)
(251 rows)

View File

@ -1,4 +1,4 @@
test: multi_cluster_management
test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers
test: multi_cluster_management
test: multi_test_catalog_views
test: tablespace

View File

@ -185,23 +185,21 @@ test: foreign_key_to_reference_table validate_constraint
test: multi_repartition_udt multi_repartitioned_subquery_udf multi_subtransactions
test: multi_modifying_xacts
test: check_mx
test: turn_mx_off
test: multi_generate_ddl_commands multi_repair_shards
test: multi_create_shards
test: turn_mx_on
test: multi_transaction_recovery
test: local_dist_join_modifications
test: local_table_join
test: local_dist_join_mixed
test: citus_local_dist_joins
test: pg_dump
# ---------
# multi_copy creates hash and range-partitioned tables and performs COPY
# multi_router_planner creates hash partitioned tables.
# ---------
test: multi_copy fast_path_router_modify pg_dump
test: multi_copy fast_path_router_modify
test: multi_router_planner
# These 2 tests have prepared statements which sometimes get invalidated by concurrent tests,
# changing the debug output. We should not run them in parallel with others
@ -272,14 +270,11 @@ test: multi_foreign_key_relation_graph
# Replicating reference tables to coordinator. Add coordinator to pg_dist_node
# and rerun some of the tests.
# --------
test: check_mx
test: add_coordinator
test: foreign_key_to_reference_table
test: replicate_reference_tables_to_coordinator
test: turn_mx_off
test: citus_local_tables
test: mixed_relkind_tests
test: turn_mx_on
test: multi_row_router_insert
test: multi_reference_table citus_local_tables_queries
test: citus_local_table_triggers

View File

@ -68,6 +68,7 @@ test: multi_basic_queries cross_join multi_complex_expressions multi_subquery mu
test: multi_subquery_complex_reference_clause multi_subquery_window_functions multi_view multi_sql_function multi_prepare_sql
test: sql_procedure multi_function_in_join row_types materialized_view
test: multi_subquery_in_where_reference_clause adaptive_executor propagate_set_commands geqo
test: forcedelegation_functions
# this should be run alone as it gets too many clients
test: join_pushdown
test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message

View File

@ -465,6 +465,9 @@ push(@pgOptions, "citus.node_connection_timeout=${connectionTimeout}");
push(@pgOptions, "citus.explain_analyze_sort_method='taskId'");
push(@pgOptions, "citus.enable_manual_changes_to_shards=on");
# Some tests look at shards in pg_class, make sure we can usually see them:
push(@pgOptions, "citus.hide_shards_from_app_name_prefixes='psql,pg_dump'");
# we disable slow start by default to encourage parallelism within tests
push(@pgOptions, "citus.executor_slow_start_interval=0ms");

View File

@ -539,6 +539,7 @@ SELECT relname FROM pg_class
AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'citus_local_tables_test_schema')
ORDER BY relname;
\c - - - :master_port
SET citus.next_shard_id TO 1904000;
SET search_path TO citus_local_tables_test_schema;
-- error out if converting multi-level partitioned table

View File

@ -0,0 +1,666 @@
SET citus.log_remote_commands TO OFF;
DROP SCHEMA IF EXISTS forcepushdown_schema CASCADE;
CREATE SCHEMA forcepushdown_schema;
SET search_path TO 'forcepushdown_schema';
SET citus.shard_replication_factor = 1;
SET citus.shard_count = 32;
SET citus.next_shard_id TO 900000;
CREATE TABLE test_forcepushdown(intcol int PRIMARY KEY, data char(50) default 'default');
SELECT create_distributed_table('test_forcepushdown', 'intcol', colocate_with := 'none');
--
-- Table in a different colocation group
--
CREATE TABLE test_forcepushdown_noncolocate(intcol int PRIMARY KEY);
SELECT create_distributed_table('test_forcepushdown_noncolocate', 'intcol', colocate_with := 'none');
CREATE FUNCTION insert_data(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (a);
END;
$fn$;
CREATE FUNCTION insert_data_non_distarg(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (a+1);
END;
$fn$;
CREATE FUNCTION update_data_nonlocal(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
BEGIN
UPDATE forcepushdown_schema.test_forcepushdown SET data = 'non-default';
END;
$fn$;
CREATE FUNCTION insert_data_noncolocation(a int)
RETURNS void LANGUAGE plpgsql AS $fn$
BEGIN
-- Insert into a different table than the function is colocated with
INSERT INTO forcepushdown_schema.test_forcepushdown_noncolocate VALUES (a);
END;
$fn$;
SELECT create_distributed_function(
'insert_data(int)', 'a',
colocate_with := 'test_forcepushdown',
force_delegation := true
);
SELECT create_distributed_function(
'insert_data_non_distarg(int)', 'a',
colocate_with := 'test_forcepushdown',
force_delegation := true
);
SELECT create_distributed_function(
'update_data_nonlocal(int)', 'a',
colocate_with := 'test_forcepushdown',
force_delegation := true
);
SELECT create_distributed_function(
'insert_data_noncolocation(int)', 'a',
colocate_with := 'test_forcepushdown',
force_delegation := true
);
SET client_min_messages TO DEBUG1;
--SET citus.log_remote_commands TO on;
SELECT public.wait_until_metadata_sync(30000);
SELECT 'Transaction with no errors' Testing;
BEGIN;
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (1);
-- This call will insert both the rows locally on the remote worker
SELECT insert_data(2);
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (3);
COMMIT;
SELECT 'Transaction with duplicate error in the remote function' Testing;
BEGIN;
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (4);
-- This call will fail with duplicate error on the remote worker
SELECT insert_data(3);
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (5);
COMMIT;
SELECT 'Transaction with duplicate error in the local statement' Testing;
BEGIN;
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (6);
-- This call will insert both the rows locally on the remote worker
SELECT insert_data(7);
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (8);
-- This will fail
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (8);
COMMIT;
SELECT 'Transaction with function using non-distribution argument' Testing;
BEGIN;
-- This should fail
SELECT insert_data_non_distarg(9);
COMMIT;
SELECT 'Transaction with function doing remote connection' Testing;
BEGIN;
-- This statement will pass
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (11);
-- This call will try to update rows locally and on remote node(s)
SELECT update_data_nonlocal(12);
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (13);
COMMIT;
SELECT 'Transaction with no errors but with a rollback' Testing;
BEGIN;
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (14);
-- This call will insert both the rows locally on the remote worker
SELECT insert_data(15);
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (16);
ROLLBACK;
--
-- Add function with pushdown=true in the targetList of a query
--
BEGIN;
-- Query gets delegated to the node of the shard xx_900001 for the key=1,
-- and the function inserts value (1+17) locally on the shard xx_900031
SELECT insert_data(intcol+17) from test_forcepushdown where intcol = 1;
-- This will fail with duplicate error as the function already inserted
-- the value(1+17)
SELECT insert_data(18);
COMMIT;
--
-- Access a table with the same shard key as distribution argument but in a
-- different colocation group.
--
BEGIN;
SELECT insert_data_noncolocation(19);
COMMIT;
SELECT insert_data_noncolocation(19);
-- This should have only the first 3 rows as all other transactions were rolled back.
SELECT * FROM forcepushdown_schema.test_forcepushdown ORDER BY 1;
--
-- Nested call, function with pushdown=false calling function with pushdown=true
--
CREATE TABLE test_nested (id int, name text);
SELECT create_distributed_table('test_nested','id');
INSERT INTO test_nested VALUES (100,'hundred');
INSERT INTO test_nested VALUES (200,'twohundred');
INSERT INTO test_nested VALUES (300,'threehundred');
INSERT INTO test_nested VALUES (400,'fourhundred');
INSERT INTO test_nested VALUES (512,'fivetwelve');
CREATE OR REPLACE FUNCTION inner_force_delegation_function(int)
RETURNS NUMERIC AS $$
DECLARE ret_val NUMERIC;
BEGIN
SELECT max(id)::numeric+1 INTO ret_val FROM forcepushdown_schema.test_nested WHERE id = $1;
RAISE NOTICE 'inner_force_delegation_function():%', ret_val;
RETURN ret_val;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION func_calls_forcepush_func()
RETURNS NUMERIC AS $$
DECLARE incremented_val NUMERIC;
BEGIN
-- Constant distribution argument
SELECT inner_force_delegation_function INTO incremented_val FROM inner_force_delegation_function(100);
RETURN incremented_val;
END;
$$ LANGUAGE plpgsql;
SELECT create_distributed_function('func_calls_forcepush_func()');
SELECT create_distributed_function('inner_force_delegation_function(int)', '$1', colocate_with := 'test_nested', force_delegation := true);
SELECT public.wait_until_metadata_sync(30000);
BEGIN;
SELECT func_calls_forcepush_func();
COMMIT;
SELECT func_calls_forcepush_func();
CREATE OR REPLACE FUNCTION get_val()
RETURNS INT AS $$
BEGIN
RETURN 100::INT;
END;
$$ LANGUAGE plpgsql;
--
-- UDF calling another UDF in a FROM clause
-- fn()
-- {
-- select res into var from fn();
-- }
--
CREATE OR REPLACE FUNCTION func_calls_forcepush_func_infrom()
RETURNS NUMERIC AS $$
DECLARE incremented_val NUMERIC;
DECLARE add_val INT;
BEGIN
add_val := get_val();
SELECT inner_force_delegation_function INTO incremented_val FROM inner_force_delegation_function(add_val + 100);
RETURN incremented_val;
END;
$$ LANGUAGE plpgsql;
SELECT func_calls_forcepush_func_infrom();
BEGIN;
SELECT func_calls_forcepush_func_infrom();
COMMIT;
--
-- UDF calling another UDF in the SELECT targetList
-- fn()
-- {
-- select fn() into var;
-- }
--
CREATE OR REPLACE FUNCTION func_calls_forcepush_func_intarget()
RETURNS NUMERIC AS $$
DECLARE incremented_val NUMERIC;
DECLARE add_val INT;
BEGIN
add_val := get_val();
SELECT inner_force_delegation_function(100 + 100) INTO incremented_val OFFSET 0;
RETURN incremented_val;
END;
$$ LANGUAGE plpgsql;
SELECT func_calls_forcepush_func_intarget();
BEGIN;
SELECT func_calls_forcepush_func_intarget();
COMMIT;
--
-- Recursive function call with pushdown=true
--
CREATE OR REPLACE FUNCTION test_recursive(inp integer)
RETURNS INT AS $$
DECLARE var INT;
BEGIN
RAISE NOTICE 'input:%', inp;
if (inp > 1) then
inp := inp - 1;
var := forcepushdown_schema.test_recursive(inp);
RETURN var;
else
RETURN inp;
END if;
END;
$$ LANGUAGE plpgsql;
SELECT create_distributed_function('test_recursive(int)', '$1', colocate_with := 'test_nested', force_delegation := true);
BEGIN;
SELECT test_recursive(5);
END;
--
-- Distributed function gets delegated indirectly (as part of a query)
--
BEGIN;
-- Query lands on the shard with key = 300(shard __900089) and the function inserts locally
SELECT inner_force_delegation_function(id) FROM test_nested WHERE id = 300;
-- Query lands on the shard with key = 300(shard __900089) and the function inserts remotely
SELECT insert_data_non_distarg(id) FROM test_nested WHERE id = 300;
END;
--
-- Non constant distribution arguments
--
-- Param(PARAM_EXEC) node e.g. SELECT fn((SELECT col from test_nested where col=val))
BEGIN;
SELECT inner_force_delegation_function((SELECT id+112 FROM test_nested WHERE id=400));
END;
CREATE OR REPLACE FUNCTION test_non_constant(x int, y bigint)
RETURNS int
AS $$
DECLARE
BEGIN
RAISE NOTICE 'test_non_constant: % %', x, y;
RETURN x + y;
END;
$$ LANGUAGE plpgsql;
SELECT create_distributed_function(
'test_non_constant(int,bigint)',
'$1',
colocate_with := 'test_forcepushdown',
force_delegation := true);
SELECT count(*) FROM test_nested;
-- Result should print 99, count(*) from test_nested
WITH c AS (SELECT count(*) FROM test_nested),
b as (SELECT test_non_constant(99::int, (SELECT COUNT FROM c)))
SELECT COUNT(*) FROM b;
CREATE TABLE emp (
empname text NOT NULL,
salary integer
);
CREATE TABLE emp_audit(
operation char(1) NOT NULL,
stamp timestamp NOT NULL,
userid text NOT NULL,
empname text NOT NULL,
salary integer
);
SELECT create_distributed_table('emp','empname');
SELECT create_distributed_table('emp_audit','empname');
CREATE OR REPLACE FUNCTION inner_emp(empname text)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO emp VALUES (empname, 33);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION outer_emp()
RETURNS void
AS $$
DECLARE
BEGIN
PERFORM inner_emp('hello');
END;
$$ LANGUAGE plpgsql;
SELECT create_distributed_function('inner_emp(text)','empname', force_delegation := true);
SELECT outer_emp();
SELECT * from emp;
--
-- INSERT..SELECT
--
CREATE FUNCTION insert_select_data(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown SELECT(a+1);
END;
$fn$;
SELECT create_distributed_function(
'insert_select_data(int)', 'a',
colocate_with := 'test_forcepushdown',
force_delegation := true
);
-- Function lands on worker1 and issues COPY ... INSERT on the worker2 into the shard_900021
BEGIN;
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (30);
-- This will fail
SELECT insert_select_data(20);
COMMIT;
-- Function lands on worker2 and issues COPY ... INSERT on the same node into the shard_900029
BEGIN;
-- This will pass
SELECT insert_select_data(21);
END;
-- Function lands on worker2 and issues COPY ... INSERT on the worker1 into the shard_900028
BEGIN;
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (30);
-- This will fail
SELECT insert_select_data(22);
END;
-- Functions lands on worker1 and issues COPY ... INSERT on the worker2 into the shard_900021
-- This will pass as there is no surrounding transaction
SELECT insert_select_data(20);
-- (21+1) and (20+1) should appear
SELECT * FROM forcepushdown_schema.test_forcepushdown ORDER BY 1;
CREATE FUNCTION insert_select_data_nonlocal(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown(intcol)
SELECT intcol FROM forcepushdown_schema.test_forcepushdown_noncolocate;
END;
$fn$;
SELECT create_distributed_function(
'insert_select_data_nonlocal(int)', 'a',
colocate_with := 'test_forcepushdown',
force_delegation := true
);
INSERT INTO forcepushdown_schema.test_forcepushdown_noncolocate VALUES (30);
INSERT INTO forcepushdown_schema.test_forcepushdown_noncolocate VALUES (31);
INSERT INTO forcepushdown_schema.test_forcepushdown_noncolocate VALUES (32);
BEGIN;
INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (40);
-- This will fail
SELECT insert_select_data_nonlocal(41);
COMMIT;
-- Above 3 rows (30, 31, 32) should appear now
SELECT insert_select_data_nonlocal(40);
SELECT * FROM forcepushdown_schema.test_forcepushdown ORDER BY 1;
CREATE TABLE test_forcepushdown_char(data char(50) PRIMARY KEY);
SELECT create_distributed_table('test_forcepushdown_char', 'data', colocate_with := 'none');
CREATE TABLE test_forcepushdown_varchar(data varchar PRIMARY KEY);
SELECT create_distributed_table('test_forcepushdown_varchar', 'data', colocate_with := 'none');
CREATE TABLE test_forcepushdown_text(data text PRIMARY KEY);
SELECT create_distributed_table('test_forcepushdown_text', 'data', colocate_with := 'none');
CREATE FUNCTION insert_data_char(a char(50))
RETURNS void LANGUAGE plpgsql AS $fn$
BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown_char VALUES (a);
END;
$fn$;
SELECT create_distributed_function(
'insert_data_char(char)', 'a',
colocate_with := 'test_forcepushdown_char',
force_delegation := true
);
CREATE FUNCTION insert_data_varchar(a varchar)
RETURNS void LANGUAGE plpgsql AS $fn$
BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown_varchar VALUES (a);
END;
$fn$;
SELECT create_distributed_function(
'insert_data_varchar(varchar)', 'a',
colocate_with := 'test_forcepushdown_varchar',
force_delegation := true
);
CREATE FUNCTION insert_data_text(a text)
RETURNS void LANGUAGE plpgsql AS $fn$
BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown_text VALUES (a);
END;
$fn$;
SELECT create_distributed_function(
'insert_data_text(text)', 'a',
colocate_with := 'test_forcepushdown_text',
force_delegation := true
);
SELECT insert_data_varchar('VARCHAR');
BEGIN;
SELECT insert_data_varchar('VARCHAR2');
COMMIT;
SELECT insert_data_text('TEXT');
BEGIN;
SELECT insert_data_text('TEXT2');
COMMIT;
-- Char is failing because the datatype is represented differently in
-- PL/pgSQL and in the executor.
SELECT insert_data_char('CHAR');
BEGIN;
SELECT insert_data_char('CHAR');
COMMIT;
SELECT * FROM test_forcepushdown_char ORDER BY 1;
SELECT * FROM test_forcepushdown_varchar ORDER BY 1;
SELECT * FROM test_forcepushdown_text ORDER BY 1;
-- Test sub query
CREATE TABLE test_subquery(data int, result int);
SELECT create_distributed_table('test_subquery', 'data', colocate_with := 'none');
CREATE TABLE test_non_colocated(id int);
SELECT create_distributed_table('test_non_colocated', 'id', colocate_with := 'none');
CREATE FUNCTION select_data(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
DECLARE var INT;
BEGIN
SELECT result INTO var FROM forcepushdown_schema.test_subquery WHERE data =
(SELECT data FROM forcepushdown_schema.test_subquery WHERE data = a);
RAISE NOTICE 'Result: %', var;
END;
$fn$;
SELECT create_distributed_function(
'select_data(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
CREATE FUNCTION select_data_noncolocate(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
DECLARE var INT;
BEGIN
-- Key is the same but colocation ID is different
SELECT data INTO var FROM forcepushdown_schema.test_subquery WHERE data =
(SELECT id FROM forcepushdown_schema.test_non_colocated WHERE id = a);
RAISE NOTICE 'Result: %', var;
END;
$fn$;
SELECT create_distributed_function(
'select_data_noncolocate(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
CREATE FUNCTION insert_select_data_cte1(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
DECLARE var INT;
BEGIN
WITH ins AS (INSERT INTO forcepushdown_schema.test_subquery VALUES (a) RETURNING data)
SELECT ins.data INTO var FROM ins;
RAISE NOTICE 'Result: %', var;
END;
$fn$;
SELECT create_distributed_function(
'insert_select_data_cte1(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
CREATE FUNCTION insert_select_data_cte2(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
DECLARE var INT;
BEGIN
WITH ins AS (INSERT INTO forcepushdown_schema.test_subquery VALUES (a) RETURNING data)
SELECT ins.data INTO var FROM forcepushdown_schema.test_subquery, ins WHERE forcepushdown_schema.test_subquery.data = a;
RAISE NOTICE 'Result: %', var;
END;
$fn$;
SELECT create_distributed_function(
'insert_select_data_cte2(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
CREATE FUNCTION insert_data_cte_nondist(a integer)
RETURNS void LANGUAGE plpgsql AS $fn$
DECLARE var INT;
BEGIN
-- Inserting a non-distribution argument (a+1)
WITH ins AS (INSERT INTO forcepushdown_schema.test_subquery VALUES (a+1) RETURNING data)
SELECT ins.data INTO var FROM forcepushdown_schema.test_subquery, ins WHERE forcepushdown_schema.test_subquery.data = a;
RAISE NOTICE 'Result: %', var;
END;
$fn$;
SELECT create_distributed_function(
'insert_data_cte_nondist(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
INSERT INTO forcepushdown_schema.test_subquery VALUES(100, -1);
-- This should pass
SELECT select_data(100);
BEGIN;
SELECT select_data(100);
END;
-- This should fail
SELECT select_data_noncolocate(100);
BEGIN;
SELECT select_data_noncolocate(100);
END;
-- This should pass
SELECT insert_select_data_cte1(200);
BEGIN;
SELECT insert_select_data_cte1(200);
COMMIT;
-- This should pass
SELECT insert_select_data_cte2(300);
BEGIN;
SELECT insert_select_data_cte2(300);
COMMIT;
-- This should fail
SELECT insert_data_cte_nondist(400);
BEGIN;
SELECT insert_data_cte_nondist(400);
COMMIT;
-- Rows 100, 200, 300 should be seen
SELECT * FROM forcepushdown_schema.test_subquery ORDER BY 1;
-- Query with targetList greater than 1
-- Function from FROM clause is not delegated outside of a BEGIN (for now)
SELECT 1,2,3 FROM select_data(100);
BEGIN;
-- Function from FROM clause is delegated
SELECT 1,2,3 FROM select_data(100);
END;
-- Test prepared statements
CREATE TABLE table_test_prepare(i int, j bigint);
SELECT create_distributed_table('table_test_prepare', 'i', colocate_with := 'none');
DROP FUNCTION test_prepare(int, int);
CREATE OR REPLACE FUNCTION test_prepare(x int, y int)
RETURNS bigint
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.table_test_prepare VALUES (x, y);
INSERT INTO forcepushdown_schema.table_test_prepare VALUES (y, x);
RETURN x + y;
END;
$$ LANGUAGE plpgsql;
SELECT create_distributed_function('test_prepare(int,int)','x',force_delegation :=true, colocate_with := 'table_test_prepare');
DROP FUNCTION outer_test_prepare(int, int);
CREATE OR REPLACE FUNCTION outer_test_prepare(x int, y int)
RETURNS void
AS $$
DECLARE
v int;
BEGIN
PERFORM FROM test_prepare(x, y);
PERFORM 1, 1 + a FROM test_prepare(x + 1, y + 1) a;
END;
$$ LANGUAGE plpgsql;
-- First 5 get delegated and succeeds
BEGIN;
SELECT outer_test_prepare(1,1);
SELECT outer_test_prepare(1,1);
SELECT outer_test_prepare(1,1);
SELECT outer_test_prepare(1,1);
SELECT outer_test_prepare(1,1);
-- All the above gets delegated and should see 5 * 4 rows
SELECT COUNT(*) FROM table_test_prepare;
-- 6th execution will be generic plan and should get delegated
SELECT outer_test_prepare(1,1);
SELECT outer_test_prepare(1,1);
END;
-- Fails as expected
SELECT outer_test_prepare(1,2);
SELECT COUNT(*) FROM table_test_prepare;
RESET client_min_messages;
SET citus.log_remote_commands TO off;
DROP SCHEMA forcepushdown_schema CASCADE;

View File

@ -346,9 +346,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ROLLBACK;
-- check the sanity of distributionArgumentIndex and colocationId
@ -356,18 +356,18 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ROLLBACK;
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ROLLBACK;
-- check with non-existing object
@ -375,9 +375,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ROLLBACK;
-- since citus_internal_add_object_metadata is strict function returns NULL
@ -386,9 +386,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus';
\set VERBOSITY terse
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ROLLBACK;
\c - postgres - :worker_1_port
@ -404,9 +404,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
CREATE PUBLICATION publication_test FOR TABLE publication_test_table;
SET ROLE metadata_sync_helper_role;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('publication', ARRAY['publication_test']::text[], ARRAY[]::text[], -1, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('publication', ARRAY['publication_test']::text[], ARRAY[]::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ROLLBACK;
-- Show that citus_internal_add_object_metadata checks the privileges
@ -420,9 +420,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
LANGUAGE SQL;
SET ROLE metadata_sync_helper_role;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid)
AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0, false))
SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
ROLLBACK;
-- we do not allow wrong partmethod

View File

@ -9,5 +9,6 @@ SELECT attrelid::regclass, attname, atthasmissing, attmissingval
FROM pg_attribute
WHERE atthasmissing AND attrelid NOT IN ('pg_dist_node'::regclass,
'pg_dist_rebalance_strategy'::regclass,
'pg_dist_partition'::regclass)
'pg_dist_partition'::regclass,
'citus.pg_dist_object'::regclass)
ORDER BY attrelid, attname;

View File

@ -31,28 +31,18 @@ SELECT create_distributed_table('test_table', 'id');
-- first show that the views does not show
-- any shards on the coordinator as expected
SELECT * FROM citus_shards_on_worker;
SELECT * FROM citus_shard_indexes_on_worker;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names';
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names';
-- now show that we see the shards, but not the
-- indexes as there are no indexes
\c - - - :worker_1_port
\c postgresql://postgres@localhost::worker_1_port/regression?application_name=psql
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
-- also show that nested calls to pg_table_is_visible works fine
-- if both of the calls to the pg_table_is_visible haven't been
-- replaced, we would get 0 rows in the output
SELECT
pg_table_is_visible((SELECT
"t1"."Name"::regclass
FROM
citus_shards_on_worker as t1
WHERE
NOT pg_table_is_visible("t1"."Name"::regclass)
LIMIT
1));
-- shards are hidden when using psql as application_name
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- now create an index
\c - - - :master_port
@ -61,20 +51,69 @@ CREATE INDEX test_index ON mx_hide_shard_names.test_table(id);
-- now show that we see the shards, and the
-- indexes as well
\c - - - :worker_1_port
\c postgresql://postgres@localhost::worker_1_port/regression?application_name=psql
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
-- shards are hidden when using psql as application_name
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- changing application_name reveals the shards
SET application_name TO '';
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
RESET application_name;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- changing application_name in transaction reveals the shards
BEGIN;
SET LOCAL application_name TO '';
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
ROLLBACK;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- now with session-level GUC, but ROLLBACK;
BEGIN;
SET application_name TO '';
ROLLBACK;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- we should hide correctly based on application_name with savepoints
BEGIN;
SAVEPOINT s1;
SET application_name TO '';
-- changing application_name reveals the shards
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
ROLLBACK TO SAVEPOINT s1;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
ROLLBACK;
-- changing citus.hide_shards_from_app_name_prefixes reveals the shards
BEGIN;
SET LOCAL citus.hide_shards_from_app_name_prefixes TO 'notpsql';
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
ROLLBACK;
-- shards are hidden again after GUCs are reset
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- we should be able to select from the shards directly if we
-- know the name of the tables
SELECT count(*) FROM test_table_1130000;
-- disable the config so that table becomes visible
SELECT pg_table_is_visible('test_table_1130000'::regclass);
SET citus.override_table_visibility TO FALSE;
-- shards on the search_path still match pg_table_is_visible
SELECT pg_table_is_visible('test_table_1130000'::regclass);
-- shards on the search_path do not match citus_table_is_visible
SELECT citus_table_is_visible('test_table_1130000'::regclass);
\c - - - :master_port
-- make sure that we're resilient to the edge cases
-- such that the table name includes the shard number
@ -95,7 +134,7 @@ SET search_path TO 'mx_hide_shard_names';
-- name already exists :)
CREATE TABLE test_table_2_1130000(id int, time date);
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
\d
@ -111,14 +150,10 @@ CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id);
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SET search_path TO 'mx_hide_shard_names_2';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SET search_path TO 'mx_hide_shard_names_2, mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names_2' ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names_2' ORDER BY 2;
-- now try very long table names
\c - - - :master_port
@ -137,7 +172,7 @@ SELECT create_distributed_table('too_long_12345678901234567890123456789012345678
\c - - - :worker_1_port
SET search_path TO 'mx_hide_shard_names_3';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names_3' ORDER BY 2;
\d
@ -159,8 +194,8 @@ SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id');
\c - - - :worker_1_port
SET search_path TO "CiTuS.TeeN";
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'CiTuS.TeeN' ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'CiTuS.TeeN' ORDER BY 2;
\d
\di

View File

@ -101,3 +101,28 @@ COPY dist_columnar TO STDOUT;
SELECT indexname FROM pg_indexes WHERE tablename = 'weird.table' ORDER BY indexname;
DROP SCHEMA dumper CASCADE;
CREATE SCHEMA dumper;
CREATE TABLE data (
key int,
value text
);
SELECT create_distributed_table('data', 'key');
COPY data FROM STDIN WITH (format csv, delimiter '|', escape '\');
1|{"this":"is","json":1}
2|{"$\"":9}
3|{"{}":" "}
4|{}
\.
-- run pg_dump on worker (which has shards)
\COPY output FROM PROGRAM 'PGAPPNAME=pg_dump pg_dump -f results/pg_dump.tmp -h localhost -p 57637 -U postgres -d regression -n dumper --quote-all-identifiers'
-- restore pg_dump from worker via coordinator
DROP SCHEMA dumper CASCADE;
\COPY (SELECT line FROM output WHERE line IS NOT NULL) TO PROGRAM 'psql -qtAX -h localhost -p 57636 -U postgres -d regression -f results/pg_dump.tmp'
-- check the tables (should not include shards)
SELECT tablename FROM pg_tables WHERE schemaname = 'dumper' ORDER BY 1;
DROP SCHEMA dumper CASCADE;