Merge branch 'master' into dev/yanwjin/separate_columnar2

pull/5661/head
ywj 2022-02-18 11:57:55 -08:00 committed by GitHub
commit 0319229b67
253 changed files with 8955 additions and 2356 deletions

View File

@ -32,6 +32,8 @@ why we ask this as well as instructions for how to proceed, see the
./configure
make
make install
# Optionally, you might instead want to use `make install-all`
# since the `multi_extension` regression test would otherwise fail due to missing downgrade scripts.
cd src/test/regress
make check
```
@ -51,7 +53,7 @@ why we ask this as well as instructions for how to proceed, see the
autoconf flex git libcurl4-gnutls-dev libicu-dev \
libkrb5-dev liblz4-dev libpam0g-dev libreadline-dev \
libselinux1-dev libssl-dev libxslt1-dev libzstd-dev \
make uuid-dev
make uuid-dev mitmproxy
```
2. Get, build, and test the code
@ -62,6 +64,8 @@ why we ask this as well as instructions for how to proceed, see the
./configure
make
sudo make install
# Optionally, you might instead want to use `sudo make install-all`
# since the `multi_extension` regression test would otherwise fail due to missing downgrade scripts.
cd src/test/regress
make check
```
@ -104,6 +108,8 @@ why we ask this as well as instructions for how to proceed, see the
PG_CONFIG=/usr/pgsql-14/bin/pg_config ./configure
make
sudo make install
# Optionally, you might instead want to use `sudo make install-all`
# since the `multi_extension` regression test would otherwise fail due to missing downgrade scripts.
cd src/test/regress
make check
```

View File

@ -156,9 +156,9 @@ git merge "community/$PR_BRANCH"
familiar with the change.
5. You should rerun the `check-merge-to-enterprise` check on
`community/$PR_BRANCH`. You can use the re-run from failed option in CircleCI.
6. You can now merge the PR on enterprise. Be sure to NOT use "squash and merge",
6. You can now merge the PR on community. Be sure to NOT use "squash and merge",
but instead use the regular "merge commit" mode.
7. You can now merge the PR on community. Be sure to NOT use "squash and merge",
7. You can now merge the PR on enterprise. Be sure to NOT use "squash and merge",
but instead use the regular "merge commit" mode.
The subsequent PRs on community will be able to pass the

View File

@ -66,10 +66,10 @@ fi
git merge --abort
# If we have a conflict on enterprise merge on the master branch, we have a problem.
# Provide an error message to indicate that enterprise merge is needed.
# Provide an error message to indicate that enterprise merge is needed to fix this check.
if [[ $PR_BRANCH = master ]]; then
echo "ERROR: Master branch has merge conlicts with enterprise-master."
echo "Try re-running this job if you merged community PR before enterprise PR. Otherwise conflicts need to be resolved as a separate PR on enterprise."
echo "ERROR: Master branch has merge conflicts with enterprise-master."
echo "Try re-running this CI job after merging your changes into enterprise-master."
exit 1
fi

View File

@ -24,10 +24,12 @@
#include "catalog/pg_am.h"
#include "catalog/pg_publication.h"
#include "catalog/pg_trigger.h"
#include "catalog/pg_extension.h"
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "commands/extension.h"
#include "executor/executor.h"
#include "nodes/makefuncs.h"
#include "optimizer/plancat.h"
@ -154,6 +156,20 @@ static void ColumnarReadMissingRowsIntoIndex(TableScanDesc scan, Relation indexR
static ItemPointerData TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort,
ItemPointer targetItemPointer);
/* functions for CheckCitusColumnarVersion */
static bool CheckAvailableVersionColumnar(int elevel);
static bool CheckInstalledVersionColumnar(int elevel);
static char * AvailableExtensionVersionColumnar(void);
static char * InstalledExtensionVersionColumnar(void);
static bool CitusColumnarHasBeenLoadedInternal(void);
static bool CitusColumnarHasBeenLoaded(void);
static bool CheckCitusColumnarVersion(int elevel);
static bool MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion);
/* global variables for CheckCitusColumnarVersion */
static bool extensionLoadedColumnar = false;
static bool EnableVersionChecksColumnar = true;
static bool citusVersionKnownCompatibleColumnar = false;
/* Custom tuple slot ops used for columnar. Initialized in columnar_tableam_init(). */
static TupleTableSlotOps TTSOpsColumnar;
@ -171,7 +187,7 @@ columnar_beginscan(Relation relation, Snapshot snapshot,
ParallelTableScanDesc parallel_scan,
uint32 flags)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
int natts = relation->rd_att->natts;
@ -194,6 +210,7 @@ columnar_beginscan_extended(Relation relation, Snapshot snapshot,
ParallelTableScanDesc parallel_scan,
uint32 flags, Bitmapset *attr_needed, List *scanQual)
{
CheckCitusColumnarVersion(ERROR);
Oid relfilenode = relation->rd_node.relNode;
/*
@ -418,7 +435,7 @@ columnar_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
static IndexFetchTableData *
columnar_index_fetch_begin(Relation rel)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
Oid relfilenode = rel->rd_node.relNode;
if (PendingWritesInUpperTransactions(relfilenode, GetCurrentSubTransactionId()))
@ -643,7 +660,7 @@ static bool
columnar_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
Snapshot snapshot)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
uint64 rowNumber = tid_to_row_number(slot->tts_tid);
StripeMetadata *stripeMetadata = FindStripeByRowNumber(rel, rowNumber, snapshot);
@ -656,7 +673,7 @@ static TransactionId
columnar_index_delete_tuples(Relation rel,
TM_IndexDeleteOp *delstate)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
/*
* XXX: We didn't bother implementing index_delete_tuple for neither of
@ -717,7 +734,7 @@ static void
columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
int options, BulkInsertState bistate)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
/*
* columnar_init_write_state allocates the write state in a longer
@ -765,7 +782,7 @@ static void
columnar_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
CommandId cid, int options, BulkInsertState bistate)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
ColumnarWriteState *writeState = columnar_init_write_state(relation,
RelationGetDescr(relation),
@ -841,7 +858,7 @@ columnar_relation_set_new_filenode(Relation rel,
TransactionId *freezeXid,
MultiXactId *minmulti)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
if (persistence == RELPERSISTENCE_UNLOGGED)
{
@ -878,8 +895,7 @@ columnar_relation_set_new_filenode(Relation rel,
static void
columnar_relation_nontransactional_truncate(Relation rel)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
RelFileNode relfilenode = rel->rd_node;
NonTransactionDropWriteState(relfilenode.relNode);
@ -926,7 +942,7 @@ columnar_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
double *tups_vacuumed,
double *tups_recently_dead)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
TupleDesc sourceDesc = RelationGetDescr(OldHeap);
TupleDesc targetDesc = RelationGetDescr(NewHeap);
@ -1024,7 +1040,7 @@ static void
columnar_vacuum_rel(Relation rel, VacuumParams *params,
BufferAccessStrategy bstrategy)
{
if (!CheckCitusVersion(WARNING))
if (!CheckCitusColumnarVersion(WARNING))
{
/*
* Skip if the extension catalogs are not up-to-date, but avoid
@ -1342,7 +1358,7 @@ columnar_index_build_range_scan(Relation columnarRelation,
void *callback_state,
TableScanDesc scan)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
if (start_blockno != 0 || numblocks != InvalidBlockNumber)
{
@ -1592,7 +1608,7 @@ columnar_index_validate_scan(Relation columnarRelation,
ValidateIndexState *
validateIndexState)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
ColumnarReportTotalVirtualBlocks(columnarRelation, snapshot,
PROGRESS_SCAN_BLOCKS_TOTAL);
@ -1764,7 +1780,7 @@ TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetIt
static uint64
columnar_relation_size(Relation rel, ForkNumber forkNumber)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
uint64 nblocks = 0;
@ -1791,7 +1807,7 @@ columnar_relation_size(Relation rel, ForkNumber forkNumber)
static bool
columnar_relation_needs_toast_table(Relation rel)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
return false;
}
@ -1802,8 +1818,7 @@ columnar_estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples,
double *allvisfrac)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
RelationOpenSmgr(rel);
*pages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM);
*tuples = ColumnarTableRowCount(rel);
@ -1910,6 +1925,15 @@ columnar_tableam_init()
TTSOpsColumnar = TTSOpsVirtual;
TTSOpsColumnar.copy_heap_tuple = ColumnarSlotCopyHeapTuple;
DefineCustomBoolVariable(
"columnar.enable_version_checks",
gettext_noop("Enables Version Check for Columnar"),
NULL,
&EnableVersionChecksColumnar,
true,
PGC_USERSET,
GUC_NO_SHOW_ALL,
NULL, NULL, NULL);
}
@ -1968,7 +1992,7 @@ ColumnarTableDropHook(Oid relid)
if (IsColumnarTableAmTable(relid))
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
/*
* Drop metadata. No need to drop storage here since for
@ -2093,8 +2117,7 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
if (rel->rd_tableam == GetColumnarTableAmRoutine())
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
if (!ColumnarSupportsIndexAM(indexStmt->accessMethod))
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@ -2316,7 +2339,7 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_set);
Datum
alter_columnar_table_set(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
Oid relationId = PG_GETARG_OID(0);
@ -2440,7 +2463,7 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_reset);
Datum
alter_columnar_table_reset(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
CheckCitusColumnarVersion(ERROR);
Oid relationId = PG_GETARG_OID(0);
@ -2579,3 +2602,318 @@ downgrade_columnar_storage(PG_FUNCTION_ARGS)
table_close(rel, AccessExclusiveLock);
PG_RETURN_VOID();
}
/*
* Code to check the Citus version; it helps remove columnar's dependency on Citus.
*/
/*
* CitusColumnarHasBeenLoaded returns true if the citus extension has been created
* in the current database and the extension script has been executed. Otherwise,
* it returns false. The result is cached as this is called very frequently.
*/
bool
CitusColumnarHasBeenLoaded(void)
{
if (!extensionLoadedColumnar || creating_extension)
{
/*
* Refresh if we have not determined whether the extension has been
* loaded yet, or in case of ALTER EXTENSION since we want to treat
* Citus as "not loaded" during ALTER EXTENSION citus.
*/
bool extensionLoaded = CitusColumnarHasBeenLoadedInternal();
extensionLoadedColumnar = extensionLoaded;
}
return extensionLoadedColumnar;
}
/*
* CitusColumnarHasBeenLoadedInternal returns true if the citus extension has been created
* in the current database and the extension script has been executed. Otherwise,
* it returns false.
*/
static bool
CitusColumnarHasBeenLoadedInternal(void)
{
if (IsBinaryUpgrade)
{
/* never use Citus logic during pg_upgrade */
return false;
}
Oid citusExtensionOid = get_extension_oid("citus", true);
if (citusExtensionOid == InvalidOid)
{
/* Citus extension does not exist yet */
return false;
}
if (creating_extension && CurrentExtensionObject == citusExtensionOid)
{
/*
* We do not use Citus hooks during CREATE/ALTER EXTENSION citus
* since the objects used by the C code might not be there yet.
*/
return false;
}
/* citus extension exists and has been created */
return true;
}
/*
* CheckCitusColumnarVersion checks whether there is a version mismatch between the
* available version and the loaded version or between the installed version
* and the loaded version. Returns true if compatible, false otherwise.
*
* As a side effect, this function also sets the citusVersionKnownCompatibleColumnar
* global variable to true, which reduces the version check cost of subsequent calls.
*/
bool
CheckCitusColumnarVersion(int elevel)
{
if (citusVersionKnownCompatibleColumnar ||
!CitusColumnarHasBeenLoaded() ||
!EnableVersionChecksColumnar)
{
return true;
}
if (CheckAvailableVersionColumnar(elevel) && CheckInstalledVersionColumnar(elevel))
{
citusVersionKnownCompatibleColumnar = true;
return true;
}
else
{
return false;
}
}
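The elevel argument controls how a failed check surfaces to the caller: callbacks that must not proceed pass ERROR (so ereport aborts the statement), while best-effort paths such as columnar_vacuum_rel further down pass WARNING and simply skip their work when the function returns false. A minimal in-context sketch of the two calling conventions, using hypothetical callback names:

```
/*
 * Sketch only: the two ways callers in this file use CheckCitusColumnarVersion().
 * columnar_required_callback and columnar_optional_maintenance are hypothetical names;
 * the real callbacks appear elsewhere in this diff.
 */
static void
columnar_required_callback(Relation rel)
{
	/* a version mismatch aborts the statement via ereport(ERROR, ...) */
	CheckCitusColumnarVersion(ERROR);

	/* ... actual work ... */
}

static void
columnar_optional_maintenance(Relation rel)
{
	/* a version mismatch only warns; skip the work, as columnar_vacuum_rel does */
	if (!CheckCitusColumnarVersion(WARNING))
	{
		return;
	}

	/* ... maintenance work ... */
}
```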
/*
* CheckAvailableVersionColumnar compares CITUS_EXTENSIONVERSION and the currently
* available version from the citus.control file. If they are not compatible,
* this function logs an error with the specified elevel and returns false,
* otherwise it returns true.
*/
bool
CheckAvailableVersionColumnar(int elevel)
{
if (!EnableVersionChecksColumnar)
{
return true;
}
char *availableVersion = AvailableExtensionVersionColumnar();
if (!MajorVersionsCompatibleColumnar(availableVersion, CITUS_EXTENSIONVERSION))
{
ereport(elevel, (errmsg("loaded Citus library version differs from latest "
"available extension version"),
errdetail("Loaded library requires %s, but the latest control "
"file specifies %s.", CITUS_MAJORVERSION,
availableVersion),
errhint("Restart the database to load the latest Citus "
"library.")));
pfree(availableVersion);
return false;
}
pfree(availableVersion);
return true;
}
/*
* CheckInstalledVersionColumnar compares CITUS_EXTENSIONVERSION and the
* extension's current version from the pg_extension catalog table. If they
* are not compatible, this function logs an error with the specified elevel
* and returns false, otherwise it returns true.
*/
static bool
CheckInstalledVersionColumnar(int elevel)
{
Assert(CitusColumnarHasBeenLoaded());
Assert(EnableVersionChecksColumnar);
char *installedVersion = InstalledExtensionVersionColumnar();
if (!MajorVersionsCompatibleColumnar(installedVersion, CITUS_EXTENSIONVERSION))
{
ereport(elevel, (errmsg("loaded Citus library version differs from installed "
"extension version"),
errdetail("Loaded library requires %s, but the installed "
"extension version is %s.", CITUS_MAJORVERSION,
installedVersion),
errhint("Run ALTER EXTENSION citus UPDATE and try again.")));
pfree(installedVersion);
return false;
}
pfree(installedVersion);
return true;
}
/*
* MajorVersionsCompatibleColumnar checks whether both versions are compatible.
* They are compatible if the major and minor version numbers match; the schema
* version is ignored. Returns true if compatible, false otherwise.
*/
bool
MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion)
{
const char schemaVersionSeparator = '-';
char *leftSeperatorPosition = strchr(leftVersion, schemaVersionSeparator);
char *rightSeperatorPosition = strchr(rightVersion, schemaVersionSeparator);
int leftComparisionLimit = 0;
int rightComparisionLimit = 0;
if (leftSeperatorPosition != NULL)
{
leftComparisionLimit = leftSeperatorPosition - leftVersion;
}
else
{
leftComparisionLimit = strlen(leftVersion);
}
if (rightSeperatorPosition != NULL)
{
rightComparisionLimit = rightSeperatorPosition - rightVersion;
}
else
{
rightComparisionLimit = strlen(rightVersion);
}
/* we can error out early if hyphens are not in the same position */
if (leftComparisionLimit != rightComparisionLimit)
{
return false;
}
return strncmp(leftVersion, rightVersion, leftComparisionLimit) == 0;
}
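For intuition, the comparison only looks at the part of each version string before the '-' schema-version separator. A standalone sketch mirroring the logic above (the version strings are made-up examples, not specific Citus releases):

```
/* Standalone sketch of the prefix comparison above; example strings are illustrative. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
versions_compatible(const char *left, const char *right)
{
	const char *leftSep = strchr(left, '-');
	const char *rightSep = strchr(right, '-');
	size_t leftLimit = leftSep ? (size_t) (leftSep - left) : strlen(left);
	size_t rightLimit = rightSep ? (size_t) (rightSep - right) : strlen(right);

	/* error out early if the hyphens are not in the same position */
	if (leftLimit != rightLimit)
	{
		return false;
	}

	return strncmp(left, right, leftLimit) == 0;
}

int
main(void)
{
	printf("%d\n", versions_compatible("10.2-1", "10.2-4")); /* 1: schema version ignored */
	printf("%d\n", versions_compatible("10.2-1", "10.1-1")); /* 0: minor versions differ */
	return 0;
}
```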
/*
* AvailableExtensionVersionColumnar returns the currently available Citus version
* from the citus.control file. The result is not cached here; instead,
* CheckCitusColumnarVersion caches the overall compatibility result, so the control
* file is not re-read once a compatible version has been seen.
*/
static char *
AvailableExtensionVersionColumnar(void)
{
LOCAL_FCINFO(fcinfo, 0);
FmgrInfo flinfo;
bool goForward = true;
bool doCopy = false;
char *availableExtensionVersion;
EState *estate = CreateExecutorState();
ReturnSetInfo *extensionsResultSet = makeNode(ReturnSetInfo);
extensionsResultSet->econtext = GetPerTupleExprContext(estate);
extensionsResultSet->allowedModes = SFRM_Materialize;
fmgr_info(F_PG_AVAILABLE_EXTENSIONS, &flinfo);
InitFunctionCallInfoData(*fcinfo, &flinfo, 0, InvalidOid, NULL,
(Node *) extensionsResultSet);
/* pg_available_extensions returns result set containing all available extensions */
(*pg_available_extensions)(fcinfo);
TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat(
extensionsResultSet->setDesc,
&TTSOpsMinimalTuple);
bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward,
doCopy,
tupleTableSlot);
while (hasTuple)
{
bool isNull = false;
Datum extensionNameDatum = slot_getattr(tupleTableSlot, 1, &isNull);
char *extensionName = NameStr(*DatumGetName(extensionNameDatum));
if (strcmp(extensionName, "citus") == 0)
{
Datum availableVersion = slot_getattr(tupleTableSlot, 2, &isNull);
availableExtensionVersion = text_to_cstring(DatumGetTextPP(availableVersion));
ExecClearTuple(tupleTableSlot);
ExecDropSingleTupleTableSlot(tupleTableSlot);
return availableExtensionVersion;
}
ExecClearTuple(tupleTableSlot);
hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward,
doCopy, tupleTableSlot);
}
ExecDropSingleTupleTableSlot(tupleTableSlot);
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension is not found")));
}
/*
* InstalledExtensionVersionColumnar returns the Citus version from the PostgreSQL pg_extension table.
*/
static char *
InstalledExtensionVersionColumnar(void)
{
ScanKeyData entry[1];
char *installedExtensionVersion = NULL;
Relation relation = table_open(ExtensionRelationId, AccessShareLock);
ScanKeyInit(&entry[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ,
CStringGetDatum("citus"));
SysScanDesc scandesc = systable_beginscan(relation, ExtensionNameIndexId, true,
NULL, 1, entry);
HeapTuple extensionTuple = systable_getnext(scandesc);
/* We assume that there can be at most one matching tuple */
if (HeapTupleIsValid(extensionTuple))
{
int extensionIndex = Anum_pg_extension_extversion;
TupleDesc tupleDescriptor = RelationGetDescr(relation);
bool isNull = false;
Datum installedVersion = heap_getattr(extensionTuple, extensionIndex,
tupleDescriptor, &isNull);
if (isNull)
{
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension version is null")));
}
installedExtensionVersion = text_to_cstring(DatumGetTextPP(installedVersion));
}
else
{
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension is not loaded")));
}
systable_endscan(scandesc);
table_close(relation, AccessShareLock);
return installedExtensionVersion;
}
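For readers less familiar with the catalog-scan APIs, the lookup above is logically the same as running SELECT extversion FROM pg_extension WHERE extname = 'citus'. A hedged sketch of that same lookup via SPI (this is not how the function above works, and it assumes a context where SPI may be used; the function name is hypothetical):

```
/* Sketch only: SPI-based equivalent of the pg_extension scan above. */
#include "postgres.h"
#include "executor/spi.h"

static char *
installed_citus_version_via_spi(void)
{
	/* remember the caller's memory context so the result survives SPI_finish() */
	MemoryContext callerContext = CurrentMemoryContext;
	char *version = NULL;

	if (SPI_connect() != SPI_OK_CONNECT)
	{
		elog(ERROR, "SPI_connect failed");
	}

	if (SPI_execute("SELECT extversion FROM pg_extension WHERE extname = 'citus'",
					true, 1) == SPI_OK_SELECT && SPI_processed > 0)
	{
		char *value = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);
		if (value != NULL)
		{
			version = MemoryContextStrdup(callerContext, value);
		}
	}

	SPI_finish();
	return version;
}
```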

View File

@ -37,8 +37,7 @@ static char * CreateCollationDDLInternal(Oid collationId, Oid *collowner,
char **quotedCollationName);
static List * FilterNameListForDistributedCollations(List *objects, bool missing_ok,
List **addresses);
static void EnsureSequentialModeForCollationDDL(void);
static bool ShouldPropagateDefineCollationStmt(void);
/*
* GetCreateCollationDDLInternal returns a CREATE COLLATE sql string for the
@ -256,7 +255,7 @@ PreprocessDropCollationStmt(Node *node, const char *queryString,
char *dropStmtSql = DeparseTreeNode((Node *) stmt);
stmt->objects = oldCollations;
EnsureSequentialModeForCollationDDL();
EnsureSequentialMode(OBJECT_COLLATION);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
@ -292,7 +291,7 @@ PreprocessAlterCollationOwnerStmt(Node *node, const char *queryString,
QualifyTreeNode((Node *) stmt);
char *sql = DeparseTreeNode((Node *) stmt);
EnsureSequentialModeForCollationDDL();
EnsureSequentialMode(OBJECT_COLLATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
@ -328,7 +327,7 @@ PreprocessRenameCollationStmt(Node *node, const char *queryString,
/* deparse sql*/
char *renameStmtSql = DeparseTreeNode((Node *) stmt);
EnsureSequentialModeForCollationDDL();
EnsureSequentialMode(OBJECT_COLLATION);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
@ -363,7 +362,7 @@ PreprocessAlterCollationSchemaStmt(Node *node, const char *queryString,
QualifyTreeNode((Node *) stmt);
char *sql = DeparseTreeNode((Node *) stmt);
EnsureSequentialModeForCollationDDL();
EnsureSequentialMode(OBJECT_COLLATION);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
@ -453,47 +452,6 @@ AlterCollationSchemaStmtObjectAddress(Node *node, bool missing_ok)
}
/*
* EnsureSequentialModeForCollationDDL makes sure that the current transaction is already in
* sequential mode, or can still safely be put in sequential mode, it errors if that is
* not possible. The error contains information for the user to retry the transaction with
* sequential mode set from the beginning.
*
* As collations are node scoped objects there exists only 1 instance of the collation used by
* potentially multiple shards. To make sure all shards in the transaction can interact
* with the type the type needs to be visible on all connections used by the transaction,
* meaning we can only use 1 connection per node.
*/
static void
EnsureSequentialModeForCollationDDL(void)
{
if (!IsTransactionBlock())
{
/* we do not need to switch to sequential mode if we are not in a transaction */
return;
}
if (ParallelQueryExecutedInTransaction())
{
ereport(ERROR, (errmsg("cannot create or modify collation because there was a "
"parallel operation on a distributed table in the "
"transaction"),
errdetail("When creating or altering a collation, Citus needs to "
"perform all operations over a single connection per "
"node to ensure consistency."),
errhint("Try re-running the transaction with "
"\"SET LOCAL citus.multi_shard_modify_mode TO "
"\'sequential\';\"")));
}
ereport(DEBUG1, (errmsg("switching to sequential query execution mode"),
errdetail("Collation is created or altered. To make sure subsequent "
"commands see the collation correctly we need to make sure to "
"use only one connection for all future commands")));
SetLocalMultiShardModifyModeToSequential();
}
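This change (and the analogous ones below for databases, extensions, and functions) replaces the per-object EnsureSequentialModeFor*DDL helpers with a single EnsureSequentialMode(ObjectType) call. The shared helper's implementation is not part of this diff; a hedged sketch of what such a helper plausibly looks like, following the removed bodies:

```
/*
 * Sketch only: a generalized helper following the pattern of the removed
 * EnsureSequentialModeFor*DDL functions; the real EnsureSequentialMode()
 * implementation is not shown in this diff.
 */
static void
EnsureSequentialModeSketch(ObjectType objectType)
{
	/* objectType would presumably be used to tailor the messages below */
	(void) objectType;

	if (!IsTransactionBlock())
	{
		/* no need to switch to sequential mode outside a transaction block */
		return;
	}

	if (ParallelQueryExecutedInTransaction())
	{
		ereport(ERROR, (errmsg("cannot run this command because there was a "
							   "parallel operation on a distributed table in "
							   "the transaction"),
						errhint("Try re-running the transaction with "
								"\"SET LOCAL citus.multi_shard_modify_mode TO "
								"'sequential';\"")));
	}

	ereport(DEBUG1, (errmsg("switching to sequential query execution mode")));
	SetLocalMultiShardModifyModeToSequential();
}
```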
/*
* GenerateBackupNameForCollationCollision generates a new collation name for an existing collation.
* The name is generated in such a way that the new name doesn't overlap with an existing collation
@ -561,6 +519,26 @@ DefineCollationStmtObjectAddress(Node *node, bool missing_ok)
}
/*
* PreprocessDefineCollationStmt is executed before the collation has been
* created locally. It ensures that if the CREATE COLLATION statement will
* be propagated, we are on the coordinator node.
*/
List *
PreprocessDefineCollationStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION);
if (ShouldPropagateDefineCollationStmt())
{
EnsureCoordinator();
}
return NIL;
}
/*
* PostprocessDefineCollationStmt is executed after the collation has been
* created locally and before we create it on the worker nodes.
@ -573,16 +551,7 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString)
{
Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION);
if (!ShouldPropagate())
{
return NIL;
}
/*
* If the create collation command is a part of a multi-statement transaction,
* do not propagate it
*/
if (IsMultiStatementTransaction())
if (!ShouldPropagateDefineCollationStmt())
{
return NIL;
}
@ -590,13 +559,38 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString)
ObjectAddress collationAddress =
DefineCollationStmtObjectAddress(node, false);
if (IsObjectDistributed(&collationAddress))
{
EnsureCoordinator();
}
EnsureDependenciesExistOnAllNodes(&collationAddress);
return NodeDDLTaskList(NON_COORDINATOR_NODES, CreateCollationDDLsIdempotent(
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make1(DISABLE_DDL_PROPAGATION);
commands = list_concat(commands, CreateCollationDDLsIdempotent(
collationAddress.objectId));
commands = lappend(commands, ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* ShouldPropagateDefineCollationStmt checks if a DEFINE COLLATION
* statement should be propagated. Don't propagate if:
* - metadata syncing is off
* - the statement is part of a multi-statement transaction and the multi-shard
* connection type is not sequential
*/
static bool
ShouldPropagateDefineCollationStmt()
{
if (!ShouldPropagate())
{
return false;
}
if (IsMultiStatementTransaction() &&
MultiShardConnectionType != SEQUENTIAL_CONNECTION)
{
return false;
}
return true;
}

View File

@ -60,6 +60,7 @@
#include "distributed/remote_commands.h"
#include "distributed/shared_library_init.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_shard_visibility.h"
#include "distributed/worker_transaction.h"
#include "distributed/version_compat.h"
#include "executor/executor.h"
@ -327,6 +328,7 @@ create_reference_table(PG_FUNCTION_ARGS)
* - we are on the coordinator
* - the current user is the owner of the table
* - relation kind is supported
* - relation is not a shard
*/
static void
EnsureCitusTableCanBeCreated(Oid relationOid)
@ -343,6 +345,14 @@ EnsureCitusTableCanBeCreated(Oid relationOid)
* will be performed in CreateDistributedTable.
*/
EnsureRelationKindSupported(relationOid);
/*
* When the coordinator is added to the metadata, or on the workers,
* some of the relations on the node may be shards. We disallow creating
* distributed tables from shard relations by erroring out here.
*/
ErrorIfRelationIsAKnownShard(relationOid);
}

View File

@ -29,7 +29,6 @@
#include "distributed/relation_access_tracking.h"
#include "distributed/worker_transaction.h"
static void EnsureSequentialModeForDatabaseDDL(void);
static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid);
static Oid get_database_owner(Oid db_oid);
@ -66,7 +65,7 @@ PreprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString,
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
EnsureSequentialModeForDatabaseDDL();
EnsureSequentialMode(OBJECT_DATABASE);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
@ -177,39 +176,3 @@ get_database_owner(Oid db_oid)
return dba;
}
/*
* EnsureSequentialModeForDatabaseDDL makes sure that the current transaction is already
* in sequential mode, or can still safely be put in sequential mode, it errors if that is
* not possible. The error contains information for the user to retry the transaction with
* sequential mode set from the beginning.
*/
static void
EnsureSequentialModeForDatabaseDDL(void)
{
if (!IsTransactionBlock())
{
/* we do not need to switch to sequential mode if we are not in a transaction */
return;
}
if (ParallelQueryExecutedInTransaction())
{
ereport(ERROR, (errmsg("cannot create or modify database because there was a "
"parallel operation on a distributed table in the "
"transaction"),
errdetail("When creating or altering a database, Citus needs to "
"perform all operations over a single connection per "
"node to ensure consistency."),
errhint("Try re-running the transaction with "
"\"SET LOCAL citus.multi_shard_modify_mode TO "
"\'sequential\';\"")));
}
ereport(DEBUG1, (errmsg("switching to sequential query execution mode"),
errdetail("Database is created or altered. To make sure subsequent "
"commands see the type correctly we need to make sure to "
"use only one connection for all future commands")));
SetLocalMultiShardModifyModeToSequential();
}

View File

@ -241,6 +241,17 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
return NIL;
}
/*
* Indices are created separately; however, they do show up in the dependency
* list for a table since they potentially have their own dependencies.
* The index commands will be added for both shards and metadata tables via
* the table creation commands.
*/
if (relKind == RELKIND_INDEX)
{
return NIL;
}
if (relKind == RELKIND_RELATION || relKind == RELKIND_PARTITIONED_TABLE ||
relKind == RELKIND_FOREIGN_TABLE)
{
@ -317,6 +328,11 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
return DDLCommands;
}
case OCLASS_TSCONFIG:
{
return CreateTextSearchConfigDDLCommandsIdempotent(dependency);
}
case OCLASS_TYPE:
{
return CreateTypeDDLCommandsIdempotent(dependency);
@ -396,6 +412,15 @@ ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort)
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
{
if (IsObjectAddressOwnedByExtension(dependency, NULL))
{
/*
* we expect extension-owned objects to be created as a result
* of the extension being created.
*/
continue;
}
ddlCommands = list_concat(ddlCommands,
GetDependencyCreateDDLCommands(dependency));
}

View File

@ -175,7 +175,7 @@ static DistributeObjectOps Any_CreateFunction = {
.preprocess = PreprocessCreateFunctionStmt,
.postprocess = PostprocessCreateFunctionStmt,
.address = CreateFunctionStmtObjectAddress,
.markDistributed = false,
.markDistributed = true,
};
static DistributeObjectOps Any_CreatePolicy = {
.deparse = NULL,
@ -193,6 +193,14 @@ static DistributeObjectOps Any_CreateForeignServer = {
.address = CreateForeignServerStmtObjectAddress,
.markDistributed = true,
};
static DistributeObjectOps Any_CreateSchema = {
.deparse = DeparseCreateSchemaStmt,
.qualify = NULL,
.preprocess = PreprocessCreateSchemaStmt,
.postprocess = NULL,
.address = CreateSchemaStmtObjectAddress,
.markDistributed = true,
};
static DistributeObjectOps Any_CreateStatistics = {
.deparse = DeparseCreateStatisticsStmt,
.qualify = QualifyCreateStatisticsStmt,
@ -268,7 +276,7 @@ static DistributeObjectOps Collation_AlterOwner = {
static DistributeObjectOps Collation_Define = {
.deparse = NULL,
.qualify = NULL,
.preprocess = NULL,
.preprocess = PreprocessDefineCollationStmt,
.postprocess = PostprocessDefineCollationStmt,
.address = DefineCollationStmtObjectAddress,
.markDistributed = true,
@ -497,6 +505,62 @@ static DistributeObjectOps Sequence_Rename = {
.address = RenameSequenceStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_Alter = {
.deparse = DeparseAlterTextSearchConfigurationStmt,
.qualify = QualifyAlterTextSearchConfigurationStmt,
.preprocess = PreprocessAlterTextSearchConfigurationStmt,
.postprocess = NULL,
.address = AlterTextSearchConfigurationStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_AlterObjectSchema = {
.deparse = DeparseAlterTextSearchConfigurationSchemaStmt,
.qualify = QualifyAlterTextSearchConfigurationSchemaStmt,
.preprocess = PreprocessAlterTextSearchConfigurationSchemaStmt,
.postprocess = PostprocessAlterTextSearchConfigurationSchemaStmt,
.address = AlterTextSearchConfigurationSchemaStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_AlterOwner = {
.deparse = DeparseAlterTextSearchConfigurationOwnerStmt,
.qualify = QualifyAlterTextSearchConfigurationOwnerStmt,
.preprocess = PreprocessAlterTextSearchConfigurationOwnerStmt,
.postprocess = PostprocessAlterTextSearchConfigurationOwnerStmt,
.address = AlterTextSearchConfigurationOwnerObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_Comment = {
.deparse = DeparseTextSearchConfigurationCommentStmt,
.qualify = QualifyTextSearchConfigurationCommentStmt,
.preprocess = PreprocessTextSearchConfigurationCommentStmt,
.postprocess = NULL,
.address = TextSearchConfigurationCommentObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_Define = {
.deparse = DeparseCreateTextSearchStmt,
.qualify = NULL,
.preprocess = NULL,
.postprocess = PostprocessCreateTextSearchConfigurationStmt,
.address = CreateTextSearchConfigurationObjectAddress,
.markDistributed = true,
};
static DistributeObjectOps TextSearchConfig_Drop = {
.deparse = DeparseDropTextSearchConfigurationStmt,
.qualify = QualifyDropTextSearchConfigurationStmt,
.preprocess = PreprocessDropTextSearchConfigurationStmt,
.postprocess = NULL,
.address = NULL,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_Rename = {
.deparse = DeparseRenameTextSearchConfigurationStmt,
.qualify = QualifyRenameTextSearchConfigurationStmt,
.preprocess = PreprocessRenameTextSearchConfigurationStmt,
.postprocess = NULL,
.address = RenameTextSearchConfigurationStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps Trigger_AlterObjectDepends = {
.deparse = NULL,
.qualify = NULL,
@ -538,7 +602,7 @@ static DistributeObjectOps Routine_Rename = {
.markDistributed = false,
};
static DistributeObjectOps Schema_Drop = {
.deparse = NULL,
.deparse = DeparseDropSchemaStmt,
.qualify = NULL,
.preprocess = PreprocessDropSchemaStmt,
.postprocess = NULL,
@ -803,6 +867,11 @@ GetDistributeObjectOps(Node *node)
return &Table_AlterObjectSchema;
}
case OBJECT_TSCONFIGURATION:
{
return &TextSearchConfig_AlterObjectSchema;
}
case OBJECT_TYPE:
{
return &Type_AlterObjectSchema;
@ -860,6 +929,11 @@ GetDistributeObjectOps(Node *node)
return &Statistics_AlterOwner;
}
case OBJECT_TSCONFIGURATION:
{
return &TextSearchConfig_AlterOwner;
}
case OBJECT_TYPE:
{
return &Type_AlterOwner;
@ -941,11 +1015,33 @@ GetDistributeObjectOps(Node *node)
return &Any_AlterTableMoveAll;
}
case T_AlterTSConfigurationStmt:
{
return &TextSearchConfig_Alter;
}
case T_ClusterStmt:
{
return &Any_Cluster;
}
case T_CommentStmt:
{
CommentStmt *stmt = castNode(CommentStmt, node);
switch (stmt->objtype)
{
case OBJECT_TSCONFIGURATION:
{
return &TextSearchConfig_Comment;
}
default:
{
return &NoDistributeOps;
}
}
}
case T_CompositeTypeStmt:
{
return &Any_CompositeType;
@ -976,6 +1072,11 @@ GetDistributeObjectOps(Node *node)
return &Any_CreatePolicy;
}
case T_CreateSchemaStmt:
{
return &Any_CreateSchema;
}
case T_CreateStatsStmt:
{
return &Any_CreateStatistics;
@ -1001,6 +1102,11 @@ GetDistributeObjectOps(Node *node)
return &Collation_Define;
}
case OBJECT_TSCONFIGURATION:
{
return &TextSearchConfig_Define;
}
default:
{
return &NoDistributeOps;
@ -1078,6 +1184,11 @@ GetDistributeObjectOps(Node *node)
return &Table_Drop;
}
case OBJECT_TSCONFIGURATION:
{
return &TextSearchConfig_Drop;
}
case OBJECT_TYPE:
{
return &Type_Drop;
@ -1177,6 +1288,11 @@ GetDistributeObjectOps(Node *node)
return &Statistics_Rename;
}
case OBJECT_TSCONFIGURATION:
{
return &TextSearchConfig_Rename;
}
case OBJECT_TYPE:
{
return &Type_Rename;

View File

@ -37,7 +37,6 @@ static void AddSchemaFieldIfMissing(CreateExtensionStmt *stmt);
static List * FilterDistributedExtensions(List *extensionObjectList);
static List * ExtensionNameListToObjectAddressList(List *extensionObjectList);
static void MarkExistingObjectDependenciesDistributedIfSupported(void);
static void EnsureSequentialModeForExtensionDDL(void);
static bool ShouldPropagateExtensionCommand(Node *parseTree);
static bool IsAlterExtensionSetSchemaCitus(Node *parseTree);
static Node * RecreateExtensionStmt(Oid extensionOid);
@ -163,7 +162,7 @@ PostprocessCreateExtensionStmt(Node *node, const char *queryString)
* Make sure that the current transaction is already in sequential mode,
* or can still safely be put in sequential mode
*/
EnsureSequentialModeForExtensionDDL();
EnsureSequentialMode(OBJECT_EXTENSION);
/*
* Here we append "schema" field to the "options" list (if not specified)
@ -274,7 +273,7 @@ PreprocessDropExtensionStmt(Node *node, const char *queryString,
* Make sure that the current transaction is already in sequential mode,
* or can still safely be put in sequential mode
*/
EnsureSequentialModeForExtensionDDL();
EnsureSequentialMode(OBJECT_EXTENSION);
List *distributedExtensionAddresses = ExtensionNameListToObjectAddressList(
distributedExtensions);
@ -409,7 +408,7 @@ PreprocessAlterExtensionSchemaStmt(Node *node, const char *queryString,
* Make sure that the current transaction is already in sequential mode,
* or can still safely be put in sequential mode
*/
EnsureSequentialModeForExtensionDDL();
EnsureSequentialMode(OBJECT_EXTENSION);
const char *alterExtensionStmtSql = DeparseTreeNode(node);
@ -478,7 +477,7 @@ PreprocessAlterExtensionUpdateStmt(Node *node, const char *queryString,
* Make sure that the current transaction is already in sequential mode,
* or can still safely be put in sequential mode
*/
EnsureSequentialModeForExtensionDDL();
EnsureSequentialMode(OBJECT_EXTENSION);
const char *alterExtensionStmtSql = DeparseTreeNode((Node *) alterExtensionStmt);
@ -603,44 +602,6 @@ PreprocessAlterExtensionContentsStmt(Node *node, const char *queryString,
}
/*
* EnsureSequentialModeForExtensionDDL makes sure that the current transaction is already in
* sequential mode, or can still safely be put in sequential mode, it errors if that is
* not possible. The error contains information for the user to retry the transaction with
* sequential mode set from the beginning.
*
* As extensions are node scoped objects there exists only 1 instance of the
* extension used by potentially multiple shards. To make sure all shards in
* the transaction can interact with the extension the extension needs to be
* visible on all connections used by the transaction, meaning we can only use
* 1 connection per node.
*/
static void
EnsureSequentialModeForExtensionDDL(void)
{
if (ParallelQueryExecutedInTransaction())
{
ereport(ERROR, (errmsg("cannot run extension command because there was a "
"parallel operation on a distributed table in the "
"transaction"),
errdetail(
"When running command on/for a distributed extension, Citus needs to "
"perform all operations over a single connection per "
"node to ensure consistency."),
errhint("Try re-running the transaction with "
"\"SET LOCAL citus.multi_shard_modify_mode TO "
"\'sequential\';\"")));
}
ereport(DEBUG1, (errmsg("switching to sequential query execution mode"),
errdetail(
"A command for a distributed extension is run. To make sure subsequent "
"commands see the type correctly we need to make sure to "
"use only one connection for all future commands")));
SetLocalMultiShardModifyModeToSequential();
}
/*
* ShouldPropagateExtensionCommand determines whether to propagate an extension
* command to the worker nodes.

View File

@ -25,6 +25,7 @@
#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/pg_aggregate.h"
#include "catalog/dependency.h"
#include "catalog/namespace.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
@ -38,6 +39,7 @@
#include "distributed/listutils.h"
#include "distributed/maintenanced.h"
#include "distributed/metadata_utility.h"
#include "distributed/metadata/dependency.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata/pg_dist_object.h"
@ -77,15 +79,14 @@ static int GetFunctionColocationId(Oid functionOid, char *colocateWithName, Oid
static void EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid
distributionColumnType, Oid
sourceRelationId);
static void EnsureSequentialModeForFunctionDDL(void);
static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt);
static bool ShouldPropagateAlterFunction(const ObjectAddress *address);
static bool ShouldAddFunctionSignature(FunctionParameterMode mode);
static ObjectAddress * GetUndistributableDependency(ObjectAddress *functionAddress);
static ObjectAddress FunctionToObjectAddress(ObjectType objectType,
ObjectWithArgs *objectWithArgs,
bool missing_ok);
static void ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt);
static void ErrorIfFunctionDependsOnExtension(const ObjectAddress *functionAddress);
static char * quote_qualified_func_name(Oid funcOid);
static void DistributeFunctionWithDistributionArgument(RegProcedure funcOid,
char *distributionArgumentName,
@ -101,6 +102,9 @@ static void DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid
static void DistributeFunctionColocatedWithReferenceTable(const
ObjectAddress *functionAddress);
static void EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress,
const ObjectAddress extensionAddress,
char *distributionArgumentName);
PG_FUNCTION_INFO_V1(create_distributed_function);
@ -127,6 +131,7 @@ create_distributed_function(PG_FUNCTION_ARGS)
char *colocateWithTableName = NULL;
bool *forceDelegationAddress = NULL;
bool forceDelegation = false;
ObjectAddress extensionAddress = { 0 };
/* if called on NULL input, error out */
if (funcOid == InvalidOid)
@ -187,22 +192,35 @@ create_distributed_function(PG_FUNCTION_ARGS)
EnsureFunctionOwner(funcOid);
ObjectAddressSet(functionAddress, ProcedureRelationId, funcOid);
ErrorIfFunctionDependsOnExtension(&functionAddress);
/*
* when we allow propagation within a transaction block we should make sure to only
* allow this in sequential mode
* If the function is owned by an extension, only update pg_dist_object,
* and do not propagate the CREATE FUNCTION. The function will be created
* by virtue of the extension creation.
*/
EnsureSequentialModeForFunctionDDL();
if (IsObjectAddressOwnedByExtension(&functionAddress, &extensionAddress))
{
EnsureExtensionFunctionCanBeDistributed(functionAddress, extensionAddress,
distributionArgumentName);
}
else
{
/*
* when we allow propagation within a transaction block we should make sure
* to only allow this in sequential mode.
*/
EnsureSequentialMode(OBJECT_FUNCTION);
EnsureDependenciesExistOnAllNodes(&functionAddress);
EnsureDependenciesExistOnAllNodes(&functionAddress);
const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true);
const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
initStringInfo(&ddlCommand);
appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC,
createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC);
SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(), ddlCommand.data);
const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true);
const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
initStringInfo(&ddlCommand);
appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC,
createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC);
SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(),
ddlCommand.data);
}
MarkObjectDistributed(&functionAddress);
@ -744,7 +762,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
/*
* GetFunctionDDLCommand returns the complete "CREATE OR REPLACE FUNCTION ..." statement for
* the specified function followed by "ALTER FUNCTION .. SET OWNER ..".
* the specified function.
*
* useCreateOrReplace is ignored for non-aggregate functions.
*/
@ -1153,84 +1171,25 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
}
/*
* EnsureSequentialModeForFunctionDDL makes sure that the current transaction is already in
* sequential mode, or can still safely be put in sequential mode, it errors if that is
* not possible. The error contains information for the user to retry the transaction with
* sequential mode set from the beginning.
*
* As functions are node scoped objects there exists only 1 instance of the function used by
* potentially multiple shards. To make sure all shards in the transaction can interact
* with the function the function needs to be visible on all connections used by the transaction,
* meaning we can only use 1 connection per node.
*/
static void
EnsureSequentialModeForFunctionDDL(void)
{
if (ParallelQueryExecutedInTransaction())
{
ereport(ERROR, (errmsg("cannot create function because there was a "
"parallel operation on a distributed table in the "
"transaction"),
errdetail("When creating a distributed function, Citus needs to "
"perform all operations over a single connection per "
"node to ensure consistency."),
errhint("Try re-running the transaction with "
"\"SET LOCAL citus.multi_shard_modify_mode TO "
"\'sequential\';\"")));
}
ereport(DEBUG1, (errmsg("switching to sequential query execution mode"),
errdetail(
"A distributed function is created. To make sure subsequent "
"commands see the type correctly we need to make sure to "
"use only one connection for all future commands")));
SetLocalMultiShardModifyModeToSequential();
}
/*
* ShouldPropagateCreateFunction tests if we need to propagate a CREATE FUNCTION
* statement. We only propagate replace's of distributed functions to keep the function on
* the workers in sync with the one on the coordinator.
* statement.
*/
static bool
ShouldPropagateCreateFunction(CreateFunctionStmt *stmt)
{
if (creating_extension)
if (!ShouldPropagate())
{
/*
* extensions should be created separately on the workers, functions cascading
* from an extension should therefore not be propagated.
*/
return false;
}
if (!EnableMetadataSync)
{
/*
* we are configured to disable object propagation, should not propagate anything
*/
return false;
}
if (!stmt->replace)
{
/*
* Since we only care for a replace of distributed functions if the statement is
* not a replace we are going to ignore.
*/
return false;
}
/*
* Even though its a replace we should accept an non-existing function, it will just
* not be distributed
* If the create command is a part of a multi-statement transaction that is not in
* sequential mode, don't propagate.
*/
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, true);
if (!IsObjectDistributed(&address))
if (IsMultiStatementTransaction() &&
MultiShardConnectionType != SEQUENTIAL_CONNECTION)
{
/* do not propagate alter function for non-distributed functions */
return false;
}
@ -1274,12 +1233,10 @@ ShouldPropagateAlterFunction(const ObjectAddress *address)
/*
* PreprocessCreateFunctionStmt is called during the planning phase for CREATE [OR REPLACE]
* FUNCTION. We primarily care for the replace variant of this statement to keep
* distributed functions in sync. We bail via a check on ShouldPropagateCreateFunction
* which checks for the OR REPLACE modifier.
* FUNCTION before it is created on the local node internally.
*
* Since we use pg_get_functiondef to get the ddl command we actually do not do any
* planning here, instead we defer the plan creation to the processing step.
* planning here, instead we defer the plan creation to the postprocessing step.
*
* Instead we do our basic housekeeping where we make sure we are on the coordinator and
* can propagate the function in sequential mode.
@ -1297,10 +1254,10 @@ PreprocessCreateFunctionStmt(Node *node, const char *queryString,
EnsureCoordinator();
EnsureSequentialModeForFunctionDDL();
EnsureSequentialMode(OBJECT_FUNCTION);
/*
* ddl jobs will be generated during the Processing phase as we need the function to
* ddl jobs will be generated during the postprocessing phase as we need the function to
* be updated in the catalog to get its sql representation
*/
return NIL;
@ -1311,6 +1268,11 @@ PreprocessCreateFunctionStmt(Node *node, const char *queryString,
* PostprocessCreateFunctionStmt actually creates the plan we need to execute for function
* propagation. This is the downside of using pg_get_functiondef to get the sql statement.
*
* If the function depends on any non-distributed relation (except sequences and
* composite types), Citus cannot distribute it. In order not to prevent users from
* creating local functions on the coordinator, a WARNING message is emitted
* instead of erroring out.
*
* Besides creating the plan we also make sure all (new) dependencies of the function are
* created on all nodes.
*/
@ -1324,18 +1286,113 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString)
return NIL;
}
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
EnsureDependenciesExistOnAllNodes(&address);
ObjectAddress functionAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
List *commands = list_make4(DISABLE_DDL_PROPAGATION,
GetFunctionDDLCommand(address.objectId, true),
GetFunctionAlterOwnerCommand(address.objectId),
ENABLE_DDL_PROPAGATION);
if (IsObjectAddressOwnedByExtension(&functionAddress, NULL))
{
return NIL;
}
/*
* Ideally, this check would apply to all objects, not only to functions. Though,
* we do this limited check for now as functions are more likely to be used with
* such dependencies, and we want to keep the scope narrow for now.
*/
ObjectAddress *undistributableDependency = GetUndistributableDependency(
&functionAddress);
if (undistributableDependency != NULL)
{
if (SupportedDependencyByCitus(undistributableDependency))
{
/*
* Citus can't distribute some relations as a dependency, although those
* relation types are supported by Citus. So we can use get_rel_name directly.
*/
RangeVar *functionRangeVar = makeRangeVarFromNameList(stmt->funcname);
char *functionName = functionRangeVar->relname;
char *dependentRelationName =
get_rel_name(undistributableDependency->objectId);
ereport(WARNING, (errmsg("Citus can't distribute function \"%s\" having "
"dependency on non-distributed relation \"%s\"",
functionName, dependentRelationName),
errdetail("Function will be created only locally"),
errhint("To distribute function, distribute dependent "
"relations first. Then, re-create the function")));
}
else
{
char *objectType = NULL;
#if PG_VERSION_NUM >= PG_VERSION_14
objectType = getObjectTypeDescription(undistributableDependency, false);
#else
objectType = getObjectTypeDescription(undistributableDependency);
#endif
ereport(WARNING, (errmsg("Citus can't distribute functions having "
"dependency on unsupported object of type \"%s\"",
objectType),
errdetail("Function will be created only locally")));
}
return NIL;
}
EnsureDependenciesExistOnAllNodes(&functionAddress);
List *commands = list_make1(DISABLE_DDL_PROPAGATION);
commands = list_concat(commands, CreateFunctionDDLCommandsIdempotent(
&functionAddress));
commands = list_concat(commands, list_make1(ENABLE_DDL_PROPAGATION));
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* GetUndistributableDependency checks whether object has any non-distributable
* dependency. If any one found, it will be returned.
*/
static ObjectAddress *
GetUndistributableDependency(ObjectAddress *objectAddress)
{
List *dependencies = GetAllDependenciesForObject(objectAddress);
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
{
if (IsObjectDistributed(dependency))
{
continue;
}
if (!SupportedDependencyByCitus(dependency))
{
/*
* Since roles should be handled manually with Citus community, skip them.
*/
if (getObjectClass(dependency) != OCLASS_ROLE)
{
return dependency;
}
}
if (getObjectClass(dependency) == OCLASS_CLASS)
{
/*
* Citus can only distribute dependent non-distributed sequence
* and composite types.
*/
char relKind = get_rel_relkind(dependency->objectId);
if (relKind != RELKIND_SEQUENCE && relKind != RELKIND_COMPOSITE_TYPE)
{
return dependency;
}
}
}
return NULL;
}
/*
* CreateFunctionStmtObjectAddress returns the ObjectAddress for the subject of the
* CREATE [OR REPLACE] FUNCTION statement. If missing_ok is false it will error with the
@ -1416,7 +1473,7 @@ PreprocessAlterFunctionStmt(Node *node, const char *queryString,
EnsureCoordinator();
ErrorIfUnsupportedAlterFunctionStmt(stmt);
EnsureSequentialModeForFunctionDDL();
EnsureSequentialMode(OBJECT_FUNCTION);
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
@ -1450,7 +1507,7 @@ PreprocessRenameFunctionStmt(Node *node, const char *queryString,
}
EnsureCoordinator();
EnsureSequentialModeForFunctionDDL();
EnsureSequentialMode(OBJECT_FUNCTION);
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
@ -1482,7 +1539,7 @@ PreprocessAlterFunctionSchemaStmt(Node *node, const char *queryString,
}
EnsureCoordinator();
EnsureSequentialModeForFunctionDDL();
EnsureSequentialMode(OBJECT_FUNCTION);
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
@ -1515,7 +1572,7 @@ PreprocessAlterFunctionOwnerStmt(Node *node, const char *queryString,
}
EnsureCoordinator();
EnsureSequentialModeForFunctionDDL();
EnsureSequentialMode(OBJECT_FUNCTION);
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
@ -1605,7 +1662,7 @@ PreprocessDropFunctionStmt(Node *node, const char *queryString,
* types, so we block the call.
*/
EnsureCoordinator();
EnsureSequentialModeForFunctionDDL();
EnsureSequentialMode(OBJECT_FUNCTION);
/* remove the entries for the distributed objects on dropping */
ObjectAddress *address = NULL;
@ -2013,33 +2070,6 @@ ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt)
}
/*
* ErrorIfFunctionDependsOnExtension functions depending on extensions should raise an
* error informing the user why they can't be distributed.
*/
static void
ErrorIfFunctionDependsOnExtension(const ObjectAddress *functionAddress)
{
/* captures the extension address during lookup */
ObjectAddress extensionAddress = { 0 };
if (IsObjectAddressOwnedByExtension(functionAddress, &extensionAddress))
{
char *functionName =
getObjectIdentity_compat(functionAddress, /* missingOk: */ false);
char *extensionName =
getObjectIdentity_compat(&extensionAddress, /* missingOk: */ false);
ereport(ERROR, (errmsg("unable to create a distributed function from functions "
"owned by an extension"),
errdetail("Function \"%s\" has a dependency on extension \"%s\". "
"Functions depending on an extension cannot be "
"distributed. Create the function by creating the "
"extension on the workers.", functionName,
extensionName)));
}
}
/* returns the quoted qualified name of a given function oid */
static char *
quote_qualified_func_name(Oid funcOid)
@ -2048,3 +2078,54 @@ quote_qualified_func_name(Oid funcOid)
get_namespace_name(get_func_namespace(funcOid)),
get_func_name(funcOid));
}
/*
* EnsureExtensionFunctionCanBeDistributed checks if the dependent objects
* (including the extension) exist on all nodes and, if not, creates them. In
* addition, it also checks that a distribution argument is passed.
*/
static void
EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress,
const ObjectAddress extensionAddress,
char *distributionArgumentName)
{
if (CitusExtensionObject(&extensionAddress))
{
/*
* Citus extension is a special case. It's the extension that
* provides the 'distributed capabilities' in the first place.
* Trying to distribute its own function(s) doesn't make sense.
*/
ereport(ERROR, (errmsg("Citus extension functions(%s) "
"cannot be distributed.",
get_func_name(functionAddress.objectId))));
}
/*
* Distributing functions from extensions has the most benefit when
* distribution argument is specified.
*/
if (distributionArgumentName == NULL)
{
ereport(ERROR, (errmsg("Extension functions(%s) "
"without distribution argument "
"are not supported.",
get_func_name(functionAddress.objectId))));
}
/*
* Ensure the corresponding extension is in pg_dist_object.
* Functions owned by an extension depend internally on that extension,
* hence EnsureDependenciesExistOnAllNodes() creates the extension, which in
* turn creates the function, and thus we don't have to create it ourselves like
* we do for non-extension functions.
*/
ereport(DEBUG1, (errmsg("Extension(%s) owning the "
"function(%s) is not distributed, "
"attempting to propogate the extension",
get_extension_name(extensionAddress.objectId),
get_func_name(functionAddress.objectId))));
EnsureDependenciesExistOnAllNodes(&functionAddress);
}

View File

@ -725,12 +725,6 @@ PostprocessIndexStmt(Node *node, const char *queryString)
{
IndexStmt *indexStmt = castNode(IndexStmt, node);
/* we are only processing CONCURRENT index statements */
if (!indexStmt->concurrent)
{
return NIL;
}
/* this logic only applies to the coordinator */
if (!IsCoordinator())
{
@ -747,14 +741,36 @@ PostprocessIndexStmt(Node *node, const char *queryString)
return NIL;
}
Oid indexRelationId = get_relname_relid(indexStmt->idxname, schemaId);
/* ensure dependencies of index exist on all nodes */
ObjectAddress address = { 0 };
ObjectAddressSet(address, RelationRelationId, indexRelationId);
EnsureDependenciesExistOnAllNodes(&address);
/* from here on we are only processing CONCURRENT index statements */
if (!indexStmt->concurrent)
{
return NIL;
}
/*
* EnsureDependenciesExistOnAllNodes could have distributed objects that are required
* by this index. During the propagation process an active snapshot might be left as
* a side effect of inserting the local tuples via SPI. To not leak a snapshot like
* that, we pop any active snapshot right before we commit.
*/
if (ActiveSnapshotSet())
{
PopActiveSnapshot();
}
/* commit the current transaction and start anew */
CommitTransactionCommand();
StartTransactionCommand();
/* get the affected relation and index */
Relation relation = table_openrv(indexStmt->relation, ShareUpdateExclusiveLock);
Oid indexRelationId = get_relname_relid(indexStmt->idxname,
schemaId);
Relation indexRelation = index_open(indexRelationId, RowExclusiveLock);
/* close relations but retain locks */

View File

@ -40,8 +40,39 @@
#include "utils/relcache.h"
static ObjectAddress GetObjectAddressBySchemaName(char *schemaName, bool missing_ok);
static List * FilterDistributedSchemas(List *schemas);
static void EnsureSequentialModeForSchemaDDL(void);
static bool SchemaHasDistributedTableWithFKey(char *schemaName);
static bool ShouldPropagateCreateSchemaStmt(void);
/*
* PreprocessCreateSchemaStmt is called during the planning phase for
* CREATE SCHEMA ..
*/
List *
PreprocessCreateSchemaStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
if (!ShouldPropagateCreateSchemaStmt())
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_SCHEMA);
/* deparse sql */
const char *sql = DeparseTreeNode(node);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
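/*
 * A rough sketch of the worker-side task list built above, assuming the user
 * ran "CREATE SCHEMA sales AUTHORIZATION sales_admin" (schema and role names
 * are hypothetical):
 *
 *   SET citus.enable_ddl_propagation TO 'off';
 *   CREATE SCHEMA sales AUTHORIZATION sales_admin;
 *   SET citus.enable_ddl_propagation TO 'on';
 */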
/*
@ -53,76 +84,54 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
DropStmt *dropStatement = castNode(DropStmt, node);
Relation pgClass = NULL;
HeapTuple heapTuple = NULL;
SysScanDesc scanDescriptor = NULL;
ScanKeyData scanKey[1];
int scanKeyCount = 1;
Oid scanIndexId = InvalidOid;
bool useIndex = false;
Assert(dropStatement->removeType == OBJECT_SCHEMA);
if (dropStatement->behavior != DROP_CASCADE)
if (!ShouldPropagate())
{
return NIL;
}
Value *schemaValue = NULL;
foreach_ptr(schemaValue, dropStatement->objects)
EnsureCoordinator();
List *distributedSchemas = FilterDistributedSchemas(dropStatement->objects);
if (list_length(distributedSchemas) < 1)
{
const char *schemaString = strVal(schemaValue);
Oid namespaceOid = get_namespace_oid(schemaString, true);
if (namespaceOid == InvalidOid)
{
continue;
}
pgClass = table_open(RelationRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber,
F_OIDEQ, namespaceOid);
scanDescriptor = systable_beginscan(pgClass, scanIndexId, useIndex, NULL,
scanKeyCount, scanKey);
heapTuple = systable_getnext(scanDescriptor);
while (HeapTupleIsValid(heapTuple))
{
Form_pg_class relationForm = (Form_pg_class) GETSTRUCT(heapTuple);
char *relationName = NameStr(relationForm->relname);
Oid relationId = get_relname_relid(relationName, namespaceOid);
/* we're not interested in non-valid, non-distributed relations */
if (relationId == InvalidOid || !IsCitusTable(relationId))
{
heapTuple = systable_getnext(scanDescriptor);
continue;
}
if (IsCitusTableType(relationId, REFERENCE_TABLE))
{
/* prevent concurrent EnsureReferenceTablesExistOnAllNodes */
int colocationId = CreateReferenceTableColocationId();
LockColocationId(colocationId, ExclusiveLock);
}
/* invalidate foreign key cache if the table involved in any foreign key */
if (TableReferenced(relationId) || TableReferencing(relationId))
{
MarkInvalidateForeignKeyGraph();
systable_endscan(scanDescriptor);
table_close(pgClass, NoLock);
return NIL;
}
heapTuple = systable_getnext(scanDescriptor);
}
systable_endscan(scanDescriptor);
table_close(pgClass, NoLock);
return NIL;
}
return NIL;
EnsureSequentialMode(OBJECT_SCHEMA);
Value *schemaVal = NULL;
foreach_ptr(schemaVal, distributedSchemas)
{
if (SchemaHasDistributedTableWithFKey(strVal(schemaVal)))
{
MarkInvalidateForeignKeyGraph();
break;
}
}
/*
 * We swap out the schemas in the statement so that it only contains the
 * distributed schemas before deparsing. We need to restore the original list
 * as postgres will execute this statement locally, which requires all of the
 * schemas originally named by the user to be present.
 */
List *originalObjects = dropStatement->objects;
dropStatement->objects = distributedSchemas;
const char *sql = DeparseTreeNode(node);
dropStatement->objects = originalObjects;
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
@ -194,7 +203,7 @@ PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString,
/* deparse sql */
const char *renameStmtSql = DeparseTreeNode(node);
EnsureSequentialModeForSchemaDDL();
EnsureSequentialMode(OBJECT_SCHEMA);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
@ -205,6 +214,19 @@ PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString,
}
/*
* CreateSchemaStmtObjectAddress returns the ObjectAddress of the schema that is
* the object of the CreateSchemaStmt. Errors if missing_ok is false.
*/
ObjectAddress
CreateSchemaStmtObjectAddress(Node *node, bool missing_ok)
{
CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node);
return GetObjectAddressBySchemaName(stmt->schemaname, missing_ok);
}
/*
* AlterSchemaRenameStmtObjectAddress returns the ObjectAddress of the schema that is
* the object of the RenameStmt. Errors if missing_ok is false.
@ -215,7 +237,17 @@ AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok)
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_SCHEMA);
const char *schemaName = stmt->subname;
return GetObjectAddressBySchemaName(stmt->subname, missing_ok);
}
/*
* GetObjectAddressBySchemaName returns the ObjectAddress of the schema with the
* given name. Errors out if schema is not found and missing_ok is false.
*/
ObjectAddress
GetObjectAddressBySchemaName(char *schemaName, bool missing_ok)
{
Oid schemaOid = get_namespace_oid(schemaName, missing_ok);
ObjectAddress address = { 0 };
@ -261,38 +293,85 @@ FilterDistributedSchemas(List *schemas)
/*
* EnsureSequentialModeForSchemaDDL makes sure that the current transaction is already in
* sequential mode, or can still safely be put in sequential mode, it errors if that is
* not possible. The error contains information for the user to retry the transaction with
* sequential mode set from the beginning.
*
* Copy-pasted from type.c
* SchemaHasDistributedTableWithFKey takes a schema name and scans the relations within
* that schema. If any one of the relations has a foreign key relationship, it returns
* true. Returns false otherwise.
*/
static void
EnsureSequentialModeForSchemaDDL(void)
static bool
SchemaHasDistributedTableWithFKey(char *schemaName)
{
if (!IsTransactionBlock())
ScanKeyData scanKey[1];
int scanKeyCount = 1;
Oid scanIndexId = InvalidOid;
bool useIndex = false;
Oid namespaceOid = get_namespace_oid(schemaName, true);
if (namespaceOid == InvalidOid)
{
/* we do not need to switch to sequential mode if we are not in a transaction */
return;
return false;
}
if (ParallelQueryExecutedInTransaction())
Relation pgClass = table_open(RelationRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber,
F_OIDEQ, namespaceOid);
SysScanDesc scanDescriptor = systable_beginscan(pgClass, scanIndexId, useIndex, NULL,
scanKeyCount, scanKey);
HeapTuple heapTuple = systable_getnext(scanDescriptor);
while (HeapTupleIsValid(heapTuple))
{
ereport(ERROR, (errmsg("cannot create or modify schema because there was a "
"parallel operation on a distributed table in the "
"transaction"),
errdetail("When creating or altering a schema, Citus needs to "
"perform all operations over a single connection per "
"node to ensure consistency."),
errhint("Try re-running the transaction with "
"\"SET LOCAL citus.multi_shard_modify_mode TO "
"\'sequential\';\"")));
Form_pg_class relationForm = (Form_pg_class) GETSTRUCT(heapTuple);
char *relationName = NameStr(relationForm->relname);
Oid relationId = get_relname_relid(relationName, namespaceOid);
/* we're not interested in non-valid, non-distributed relations */
if (relationId == InvalidOid || !IsCitusTable(relationId))
{
heapTuple = systable_getnext(scanDescriptor);
continue;
}
/* invalidate foreign key cache if the table is involved in any foreign key */
if (TableReferenced(relationId) || TableReferencing(relationId))
{
systable_endscan(scanDescriptor);
table_close(pgClass, NoLock);
return true;
}
heapTuple = systable_getnext(scanDescriptor);
}
ereport(DEBUG1, (errmsg("switching to sequential query execution mode"),
errdetail("Schema is created or altered. To make sure subsequent "
"commands see the schema correctly we need to make sure to "
"use only one connection for all future commands")));
SetLocalMultiShardModifyModeToSequential();
systable_endscan(scanDescriptor);
table_close(pgClass, NoLock);
return false;
}
/*
 * ShouldPropagateCreateSchemaStmt gets called only for CreateSchemaStmt nodes.
 * It wraps the ShouldPropagate function, which is commonly used for all object
 * types, and additionally checks whether a multi-statement transaction is
 * ongoing. Inside a transaction block we require sequential mode for CREATE
 * SCHEMA statements; if Citus has not already switched to sequential mode, we
 * don't propagate.
*/
static bool
ShouldPropagateCreateSchemaStmt()
{
if (!ShouldPropagate())
{
return false;
}
if (IsMultiStatementTransaction() &&
MultiShardConnectionType != SEQUENTIAL_CONNECTION)
{
return false;
}
return true;
}
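/*
 * In practice this means that inside a transaction block the user has to opt
 * into sequential mode for the CREATE SCHEMA to be propagated, roughly like
 * the following (schema name is illustrative only):
 *
 *   BEGIN;
 *   SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
 *   CREATE SCHEMA sales;
 *   ...
 *   COMMIT;
 */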

View File

@ -0,0 +1,935 @@
/*-------------------------------------------------------------------------
*
* text_search.c
* Commands for creating and altering TEXT SEARCH objects
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/genam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
#include "catalog/objectaddress.h"
#include "catalog/pg_ts_config.h"
#include "catalog/pg_ts_config_map.h"
#include "catalog/pg_ts_dict.h"
#include "catalog/pg_ts_parser.h"
#include "commands/comment.h"
#include "commands/extension.h"
#include "fmgr.h"
#include "nodes/makefuncs.h"
#include "tsearch/ts_cache.h"
#include "tsearch/ts_public.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"
#include "distributed/multi_executor.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/worker_create_or_replace.h"
static List * GetDistributedTextSearchConfigurationNames(DropStmt *stmt);
static DefineStmt * GetTextSearchConfigDefineStmt(Oid tsconfigOid);
static List * GetTextSearchConfigCommentStmt(Oid tsconfigOid);
static List * get_ts_parser_namelist(Oid tsparserOid);
static List * GetTextSearchConfigMappingStmt(Oid tsconfigOid);
static List * GetTextSearchConfigOwnerStmts(Oid tsconfigOid);
static List * get_ts_dict_namelist(Oid tsdictOid);
static Oid get_ts_config_parser_oid(Oid tsconfigOid);
static char * get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype);
/*
* PostprocessCreateTextSearchConfigurationStmt is called after the TEXT SEARCH
* CONFIGURATION has been created locally.
*
 * Contrary to many other objects, a text search configuration is often created as a copy
 * of an existing configuration. After the copy there is no relation to the configuration
 * that has been copied, which prevents our normal approach of ensuring dependencies
 * exist before forwarding a close resemblance of the statement the user executed.
*
* Instead we recreate the object based on what we find in our own catalog, hence the
* amount of work we perform in the postprocess function, contrary to other objects.
*/
List *
PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString)
{
DefineStmt *stmt = castNode(DefineStmt, node);
Assert(stmt->kind == OBJECT_TSCONFIGURATION);
if (!ShouldPropagate())
{
return NIL;
}
/*
* If the create command is a part of a multi-statement transaction that is not in
* sequential mode, don't propagate. Instead we will rely on back filling.
*/
if (IsMultiStatementTransaction())
{
if (MultiShardConnectionType != SEQUENTIAL_CONNECTION)
{
return NIL;
}
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
EnsureDependenciesExistOnAllNodes(&address);
/*
 * TEXT SEARCH CONFIGURATION objects are complex enough, with their mappings and the
 * possibility of copying from existing templates, that we require the idempotent
 * recreation commands to be run for successful propagation.
*/
List *commands = CreateTextSearchConfigDDLCommandsIdempotent(&address);
commands = lcons(DISABLE_DDL_PROPAGATION, commands);
commands = lappend(commands, ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
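/*
 * For example (configuration name is illustrative), a statement like
 *
 *   CREATE TEXT SEARCH CONFIGURATION my_config ( COPY = english );
 *
 * leaves no trace of the "english" template it was copied from, so instead of
 * forwarding the original statement we regenerate idempotent commands from our
 * catalog via CreateTextSearchConfigDDLCommandsIdempotent below.
 */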
List *
GetCreateTextSearchConfigStatements(const ObjectAddress *address)
{
Assert(address->classId == TSConfigRelationId);
List *stmts = NIL;
/* CREATE TEXT SEARCH CONFIGURATION ...*/
stmts = lappend(stmts, GetTextSearchConfigDefineStmt(address->objectId));
/* ALTER TEXT SEARCH CONFIGURATION ... OWNER TO ...*/
stmts = list_concat(stmts, GetTextSearchConfigOwnerStmts(address->objectId));
/* COMMENT ON TEXT SEARCH CONFIGURATION ... */
stmts = list_concat(stmts, GetTextSearchConfigCommentStmt(address->objectId));
/* ALTER TEXT SEARCH CONFIGURATION ... ADD MAPPING FOR ... WITH ... */
stmts = list_concat(stmts, GetTextSearchConfigMappingStmt(address->objectId));
return stmts;
}
/*
* CreateTextSearchConfigDDLCommandsIdempotent creates a list of ddl commands to recreate
 * a TEXT SEARCH CONFIGURATION object in an idempotent manner on workers.
*/
List *
CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address)
{
List *stmts = GetCreateTextSearchConfigStatements(address);
List *sqls = DeparseTreeNodes(stmts);
return list_make1(WrapCreateOrReplaceList(sqls));
}
/*
 * PreprocessDropTextSearchConfigurationStmt prepares the statements we need to send to
 * the workers. After we have dropped the configurations locally they are also removed
 * from pg_dist_object, so it is important to do all distribution checks before the
 * change is made locally.
*/
List *
PreprocessDropTextSearchConfigurationStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
DropStmt *stmt = castNode(DropStmt, node);
Assert(stmt->removeType == OBJECT_TSCONFIGURATION);
if (!ShouldPropagate())
{
return NIL;
}
List *distributedObjects = GetDistributedTextSearchConfigurationNames(stmt);
if (list_length(distributedObjects) == 0)
{
/* no distributed objects to remove */
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
/*
 * Temporarily replace the list of objects being dropped with only the list
 * containing the distributed objects. After we have created the sql statement we
 * restore the original list of objects to execute locally.
 *
 * Because search paths on the coordinator and workers might not be in sync we fully
 * qualify the list before deparsing. This is safe because qualification doesn't
 * change the original names in place, but instead creates new ones.
*/
List *originalObjects = stmt->objects;
stmt->objects = distributedObjects;
QualifyTreeNode((Node *) stmt);
const char *dropStmtSql = DeparseTreeNode((Node *) stmt);
stmt->objects = originalObjects;
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) dropStmtSql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
 * GetDistributedTextSearchConfigurationNames iterates over all text search configurations
 * being dropped and creates a list containing all configurations that are distributed.
*/
static List *
GetDistributedTextSearchConfigurationNames(DropStmt *stmt)
{
List *objName = NULL;
List *distributedObjects = NIL;
foreach_ptr(objName, stmt->objects)
{
Oid tsconfigOid = get_ts_config_oid(objName, stmt->missing_ok);
if (!OidIsValid(tsconfigOid))
{
/* skip missing configuration names, they can't be distributed */
continue;
}
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, tsconfigOid);
if (!IsObjectDistributed(&address))
{
continue;
}
distributedObjects = lappend(distributedObjects, objName);
}
return distributedObjects;
}
/*
 * PreprocessAlterTextSearchConfigurationStmt verifies whether the configuration being
 * altered is distributed in the cluster. If that is the case it prepares the list of
 * commands to send to the workers to apply the same changes remotely.
*/
List *
PreprocessAlterTextSearchConfigurationStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
const char *alterStmtSql = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) alterStmtSql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
 * PreprocessRenameTextSearchConfigurationStmt verifies whether the configuration being
 * altered is distributed in the cluster. If that is the case it prepares the list of
 * commands to send to the workers to apply the same changes remotely.
*/
List *
PreprocessRenameTextSearchConfigurationStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
char *ddlCommand = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) ddlCommand,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
 * PreprocessAlterTextSearchConfigurationSchemaStmt verifies whether the configuration
 * being altered is distributed in the cluster. If that is the case it prepares the list
 * of commands to send to the workers to apply the same changes remotely.
*/
List *
PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString,
ProcessUtilityContext
processUtilityContext)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt,
stmt->missing_ok);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
* PostprocessAlterTextSearchConfigurationSchemaStmt is invoked after the schema has been
* changed locally. Since changing the schema could result in new dependencies being found
* for this object we re-ensure all the dependencies for the configuration do exist. This
* is solely to propagate the new schema (and all its dependencies) if it was not already
* distributed in the cluster.
*/
List *
PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt,
stmt->missing_ok);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
/* dependencies have changed (schema) let's ensure they exist */
EnsureDependenciesExistOnAllNodes(&address);
return NIL;
}
/*
* PreprocessTextSearchConfigurationCommentStmt propagates any comment on a distributed
 * configuration to the workers. Since comments for configurations are prominently shown
 * when listing all text search configurations, this is purely a cosmetic thing when
 * running in MX.
*/
List *
PreprocessTextSearchConfigurationCommentStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
CommentStmt *stmt = castNode(CommentStmt, node);
Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
 * PreprocessAlterTextSearchConfigurationOwnerStmt verifies whether the configuration
 * being altered is distributed in the cluster. If that is the case it prepares the list
 * of commands to send to the workers to apply the same changes remotely.
*/
List *
PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString,
ProcessUtilityContext
processUtilityContext)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
char *sql = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* PostprocessAlterTextSearchConfigurationOwnerStmt is invoked after the owner has been
* changed locally. Since changing the owner could result in new dependencies being found
* for this object we re-ensure all the dependencies for the configuration do exist. This
* is solely to propagate the new owner (and all its dependencies) if it was not already
* distributed in the cluster.
*/
List *
PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
/* dependencies have changed (owner) let's ensure they exist */
EnsureDependenciesExistOnAllNodes(&address);
return NIL;
}
/*
* GetTextSearchConfigDefineStmt returns the DefineStmt for a TEXT SEARCH CONFIGURATION
* based on the configuration as defined in the catalog identified by tsconfigOid.
*
* This statement will only contain the parser, as all other properties for text search
* configurations are stored as mappings in a different catalog.
*/
static DefineStmt *
GetTextSearchConfigDefineStmt(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search configuration %u",
tsconfigOid);
}
Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup);
DefineStmt *stmt = makeNode(DefineStmt);
stmt->kind = OBJECT_TSCONFIGURATION;
stmt->defnames = get_ts_config_namelist(tsconfigOid);
List *parserNameList = get_ts_parser_namelist(config->cfgparser);
TypeName *parserTypeName = makeTypeNameFromNameList(parserNameList);
stmt->definition = list_make1(makeDefElem("parser", (Node *) parserTypeName, -1));
ReleaseSysCache(tup);
return stmt;
}
/*
* GetTextSearchConfigCommentStmt returns a list containing all entries to recreate a
* comment on the configuration identified by tsconfigOid. The list could be empty if
* there is no comment on a configuration.
*
* The reason for a list is for easy use when building a list of all statements to invoke
 * to recreate the text search configuration. An empty list can easily be concatenated
 * without inspection, contrary to a NULL ptr if we returned the CommentStmt struct.
*/
static List *
GetTextSearchConfigCommentStmt(Oid tsconfigOid)
{
char *comment = GetComment(tsconfigOid, TSConfigRelationId, 0);
if (!comment)
{
return NIL;
}
CommentStmt *stmt = makeNode(CommentStmt);
stmt->objtype = OBJECT_TSCONFIGURATION;
stmt->object = (Node *) get_ts_config_namelist(tsconfigOid);
stmt->comment = comment;
return list_make1(stmt);
}
/*
* GetTextSearchConfigMappingStmt returns a list of all mappings from token_types to
* dictionaries configured on a text search configuration identified by tsconfigOid.
*
* Many mappings can exist on a configuration which all require their own statement to
* recreate.
*/
static List *
GetTextSearchConfigMappingStmt(Oid tsconfigOid)
{
ScanKeyData mapskey = { 0 };
/* mapcfg = tsconfigOid */
ScanKeyInit(&mapskey,
Anum_pg_ts_config_map_mapcfg,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(tsconfigOid));
Relation maprel = table_open(TSConfigMapRelationId, AccessShareLock);
Relation mapidx = index_open(TSConfigMapIndexId, AccessShareLock);
SysScanDesc mapscan = systable_beginscan_ordered(maprel, mapidx, NULL, 1, &mapskey);
List *stmts = NIL;
AlterTSConfigurationStmt *stmt = NULL;
/*
 * We iterate the config mappings in index order, filtered by mapcfg, meaning we
 * get equal maptokentype values in one run. By comparing the current tokentype to the
 * last one we know when we can create a new stmt and append the previously
 * constructed one to the list.
*/
int lastTokType = -1;
/*
* We read all mappings filtered by config id, hence we only need to load the name
 * once and can reuse it for every statement.
*/
List *configName = get_ts_config_namelist(tsconfigOid);
Oid parserOid = get_ts_config_parser_oid(tsconfigOid);
HeapTuple maptup = NULL;
while ((maptup = systable_getnext_ordered(mapscan, ForwardScanDirection)) != NULL)
{
Form_pg_ts_config_map cfgmap = (Form_pg_ts_config_map) GETSTRUCT(maptup);
if (lastTokType != cfgmap->maptokentype)
{
/* creating a new statement, appending the previous one (if existing) */
if (stmt != NULL)
{
stmts = lappend(stmts, stmt);
}
stmt = makeNode(AlterTSConfigurationStmt);
stmt->cfgname = configName;
stmt->kind = ALTER_TSCONFIG_ADD_MAPPING;
stmt->tokentype = list_make1(makeString(
get_ts_parser_tokentype_name(parserOid,
cfgmap->
maptokentype)));
lastTokType = cfgmap->maptokentype;
}
stmt->dicts = lappend(stmt->dicts, get_ts_dict_namelist(cfgmap->mapdict));
}
/*
 * If we ran at least one iteration above, the last stmt has not yet been added to
 * the stmts list.
*/
if (stmt != NULL)
{
stmts = lappend(stmts, stmt);
stmt = NULL;
}
systable_endscan_ordered(mapscan);
index_close(mapidx, NoLock);
table_close(maprel, NoLock);
return stmts;
}
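/*
 * Each statement produced above maps a single token type to one or more
 * dictionaries, e.g. (configuration, token, and dictionary names are
 * illustrative only):
 *
 *   ALTER TEXT SEARCH CONFIGURATION public.my_config
 *     ADD MAPPING FOR asciiword WITH english_stem;
 */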
/*
* GetTextSearchConfigOwnerStmts returns a potentially empty list of statements to change
* the ownership of a TEXT SEARCH CONFIGURATION object.
*
 * The list is for convenience when building a full list of statements to recreate the
* configuration.
*/
static List *
GetTextSearchConfigOwnerStmts(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search configuration %u",
tsconfigOid);
}
Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup);
AlterOwnerStmt *stmt = makeNode(AlterOwnerStmt);
stmt->objectType = OBJECT_TSCONFIGURATION;
stmt->object = (Node *) get_ts_config_namelist(tsconfigOid);
stmt->newowner = GetRoleSpecObjectForUser(config->cfgowner);
ReleaseSysCache(tup);
return list_make1(stmt);
}
/*
 * get_ts_config_namelist creates, based on the tsconfigOid, the namelist that
 * identifies the configuration in a fully qualified manner, regardless of whether
 * the schema is on the search_path.
*/
List *
get_ts_config_namelist(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search configuration %u",
tsconfigOid);
}
Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup);
char *schema = get_namespace_name(config->cfgnamespace);
char *configName = pstrdup(NameStr(config->cfgname));
List *names = list_make2(makeString(schema), makeString(configName));
ReleaseSysCache(tup);
return names;
}
/*
 * get_ts_dict_namelist creates, based on the tsdictOid, the namelist that
 * identifies the dictionary in a fully qualified manner, regardless of whether
 * the schema is on the search_path.
*/
static List *
get_ts_dict_namelist(Oid tsdictOid)
{
HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search dictionary %u", tsdictOid);
}
Form_pg_ts_dict dict = (Form_pg_ts_dict) GETSTRUCT(tup);
char *schema = get_namespace_name(dict->dictnamespace);
char *dictName = pstrdup(NameStr(dict->dictname));
List *names = list_make2(makeString(schema), makeString(dictName));
ReleaseSysCache(tup);
return names;
}
/*
 * get_ts_config_parser_oid returns, based on the tsconfigOid, the Oid of the
 * parser used in the configuration.
*/
static Oid
get_ts_config_parser_oid(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search configuration %u", tsconfigOid);
}
Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup);
Oid parserOid = config->cfgparser;
ReleaseSysCache(tup);
return parserOid;
}
/*
* get_ts_parser_tokentype_name returns the name of the token as known to the parser by
* its tokentype identifier. The parser used to resolve the token name is identified by
* parserOid and should be the same that emitted the tokentype to begin with.
*/
static char *
get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype)
{
TSParserCacheEntry *parserCache = lookup_ts_parser_cache(parserOid);
if (!OidIsValid(parserCache->lextypeOid))
{
elog(ERROR, "method lextype isn't defined for text search parser %u", parserOid);
}
/* take lextypes from parser */
LexDescr *tokenlist = (LexDescr *) DatumGetPointer(
OidFunctionCall1(parserCache->lextypeOid, Int32GetDatum(0)));
/* and find the one with lexid = tokentype */
int tokenIndex = 0;
while (tokenlist && tokenlist[tokenIndex].lexid)
{
if (tokenlist[tokenIndex].lexid == tokentype)
{
return pstrdup(tokenlist[tokenIndex].alias);
}
tokenIndex++;
}
/* we haven't found the token */
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("token type \"%d\" does not exist in parser", tokentype)));
}
/*
 * get_ts_parser_namelist creates, based on the tsparserOid, the namelist that
 * identifies the parser in a fully qualified manner, regardless of whether the
 * schema is on the search_path.
*/
static List *
get_ts_parser_namelist(Oid tsparserOid)
{
HeapTuple tup = SearchSysCache1(TSPARSEROID, ObjectIdGetDatum(tsparserOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search parser %u",
tsparserOid);
}
Form_pg_ts_parser parser = (Form_pg_ts_parser) GETSTRUCT(tup);
char *schema = get_namespace_name(parser->prsnamespace);
char *parserName = pstrdup(NameStr(parser->prsname));
List *names = list_make2(makeString(schema), makeString(parserName));
ReleaseSysCache(tup);
return names;
}
/*
* CreateTextSearchConfigurationObjectAddress resolves the ObjectAddress for the object
 * being created. If missing_ok is false the function will error, explaining to the user
 * that the text search configuration described in the statement doesn't exist.
*/
ObjectAddress
CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok)
{
DefineStmt *stmt = castNode(DefineStmt, node);
Assert(stmt->kind == OBJECT_TSCONFIGURATION);
Oid objid = get_ts_config_oid(stmt->defnames, missing_ok);
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, objid);
return address;
}
/*
* RenameTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT
* SEARCH CONFIGURATION being renamed. Optionally errors if the configuration does not
* exist based on the missing_ok flag passed in by the caller.
*/
ObjectAddress
RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok);
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, objid);
return address;
}
/*
* AlterTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT
* SEARCH CONFIGURATION being altered. Optionally errors if the configuration does not
* exist based on the missing_ok flag passed in by the caller.
*/
ObjectAddress
AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
{
AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node);
Oid objid = get_ts_config_oid(stmt->cfgname, missing_ok);
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, objid);
return address;
}
/*
* AlterTextSearchConfigurationSchemaStmtObjectAddress resolves the ObjectAddress for the
* TEXT SEARCH CONFIGURATION being moved to a different schema. Optionally errors if the
* configuration does not exist based on the missing_ok flag passed in by the caller.
*
 * This can be called either before or after the schema move has been executed, hence
 * the extra checking before the error might be thrown. Errors for non-existing schemas
 * in edge cases will be raised by postgres while executing the move.
*/
ObjectAddress
AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
Oid objid = get_ts_config_oid(castNode(List, stmt->object), true);
if (!OidIsValid(objid))
{
/*
 * couldn't find the text search configuration, it might have already been moved to
 * the new schema; we construct a new qualified name that uses the new schema to
 * search in.
*/
char *schemaname = NULL;
char *config_name = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaname, &config_name);
char *newSchemaName = stmt->newschema;
List *names = list_make2(makeString(newSchemaName), makeString(config_name));
objid = get_ts_config_oid(names, true);
if (!missing_ok && !OidIsValid(objid))
{
/*
* if the text search config id is still invalid we couldn't find it, error
* with the same message postgres would error with if missing_ok is false
* (not ok to miss)
*/
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("text search configuration \"%s\" does not exist",
NameListToString(castNode(List, stmt->object)))));
}
}
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, objid);
return address;
}
/*
* TextSearchConfigurationCommentObjectAddress resolves the ObjectAddress for the TEXT
* SEARCH CONFIGURATION on which the comment is placed. Optionally errors if the
* configuration does not exist based on the missing_ok flag passed in by the caller.
*/
ObjectAddress
TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok)
{
CommentStmt *stmt = castNode(CommentStmt, node);
Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok);
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, objid);
return address;
}
/*
* AlterTextSearchConfigurationOwnerObjectAddress resolves the ObjectAddress for the TEXT
* SEARCH CONFIGURATION for which the owner is changed. Optionally errors if the
* configuration does not exist based on the missing_ok flag passed in by the caller.
*/
ObjectAddress
AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Relation relation = NULL;
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
return get_object_address(stmt->objectType, stmt->object, &relation, AccessShareLock,
missing_ok);
}
/*
 * GenerateBackupNameForTextSearchConfiguration generates a name that is not already in
 * use, which can be used to rename an existing TEXT SEARCH CONFIGURATION. This allows a
 * configuration with a specific name to be created even when that name would otherwise
 * collide with the existing object.
*/
char *
GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address)
{
Assert(address->classId == TSConfigRelationId);
List *names = get_ts_config_namelist(address->objectId);
RangeVar *rel = makeRangeVarFromNameList(names);
char *newName = palloc0(NAMEDATALEN);
char suffix[NAMEDATALEN] = { 0 };
char *baseName = rel->relname;
int baseLength = strlen(baseName);
int count = 0;
while (true)
{
int suffixLength = SafeSnprintf(suffix, NAMEDATALEN - 1, "(citus_backup_%d)",
count);
/* trim the base name at the end to leave space for the suffix and trailing \0 */
baseLength = Min(baseLength, NAMEDATALEN - suffixLength - 1);
/* clear newName before copying the potentially trimmed baseName and suffix */
memset(newName, 0, NAMEDATALEN);
strncpy_s(newName, NAMEDATALEN, baseName, baseLength);
strncpy_s(newName + baseLength, NAMEDATALEN - baseLength, suffix,
suffixLength);
rel->relname = newName;
List *newNameList = MakeNameListFromRangeVar(rel);
Oid tsconfigOid = get_ts_config_oid(newNameList, true);
if (!OidIsValid(tsconfigOid))
{
return newName;
}
count++;
}
}
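/*
 * For instance (name illustrative), an existing configuration named "my_config"
 * would be renamed to "my_config(citus_backup_0)"; if that name is also taken the
 * counter is incremented until a free name is found, trimming the base name so
 * the result still fits in NAMEDATALEN.
 */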

View File

@ -267,13 +267,17 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement)
ErrorIfIllegallyChangingKnownShard(relationId);
if (IsCitusTable(relationId) && IsForeignTable(relationId))
/*
 * We only allow truncating foreign tables that are added to metadata on the
 * coordinator, as user mappings are not propagated.
*/
if (IsForeignTable(relationId) &&
IsCitusTableType(relationId, CITUS_LOCAL_TABLE) &&
!IsCoordinator())
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("truncating distributed foreign tables is "
"currently unsupported"),
errhint("Consider undistributing table before TRUNCATE, "
"and then distribute or add to metadata again")));
errmsg("truncating foreign tables that are added to metadata "
"can only be excuted on the coordinator")));
}
}
}

View File

@ -92,7 +92,6 @@ bool EnableCreateTypePropagation = true;
static List * FilterNameListForDistributedTypes(List *objects, bool missing_ok);
static List * TypeNameListToObjectAddresses(List *objects);
static TypeName * MakeTypeNameFromRangeVar(const RangeVar *relation);
static void EnsureSequentialModeForTypeDDL(void);
static Oid GetTypeOwner(Oid typeOid);
/* recreate functions */
@ -158,7 +157,7 @@ PreprocessCompositeTypeStmt(Node *node, const char *queryString,
* when we allow propagation within a transaction block we should make sure to only
* allow this in sequential mode
*/
EnsureSequentialModeForTypeDDL();
EnsureSequentialMode(OBJECT_TYPE);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) compositeTypeStmtSql,
@ -223,7 +222,7 @@ PreprocessAlterTypeStmt(Node *node, const char *queryString,
* regardless if in a transaction or not. If we would not propagate the alter
* statement the types would be different on worker and coordinator.
*/
EnsureSequentialModeForTypeDDL();
EnsureSequentialMode(OBJECT_TYPE);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) alterTypeStmtSql,
@ -266,7 +265,7 @@ PreprocessCreateEnumStmt(Node *node, const char *queryString,
* when we allow propagation within a transaction block we should make sure to only
* allow this in sequential mode
*/
EnsureSequentialModeForTypeDDL();
EnsureSequentialMode(OBJECT_TYPE);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
@ -325,7 +324,7 @@ PreprocessAlterEnumStmt(Node *node, const char *queryString,
* (adding values to an enum can not run in a transaction anyway and would error by
* postgres already).
*/
EnsureSequentialModeForTypeDDL();
EnsureSequentialMode(OBJECT_TYPE);
/*
* managing types can only be done on the coordinator if ddl propagation is on. when
@ -405,7 +404,7 @@ PreprocessDropTypeStmt(Node *node, const char *queryString,
char *dropStmtSql = DeparseTreeNode((Node *) stmt);
stmt->objects = oldTypes;
EnsureSequentialModeForTypeDDL();
EnsureSequentialMode(OBJECT_TYPE);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
@ -442,7 +441,7 @@ PreprocessRenameTypeStmt(Node *node, const char *queryString,
/* deparse sql */
const char *renameStmtSql = DeparseTreeNode(node);
EnsureSequentialModeForTypeDDL();
EnsureSequentialMode(OBJECT_TYPE);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
@ -480,7 +479,7 @@ PreprocessRenameTypeAttributeStmt(Node *node, const char *queryString,
const char *sql = DeparseTreeNode((Node *) stmt);
EnsureSequentialModeForTypeDDL();
EnsureSequentialMode(OBJECT_TYPE);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
@ -513,7 +512,7 @@ PreprocessAlterTypeSchemaStmt(Node *node, const char *queryString,
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
EnsureSequentialModeForTypeDDL();
EnsureSequentialMode(OBJECT_TYPE);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
@ -572,7 +571,7 @@ PreprocessAlterTypeOwnerStmt(Node *node, const char *queryString,
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
EnsureSequentialModeForTypeDDL();
EnsureSequentialMode(OBJECT_TYPE);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
@ -958,6 +957,20 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress)
return NIL;
}
HeapTuple tup = SearchSysCacheCopy1(TYPEOID, ObjectIdGetDatum(typeAddress->objectId));
if (!HeapTupleIsValid(tup))
{
elog(ERROR, "cache lookup failed for type %u", typeAddress->objectId);
}
/* Don't send any command if the type is a table's row type */
Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
if (typTup->typtype == TYPTYPE_COMPOSITE &&
get_rel_relkind(typTup->typrelid) != RELKIND_COMPOSITE_TYPE)
{
return NIL;
}
Node *stmt = CreateTypeStmtByObjectAddress(typeAddress);
/* capture ddl command for recreation and wrap in create if not exists construct */
@ -1116,47 +1129,6 @@ MakeTypeNameFromRangeVar(const RangeVar *relation)
}
/*
* EnsureSequentialModeForTypeDDL makes sure that the current transaction is already in
* sequential mode, or can still safely be put in sequential mode, it errors if that is
* not possible. The error contains information for the user to retry the transaction with
* sequential mode set from the beginning.
*
* As types are node scoped objects there exists only 1 instance of the type used by
* potentially multiple shards. To make sure all shards in the transaction can interact
* with the type the type needs to be visible on all connections used by the transaction,
* meaning we can only use 1 connection per node.
*/
static void
EnsureSequentialModeForTypeDDL(void)
{
if (!IsTransactionBlock())
{
/* we do not need to switch to sequential mode if we are not in a transaction */
return;
}
if (ParallelQueryExecutedInTransaction())
{
ereport(ERROR, (errmsg("cannot create or modify type because there was a "
"parallel operation on a distributed table in the "
"transaction"),
errdetail("When creating or altering a type, Citus needs to "
"perform all operations over a single connection per "
"node to ensure consistency."),
errhint("Try re-running the transaction with "
"\"SET LOCAL citus.multi_shard_modify_mode TO "
"\'sequential\';\"")));
}
ereport(DEBUG1, (errmsg("switching to sequential query execution mode"),
errdetail("Type is created or altered. To make sure subsequent "
"commands see the type correctly we need to make sure to "
"use only one connection for all future commands")));
SetLocalMultiShardModifyModeToSequential();
}
/*
* ShouldPropagateTypeCreate returns if we should propagate the creation of a type.
*

View File

@ -10,6 +10,7 @@
#include "postgres.h"
#include "distributed/backend_data.h"
#include "distributed/citus_safe_lib.h"
#include "distributed/connection_management.h"
#include "distributed/metadata_cache.h"
@ -232,6 +233,10 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
*/
char nodePortString[12] = "";
StringInfo applicationName = makeStringInfo();
appendStringInfo(applicationName, "%s%ld", CITUS_APPLICATION_NAME_PREFIX,
GetGlobalPID());
/*
* This function has three sections:
* - Initialize the keywords and values (to be copied later) of global parameters
@ -260,7 +265,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
key->database,
key->user,
GetDatabaseEncodingName(),
CITUS_APPLICATION_NAME
applicationName->data
};
/*

View File

@ -19,6 +19,7 @@
#include "access/hash.h"
#include "commands/dbcommands.h"
#include "distributed/backend_data.h"
#include "distributed/connection_management.h"
#include "distributed/errormessage.h"
#include "distributed/error_codes.h"
@ -1459,7 +1460,7 @@ IsRebalancerInternalBackend(void)
bool
IsCitusInternalBackend(void)
{
return application_name && strcmp(application_name, CITUS_APPLICATION_NAME) == 0;
return ExtractGlobalPID(application_name) != INVALID_CITUS_INTERNAL_BACKEND_GPID;
}

View File

@ -17,6 +17,7 @@
#include "distributed/commands.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
/*
* DeparseTreeNode aims to be the inverse of postgres' ParseTreeNode. Currently with
@ -35,3 +36,20 @@ DeparseTreeNode(Node *stmt)
return ops->deparse(stmt);
}
/*
 * DeparseTreeNodes deparses all stmts in the list from the statement data structure into
 * sql statements.
*/
List *
DeparseTreeNodes(List *stmts)
{
List *sqls = NIL;
Node *stmt = NULL;
foreach_ptr(stmt, stmts)
{
sqls = lappend(sqls, DeparseTreeNode(stmt));
}
return sqls;
}

View File

@ -14,16 +14,47 @@
#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "lib/stringinfo.h"
#include "nodes/nodes.h"
#include "utils/builtins.h"
static void AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt);
static void AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt);
static void AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSchemaPrivileges(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSchemaSchemas(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSchemaGrantees(StringInfo buf, GrantStmt *stmt);
static void AppendAlterSchemaRenameStmt(StringInfo buf, RenameStmt *stmt);
char *
DeparseCreateSchemaStmt(Node *node)
{
CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node);
StringInfoData str = { 0 };
initStringInfo(&str);
AppendCreateSchemaStmt(&str, stmt);
return str.data;
}
char *
DeparseDropSchemaStmt(Node *node)
{
DropStmt *stmt = castNode(DropStmt, node);
StringInfoData str = { 0 };
initStringInfo(&str);
AppendDropSchemaStmt(&str, stmt);
return str.data;
}
char *
DeparseGrantOnSchemaStmt(Node *node)
{
@ -53,6 +84,70 @@ DeparseAlterSchemaRenameStmt(Node *node)
}
static void
AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt)
{
if (stmt->schemaElts != NIL)
{
elog(ERROR, "schema creating is not supported with other create commands");
}
if (stmt->schemaname == NULL)
{
elog(ERROR, "schema name should be specified");
}
appendStringInfoString(buf, "CREATE SCHEMA ");
if (stmt->if_not_exists)
{
appendStringInfoString(buf, "IF NOT EXISTS ");
}
appendStringInfo(buf, "%s ", quote_identifier(stmt->schemaname));
if (stmt->authrole != NULL)
{
appendStringInfo(buf, "AUTHORIZATION %s", RoleSpecString(stmt->authrole, true));
}
}
static void
AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt)
{
Assert(stmt->removeType == OBJECT_SCHEMA);
appendStringInfoString(buf, "DROP SCHEMA ");
if (stmt->missing_ok)
{
appendStringInfoString(buf, "IF EXISTS ");
}
Value *schemaValue = NULL;
foreach_ptr(schemaValue, stmt->objects)
{
const char *schemaString = quote_identifier(strVal(schemaValue));
appendStringInfo(buf, "%s", schemaString);
if (schemaValue != llast(stmt->objects))
{
appendStringInfoString(buf, ", ");
}
}
if (stmt->behavior == DROP_CASCADE)
{
appendStringInfoString(buf, " CASCADE");
}
else if (stmt->behavior == DROP_RESTRICT)
{
appendStringInfoString(buf, " RESTRICT");
}
}
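/*
 * Example output of the two append helpers above (schema names are illustrative):
 *
 *   CREATE SCHEMA IF NOT EXISTS sales AUTHORIZATION sales_admin
 *   DROP SCHEMA IF EXISTS sales, archive CASCADE
 */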
static void
AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt)
{

View File

@ -0,0 +1,377 @@
/*-------------------------------------------------------------------------
*
* deparse_text_search.c
* All routines to deparse text search statements.
* This file contains all entry points specific for text search statement deparsing.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "catalog/namespace.h"
#include "utils/builtins.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
static void AppendDefElemList(StringInfo buf, List *defelms);
static void AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes);
static void AppendStringInfoDictnames(StringInfo buf, List *dicts);
/*
* DeparseCreateTextSearchStmt returns the sql for a DefineStmt defining a TEXT SEARCH
* CONFIGURATION
*
 * Although the two arguments that can be passed in are mutually exclusive in the syntax,
 * the deparser will produce syntactically correct output even if multiple definitions
 * are provided.
*/
char *
DeparseCreateTextSearchStmt(Node *node)
{
DefineStmt *stmt = castNode(DefineStmt, node);
StringInfoData buf = { 0 };
initStringInfo(&buf);
const char *identifier = NameListToQuotedString(stmt->defnames);
appendStringInfo(&buf, "CREATE TEXT SEARCH CONFIGURATION %s ", identifier);
appendStringInfoString(&buf, "(");
AppendDefElemList(&buf, stmt->definition);
appendStringInfoString(&buf, ");");
return buf.data;
}
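/*
 * Example output for a configuration using the default parser (configuration
 * name is illustrative):
 *
 *   CREATE TEXT SEARCH CONFIGURATION public.my_config (parser = pg_catalog."default");
 */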
/*
* AppendDefElemList specialization to append a comma separated list of definitions to a
* define statement.
*
* Currently only supports String and TypeName entries. Will error on others.
*/
static void
AppendDefElemList(StringInfo buf, List *defelems)
{
DefElem *defelem = NULL;
bool first = true;
foreach_ptr(defelem, defelems)
{
if (!first)
{
appendStringInfoString(buf, ", ");
}
first = false;
/* extract identifier from defelem */
const char *identifier = NULL;
switch (nodeTag(defelem->arg))
{
case T_String:
{
identifier = quote_identifier(strVal(defelem->arg));
break;
}
case T_TypeName:
{
TypeName *typeName = castNode(TypeName, defelem->arg);
identifier = NameListToQuotedString(typeName->names);
break;
}
default:
{
ereport(ERROR, (errmsg("unexpected argument during deparsing of "
"TEXT SEARCH CONFIGURATION definition")));
}
}
/* stringify */
appendStringInfo(buf, "%s = %s", defelem->defname, identifier);
}
}
/*
* DeparseDropTextSearchConfigurationStmt returns the sql representation for a DROP TEXT
 * SEARCH CONFIGURATION ... statement. Supports dropping multiple configurations at once.
*/
char *
DeparseDropTextSearchConfigurationStmt(Node *node)
{
DropStmt *stmt = castNode(DropStmt, node);
Assert(stmt->removeType == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfoString(&buf, "DROP TEXT SEARCH CONFIGURATION ");
List *nameList = NIL;
bool first = true;
foreach_ptr(nameList, stmt->objects)
{
if (!first)
{
appendStringInfoString(&buf, ", ");
}
first = false;
appendStringInfoString(&buf, NameListToQuotedString(nameList));
}
if (stmt->behavior == DROP_CASCADE)
{
appendStringInfoString(&buf, " CASCADE");
}
appendStringInfoString(&buf, ";");
return buf.data;
}
/*
 * DeparseRenameTextSearchConfigurationStmt returns the sql representation of an ALTER TEXT
* SEARCH CONFIGURATION ... RENAME TO ... statement.
*/
char *
DeparseRenameTextSearchConfigurationStmt(Node *node)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
char *identifier = NameListToQuotedString(castNode(List, stmt->object));
appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s RENAME TO %s;",
identifier, quote_identifier(stmt->newname));
return buf.data;
}
/*
 * DeparseAlterTextSearchConfigurationStmt returns the sql representation of any generic
 * ALTER TEXT SEARCH CONFIGURATION ... statement. The statements supported include:
 * - ALTER TEXT SEARCH CONFIGURATION ... ADD MAPPING FOR [, ...] WITH [, ...]
 * - ALTER TEXT SEARCH CONFIGURATION ... ALTER MAPPING FOR [, ...] WITH [, ...]
 * - ALTER TEXT SEARCH CONFIGURATION ... ALTER MAPPING REPLACE ... WITH ...
 * - ALTER TEXT SEARCH CONFIGURATION ... ALTER MAPPING FOR [, ...] REPLACE ... WITH ...
 * - ALTER TEXT SEARCH CONFIGURATION ... DROP MAPPING [ IF EXISTS ] FOR ...
*/
char *
DeparseAlterTextSearchConfigurationStmt(Node *node)
{
AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node);
StringInfoData buf = { 0 };
initStringInfo(&buf);
char *identifier = NameListToQuotedString(castNode(List, stmt->cfgname));
appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s", identifier);
switch (stmt->kind)
{
case ALTER_TSCONFIG_ADD_MAPPING:
{
appendStringInfoString(&buf, " ADD MAPPING FOR ");
AppendStringInfoTokentypeList(&buf, stmt->tokentype);
appendStringInfoString(&buf, " WITH ");
AppendStringInfoDictnames(&buf, stmt->dicts);
break;
}
case ALTER_TSCONFIG_ALTER_MAPPING_FOR_TOKEN:
{
appendStringInfoString(&buf, " ALTER MAPPING FOR ");
AppendStringInfoTokentypeList(&buf, stmt->tokentype);
appendStringInfoString(&buf, " WITH ");
AppendStringInfoDictnames(&buf, stmt->dicts);
break;
}
case ALTER_TSCONFIG_REPLACE_DICT:
case ALTER_TSCONFIG_REPLACE_DICT_FOR_TOKEN:
{
appendStringInfoString(&buf, " ALTER MAPPING");
if (list_length(stmt->tokentype) > 0)
{
appendStringInfoString(&buf, " FOR ");
AppendStringInfoTokentypeList(&buf, stmt->tokentype);
}
if (list_length(stmt->dicts) != 2)
{
elog(ERROR, "unexpected number of dictionaries while deparsing ALTER "
"TEXT SEARCH CONFIGURATION ... ALTER MAPPING [FOR ...] REPLACE "
"statement.");
}
appendStringInfo(&buf, " REPLACE %s",
NameListToQuotedString(linitial(stmt->dicts)));
appendStringInfo(&buf, " WITH %s",
NameListToQuotedString(lsecond(stmt->dicts)));
break;
}
case ALTER_TSCONFIG_DROP_MAPPING:
{
appendStringInfoString(&buf, " DROP MAPPING");
if (stmt->missing_ok)
{
appendStringInfoString(&buf, " IF EXISTS");
}
appendStringInfoString(&buf, " FOR ");
AppendStringInfoTokentypeList(&buf, stmt->tokentype);
break;
}
default:
{
elog(ERROR, "unable to deparse unsupported ALTER TEXT SEARCH STATEMENT");
}
}
appendStringInfoString(&buf, ";");
return buf.data;
}
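/*
 * Sample output for the REPLACE branch above (configuration, token, and
 * dictionary names are illustrative only):
 *
 *   ALTER TEXT SEARCH CONFIGURATION public.my_config
 *     ALTER MAPPING FOR asciiword REPLACE english_stem WITH simple;
 */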
/*
* DeparseAlterTextSearchConfigurationSchemaStmt returns the sql statement representing
* ALTER TEXT SEARCH CONFIGURATION ... SET SCHEMA ... statements.
*/
char *
DeparseAlterTextSearchConfigurationSchemaStmt(Node *node)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s SET SCHEMA %s;",
NameListToQuotedString(castNode(List, stmt->object)),
quote_identifier(stmt->newschema));
return buf.data;
}
/*
* DeparseTextSearchConfigurationCommentStmt returns the sql statement representing
* COMMENT ON TEXT SEARCH CONFIGURATION ... IS ...
*/
char *
DeparseTextSearchConfigurationCommentStmt(Node *node)
{
CommentStmt *stmt = castNode(CommentStmt, node);
Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfo(&buf, "COMMENT ON TEXT SEARCH CONFIGURATION %s IS ",
NameListToQuotedString(castNode(List, stmt->object)));
if (stmt->comment == NULL)
{
appendStringInfoString(&buf, "NULL");
}
else
{
appendStringInfoString(&buf, quote_literal_cstr(stmt->comment));
}
appendStringInfoString(&buf, ";");
return buf.data;
}
/*
 * AppendStringInfoTokentypeList specializes in adding a comma-separated list of
 * token_type names to TEXT SEARCH CONFIGURATION commands.
*/
static void
AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes)
{
Value *tokentype = NULL;
bool first = true;
foreach_ptr(tokentype, tokentypes)
{
if (nodeTag(tokentype) != T_String)
{
elog(ERROR,
"unexpected tokentype for deparsing in text search configuration");
}
if (!first)
{
appendStringInfoString(buf, ", ");
}
first = false;
appendStringInfoString(buf, strVal(tokentype));
}
}
/*
* AppendStringInfoDictnames specializes in appending a comma separated list of
* dictionaries to TEXT SEARCH CONFIGURATION commands.
*/
static void
AppendStringInfoDictnames(StringInfo buf, List *dicts)
{
List *dictNames = NIL;
bool first = true;
foreach_ptr(dictNames, dicts)
{
if (!first)
{
appendStringInfoString(buf, ", ");
}
first = false;
char *dictIdentifier = NameListToQuotedString(dictNames);
appendStringInfoString(buf, dictIdentifier);
}
}
/*
* DeparseAlterTextSearchConfigurationOwnerStmt returns the sql statement representing
* ALTER TEXT SEARCH CONFIGURATION ... OWNER TO ... commands.
*/
char *
DeparseAlterTextSearchConfigurationOwnerStmt(Node *node)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s OWNER TO %s;",
NameListToQuotedString(castNode(List, stmt->object)),
RoleSpecString(stmt->newowner, true));
return buf.data;
}
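For completeness, the SET SCHEMA, COMMENT and OWNER TO deparsers above emit statements of roughly this shape; the schema, role name, and comment text are placeholders.
ALTER TEXT SEARCH CONFIGURATION public.my_config SET SCHEMA text_search;
COMMENT ON TEXT SEARCH CONFIGURATION text_search.my_config IS 'tokenizer for product descriptions';
ALTER TEXT SEARCH CONFIGURATION text_search.my_config OWNER TO my_role;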


@ -0,0 +1,278 @@
/*-------------------------------------------------------------------------
*
* qualify_text_search_stmts.c
* Functions specialized in fully qualifying all text search statements. These
* functions are dispatched from qualify.c
*
* Fully qualifying text search statements consists of adding the schema name
* to the subject of the types as well as any other branch of the parsetree.
*
* The goal is that the deparser functions for these statements can serialize the
* statement without any external lookups.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/namespace.h"
#include "catalog/pg_ts_config.h"
#include "catalog/pg_ts_dict.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
static Oid get_ts_config_namespace(Oid tsconfigOid);
static Oid get_ts_dict_namespace(Oid tsdictOid);
/*
* QualifyDropTextSearchConfigurationStmt adds any missing schema names to text search
* configurations being dropped. All configurations are expected to exist before fully
* qualifying the statement. Errors will be raised for non-existing objects; such
* objects are expected not to be distributed.
*/
void
QualifyDropTextSearchConfigurationStmt(Node *node)
{
DropStmt *stmt = castNode(DropStmt, node);
Assert(stmt->removeType == OBJECT_TSCONFIGURATION);
List *qualifiedObjects = NIL;
List *objName = NIL;
foreach_ptr(objName, stmt->objects)
{
char *schemaName = NULL;
char *tsconfigName = NULL;
DeconstructQualifiedName(objName, &schemaName, &tsconfigName);
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(objName, false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
objName = list_make2(makeString(schemaName),
makeString(tsconfigName));
}
qualifiedObjects = lappend(qualifiedObjects, objName);
}
stmt->objects = qualifiedObjects;
}
/*
* QualifyAlterTextSearchConfigurationStmt adds the schema name (if missing) to the name
* of the text search configurations, as well as the dictionaries referenced.
*/
void
QualifyAlterTextSearchConfigurationStmt(Node *node)
{
AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(stmt->cfgname, &schemaName, &objName);
/* fully qualify the cfgname being altered */
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(stmt->cfgname, false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->cfgname = list_make2(makeString(schemaName),
makeString(objName));
}
/* fully qualify the dicts */
bool useNewDicts = false;
List *dicts = NULL;
List *dictName = NIL;
foreach_ptr(dictName, stmt->dicts)
{
DeconstructQualifiedName(dictName, &schemaName, &objName);
/* fully qualify the dict name */
if (!schemaName)
{
Oid dictOid = get_ts_dict_oid(dictName, false);
Oid namespaceOid = get_ts_dict_namespace(dictOid);
schemaName = get_namespace_name(namespaceOid);
useNewDicts = true;
dictName = list_make2(makeString(schemaName), makeString(objName));
}
dicts = lappend(dicts, dictName);
}
if (useNewDicts)
{
/* swap original dicts with the new list */
stmt->dicts = dicts;
}
else
{
/* we don't use the new list, everything was already qualified; free it */
list_free(dicts);
}
}
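As a sketch of the effect, assuming a configuration my_config that lives in schema public and a dictionary german_stem that lives in pg_catalog, qualification turns the first statement below into the second before it is deparsed and shipped to workers.
-- as typed by the user (unqualified)
ALTER TEXT SEARCH CONFIGURATION my_config ALTER MAPPING FOR word WITH german_stem;
-- after qualification, the equivalent fully qualified form
ALTER TEXT SEARCH CONFIGURATION public.my_config ALTER MAPPING FOR word WITH pg_catalog.german_stem;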
/*
* QualifyRenameTextSearchConfigurationStmt adds the schema name (if missing) to the
* configuration being renamed. The new name is kept without a schema name since this
* command cannot be used to change the schema of a configuration.
*/
void
QualifyRenameTextSearchConfigurationStmt(Node *node)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);
/* fully qualify the cfgname being altered */
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->object = (Node *) list_make2(makeString(schemaName),
makeString(objName));
}
}
/*
* QualifyAlterTextSearchConfigurationSchemaStmt adds the schema name (if missing) to the
* text search configuration being moved to a new schema.
*/
void
QualifyAlterTextSearchConfigurationSchemaStmt(Node *node)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->object = (Node *) list_make2(makeString(schemaName),
makeString(objName));
}
}
/*
* QualifyTextSearchConfigurationCommentStmt adds the schema name (if missing) to the
* configuration name on which the comment is created.
*/
void
QualifyTextSearchConfigurationCommentStmt(Node *node)
{
CommentStmt *stmt = castNode(CommentStmt, node);
Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->object = (Node *) list_make2(makeString(schemaName),
makeString(objName));
}
}
/*
* QualifyAlterTextSearchConfigurationOwnerStmt adds the schema name (if missing) to the
* configuration for which the owner is changing.
*/
void
QualifyAlterTextSearchConfigurationOwnerStmt(Node *node)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->object = (Node *) list_make2(makeString(schemaName),
makeString(objName));
}
}
/*
* get_ts_config_namespace returns the oid of the namespace which is housing the text
* search configuration identified by tsconfigOid.
*/
static Oid
get_ts_config_namespace(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (HeapTupleIsValid(tup))
{
Form_pg_ts_config cfgform = (Form_pg_ts_config) GETSTRUCT(tup);
Oid namespaceOid = cfgform->cfgnamespace;
ReleaseSysCache(tup);
return namespaceOid;
}
return InvalidOid;
}
/*
* get_ts_dict_namespace returns the oid of the namespace which is housing the text
* search dictionary identified by tsdictOid.
*/
static Oid
get_ts_dict_namespace(Oid tsdictOid)
{
HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid));
if (HeapTupleIsValid(tup))
{
Form_pg_ts_dict cfgform = (Form_pg_ts_dict) GETSTRUCT(tup);
Oid namespaceOid = cfgform->dictnamespace;
ReleaseSysCache(tup);
return namespaceOid;
}
return InvalidOid;
}


@ -32,6 +32,7 @@
#include "distributed/distributed_planner.h"
#include "distributed/multi_router_planner.h"
#include "distributed/multi_server_executor.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/resource_lock.h"
#include "distributed/transaction_management.h"
#include "distributed/version_compat.h"
@ -81,6 +82,7 @@ int ExecutorLevel = 0;
/* local function forward declarations */
static Relation StubRelation(TupleDesc tupleDescriptor);
static char * GetObjectTypeString(ObjectType objType);
static bool AlterTableConstraintCheck(QueryDesc *queryDesc);
static List * FindCitusCustomScanStates(PlanState *planState);
static bool CitusCustomScanStateWalker(PlanState *planState,
@ -235,16 +237,17 @@ CitusExecutorRun(QueryDesc *queryDesc,
* transactions.
*/
CitusTableCacheFlushInvalidatedEntries();
/*
* Within a 2PC, when a function is delegated to a remote node, we pin
* the distribution argument as the shard key for all the SQL in the
* function's block. The restriction is imposed to not access other
* nodes from the current node and violate the transactional integrity
* of the 2PC. Now that the query is ending, reset the shard key to NULL.
*/
ResetAllowedShardKeyValue();
InTopLevelDelegatedFunctionCall = false;
}
/*
* Within a 2PC, when a function is delegated to a remote node, we pin
* the distribution argument as the shard key for all the SQL in the
* function's block. The restriction is imposed to not access other
* nodes from the current node, and violate the transactional integrity
* of the 2PC. Now that the query is ending, reset the shard key to NULL.
*/
CheckAndResetAllowedShardKeyValueIfNeeded();
}
PG_CATCH();
{
@ -258,13 +261,15 @@ CitusExecutorRun(QueryDesc *queryDesc,
if (ExecutorLevel == 0 && PlannerLevel == 0)
{
/*
* In case of an exception, reset the pinned shard-key, for more
* details see the function header.
*/
ResetAllowedShardKeyValue();
InTopLevelDelegatedFunctionCall = false;
}
/*
* In case of an exception, reset the pinned shard-key, for more
* details see the function header.
*/
CheckAndResetAllowedShardKeyValueIfNeeded();
PG_RE_THROW();
}
PG_END_TRY();
@ -691,6 +696,103 @@ SetLocalMultiShardModifyModeToSequential()
}
/*
* EnsureSequentialMode makes sure that the current transaction is already in
* sequential mode, or can still safely be put in sequential mode; it errors if that is
* not possible. The error contains information for the user to retry the transaction with
* sequential mode set from the beginning.
*
* Takes an ObjectType to use in the error/debug messages.
*/
void
EnsureSequentialMode(ObjectType objType)
{
char *objTypeString = GetObjectTypeString(objType);
if (ParallelQueryExecutedInTransaction())
{
ereport(ERROR, (errmsg("cannot run %s command because there was a "
"parallel operation on a distributed table in the "
"transaction", objTypeString),
errdetail("When running command on/for a distributed %s, Citus "
"needs to perform all operations over a single "
"connection per node to ensure consistency.",
objTypeString),
errhint("Try re-running the transaction with "
"\"SET LOCAL citus.multi_shard_modify_mode TO "
"\'sequential\';\"")));
}
ereport(DEBUG1, (errmsg("switching to sequential query execution mode"),
errdetail(
"A command for a distributed %s is run. To make sure subsequent "
"commands see the %s correctly we need to make sure to "
"use only one connection for all future commands",
objTypeString, objTypeString)));
SetLocalMultiShardModifyModeToSequential();
}
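The hint above corresponds to a transaction of roughly this shape, where the GUC is set before the first distributed command; the table and DDL below are placeholders.
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
-- subsequent commands in this transaction use a single connection per node
ALTER TABLE dist_table ADD COLUMN note text;
COMMIT;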
/*
* GetObjectTypeString takes an ObjectType and returns the string version of it.
* We (for now) call this function only in EnsureSequentialMode, and use the returned
* string to generate error/debug messages.
*
* If GetObjectTypeString gets called with an ObjectType that is not in the switch
* statement, the function will return the string "object", and emit a debug message.
* In that case, make sure you've added the newly supported type to the switch statement.
*/
static char *
GetObjectTypeString(ObjectType objType)
{
switch (objType)
{
case OBJECT_COLLATION:
{
return "collation";
}
case OBJECT_DATABASE:
{
return "database";
}
case OBJECT_EXTENSION:
{
return "extension";
}
case OBJECT_FUNCTION:
{
return "function";
}
case OBJECT_SCHEMA:
{
return "schema";
}
case OBJECT_TSCONFIGURATION:
{
return "text search configuration";
}
case OBJECT_TYPE:
{
return "type";
}
default:
{
ereport(DEBUG1, (errmsg("unsupported object type"),
errdetail("Please add string conversion for the object.")));
return "object";
}
}
}
/*
* AlterTableConstraintCheck returns if the given query is an ALTER TABLE
* constraint check query.


@ -124,6 +124,7 @@ typedef struct ViewDependencyNode
static List * GetRelationSequenceDependencyList(Oid relationId);
static List * GetRelationTriggerFunctionDependencyList(Oid relationId);
static List * GetRelationStatsSchemaDependencyList(Oid relationId);
static List * GetRelationIndicesDependencyList(Oid relationId);
static DependencyDefinition * CreateObjectAddressDependencyDef(Oid classId, Oid objectId);
static List * CreateObjectAddressDependencyDefList(Oid classId, List *objectIdList);
static ObjectAddress DependencyDefinitionObjectAddress(DependencyDefinition *definition);
@ -155,6 +156,8 @@ static bool FollowAllSupportedDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition);
static bool FollowNewSupportedDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition);
static bool FollowAllDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition);
static void ApplyAddToDependencyList(ObjectAddressCollector *collector,
DependencyDefinition *definition);
static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector,
@ -211,15 +214,42 @@ GetDependenciesForObject(const ObjectAddress *target)
/*
* GetAllDependenciesForObject returns a list of all the ObjectAddresses to be
* created in order before the target object could safely be created on a
* worker. As a caller, you probably need GetDependenciesForObject() which
* eliminates already distributed objects from the returned list.
* GetAllSupportedDependenciesForObject returns a list of all the ObjectAddresses to be
* created in order before the target object could safely be created on a worker, if all
* dependent objects are distributable. As a caller, you probably need to use
* GetDependenciesForObject() which eliminates already distributed objects from the returned
* list.
*
* Some of the objects might already be created on a worker. They should be created
* in an idempotent way.
*/
List *
GetAllSupportedDependenciesForObject(const ObjectAddress *target)
{
ObjectAddressCollector collector = { 0 };
InitObjectAddressCollector(&collector);
RecurseObjectDependencies(*target,
&ExpandCitusSupportedTypes,
&FollowAllSupportedDependencies,
&ApplyAddToDependencyList,
&collector);
return collector.dependencyList;
}
/*
* GetAllDependenciesForObject returns a list of all the dependent objects of the given
* object irrespective of whether the dependent object is supported by Citus or not, if
* the object can be found as dependency with RecurseObjectDependencies and
* ExpandCitusSupportedTypes.
*
* This function will be used to provide meaningful error messages if any dependent
* object for a given object is not supported. If you want to create dependencies for
* an object, you probably need to use GetDependenciesForObject().
*/
List *
GetAllDependenciesForObject(const ObjectAddress *target)
{
ObjectAddressCollector collector = { 0 };
@ -227,7 +257,7 @@ GetAllDependenciesForObject(const ObjectAddress *target)
RecurseObjectDependencies(*target,
&ExpandCitusSupportedTypes,
&FollowAllSupportedDependencies,
&FollowAllDependencies,
&ApplyAddToDependencyList,
&collector);
@ -639,6 +669,11 @@ SupportedDependencyByCitus(const ObjectAddress *address)
return true;
}
case OCLASS_TSCONFIG:
{
return true;
}
case OCLASS_TYPE:
{
switch (get_typtype(address->objectId))
@ -686,7 +721,8 @@ SupportedDependencyByCitus(const ObjectAddress *address)
relKind == RELKIND_RELATION ||
relKind == RELKIND_PARTITIONED_TABLE ||
relKind == RELKIND_FOREIGN_TABLE ||
relKind == RELKIND_SEQUENCE)
relKind == RELKIND_SEQUENCE ||
relKind == RELKIND_INDEX)
{
return true;
}
@ -896,10 +932,61 @@ FollowAllSupportedDependencies(ObjectAddressCollector *collector,
/*
* ApplyAddToDependencyList is an apply function for RecurseObjectDependencies that will collect
* all the ObjectAddresses for pg_depend entries to the context. The context here is
* assumed to be a (ObjectAddressCollector *) to the location where all ObjectAddresses
* will be collected.
* FollowAllDependencies applies filters on pg_depend entries to follow the dependency
* tree of objects in depth-first order. We will visit all objects irrespective of whether
* they are supported by Citus or not.
*/
static bool
FollowAllDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition)
{
if (definition->mode == DependencyPgDepend)
{
/*
* For dependencies found in pg_depend:
*
* Follow only normal and extension dependencies. The latter is used to reach the
* extensions, the objects that directly depend on the extension are eliminated
* during the "apply" phase.
*
* Other dependencies are internal dependencies and managed by postgres.
*/
if (definition->data.pg_depend.deptype != DEPENDENCY_NORMAL &&
definition->data.pg_depend.deptype != DEPENDENCY_EXTENSION)
{
return false;
}
}
/* rest of the tests are to see if we want to follow the actual dependency */
ObjectAddress address = DependencyDefinitionObjectAddress(definition);
/*
* If the object is already in our dependency list we do not have to follow any
* further
*/
if (IsObjectAddressCollected(address, collector))
{
return false;
}
if (CitusExtensionObject(&address))
{
/* following citus extension could complicate role management */
return false;
}
return true;
}
/*
* ApplyAddToDependencyList is an apply function for RecurseObjectDependencies that will
* collect all the ObjectAddresses for pg_depend entries to the context, except when the
* entry is an extension-owned one.
*
* The context here is assumed to be a (ObjectAddressCollector *) to the location where
* all ObjectAddresses will be collected.
*/
static void
ApplyAddToDependencyList(ObjectAddressCollector *collector,
@ -1005,6 +1092,17 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
List *sequenceDependencyList = GetRelationSequenceDependencyList(relationId);
result = list_concat(result, sequenceDependencyList);
/*
* Tables could have indexes. Indexes themselves could have dependencies that
* need to be propagated, e.g. TEXT SEARCH CONFIGURATIONS. Here we add the
* addresses of all indices to the list of objects to visit, so as to make sure we
* create all objects required by the indices before we create the table
* including its indices.
*/
List *indexDependencyList = GetRelationIndicesDependencyList(relationId);
result = list_concat(result, indexDependencyList);
}
default:
@ -1048,6 +1146,28 @@ GetRelationStatsSchemaDependencyList(Oid relationId)
}
/*
* CollectIndexOids implements PGIndexProcessor to create a list of all index oids
*/
static void
CollectIndexOids(Form_pg_index formPgIndex, List **oids, int flags)
{
*oids = lappend_oid(*oids, formPgIndex->indexrelid);
}
/*
* GetRelationIndicesDependencyList creates a list of ObjectAddressDependencies for the
* indexes on a given relation.
*/
static List *
GetRelationIndicesDependencyList(Oid relationId)
{
List *indexIds = ExecuteFunctionOnEachTableIndex(relationId, CollectIndexOids, 0);
return CreateObjectAddressDependencyDefList(RelationRelationId, indexIds);
}
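A minimal scenario in which an index makes a text search configuration a dependency of its table, so the configuration has to be propagated before the table can be distributed on a Citus coordinator (all names below are illustrative):
CREATE TEXT SEARCH CONFIGURATION product_search (COPY = english);
CREATE TABLE products (id bigint, description text);
CREATE INDEX products_description_idx
    ON products USING gin (to_tsvector('product_search', description));
-- distributing the table now also requires product_search on the workers
SELECT create_distributed_table('products', 'id');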
/*
* GetRelationTriggerFunctionDependencyList returns a list of DependencyDefinition
* objects for the functions that the triggers of the relation with relationId depend on.


@ -405,6 +405,21 @@ GetDistributedObjectAddressList(void)
}
/*
* GetRoleSpecObjectForUser creates a RoleSpec object for the given roleOid.
*/
RoleSpec *
GetRoleSpecObjectForUser(Oid roleOid)
{
RoleSpec *roleSpec = makeNode(RoleSpec);
roleSpec->roletype = OidIsValid(roleOid) ? ROLESPEC_CSTRING : ROLESPEC_PUBLIC;
roleSpec->rolename = OidIsValid(roleOid) ? GetUserNameFromId(roleOid, false) : NULL;
roleSpec->location = -1;
return roleSpec;
}
/*
* UpdateDistributedObjectColocationId gets an old and a new colocationId
* and updates the colocationId of all tuples in citus.pg_dist_object which


@ -201,6 +201,9 @@ static bool workerNodeHashValid = false;
/* default value is -1, for coordinator it's 0 and for worker nodes > 0 */
static int32 LocalGroupId = -1;
/* default value is -1, increases with every node starting from 1 */
static int32 LocalNodeId = -1;
/* built first time through in InitializeDistCache */
static ScanKeyData DistPartitionScanKey[1];
static ScanKeyData DistShardScanKey[1];
@ -3618,6 +3621,62 @@ GetLocalGroupId(void)
}
/*
* GetLocalNodeId returns the node identifier of the local node.
*/
int32
GetLocalNodeId(void)
{
InitializeCaches();
/*
* Already set the node id, no need to read the heap again.
*/
if (LocalNodeId != -1)
{
return LocalNodeId;
}
uint32 nodeId = -1;
int32 localGroupId = GetLocalGroupId();
bool includeNodesFromOtherClusters = false;
List *workerNodeList = ReadDistNode(includeNodesFromOtherClusters);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
{
if (workerNode->groupId == localGroupId &&
workerNode->isActive)
{
nodeId = workerNode->nodeId;
break;
}
}
/*
* nodeId is -1 if we cannot find an active node whose group id is
* localGroupId in pg_dist_node.
*/
if (nodeId == -1)
{
elog(DEBUG4, "there is no active node with group id '%d' on pg_dist_node",
localGroupId);
/*
* This is expected if the coordinator is not added to the metadata.
* We'll return 0 in this case so that views can still function almost normally.
*/
nodeId = 0;
}
LocalNodeId = nodeId;
return nodeId;
}
/*
* RegisterLocalGroupIdCacheCallbacks registers the callbacks required to
* maintain LocalGroupId at a consistent value. It's separate from
@ -4019,6 +4078,7 @@ InvalidateMetadataSystemCache(void)
memset(&MetadataCache, 0, sizeof(MetadataCache));
workerNodeHashValid = false;
LocalGroupId = -1;
LocalNodeId = -1;
}
@ -4110,6 +4170,7 @@ InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId)
if (relationId == InvalidOid || relationId == MetadataCache.distNodeRelationId)
{
workerNodeHashValid = false;
LocalNodeId = -1;
}
}


@ -102,7 +102,6 @@ static GrantStmt * GenerateGrantStmtForRights(ObjectType objectType,
bool withGrantOption);
static List * GetObjectsForGrantStmt(ObjectType objectType, Oid objectId);
static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission);
static RoleSpec * GetRoleSpecObjectForGrantStmt(Oid roleOid);
static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid,
AclItem *aclItem);
static void SetLocalEnableMetadataSync(bool state);
@ -1782,7 +1781,7 @@ GenerateGrantStmtForRights(ObjectType objectType,
stmt->objtype = objectType;
stmt->objects = GetObjectsForGrantStmt(objectType, objectId);
stmt->privileges = list_make1(GetAccessPrivObjectForGrantStmt(permission));
stmt->grantees = list_make1(GetRoleSpecObjectForGrantStmt(roleOid));
stmt->grantees = list_make1(GetRoleSpecObjectForUser(roleOid));
stmt->grant_option = withGrantOption;
return stmt;
@ -1831,22 +1830,6 @@ GetAccessPrivObjectForGrantStmt(char *permission)
}
/*
* GetRoleSpecObjectForGrantStmt creates a RoleSpec object for the given roleOid.
* It will be used when creating GrantStmt objects.
*/
static RoleSpec *
GetRoleSpecObjectForGrantStmt(Oid roleOid)
{
RoleSpec *roleSpec = makeNode(RoleSpec);
roleSpec->roletype = OidIsValid(roleOid) ? ROLESPEC_CSTRING : ROLESPEC_PUBLIC;
roleSpec->rolename = OidIsValid(roleOid) ? GetUserNameFromId(roleOid, false) : NULL;
roleSpec->location = -1;
return roleSpec;
}
/*
* SetLocalEnableMetadataSync sets the enable_metadata_sync locally
*/


@ -410,6 +410,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
case OBJECT_FUNCTION:
case OBJECT_PROCEDURE:
case OBJECT_AGGREGATE:
case OBJECT_TSCONFIGURATION:
case OBJECT_TYPE:
case OBJECT_FOREIGN_SERVER:
case OBJECT_SEQUENCE:


@ -313,16 +313,6 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
return NULL;
}
if (fromFuncExpr && !IsMultiStatementTransaction())
{
/*
* For now, let's not push the function from the FROM clause unless it's in a
* multistatement transaction with the forceDelegation flag ON.
*/
ereport(DEBUG2, (errmsg("function from the FROM clause is not pushed")));
return NULL;
}
/* dissuade the planner from trying a generic plan with parameters */
(void) expression_tree_walker((Node *) funcExpr->args, contain_param_walker,
&walkerParamContext);
@ -733,6 +723,16 @@ FunctionInFromClause(List *fromlist, Query *query)
static void
EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId)
{
/*
* If the distribution key is already set, the key is fixed until
* the force-delegation function returns. All nested force-delegation
* functions must use the same key.
*/
if (AllowedDistributionColumnValue.isActive)
{
return;
}
/*
* The saved distribution argument needs to persist through the life
* of the query, both during the planning (where we save) and execution
@ -744,6 +744,7 @@ EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId)
colocationId));
AllowedDistributionColumnValue.distributionColumnValue = copyObject(distArgument);
AllowedDistributionColumnValue.colocationId = colocationId;
AllowedDistributionColumnValue.executorLevel = ExecutorLevel;
AllowedDistributionColumnValue.isActive = true;
MemoryContextSwitchTo(oldcontext);
}
@ -757,15 +758,22 @@ EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId)
* the 2PC. Reset the distribution argument value once the function ends.
*/
void
ResetAllowedShardKeyValue(void)
CheckAndResetAllowedShardKeyValueIfNeeded(void)
{
if (AllowedDistributionColumnValue.isActive)
/*
* If no distribution argument is pinned or the pinned argument was
* set by a nested-executor from upper level, nothing to reset.
*/
if (!AllowedDistributionColumnValue.isActive ||
ExecutorLevel > AllowedDistributionColumnValue.executorLevel)
{
pfree(AllowedDistributionColumnValue.distributionColumnValue);
AllowedDistributionColumnValue.isActive = false;
return;
}
InTopLevelDelegatedFunctionCall = false;
Assert(ExecutorLevel == AllowedDistributionColumnValue.executorLevel);
pfree(AllowedDistributionColumnValue.distributionColumnValue);
AllowedDistributionColumnValue.isActive = false;
AllowedDistributionColumnValue.executorLevel = 0;
}
@ -777,6 +785,7 @@ bool
IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId)
{
Assert(AllowedDistributionColumnValue.isActive);
Assert(ExecutorLevel > AllowedDistributionColumnValue.executorLevel);
ereport(DEBUG4, errmsg("Comparing saved:%s with Shard key: %s colocationid:%d:%d",
pretty_format_node_dump(


@ -460,6 +460,7 @@ StartupCitusBackend(void)
InitializeMaintenanceDaemonBackend();
InitializeBackendData();
RegisterConnectionCleanup();
AssignGlobalPID();
}
@ -514,6 +515,9 @@ CitusCleanupConnectionsAtExit(int code, Datum arg)
* are already given away.
*/
DeallocateReservedConnections();
/* we don't want any monitoring view/udf to show already exited backends */
UnSetGlobalPID();
}
@ -1985,7 +1989,6 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source)
{
/* this array _must_ be kept in an order usable by bsearch */
const char *allowedConninfoKeywords[] = {
"application_name",
"connect_timeout",
#if defined(ENABLE_GSS) && defined(ENABLE_SSPI)
"gsslib",


@ -14,6 +14,26 @@
#include "udfs/worker_drop_sequence_dependency/11.0-1.sql"
#include "udfs/worker_drop_shell_table/11.0-1.sql"
#include "udfs/get_all_active_transactions/11.0-1.sql"
#include "udfs/get_global_active_transactions/11.0-1.sql"
#include "udfs/citus_worker_stat_activity/11.0-1.sql"
#include "udfs/worker_create_or_replace_object/11.0-1.sql"
CREATE VIEW citus.citus_worker_stat_activity AS
SELECT * FROM pg_catalog.citus_worker_stat_activity();
ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC;
#include "udfs/citus_dist_stat_activity/11.0-1.sql"
CREATE VIEW citus.citus_dist_stat_activity AS
SELECT * FROM pg_catalog.citus_dist_stat_activity();
ALTER VIEW citus.citus_dist_stat_activity SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC;
-- we have to recreate this view because we recreated citus_dist_stat_activity, which this view depends on
#include "udfs/citus_lock_waits/11.0-1.sql"
DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text);
DROP FUNCTION pg_catalog.master_get_table_metadata(text);


@ -21,13 +21,7 @@ ALTER FUNCTION citus.restore_isolation_tester_func SET SCHEMA citus_internal;
GRANT USAGE ON SCHEMA citus TO public;
#include "udfs/pg_dist_shard_placement_trigger_func/9.0-1.sql"
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object';
#include "udfs/worker_create_or_replace_object/9.0-1.sql"
CREATE OR REPLACE FUNCTION pg_catalog.master_unmark_object_distributed(classid oid, objid oid, objsubid int)
RETURNS void


@ -84,3 +84,131 @@ DROP FUNCTION pg_catalog.citus_shards_on_worker();
DROP FUNCTION pg_catalog.citus_shard_indexes_on_worker();
#include "../udfs/create_distributed_function/9.0-1.sql"
ALTER TABLE citus.pg_dist_object DROP COLUMN force_delegation;
SET search_path = 'pg_catalog';
DROP FUNCTION IF EXISTS get_all_active_transactions();
CREATE OR REPLACE FUNCTION get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL,
OUT transaction_number int8, OUT transaction_stamp timestamptz)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$get_all_active_transactions$$;
COMMENT ON FUNCTION get_all_active_transactions(OUT datid oid, OUT datname text, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL,
OUT transaction_number int8, OUT transaction_stamp timestamptz)
IS 'returns distributed transaction ids of active distributed transactions';
DROP FUNCTION IF EXISTS get_global_active_transactions();
CREATE FUNCTION get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$get_global_active_transactions$$;
COMMENT ON FUNCTION get_global_active_transactions(OUT database_id oid, OUT process_id int, OUT initiator_node_identifier int4, OUT transaction_number int8, OUT transaction_stamp timestamptz)
IS 'returns distributed transaction ids of active distributed transactions from each node of the cluster';
RESET search_path;
DROP FUNCTION pg_catalog.citus_dist_stat_activity CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$citus_dist_stat_activity$$;
COMMENT ON FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
IS 'returns distributed transaction activity on distributed tables';
CREATE VIEW citus.citus_dist_stat_activity AS
SELECT * FROM pg_catalog.citus_dist_stat_activity();
ALTER VIEW citus.citus_dist_stat_activity SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC;
SET search_path = 'pg_catalog';
-- we have to recreate this view because we drop citus_dist_stat_activity, which this view depends on
CREATE VIEW citus.citus_lock_waits AS
WITH
citus_dist_stat_activity AS
(
SELECT * FROM citus_dist_stat_activity
),
unique_global_wait_edges AS
(
SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges()
),
citus_dist_stat_activity_with_node_id AS
(
SELECT
citus_dist_stat_activity.*, (CASE citus_dist_stat_activity.distributed_query_host_name WHEN 'coordinator_host' THEN 0 ELSE pg_dist_node.nodeid END) as initiator_node_id
FROM
citus_dist_stat_activity LEFT JOIN pg_dist_node
ON
citus_dist_stat_activity.distributed_query_host_name = pg_dist_node.nodename AND
citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport
)
SELECT
waiting.pid AS waiting_pid,
blocking.pid AS blocking_pid,
waiting.query AS blocked_statement,
blocking.query AS current_statement_in_blocking_process,
waiting.initiator_node_id AS waiting_node_id,
blocking.initiator_node_id AS blocking_node_id,
waiting.distributed_query_host_name AS waiting_node_name,
blocking.distributed_query_host_name AS blocking_node_name,
waiting.distributed_query_host_port AS waiting_node_port,
blocking.distributed_query_host_port AS blocking_node_port
FROM
unique_global_wait_edges
JOIN
citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id)
JOIN
citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id);
ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC;
DROP FUNCTION citus_worker_stat_activity CASCADE;
CREATE OR REPLACE FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$citus_worker_stat_activity$$;
COMMENT ON FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
IS 'returns distributed transaction activity on shards of distributed tables';
CREATE VIEW citus.citus_worker_stat_activity AS
SELECT * FROM pg_catalog.citus_worker_stat_activity();
ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC;
DROP FUNCTION pg_catalog.worker_create_or_replace_object(text[]);
#include "../udfs/worker_create_or_replace_object/9.0-1.sql"
RESET search_path;


@ -0,0 +1,19 @@
DROP FUNCTION IF EXISTS pg_catalog.citus_dist_stat_activity CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$citus_dist_stat_activity$$;
COMMENT ON FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
IS 'returns distributed transaction activity on distributed tables';
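Once the upgrade has run, the view recreated on top of this function can be queried roughly like this; the new global_pid column ties a backend's activity together across nodes.
SELECT global_pid, pid, state, query
FROM citus_dist_stat_activity
ORDER BY global_pid;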


@ -0,0 +1,19 @@
DROP FUNCTION IF EXISTS pg_catalog.citus_dist_stat_activity CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$citus_dist_stat_activity$$;
COMMENT ON FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
IS 'returns distributed transaction activity on distributed tables';


@ -0,0 +1,44 @@
SET search_path = 'pg_catalog';
CREATE VIEW citus.citus_lock_waits AS
WITH
citus_dist_stat_activity AS
(
SELECT * FROM citus_dist_stat_activity
),
unique_global_wait_edges AS
(
SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges()
),
citus_dist_stat_activity_with_node_id AS
(
SELECT
citus_dist_stat_activity.*, (CASE citus_dist_stat_activity.distributed_query_host_name WHEN 'coordinator_host' THEN 0 ELSE pg_dist_node.nodeid END) as initiator_node_id
FROM
citus_dist_stat_activity LEFT JOIN pg_dist_node
ON
citus_dist_stat_activity.distributed_query_host_name = pg_dist_node.nodename AND
citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport
)
SELECT
waiting.pid AS waiting_pid,
blocking.pid AS blocking_pid,
waiting.query AS blocked_statement,
blocking.query AS current_statement_in_blocking_process,
waiting.initiator_node_id AS waiting_node_id,
blocking.initiator_node_id AS blocking_node_id,
waiting.distributed_query_host_name AS waiting_node_name,
blocking.distributed_query_host_name AS blocking_node_name,
waiting.distributed_query_host_port AS waiting_node_port,
blocking.distributed_query_host_port AS blocking_node_port
FROM
unique_global_wait_edges
JOIN
citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id)
JOIN
citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id);
ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC;
RESET search_path;
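For example, blocked and blocking statements can then be inspected with a query along these lines:
SELECT waiting_pid, blocking_pid,
       blocked_statement,
       current_statement_in_blocking_process
FROM citus_lock_waits;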


@ -0,0 +1,44 @@
SET search_path = 'pg_catalog';
CREATE VIEW citus.citus_lock_waits AS
WITH
citus_dist_stat_activity AS
(
SELECT * FROM citus_dist_stat_activity
),
unique_global_wait_edges AS
(
SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges()
),
citus_dist_stat_activity_with_node_id AS
(
SELECT
citus_dist_stat_activity.*, (CASE citus_dist_stat_activity.distributed_query_host_name WHEN 'coordinator_host' THEN 0 ELSE pg_dist_node.nodeid END) as initiator_node_id
FROM
citus_dist_stat_activity LEFT JOIN pg_dist_node
ON
citus_dist_stat_activity.distributed_query_host_name = pg_dist_node.nodename AND
citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport
)
SELECT
waiting.pid AS waiting_pid,
blocking.pid AS blocking_pid,
waiting.query AS blocked_statement,
blocking.query AS current_statement_in_blocking_process,
waiting.initiator_node_id AS waiting_node_id,
blocking.initiator_node_id AS blocking_node_id,
waiting.distributed_query_host_name AS waiting_node_name,
blocking.distributed_query_host_name AS blocking_node_name,
waiting.distributed_query_host_port AS waiting_node_port,
blocking.distributed_query_host_port AS blocking_node_port
FROM
unique_global_wait_edges
JOIN
citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id)
JOIN
citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id);
ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC;
RESET search_path;


@ -0,0 +1,19 @@
DROP FUNCTION IF EXISTS pg_catalog.citus_worker_stat_activity CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$citus_worker_stat_activity$$;
COMMENT ON FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
IS 'returns distributed transaction activity on shards of distributed tables';


@ -0,0 +1,19 @@
DROP FUNCTION IF EXISTS pg_catalog.citus_worker_stat_activity CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$citus_worker_stat_activity$$;
COMMENT ON FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
IS 'returns distributed transaction activity on shards of distributed tables';


@ -0,0 +1,12 @@
DROP FUNCTION IF EXISTS pg_catalog.get_all_active_transactions();
CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
OUT global_pid int8)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$get_all_active_transactions$$;
COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT datname text, OUT process_id int, OUT initiator_node_identifier int4,
OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
OUT global_pid int8)
IS 'returns transaction information for all Citus initiated transactions';


@ -0,0 +1,12 @@
DROP FUNCTION IF EXISTS pg_catalog.get_all_active_transactions();
CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
OUT global_pid int8)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$get_all_active_transactions$$;
COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT datname text, OUT process_id int, OUT initiator_node_identifier int4,
OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
OUT global_pid int8)
IS 'returns transaction information for all Citus initiated transactions';


@ -0,0 +1,9 @@
DROP FUNCTION IF EXISTS pg_catalog.get_global_active_transactions();
CREATE OR REPLACE FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$get_global_active_transactions$$;
COMMENT ON FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8)
IS 'returns transaction information for all Citus initiated transactions from each node of the cluster';


@ -0,0 +1,9 @@
DROP FUNCTION IF EXISTS pg_catalog.get_global_active_transactions();
CREATE OR REPLACE FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$get_global_active_transactions$$;
COMMENT ON FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8)
IS 'returns transaction information for all Citus initiated transactions from each node of the cluster';


@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object';
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statements text[])
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object_array$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statements text[])
IS 'takes a list of sql statements, before executing these it will check if the object already exists in that exact state otherwise replaces that named object with the new object';
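These functions are normally invoked by Citus itself while propagating objects; as a rough sketch, the array form would be called like this, with placeholder statements:
SELECT pg_catalog.worker_create_or_replace_object(ARRAY[
    'CREATE TEXT SEARCH CONFIGURATION public.my_config (PARSER = pg_catalog."default")',
    'ALTER TEXT SEARCH CONFIGURATION public.my_config ADD MAPPING FOR asciiword WITH pg_catalog.simple'
]);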


@ -0,0 +1,6 @@
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object';


@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object';
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statements text[])
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object_array$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statements text[])
IS 'takes a list of sql statements, before executing these it will check if the object already exists in that exact state otherwise replaces that named object with the new object';


@ -47,7 +47,7 @@ citus_get_all_dependencies_for_object(PG_FUNCTION_ARGS)
ObjectAddress address = { 0 };
ObjectAddressSubSet(address, classid, objid, objsubid);
List *dependencies = GetAllDependenciesForObject(&address);
List *dependencies = GetAllSupportedDependenciesForObject(&address);
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
{


@ -15,6 +15,9 @@
#include "distributed/pg_version_constants.h"
#include "miscadmin.h"
#include "unistd.h"
#include "safe_lib.h"
#include "funcapi.h"
#include "access/htup_details.h"
@ -43,7 +46,7 @@
#define GET_ACTIVE_TRANSACTION_QUERY "SELECT * FROM get_all_active_transactions();"
#define ACTIVE_TRANSACTION_COLUMN_COUNT 6
#define ACTIVE_TRANSACTION_COLUMN_COUNT 7
/*
* Each backend's data reside in the shared memory
@ -78,6 +81,7 @@ typedef struct BackendManagementShmemData
static void StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc
tupleDescriptor);
static uint64 GenerateGlobalPID(void);
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
static BackendManagementShmemData *backendManagementShmemData = NULL;
@ -315,6 +319,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS)
values[3] = ParseBoolField(result, rowIndex, 3);
values[4] = ParseIntField(result, rowIndex, 4);
values[5] = ParseTimestampTzField(result, rowIndex, 5);
values[6] = ParseIntField(result, rowIndex, 6);
tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls);
}
@ -384,8 +389,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
SpinLockAcquire(&currentBackend->mutex);
/* we're only interested in backends initiated by Citus */
if (currentBackend->citusBackend.initiatorNodeIdentifier < 0)
if (currentBackend->globalPID == INVALID_CITUS_INTERNAL_BACKEND_GPID)
{
SpinLockRelease(&currentBackend->mutex);
continue;
@ -427,6 +431,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
values[3] = !coordinatorOriginatedQuery;
values[4] = UInt64GetDatum(transactionNumber);
values[5] = TimestampTzGetDatum(transactionIdTimestamp);
values[6] = UInt64GetDatum(currentBackend->globalPID);
tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls);
@ -631,6 +636,7 @@ InitializeBackendData(void)
/* zero out the backend data */
UnSetDistributedTransactionId();
UnSetGlobalPID();
UnlockBackendSharedMemory();
}
@ -664,6 +670,24 @@ UnSetDistributedTransactionId(void)
}
/*
* UnSetGlobalPID resets the global pid for the current backend.
*/
void
UnSetGlobalPID(void)
{
/* backend does not exist if the extension is not created */
if (MyBackendData)
{
SpinLockAcquire(&MyBackendData->mutex);
MyBackendData->globalPID = 0;
SpinLockRelease(&MyBackendData->mutex);
}
}
/*
* LockBackendSharedMemory is a simple wrapper around LWLockAcquire on the
* shared memory lock.
@ -780,6 +804,109 @@ MarkCitusInitiatedCoordinatorBackend(void)
}
/*
* AssignGlobalPID assigns a global process id for the current backend.
* If this is a Citus-initiated backend, which means it is the distributed part of a
* distributed query, then this function assigns the global pid extracted from the
* application name. If not, this function assigns a newly generated global pid.
*/
void
AssignGlobalPID(void)
{
uint64 globalPID = INVALID_CITUS_INTERNAL_BACKEND_GPID;
if (!IsCitusInternalBackend())
{
globalPID = GenerateGlobalPID();
}
else
{
globalPID = ExtractGlobalPID(application_name);
}
SpinLockAcquire(&MyBackendData->mutex);
MyBackendData->globalPID = globalPID;
SpinLockRelease(&MyBackendData->mutex);
}
/*
* GetGlobalPID returns the global process id of the current backend.
*/
uint64
GetGlobalPID(void)
{
uint64 globalPID = INVALID_CITUS_INTERNAL_BACKEND_GPID;
if (MyBackendData)
{
SpinLockAcquire(&MyBackendData->mutex);
globalPID = MyBackendData->globalPID;
SpinLockRelease(&MyBackendData->mutex);
}
return globalPID;
}
/*
* GenerateGlobalPID generates the global process id for the current backend.
*/
static uint64
GenerateGlobalPID(void)
{
/*
* We try to create a human readable global pid that consists of node id and process id.
* By multiplying the node id by 10^10 and adding the pid we generate a number where the
* smallest 10 digits represent the pid and the remaining digits are the node id.
*
* Both node id and pid are 32 bit. We use 10^10 to fit all possible pids. Some very large
* node ids might cause overflow. But even for the applications that scale around 50 nodes every
* day it'd take about 100K years. So we are not worried.
*/
return (((uint64) GetLocalNodeId()) * 10000000000) + getpid();
}
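For illustration, the arithmetic above can be exercised in a minimal standalone sketch (not part of this file; the node id and pid values are made up):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t nodeId = 3;    /* assumed node id */
	uint64_t pid = 12345;   /* assumed backend pid */

	/* encode: node id in the high digits, pid in the lowest 10 digits */
	uint64_t globalPID = nodeId * 10000000000ULL + pid;

	/* decode: the inverse of the encoding */
	uint64_t decodedNodeId = globalPID / 10000000000ULL;
	uint64_t decodedPid = globalPID % 10000000000ULL;

	/* prints: 30000012345 3 12345 */
	printf("%llu %llu %llu\n",
		   (unsigned long long) globalPID,
		   (unsigned long long) decodedNodeId,
		   (unsigned long long) decodedPid);
	return 0;
}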
/*
* ExtractGlobalPID extracts the global process id from the application name and returns it.
* If the application name is not compatible with Citus' internal application names, it returns 0.
*/
uint64
ExtractGlobalPID(char *applicationName)
{
/* does application name exist */
if (!applicationName)
{
return INVALID_CITUS_INTERNAL_BACKEND_GPID;
}
/* we create our own copy of the application name in case the original changes */
char *applicationNameCopy = pstrdup(applicationName);
uint64 prefixLength = strlen(CITUS_APPLICATION_NAME_PREFIX);
/* does application name start with Citus's application name prefix */
if (strncmp(applicationNameCopy, CITUS_APPLICATION_NAME_PREFIX, prefixLength) != 0)
{
return INVALID_CITUS_INTERNAL_BACKEND_GPID;
}
/* are the remaining characters of the application name all digits */
uint64 numberOfRemainingChars = strlen(applicationNameCopy) - prefixLength;
if (numberOfRemainingChars <= 0 ||
!strisdigit_s(applicationNameCopy + prefixLength, numberOfRemainingChars))
{
return INVALID_CITUS_INTERNAL_BACKEND_GPID;
}
char *globalPIDString = &applicationNameCopy[prefixLength];
uint64 globalPID = strtoul(globalPIDString, NULL, 10);
return globalPID;
}
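A minimal standalone sketch of the same parsing rule, using only the C standard library (the helper name and the sample application names are made up):

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* returns 0 (the invalid gpid) unless the name is "citus_internal gpid=<digits>" */
static uint64_t
ExtractGlobalPIDSketch(const char *applicationName)
{
	const char *prefix = "citus_internal gpid=";
	size_t prefixLength = strlen(prefix);

	if (applicationName == NULL ||
		strncmp(applicationName, prefix, prefixLength) != 0)
	{
		return 0;
	}

	const char *digits = applicationName + prefixLength;
	if (*digits == '\0')
	{
		return 0;
	}
	for (const char *c = digits; *c != '\0'; c++)
	{
		if (!isdigit((unsigned char) *c))
		{
			return 0;
		}
	}

	return strtoull(digits, NULL, 10);
}

int
main(void)
{
	/* prints 30000012345, then 0 for the non-Citus application name */
	printf("%llu\n", (unsigned long long)
		   ExtractGlobalPIDSketch("citus_internal gpid=30000012345"));
	printf("%llu\n", (unsigned long long)
		   ExtractGlobalPIDSketch("psql"));
	return 0;
}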
/*
* CurrentDistributedTransactionNumber returns the transaction number of the
* current distributed transaction. The caller must make sure a distributed

View File

@ -108,7 +108,7 @@
* showing the initiator_node_id we expand it to initiator_node_host and
* initiator_node_port.
*/
#define CITUS_DIST_STAT_ACTIVITY_QUERY_COLS 23
#define CITUS_DIST_STAT_ACTIVITY_QUERY_COLS 24
#define CITUS_DIST_STAT_ADDITIONAL_COLS 3
#define CITUS_DIST_STAT_ACTIVITY_COLS \
CITUS_DIST_STAT_ACTIVITY_QUERY_COLS + CITUS_DIST_STAT_ADDITIONAL_COLS
@ -147,14 +147,20 @@ SELECT \
pg_stat_activity.backend_xid, \
pg_stat_activity.backend_xmin, \
pg_stat_activity.query, \
pg_stat_activity.backend_type \
pg_stat_activity.backend_type, \
dist_txs.global_pid \
FROM \
pg_stat_activity \
INNER JOIN \
get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \
get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_pid) \
ON pg_stat_activity.pid = dist_txs.process_id \
WHERE \
dist_txs.worker_query = false;"
backend_type = 'client backend' \
AND \
pg_stat_activity.query NOT ILIKE '%stat_activity%' \
AND \
pg_stat_activity.application_name NOT SIMILAR TO 'citus_internal gpid=\\d+'; \
"
#define CITUS_WORKER_STAT_ACTIVITY_QUERY \
"\
@ -181,14 +187,15 @@ SELECT \
pg_stat_activity.backend_xid, \
pg_stat_activity.backend_xmin, \
pg_stat_activity.query, \
pg_stat_activity.backend_type \
pg_stat_activity.backend_type, \
dist_txs.global_id \
FROM \
pg_stat_activity \
LEFT JOIN \
get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \
JOIN \
get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_id) \
ON pg_stat_activity.pid = dist_txs.process_id \
WHERE \
pg_stat_activity.application_name = 'citus_internal' \
pg_stat_activity.application_name SIMILAR TO 'citus_internal gpid=\\d+' \
AND \
pg_stat_activity.query NOT ILIKE '%stat_activity%';"
@ -223,6 +230,7 @@ typedef struct CitusDistStat
TransactionId backend_xmin;
text *query;
text *backend_type;
uint64 global_pid;
} CitusDistStat;
@ -501,6 +509,7 @@ ParseCitusDistStat(PGresult *result, int64 rowIndex)
citusDistStat->backend_xmin = ParseXIDField(result, rowIndex, 20);
citusDistStat->query = ParseTextField(result, rowIndex, 21);
citusDistStat->backend_type = ParseTextField(result, rowIndex, 22);
citusDistStat->global_pid = ParseIntField(result, rowIndex, 23);
return citusDistStat;
}
@ -688,6 +697,7 @@ HeapTupleToCitusDistStat(HeapTuple result, TupleDesc rowDescriptor)
citusDistStat->backend_xmin = ParseXIDFieldFromHeapTuple(result, rowDescriptor, 21);
citusDistStat->query = ParseTextFieldFromHeapTuple(result, rowDescriptor, 22);
citusDistStat->backend_type = ParseTextFieldFromHeapTuple(result, rowDescriptor, 23);
citusDistStat->global_pid = ParseIntFieldFromHeapTuple(result, rowDescriptor, 24);
return citusDistStat;
}
@ -1098,6 +1108,8 @@ ReturnCitusDistStats(List *citusStatsList, FunctionCallInfo fcinfo)
nulls[25] = true;
}
values[26] = Int32GetDatum(citusDistStat->global_pid);
tuplestore_putvalues(tupleStore, tupleDesc, values, nulls);
}
}

View File

@ -557,7 +557,8 @@ ResetGlobalVariables()
MetadataSyncOnCommit = false;
InTopLevelDelegatedFunctionCall = false;
ResetWorkerErrorIndication();
AllowedDistributionColumnValue.isActive = false;
memset(&AllowedDistributionColumnValue, 0,
sizeof(AllowedDistributionColumn));
}

View File

@ -1012,8 +1012,8 @@ CitusRangeVarCallbackForLockTable(const RangeVar *rangeVar, Oid relationId,
return;
}
/* we only allow tables and views to be locked */
if (!RegularTable(relationId))
/* we only allow tables, views and foreign tables to be locked */
if (!RegularTable(relationId) && !IsForeignTable(relationId))
{
ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a table", rangeVar->relname)));

View File

@ -13,8 +13,10 @@
#include "catalog/dependency.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_ts_config.h"
#include "catalog/pg_type.h"
#include "fmgr.h"
#include "funcapi.h"
#include "nodes/makefuncs.h"
#include "nodes/nodes.h"
#include "parser/parse_type.h"
@ -28,13 +30,17 @@
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/metadata/distobject.h"
#include "distributed/worker_create_or_replace.h"
#include "distributed/worker_protocol.h"
static const char * CreateStmtByObjectAddress(const ObjectAddress *address);
static List * CreateStmtListByObjectAddress(const ObjectAddress *address);
static bool CompareStringList(List *list1, List *list2);
PG_FUNCTION_INFO_V1(worker_create_or_replace_object);
PG_FUNCTION_INFO_V1(worker_create_or_replace_object_array);
static bool WorkerCreateOrReplaceObject(List *sqlStatements);
/*
@ -51,6 +57,37 @@ WrapCreateOrReplace(const char *sql)
}
/*
* WrapCreateOrReplaceList takes a list of sql commands and wraps it in a call to citus'
* udf to create or replace the existing object based on its create commands.
*/
char *
WrapCreateOrReplaceList(List *sqls)
{
StringInfoData textArrayLitteral = { 0 };
initStringInfo(&textArrayLitteral);
appendStringInfoString(&textArrayLitteral, "ARRAY[");
const char *sql = NULL;
bool first = true;
foreach_ptr(sql, sqls)
{
if (!first)
{
appendStringInfoString(&textArrayLitteral, ", ");
}
appendStringInfoString(&textArrayLitteral, quote_literal_cstr(sql));
first = false;
}
appendStringInfoString(&textArrayLitteral, "]::text[]");
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfo(&buf, CREATE_OR_REPLACE_COMMAND, textArrayLitteral.data);
return buf.data;
}
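To show the shape of the command this produces, here is a minimal standalone sketch; the two DDL statements are made up and the quoting is simplified compared to quote_literal_cstr():

#include <stdio.h>

int
main(void)
{
	const char *sqls[] = {
		"CREATE TEXT SEARCH CONFIGURATION public.my_ts_config (PARSER = default)",
		"COMMENT ON TEXT SEARCH CONFIGURATION public.my_ts_config IS 'demo'"
	};
	int sqlCount = 2;

	/* prints: SELECT worker_create_or_replace_object(ARRAY['CREATE ...', 'COMMENT ...']::text[]); */
	printf("SELECT worker_create_or_replace_object(ARRAY[");
	for (int i = 0; i < sqlCount; i++)
	{
		printf("%s'%s'", i > 0 ? ", " : "", sqls[i]);
	}
	printf("]::text[]);\n");
	return 0;
}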
/*
* worker_create_or_replace_object(statement text)
*
@ -73,35 +110,102 @@ Datum
worker_create_or_replace_object(PG_FUNCTION_ARGS)
{
text *sqlStatementText = PG_GETARG_TEXT_P(0);
const char *sqlStatement = text_to_cstring(sqlStatementText);
Node *parseTree = ParseTreeNode(sqlStatement);
char *sqlStatement = text_to_cstring(sqlStatementText);
List *sqlStatements = list_make1(sqlStatement);
PG_RETURN_BOOL(WorkerCreateOrReplaceObject(sqlStatements));
}
/*
* worker_create_or_replace_object(statements text[])
*
* function is called, by the coordinator, with the DDL statements that create an object. This
* function implements the CREATE ... IF NOT EXISTS functionality for objects that do not
* have this functionality or where their implementation is not sufficient.
*
* Besides checking if an object of said name exists it tries to compare the object to be
* created with the one in the local catalog. If there is a difference the one in the local
* catalog will be renamed after which the statement can be executed on this worker to
* create the object. If more statements are provided, all are compared in order with the
* statements generated on the worker. This works assuming a) both citus versions are the
* same, b) the objects are exactly the same.
*
* Renaming has two purposes
* - it frees the identifier so the new object can be created
* - it is non-destructive in case there is stored data that would be destroyed if the
*   object were dropped, eg. types used in a table on this node. Dropping the type with
*   a cascade would drop any column holding user data of this type.
*/
Datum
worker_create_or_replace_object_array(PG_FUNCTION_ARGS)
{
List *sqlStatements = NIL;
Datum *textArray = NULL;
int length = 0;
deconstruct_array(PG_GETARG_ARRAYTYPE_P(0), TEXTOID, -1, false, 'i', &textArray,
NULL, &length);
for (int i = 0; i < length; i++)
{
sqlStatements = lappend(sqlStatements, TextDatumGetCString(textArray[i]));
}
if (list_length(sqlStatements) < 1)
{
ereport(ERROR, (errmsg("expected at least 1 statement to be provided")));
}
PG_RETURN_BOOL(WorkerCreateOrReplaceObject(sqlStatements));
}
/*
* WorkerCreateOrReplaceObject implements the logic used by both variants of
* worker_create_or_replace_object to either create the object or conclude that
* the object already exists in the correct state.
*
* Returns true if the object has been created, false if it was already in the exact state
* it was asked for.
*/
static bool
WorkerCreateOrReplaceObject(List *sqlStatements)
{
/*
* since going to the drop statement might require some resolving we will do a check
* if the type actually exists instead of adding the IF EXISTS keyword to the
* statement.
* To check which object we are changing we find the object address from the first
* statement passed into the UDF. Later we will check if all object addresses are the
* same.
*
* Although many of the objects will only have one statement in this call, more
* complex objects might come with a list of statements. We assume they all are on the
* same subject.
*/
Node *parseTree = ParseTreeNode(linitial(sqlStatements));
ObjectAddress address = GetObjectAddressFromParseTree(parseTree, true);
if (ObjectExists(&address))
{
const char *localSqlStatement = CreateStmtByObjectAddress(&address);
/*
* Object with name from statement is already found locally, check if states are
* identical. If objects differ we will rename the old object (non-destructively)
* so as to make room to create the new object according to the spec sent.
*/
if (strcmp(sqlStatement, localSqlStatement) == 0)
/*
* Based on the local catalog we generate the list of commands we would send to
* recreate our version of the object. This we can compare to what the coordinator
* sent us. If they match we don't do anything.
*/
List *localSqlStatements = CreateStmtListByObjectAddress(&address);
if (CompareStringList(sqlStatements, localSqlStatements))
{
/*
* TODO string compare is a poor man's comparison, but calling equal on the
* parse trees returns false because they carry extra information, like the
* character position of some tokens
*/
/*
* parseTree sent by the coordinator is the same as we would create for our
* object, therefore we can omit the create statement locally and not create
* the object as it already exists.
* statements sent by the coordinator are the same as we would create for our
* object, therefore we can omit the statements locally and not create the
* object as it already exists in the correct shape.
*
* We let the coordinator know we didn't create the object.
*/
PG_RETURN_BOOL(false);
return false;
}
char *newName = GenerateBackupNameForCollision(&address);
@ -113,12 +217,47 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS)
NULL, None_Receiver, NULL);
}
/* apply create statement locally */
ProcessUtilityParseTree(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL,
None_Receiver, NULL);
/* apply all statements locally */
char *sqlStatement = NULL;
foreach_ptr(sqlStatement, sqlStatements)
{
parseTree = ParseTreeNode(sqlStatement);
ProcessUtilityParseTree(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL,
None_Receiver, NULL);
/* TODO verify all statements are about exactly 1 subject, mostly a sanity check
* to prevent unintentional use of this UDF, needs to come after the local
* execution to be able to actually resolve the ObjectAddress of the newly created
* object */
}
/* object has been created */
PG_RETURN_BOOL(true);
return true;
}
static bool
CompareStringList(List *list1, List *list2)
{
if (list_length(list1) != list_length(list2))
{
return false;
}
ListCell *cell1 = NULL;
ListCell *cell2 = NULL;
forboth(cell1, list1, cell2, list2)
{
const char *str1 = lfirst(cell1);
const char *str2 = lfirst(cell2);
if (strcmp(str1, str2) != 0)
{
return false;
}
}
return true;
}
@ -130,24 +269,38 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS)
* therefore you cannot equal this tree against parsed statement. Instead it can be
* deparsed to do a string comparison.
*/
static const char *
CreateStmtByObjectAddress(const ObjectAddress *address)
static List *
CreateStmtListByObjectAddress(const ObjectAddress *address)
{
switch (getObjectClass(address))
{
case OCLASS_COLLATION:
{
return CreateCollationDDL(address->objectId);
return list_make1(CreateCollationDDL(address->objectId));
}
case OCLASS_PROC:
{
return GetFunctionDDLCommand(address->objectId, false);
return list_make1(GetFunctionDDLCommand(address->objectId, false));
}
case OCLASS_TSCONFIG:
{
/*
* We do support TEXT SEARCH CONFIGURATION, however, we can't recreate the
* object in 1 command. Instead we return the full list of statements that
* would recreate our local version of the object, so the caller can compare
* them one by one against the statements sent by the coordinator.
*/
List *stmts = GetCreateTextSearchConfigStatements(address);
return DeparseTreeNodes(stmts);
}
case OCLASS_TYPE:
{
return DeparseTreeNode(CreateTypeStmtByObjectAddress(address));
return list_make1(DeparseTreeNode(CreateTypeStmtByObjectAddress(address)));
}
default:
@ -179,6 +332,11 @@ GenerateBackupNameForCollision(const ObjectAddress *address)
return GenerateBackupNameForProcCollision(address);
}
case OCLASS_TSCONFIG:
{
return GenerateBackupNameForTextSearchConfiguration(address);
}
case OCLASS_TYPE:
{
return GenerateBackupNameForTypeCollision(address);
@ -256,6 +414,25 @@ CreateRenameTypeStmt(const ObjectAddress *address, char *newName)
}
/*
* CreateRenameTextSearchStmt creates a rename statement for a text search configuration
* based on its ObjectAddress. The rename statement will rename the existing object on its
* address to the value provided in newName.
*/
static RenameStmt *
CreateRenameTextSearchStmt(const ObjectAddress *address, char *newName)
{
Assert(address->classId == TSConfigRelationId);
RenameStmt *stmt = makeNode(RenameStmt);
stmt->renameType = OBJECT_TSCONFIGURATION;
stmt->object = (Node *) get_ts_config_namelist(address->objectId);
stmt->newname = newName;
return stmt;
}
/*
* CreateRenameTypeStmt creates a rename statement for a type based on its ObjectAddress.
* The rename statement will rename the existing object on its address to the value
@ -325,6 +502,11 @@ CreateRenameStatement(const ObjectAddress *address, char *newName)
return CreateRenameProcStmt(address, newName);
}
case OCLASS_TSCONFIG:
{
return CreateRenameTextSearchStmt(address, newName);
}
case OCLASS_TYPE:
{
return CreateRenameTypeStmt(address, newName);

View File

@ -50,7 +50,6 @@ typedef struct ColumnarScanDescData *ColumnarScanDesc;
const TableAmRoutine * GetColumnarTableAmRoutine(void);
extern void columnar_tableam_init(void);
extern bool CheckCitusVersion(int elevel);
extern TableScanDesc columnar_beginscan_extended(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key,
ParallelTableScanDesc parallel_scan,

View File

@ -50,6 +50,7 @@ typedef struct BackendData
Oid userId;
slock_t mutex;
bool cancelledDueToDeadlock;
uint64 globalPID;
CitusInitiatedBackend citusBackend;
DistributedTransactionId transactionId;
} BackendData;
@ -61,8 +62,12 @@ extern void InitializeBackendData(void);
extern void LockBackendSharedMemory(LWLockMode lockMode);
extern void UnlockBackendSharedMemory(void);
extern void UnSetDistributedTransactionId(void);
extern void UnSetGlobalPID(void);
extern void AssignDistributedTransactionId(void);
extern void MarkCitusInitiatedCoordinatorBackend(void);
extern void AssignGlobalPID(void);
extern uint64 GetGlobalPID(void);
extern uint64 ExtractGlobalPID(char *applicationName);
extern void GetBackendDataForProc(PGPROC *proc, BackendData *result);
extern void CancelTransactionDueToDeadlock(PGPROC *proc);
extern bool MyBackendGotCancelledDueToDeadlock(bool clearState);
@ -73,4 +78,6 @@ extern int GetAllActiveClientBackendCount(void);
extern void IncrementClientBackendCounter(void);
extern void DecrementClientBackendCounter(void);
#define INVALID_CITUS_INTERNAL_BACKEND_GPID 0
#endif /* BACKEND_DATA_H */

View File

@ -151,6 +151,8 @@ extern ObjectAddress AlterCollationSchemaStmtObjectAddress(Node *stmt,
extern List * PostprocessAlterCollationSchemaStmt(Node *stmt, const char *queryString);
extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *address);
extern ObjectAddress DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok);
extern List * PreprocessDefineCollationStmt(Node *stmt, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern List * PostprocessDefineCollationStmt(Node *stmt, const char *queryString);
/* database.c - forward declarations */
@ -366,6 +368,8 @@ extern ObjectAddress AlterRoleSetStmtObjectAddress(Node *node,
extern List * GenerateCreateOrAlterRoleCommand(Oid roleOid);
/* schema.c - forward declarations */
extern List * PreprocessCreateSchemaStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern List * PreprocessDropSchemaStmt(Node *dropSchemaStatement,
const char *queryString,
ProcessUtilityContext processUtilityContext);
@ -375,6 +379,7 @@ extern List * PreprocessGrantOnSchemaStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern List * PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern ObjectAddress CreateSchemaStmtObjectAddress(Node *node, bool missing_ok);
extern ObjectAddress AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok);
/* sequence.c - forward declarations */
@ -462,6 +467,54 @@ extern Oid GetSequenceOid(Oid relationId, AttrNumber attnum);
extern bool ConstrTypeUsesIndex(ConstrType constrType);
/* text_search.c - forward declarations */
extern List * PostprocessCreateTextSearchConfigurationStmt(Node *node,
const char *queryString);
extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address);
extern List * CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address);
extern List * PreprocessDropTextSearchConfigurationStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PreprocessAlterTextSearchConfigurationStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PreprocessRenameTextSearchConfigurationStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node,
const char *queryString);
extern List * PreprocessTextSearchConfigurationCommentStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node,
const char *queryString);
extern ObjectAddress CreateTextSearchConfigurationObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress RenameTextSearchConfigurationStmtObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress AlterTextSearchConfigurationStmtObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress TextSearchConfigurationCommentObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress AlterTextSearchConfigurationOwnerObjectAddress(Node *node,
bool missing_ok);
extern char * GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address);
extern List * get_ts_config_namelist(Oid tsconfigOid);
/* truncate.c - forward declarations */
extern void PreprocessTruncateStatement(TruncateStmt *truncateStatement);

View File

@ -29,7 +29,7 @@
#define ERROR_BUFFER_SIZE 256
/* application name used for internal connections in Citus */
#define CITUS_APPLICATION_NAME "citus_internal"
#define CITUS_APPLICATION_NAME_PREFIX "citus_internal gpid="
/* application name used for internal connections in rebalancer */
#define CITUS_REBALANCER_NAME "citus_rebalancer"

View File

@ -31,6 +31,7 @@ extern void AssertObjectTypeIsFunctional(ObjectType type);
extern void QualifyTreeNode(Node *stmt);
extern char * DeparseTreeNode(Node *stmt);
extern List * DeparseTreeNodes(List *stmts);
/* forward declarations for deparse_attribute_stmts.c */
extern char * DeparseRenameAttributeStmt(Node *);
@ -59,7 +60,18 @@ extern char * DeparseAlterTableStmt(Node *node);
extern void QualifyAlterTableSchemaStmt(Node *stmt);
/* forward declarations for deparse_text_search.c */
extern char * DeparseCreateTextSearchStmt(Node *node);
extern char * DeparseDropTextSearchConfigurationStmt(Node *node);
extern char * DeparseRenameTextSearchConfigurationStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationSchemaStmt(Node *node);
extern char * DeparseTextSearchConfigurationCommentStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationOwnerStmt(Node *node);
/* forward declarations for deparse_schema_stmts.c */
extern char * DeparseCreateSchemaStmt(Node *node);
extern char * DeparseDropSchemaStmt(Node *node);
extern char * DeparseGrantOnSchemaStmt(Node *stmt);
extern char * DeparseAlterSchemaRenameStmt(Node *stmt);
@ -138,6 +150,14 @@ extern char * DeparseAlterExtensionStmt(Node *stmt);
/* forward declarations for deparse_database_stmts.c */
extern char * DeparseAlterDatabaseOwnerStmt(Node *node);
/* forward declarations for deparse_text_search_stmts.c */
extern void QualifyDropTextSearchConfigurationStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationStmt(Node *node);
extern void QualifyRenameTextSearchConfigurationStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationSchemaStmt(Node *node);
extern void QualifyTextSearchConfigurationCommentStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationOwnerStmt(Node *node);
/* forward declarations for deparse_sequence_stmts.c */
extern char * DeparseDropSequenceStmt(Node *node);
extern char * DeparseRenameSequenceStmt(Node *node);

View File

@ -23,7 +23,7 @@ extern bool InTopLevelDelegatedFunctionCall;
extern bool InDelegatedProcedureCall;
PlannedStmt * TryToDelegateFunctionCall(DistributedPlanningContext *planContext);
extern void ResetAllowedShardKeyValue(void);
extern void CheckAndResetAllowedShardKeyValueIfNeeded(void);
extern bool IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId);
#endif /* FUNCTION_CALL_DELEGATION_H */

View File

@ -19,6 +19,7 @@
extern List * GetUniqueDependenciesList(List *objectAddressesList);
extern List * GetDependenciesForObject(const ObjectAddress *target);
extern List * GetAllSupportedDependenciesForObject(const ObjectAddress *target);
extern List * GetAllDependenciesForObject(const ObjectAddress *target);
extern List * OrderObjectAddressListInDependencyOrder(List *objectAddressList);
extern bool SupportedDependencyByCitus(const ObjectAddress *address);

View File

@ -30,8 +30,8 @@ extern bool IsObjectAddressOwnedByExtension(const ObjectAddress *target,
ObjectAddress *extensionAddress);
extern ObjectAddress PgGetObjectAddress(char *ttype, ArrayType *namearr,
ArrayType *argsarr);
extern List * GetDistributedObjectAddressList(void);
extern RoleSpec * GetRoleSpecObjectForUser(Oid roleOid);
extern void UpdateDistributedObjectColocationId(uint32 oldColocationId, uint32
newColocationId);
#endif /* CITUS_METADATA_DISTOBJECT_H */

View File

@ -165,6 +165,7 @@ extern CitusTableCacheEntry * LookupCitusTableCacheEntry(Oid relationId);
extern DistObjectCacheEntry * LookupDistObjectCacheEntry(Oid classid, Oid objid, int32
objsubid);
extern int32 GetLocalGroupId(void);
extern int32 GetLocalNodeId(void);
extern void CitusTableCacheFlushInvalidatedEntries(void);
extern Oid LookupShardRelationFromCatalog(int64 shardId, bool missing_ok);
extern List * ShardPlacementListIncludingOrphanedPlacements(uint64 shardId);

View File

@ -139,6 +139,7 @@ extern void ExecuteQueryIntoDestReceiver(Query *query, ParamListInfo params,
extern void ExecutePlanIntoDestReceiver(PlannedStmt *queryPlan, ParamListInfo params,
DestReceiver *dest);
extern void SetLocalMultiShardModifyModeToSequential(void);
extern void EnsureSequentialMode(ObjectType objType);
extern void SetLocalForceMaxQueryParallelization(void);
extern void SortTupleStore(CitusScanState *scanState);
extern bool DistributedPlanModifiesDatabase(DistributedPlan *plan);

View File

@ -70,6 +70,9 @@ typedef struct AllowedDistributionColumn
Const *distributionColumnValue;
uint32 colocationId;
bool isActive;
/* In nested executor, track the level at which value is set */
int executorLevel;
} AllowedDistributionColumn;
/*

View File

@ -19,6 +19,7 @@
#define CREATE_OR_REPLACE_COMMAND "SELECT worker_create_or_replace_object(%s);"
extern char * WrapCreateOrReplace(const char *sql);
extern char * WrapCreateOrReplaceList(List *sqls);
extern char * GenerateBackupNameForCollision(const ObjectAddress *address);
extern RenameStmt * CreateRenameStatement(const ObjectAddress *address, char *newName);

View File

@ -875,6 +875,7 @@ BEGIN
RETURN $1 * $1;
END;
$function$;
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION square_func(int)
RETURNS int
LANGUAGE plpgsql
@ -883,6 +884,7 @@ BEGIN
RETURN $1 * $1;
END;
$function$;
RESET citus.enable_metadata_sync;
SELECT const_function(1), string_agg(a::character, ',') FROM t1;
NOTICE: stable_fn called
CONTEXT: PL/pgSQL function const_function(integer) line XX at RAISE

View File

@ -165,8 +165,8 @@ SELECT count(*) FROM t; -- parallel execution;
(1 row)
ALTER DATABASE regression OWNER TO database_owner_2; -- should ERROR
ERROR: cannot create or modify database because there was a parallel operation on a distributed table in the transaction
DETAIL: When creating or altering a database, Citus needs to perform all operations over a single connection per node to ensure consistency.
ERROR: cannot run database command because there was a parallel operation on a distributed table in the transaction
DETAIL: When running command on/for a distributed database, Citus needs to perform all operations over a single connection per node to ensure consistency.
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK;
-- list the owners of the current database on all nodes

View File

@ -193,8 +193,8 @@ FOR EACH STATEMENT EXECUTE FUNCTION dummy_function();
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1507008, 'interesting!schema', E'CREATE TRIGGER "trigger\\''name"
BEFORE INSERT ON "interesting!schema"."citus_local!_table"
FOR EACH STATEMENT EXECUTE FUNCTION dummy_function();')
CREATE EXTENSION seg;
BEGIN;
CREATE EXTENSION seg;
-- ALTER TRIGGER DEPENDS ON
ALTER TRIGGER "trigger\'name" ON "interesting!schema"."citus_local!_table" DEPENDS ON EXTENSION seg;
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1507008, 'interesting!schema', E'ALTER TRIGGER "trigger\\''name" ON "interesting!schema"."citus_local!_table" DEPENDS ON EXTENSION seg;')
@ -578,5 +578,5 @@ NOTICE: executing the command locally: SELECT val FROM citus_local_table_trigge
ROLLBACK;
-- cleanup at exit
SET client_min_messages TO ERROR;
DROP SCHEMA citus_local_table_triggers, "interesting!schema" CASCADE;
NOTICE: drop cascades to 22 other objects

View File

@ -58,6 +58,7 @@ CREATE TABLE postgres_local_table(a int, b int);
-- We shouldn't use LIMIT in INSERT SELECT queries to make the test faster as
-- LIMIT would force planner to wrap SELECT query in an intermediate result and
-- this might reduce the coverage of the test cases.
SET citus.enable_metadata_sync TO OFF;
CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$
BEGIN
SET client_min_messages to ERROR;
@ -74,6 +75,7 @@ CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$
RESET client_min_messages;
END;
$$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
---------------------------------------------------------------------
---- SELECT ----
---------------------------------------------------------------------

View File

@ -265,6 +265,13 @@ set columnar.compression = 'pglz';
INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100);
set columnar.compression to default;
-- create a user that can not truncate
SELECT run_command_on_workers($$CREATE USER truncate_user;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(2 rows)
CREATE USER truncate_user;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.

View File

@ -523,6 +523,7 @@ BEGIN
RETURN trunc(random() * (end_int-start_int) + start_int);
END;
$$ LANGUAGE 'plpgsql' STRICT;
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE PROCEDURE coordinator_evaluation.test_procedure(int)
LANGUAGE plpgsql
AS $procedure$
@ -532,6 +533,7 @@ BEGIN
PERFORM DISTINCT value FROM coordinator_evaluation_table_2 WHERE key = filterKey;
END;
$procedure$;
RESET citus.enable_metadata_sync;
-- we couldn't find a meaningful query to write for this
-- however this query fails before https://github.com/citusdata/citus/pull/3454
SET client_min_messages TO DEBUG2;
@ -564,6 +566,7 @@ BEGIN
INSERT INTO coordinator_evaluation_table_2 VALUES (filterKey, filterKey);
END;
$procedure$;
DEBUG: switching to sequential query execution mode
RESET citus.log_remote_commands ;
RESET client_min_messages;
-- these calls would INSERT key = 101, so test if insert succeeded

View File

@ -833,11 +833,13 @@ EXECUTE router_with_only_function;
SET citus.log_local_commands TO ON;
SET search_path TO coordinator_evaluation_combinations_modify;
-- returns 2 on the worker
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION get_constant_stable()
RETURNS INT AS $$
BEGIN
RETURN 2;
END; $$ language plpgsql STABLE;
RESET citus.enable_metadata_sync;
-- all local values
INSERT INTO user_info_data (user_id, u_data) VALUES
(3, '(''test3'', 3)'), (4, '(''test4'', 4)'), (7, '(''test7'', 7)'),

View File

@ -898,9 +898,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in
-- a helper function which returns true if the coordinated
-- transaction uses 2PC
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC()
RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus',
$$coordinated_transaction_should_use_2PC$$;
RESET citus.enable_metadata_sync;
-- a local SELECT followed by remote SELECTs
-- does not trigger 2PC
BEGIN;

View File

@ -155,16 +155,6 @@ ORDER BY 1,2,3;
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA collation_tests CASCADE;
DROP SCHEMA collation_tests2 CASCADE;
-- This is hacky, but we should clean-up the resources as below
\c - - - :worker_1_port
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA collation_tests CASCADE;
DROP SCHEMA collation_tests2 CASCADE;
\c - - - :worker_2_port
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA collation_tests CASCADE;
DROP SCHEMA collation_tests2 CASCADE;
\c - - - :master_port
DROP USER collationuser;
SELECT run_command_on_workers($$DROP USER collationuser;$$);
run_command_on_workers
@ -173,3 +163,19 @@ SELECT run_command_on_workers($$DROP USER collationuser;$$);
(localhost,57638,t,"DROP ROLE")
(2 rows)
\c - - - :worker_1_port
-- test creating a collation on a worker
CREATE COLLATION another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk');
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
-- test if creating a collation on a worker on a local
-- schema raises the right error
SET citus.enable_ddl_propagation TO off;
CREATE SCHEMA collation_creation_on_worker;
SET citus.enable_ddl_propagation TO on;
CREATE COLLATION collation_creation_on_worker.another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk');
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
SET citus.enable_ddl_propagation TO off;
DROP SCHEMA collation_creation_on_worker;
SET citus.enable_ddl_propagation TO on;

View File

@ -1,11 +1,4 @@
CREATE SCHEMA collation_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE SCHEMA")
(localhost,57638,t,"CREATE SCHEMA")
(2 rows)
\c - - - :worker_1_port
SET search_path TO collation_conflict;
SET citus.enable_metadata_sync TO off;

View File

@ -469,8 +469,7 @@ ALTER FUNCTION eq(macaddr,macaddr) DEPENDS ON EXTENSION citus;
ERROR: distrtibuted functions are not allowed to depend on an extension
DETAIL: Function "function_tests.eq(pg_catalog.macaddr,pg_catalog.macaddr)" is already distributed. Functions from extensions are expected to be created on the workers by the extension they depend on.
SELECT create_distributed_function('pg_catalog.citus_drop_trigger()');
ERROR: unable to create a distributed function from functions owned by an extension
DETAIL: Function "pg_catalog.citus_drop_trigger()" has a dependency on extension "citus". Functions depending on an extension cannot be distributed. Create the function by creating the extension on the workers.
ERROR: Citus extension functions(citus_drop_trigger) cannot be distributed.
DROP FUNCTION eq(macaddr,macaddr);
-- call should fail as function should have been dropped
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
@ -1064,16 +1063,9 @@ SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isa
\c - - - :worker_1_port
UPDATE pg_dist_local_group SET groupid = 0;
TRUNCATE pg_dist_node;
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA function_tests CASCADE;
DROP SCHEMA function_tests2 CASCADE;
SET search_path TO function_tests, function_tests2;
\c - - - :worker_2_port
UPDATE pg_dist_local_group SET groupid = 0;
TRUNCATE pg_dist_node;
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA function_tests CASCADE;
DROP SCHEMA function_tests2 CASCADE;
\c - - - :master_port
SET client_min_messages TO ERROR;
DROP USER functionuser;

View File

@ -1,15 +1,9 @@
-- This is designed to test worker_create_or_replace_object in PG11 with aggregates
-- Note in PG12 we use CREATE OR REPLACE AGGREGATE, thus the renaming does not occur
CREATE SCHEMA proc_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE SCHEMA")
(localhost,57638,t,"CREATE SCHEMA")
(2 rows)
\c - - - :worker_1_port
SET search_path TO proc_conflict;
SET citus.enable_metadata_sync TO OFF;
CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$
BEGIN
RETURN state * 2 + i;
@ -19,6 +13,7 @@ CREATE AGGREGATE existing_agg(int) (
SFUNC = existing_func,
STYPE = int
);
RESET citus.enable_metadata_sync;
\c - - - :master_port
SET search_path TO proc_conflict;
CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$
@ -69,6 +64,7 @@ DROP AGGREGATE existing_agg(int) CASCADE;
DROP FUNCTION existing_func(int, int) CASCADE;
\c - - - :worker_1_port
SET search_path TO proc_conflict;
SET citus.enable_metadata_sync TO OFF;
CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$
BEGIN
RETURN state * 3 + i;
@ -78,6 +74,7 @@ CREATE AGGREGATE existing_agg(int) (
SFUNC = existing_func,
STYPE = int
);
RESET citus.enable_metadata_sync;
\c - - - :master_port
SET search_path TO proc_conflict;
CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$

View File

@ -173,21 +173,7 @@ SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA procedure_tests CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
DROP SCHEMA procedure_tests2 CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
DROP USER procedureuser;
SELECT 1 FROM run_command_on_workers($$DROP USER procedureuser;$$);
?column?

View File

@ -456,21 +456,7 @@ SELECT * FROM field_indirection_test_2 ORDER BY 1,2,3;
-- clear objects
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA type_tests CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
DROP SCHEMA type_tests2 CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
DROP USER typeuser;
SELECT run_command_on_workers($$DROP USER typeuser;$$);
run_command_on_workers

View File

@ -1,12 +1,5 @@
SET citus.next_shard_id TO 20020000;
CREATE SCHEMA type_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE SCHEMA")
(localhost,57638,t,"CREATE SCHEMA")
(2 rows)
-- create a type on a worker that should not cause data loss once overwritten with a type
-- from the coordinator
\c - - :public_worker_1_host :worker_1_port

View File

@ -64,10 +64,3 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso
-- clear objects
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA xact_enum_type CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)

View File

@ -47,7 +47,6 @@ WHERE n.nspname IN ('drop_partitioned_table', 'schema1')
AND c.relkind IN ('r','p')
ORDER BY 1, 2;
\c - - - :worker_1_port
CREATE SCHEMA drop_partitioned_table;
SET search_path = drop_partitioned_table;
CREATE VIEW tables_info AS
SELECT n.nspname as "Schema",
@ -395,11 +394,4 @@ NOTICE: issuing ROLLBACK
NOTICE: issuing ROLLBACK
DROP SCHEMA drop_partitioned_table CASCADE;
NOTICE: drop cascades to 3 other objects
SELECT run_command_on_workers('DROP SCHEMA IF EXISTS drop_partitioned_table CASCADE');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
SET search_path TO public;

View File

@ -253,11 +253,3 @@ ORDER BY placementid;
RESET SEARCH_PATH;
DROP SCHEMA add_remove_node CASCADE;
NOTICE: drop cascades to table add_remove_node.user_table
SELECT * FROM run_command_on_workers('DROP SCHEMA IF EXISTS add_remove_node CASCADE')
ORDER BY nodeport;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 9060 | t | DROP SCHEMA
localhost | 57637 | t | DROP SCHEMA
(2 rows)

View File

@ -225,8 +225,8 @@ SELECT count(*) FROM single_replicatated WHERE key = 100;
RESET client_min_messages;
-- verify get_global_active_transactions works when a timeout happens on a connection
SELECT get_global_active_transactions();
get_global_active_transactions
SELECT * FROM get_global_active_transactions() WHERE transaction_number != 0;
datid | process_id | initiator_node_identifier | worker_query | transaction_number | transaction_stamp | global_pid
---------------------------------------------------------------------
(0 rows)

View File

@ -4,7 +4,9 @@
-- We have to keep two copies of this failure test
-- because if the shards are created via the executor
-- cancellations are processed, otherwise they are not
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA create_distributed_table_non_empty_failure;
SET citus.enable_ddl_propagation TO ON;
SET search_path TO 'create_distributed_table_non_empty_failure';
SET citus.next_shard_id TO 11000000;
SELECT citus.mitmproxy('conn.allow()');
@ -100,13 +102,6 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata
(localhost,57637,t,1)
(2 rows)
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,"DROP SCHEMA")
(localhost,57637,t,"DROP SCHEMA")
(2 rows)
-- this triggers a schema creation which prevents further transactions around dependency propagation
SELECT citus.mitmproxy('conn.allow()');
mitmproxy

View File

@ -193,7 +193,9 @@ SELECT citus.mitmproxy('conn.allow()');
DROP TABLE ref_table;
DROP SCHEMA failure_reference_table;
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA failure_reference_table;
SET citus.enable_ddl_propagation TO ON;
CREATE TABLE ref_table(id int);
INSERT INTO ref_table VALUES(1),(2),(3);
-- Test in transaction

View File

@ -1,7 +1,9 @@
--
-- failure_create_table adds failure tests for creating table without data.
--
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA failure_create_table;
SET citus.enable_ddl_propagation TO ON;
SET search_path TO 'failure_create_table';
SELECT citus.mitmproxy('conn.allow()');
mitmproxy

View File

@ -228,7 +228,10 @@ BEGIN
DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT modify_fast_path_plpsql(1,1);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -241,6 +244,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(2,2);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -253,6 +257,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(3,3);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -265,6 +270,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(4,4);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -277,6 +283,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(5,5);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -289,6 +296,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(6,6);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -301,6 +309,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(6,6);
DEBUG: function does not have co-located tables
modify_fast_path_plpsql
---------------------------------------------------------------------

View File

@ -332,6 +332,7 @@ BEGIN;
ERROR: insert or update on table "local_table_5_1518073" violates foreign key constraint "local_table_5_col_1_fkey1_1518073"
ROLLBACK;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE SCHEMA another_schema_fkeys_between_local_ref;
CREATE TABLE another_schema_fkeys_between_local_ref.local_table_6 (col_1 INT PRIMARY KEY);
-- first convert local tables to citus local tables in graph
@ -376,6 +377,7 @@ BEGIN;
ROLLBACK;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE local_table_6 (col_1 INT PRIMARY KEY);
-- first convert local tables to citus local tables in graph
ALTER TABLE local_table_2 ADD CONSTRAINT fkey_11 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_1) ON DELETE CASCADE;

View File

@ -293,6 +293,8 @@ BEGIN
RETURN ret_val;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION func_calls_forcepush_func()
RETURNS NUMERIC AS $$
DECLARE incremented_val NUMERIC;
@ -302,9 +304,11 @@ BEGIN
RETURN incremented_val;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('func_calls_forcepush_func()');
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -312,7 +316,7 @@ DETAIL: A distributed function is created. To make sure subsequent commands see
SELECT create_distributed_function('inner_force_delegation_function(int)', '$1', colocate_with := 'test_nested', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -354,12 +358,16 @@ PL/pgSQL function func_calls_forcepush_func() line XX at SQL statement
101
(1 row)
-- Block distributing that function as distributing it causes
-- different test output on PG 14.
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION get_val()
RETURNS INT AS $$
BEGIN
RETURN 100::INT;
END;
$$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
--
-- UDF calling another UDF in a FROM clause
-- fn()
@ -377,7 +385,10 @@ BEGIN
RETURN incremented_val;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT func_calls_forcepush_func_infrom();
DEBUG: function does not have co-located tables
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT inner_force_delegation_function FROM inner_force_delegation_function(add_val + 100)"
PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement
@ -395,6 +406,7 @@ PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement
BEGIN;
SELECT func_calls_forcepush_func_infrom();
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT inner_force_delegation_function FROM inner_force_delegation_function(add_val + 100)"
PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement
@ -428,7 +440,10 @@ BEGIN
RETURN incremented_val;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT func_calls_forcepush_func_intarget();
DEBUG: function does not have co-located tables
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT inner_force_delegation_function(100 + 100) OFFSET 0"
PL/pgSQL function func_calls_forcepush_func_intarget() line XX at SQL statement
@ -446,6 +461,7 @@ PL/pgSQL function func_calls_forcepush_func_intarget() line XX at SQL statement
BEGIN;
SELECT func_calls_forcepush_func_intarget();
DEBUG: not pushing down function calls in a multi-statement transaction
NOTICE: inner_force_delegation_function():201
DETAIL: from localhost:xxxxx
CONTEXT: SQL statement "SELECT inner_force_delegation_function(100 + 100) OFFSET 0"
@ -473,9 +489,11 @@ BEGIN
END if;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('test_recursive(int)', '$1', colocate_with := 'test_nested', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -544,13 +562,15 @@ BEGIN
RETURN x + y;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'test_non_constant(int,bigint)',
'$1',
colocate_with := 'test_forcepushdown',
force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -610,6 +630,8 @@ BEGIN
INSERT INTO emp VALUES (empname, 33);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION outer_emp()
RETURNS void
AS $$
@ -618,15 +640,18 @@ BEGIN
PERFORM inner_emp('hello');
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('inner_emp(text)','empname', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT outer_emp();
DEBUG: function does not have co-located tables
DEBUG: Skipping pushdown of function from a PL/PgSQL simple expression
CONTEXT: SQL statement "SELECT inner_emp('hello')"
PL/pgSQL function outer_emp() line XX at PERFORM
@ -650,13 +675,15 @@ BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown SELECT(a+1);
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_select_data(int)', 'a',
colocate_with := 'test_forcepushdown',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -725,13 +752,15 @@ BEGIN
SELECT intcol FROM forcepushdown_schema.test_forcepushdown_noncolocate;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_select_data_nonlocal(int)', 'a',
colocate_with := 'test_forcepushdown',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -803,13 +832,15 @@ BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown_char VALUES (a);
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_data_char(char)', 'a',
colocate_with := 'test_forcepushdown_char',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -821,13 +852,15 @@ BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown_varchar VALUES (a);
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_data_varchar(varchar)', 'a',
colocate_with := 'test_forcepushdown_varchar',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -839,13 +872,15 @@ BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown_text VALUES (a);
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_data_text(text)', 'a',
colocate_with := 'test_forcepushdown_text',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -947,13 +982,15 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'select_data(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -969,13 +1006,15 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'select_data_noncolocate(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -990,13 +1029,15 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_select_data_cte1(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -1011,13 +1052,15 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_select_data_cte2(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -1033,13 +1076,15 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_data_cte_nondist(int)', 'a',
colocate_with := 'test_subquery',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -1162,18 +1207,11 @@ SELECT * FROM forcepushdown_schema.test_subquery ORDER BY 1;
(5 rows)
-- Query with targetList greater than 1
-- Function from FROM clause is not delegated outside of a BEGIN (for now)
-- Function from FROM clause is delegated outside of a BEGIN
SELECT 1,2,3 FROM select_data(100);
DEBUG: generating subplan XXX_1 for subquery SELECT data FROM forcepushdown_schema.test_subquery WHERE (data OPERATOR(pg_catalog.=) 100)
CONTEXT: SQL statement "SELECT result FROM forcepushdown_schema.test_subquery WHERE data =
(SELECT data FROM forcepushdown_schema.test_subquery WHERE data = a)"
PL/pgSQL function select_data(integer) line XX at SQL statement
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT result FROM forcepushdown_schema.test_subquery WHERE (data OPERATOR(pg_catalog.=) (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)))
CONTEXT: SQL statement "SELECT result FROM forcepushdown_schema.test_subquery WHERE data =
(SELECT data FROM forcepushdown_schema.test_subquery WHERE data = a)"
PL/pgSQL function select_data(integer) line XX at SQL statement
DEBUG: pushing down the function call
NOTICE: Result: -1
CONTEXT: PL/pgSQL function select_data(integer) line XX at RAISE
DETAIL: from localhost:xxxxx
?column? | ?column? | ?column?
---------------------------------------------------------------------
1 | 2 | 3
@ -1210,9 +1248,11 @@ BEGIN
RETURN x + y;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('test_prepare(int,int)','x',force_delegation :=true, colocate_with := 'table_test_prepare');
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -1230,9 +1270,12 @@ BEGIN
PERFORM 1, 1 + a FROM test_prepare(x + 1, y + 1) a;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
-- First 5 get delegated and succeed
BEGIN;
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1251,6 +1294,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1269,6 +1313,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1287,6 +1332,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1305,6 +1351,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1331,6 +1378,7 @@ SELECT COUNT(*) FROM table_test_prepare;
-- 6th execution will use a generic plan and should get delegated
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1349,6 +1397,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1369,6 +1418,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
END;
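The "first 5" / "6th execution" comments above lean on PostgreSQL's plan caching for PL/pgSQL statements: the first five executions use custom plans, and from the sixth a generic plan may be chosen, which is what allows the call to be delegated. As a hedged aside (an illustration only, not part of the original test), the custom-plan warm-up phase could be skipped by forcing generic plans with the standard plan_cache_mode setting, which under the same reasoning would be expected to make the call delegatable from the first execution:
SET plan_cache_mode TO force_generic_plan;  -- hypothetical: PostgreSQL 12+ GUC, not used by this regression test
SELECT outer_test_prepare(1,1);
RESET plan_cache_mode;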
-- Fails as expected
SELECT outer_test_prepare(1,2);
DEBUG: function does not have co-located tables
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1404,10 +1454,12 @@ BEGIN
RETURN x;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('test(int)', 'x',
colocate_with := 'test_perform', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
@ -1425,10 +1477,471 @@ NOTICE: INPUT 3
CONTEXT: PL/pgSQL function test(integer) line XX at RAISE
SQL statement "SELECT test(3)"
PL/pgSQL function inline_code_block line XX at PERFORM
CREATE TABLE testnested_table (x int, y int);
SELECT create_distributed_table('testnested_table','x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE OR REPLACE FUNCTION inner_fn(x int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
-- Non-force function calling force-delegation function
CREATE OR REPLACE FUNCTION outer_local_fn()
RETURNS void
AS $$
DECLARE
BEGIN
PERFORM 1 FROM inner_fn(1);
INSERT INTO forcepushdown_schema.testnested_table VALUES (2,3);
PERFORM 1 FROM inner_fn(4);
INSERT INTO forcepushdown_schema.testnested_table VALUES (5,6);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('inner_fn(int)','x',
colocate_with:='testnested_table', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT outer_local_fn();
DEBUG: function does not have co-located tables
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT 1 FROM inner_fn(1)"
PL/pgSQL function outer_local_fn() line XX at PERFORM
DEBUG: pushing down the function call
CONTEXT: SQL statement "SELECT 1 FROM inner_fn(1)"
PL/pgSQL function outer_local_fn() line XX at PERFORM
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT 1 FROM inner_fn(4)"
PL/pgSQL function outer_local_fn() line XX at PERFORM
DEBUG: pushing down the function call
CONTEXT: SQL statement "SELECT 1 FROM inner_fn(4)"
PL/pgSQL function outer_local_fn() line XX at PERFORM
outer_local_fn
---------------------------------------------------------------------
(1 row)
-- Rows from 1-6 should appear
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
1 | 1
2 | 3
4 | 4
5 | 6
(4 rows)
BEGIN;
SELECT outer_local_fn();
DEBUG: not pushing down function calls in a multi-statement transaction
outer_local_fn
---------------------------------------------------------------------
(1 row)
END;
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
1 | 1
1 | 1
2 | 3
2 | 3
4 | 4
4 | 4
5 | 6
5 | 6
(8 rows)
DROP FUNCTION inner_fn(int);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
DROP FUNCTION outer_local_fn();
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
TRUNCATE TABLE testnested_table;
CREATE OR REPLACE FUNCTION inner_fn(x int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
-- Force-delegation function calling non-force function
CREATE OR REPLACE FUNCTION outer_fn(y int, z int)
RETURNS void
AS $$
DECLARE
BEGIN
PERFORM 1 FROM forcepushdown_schema.inner_fn(y);
INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y);
PERFORM 1 FROM forcepushdown_schema.inner_fn(z);
INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('inner_fn(int)','x',
colocate_with:='testnested_table', force_delegation := false);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function('outer_fn(int, int)','y',
colocate_with:='testnested_table', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT outer_fn(1, 2);
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x)"
PL/pgSQL function forcepushdown_schema.inner_fn(integer) line XX at SQL statement
SQL statement "SELECT 1 FROM forcepushdown_schema.inner_fn(z)"
PL/pgSQL function forcepushdown_schema.outer_fn(integer,integer) line XX at PERFORM
while executing command on localhost:xxxxx
BEGIN;
SELECT outer_fn(1, 2);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x)"
PL/pgSQL function forcepushdown_schema.inner_fn(integer) line XX at SQL statement
SQL statement "SELECT 1 FROM forcepushdown_schema.inner_fn(z)"
PL/pgSQL function forcepushdown_schema.outer_fn(integer,integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
-- No rows
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
(0 rows)
-- Force-delegation function calling force-delegation function
CREATE OR REPLACE FUNCTION force_push_inner(y int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_outer(x int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
PERFORM forcepushdown_schema.force_push_inner(x+1) LIMIT 1;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'force_push_outer(int)', 'x',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function(
'force_push_inner(int)', 'y',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
-- Keys 7,8,9,14 fall on one node and 15 on a different node
-- Function gets delegated to node with shard-key = 7 and inner function
-- will not be delegated but inserts shard-key = 8 locally
SELECT force_push_outer(7);
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM
while executing command on localhost:xxxxx
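The shard-key comments above (keys 7, 8, 9, 14 versus 15) assume a particular key-to-node placement. A minimal sketch of how that placement could be verified, using the Citus UDF get_shard_id_for_distribution_column together with the pg_dist_shard_placement view (an illustrative query, not part of the original test):
SELECT nodename, nodeport
FROM pg_dist_shard_placement
WHERE shardid = get_shard_id_for_distribution_column('forcepushdown_schema.testnested_table', 7);
-- repeating the query with 8, 9, 14 and 15 shows which keys share a node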
BEGIN;
-- Function gets delegated to node with shard-key = 8 and inner function
-- will not be delegated but inserts shard-key = 9 locally
SELECT force_push_outer(8);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
BEGIN;
-- Function gets delegated to node with shard-key = 14 and inner function
-- will not be delegated but fails to insert shard-key = 15 remotely
SELECT force_push_outer(14);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
(0 rows)
--
-- Function-1() --> function-2() --> function-3()
--
CREATE OR REPLACE FUNCTION force_push_1(x int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
PERFORM forcepushdown_schema.force_push_2(x+1) LIMIT 1;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_2(y int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y);
PERFORM forcepushdown_schema.force_push_3(y+1) LIMIT 1;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_3(z int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'force_push_1(int)', 'x',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function(
'force_push_2(int)', 'y',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function(
'force_push_3(int)', 'z',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
TRUNCATE TABLE testnested_table;
BEGIN;
-- All local inserts
SELECT force_push_1(7);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_2(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_2(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_1(integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
BEGIN;
-- Local (shard-keys 13, 15) + remote insert (shard-key 14)
SELECT force_push_1(13);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_2(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_2(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_1(integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
(0 rows)
TRUNCATE TABLE testnested_table;
CREATE OR REPLACE FUNCTION force_push_inner(y int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_outer(x int)
RETURNS void
AS $$
DECLARE
BEGIN
PERFORM FROM forcepushdown_schema.force_push_inner(x);
INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'force_push_inner(int)', 'y',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function(
'force_push_outer(int)', 'x',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
BEGIN;
SELECT force_push_outer(7);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1)"
PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at SQL statement
while executing command on localhost:xxxxx
END;
TABLE testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
(0 rows)
CREATE OR REPLACE FUNCTION force_push_inner(y int)
RETURNS void
AS $$
DECLARE
BEGIN
RAISE NOTICE '%', y;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_outer(x int)
RETURNS void
AS $$
DECLARE
BEGIN
PERFORM FROM forcepushdown_schema.force_push_inner(x+1);
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
BEGIN;
SELECT force_push_outer(9);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
NOTICE: 10
DETAIL: from localhost:xxxxx
force_push_outer
---------------------------------------------------------------------
(1 row)
END;
TABLE testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
9 | 9
(1 row)
RESET client_min_messages;
SET citus.log_remote_commands TO off;
DROP SCHEMA forcepushdown_schema CASCADE;
NOTICE: drop cascades to 38 other objects
NOTICE: drop cascades to 46 other objects
DETAIL: drop cascades to table test_forcepushdown
drop cascades to table test_forcepushdown_noncolocate
drop cascades to function insert_data(integer)
@ -1467,3 +1980,11 @@ drop cascades to function test_prepare(integer,integer)
drop cascades to function outer_test_prepare(integer,integer)
drop cascades to table test_perform
drop cascades to function test(integer)
drop cascades to table testnested_table
drop cascades to function inner_fn(integer)
drop cascades to function outer_fn(integer,integer)
drop cascades to function force_push_inner(integer)
drop cascades to function force_push_outer(integer)
drop cascades to function force_push_1(integer)
drop cascades to function force_push_2(integer)
drop cascades to function force_push_3(integer)

View File

@ -0,0 +1,321 @@
CREATE SCHEMA function_propagation_schema;
SET search_path TO 'function_propagation_schema';
-- Check whether supported dependencies can be distributed while propagating functions
-- Check types
SET citus.enable_metadata_sync TO OFF;
CREATE TYPE function_prop_type AS (a int, b int);
RESET citus.enable_metadata_sync;
CREATE OR REPLACE FUNCTION func_1(param_1 function_prop_type)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
-- Check that the function and all of its dependent objects are distributed on all nodes
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(schema,{function_propagation_schema},{})
(1 row)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(type,{function_propagation_schema.function_prop_type},{})
(1 row)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (schema,{function_propagation_schema},{})
localhost | 57638 | t | (schema,{function_propagation_schema},{})
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (type,{function_propagation_schema.function_prop_type},{})
localhost | 57638 | t | (type,{function_propagation_schema.function_prop_type},{})
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type})
localhost | 57638 | t | (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type})
(2 rows)
SET citus.enable_metadata_sync TO OFF;
CREATE TYPE function_prop_type_2 AS (a int, b int);
RESET citus.enable_metadata_sync;
CREATE OR REPLACE FUNCTION func_2(param_1 int)
RETURNS function_prop_type_2
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(type,{function_propagation_schema.function_prop_type_2},{})
(1 row)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_2}",{integer})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (type,{function_propagation_schema.function_prop_type_2},{})
localhost | 57638 | t | (type,{function_propagation_schema.function_prop_type_2},{})
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_2}",{integer})
localhost | 57638 | t | (function,"{function_propagation_schema,func_2}",{integer})
(2 rows)
-- Have a separate check for a type created in a transaction
BEGIN;
CREATE TYPE function_prop_type_3 AS (a int, b int);
COMMIT;
-- Objects referenced only in the function body are not found as dependencies
CREATE OR REPLACE FUNCTION func_3(param_1 int)
RETURNS int
LANGUAGE plpgsql AS
$$
DECLARE
internal_param1 function_prop_type_3;
BEGIN
return 1;
END;
$$;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_3'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_3}",{integer})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_3}",{integer})
localhost | 57638 | t | (function,"{function_propagation_schema,func_3}",{integer})
(2 rows)
-- Check table
CREATE TABLE function_prop_table(a int, b int);
-- A non-distributed table is not distributed as a dependency
CREATE OR REPLACE FUNCTION func_4(param_1 function_prop_table)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
WARNING: Citus can't distribute function "func_4" having dependency on non-distributed relation "function_prop_table"
DETAIL: Function will be created only locally
HINT: To distribute function, distribute dependent relations first. Then, re-create the function
CREATE OR REPLACE FUNCTION func_5(param_1 int)
RETURNS function_prop_table
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
WARNING: Citus can't distribute function "func_5" having dependency on non-distributed relation "function_prop_table"
DETAIL: Function will be created only locally
HINT: To distribute function, distribute dependent relations first. Then, re-create the function
-- Functions can be created with a distributed table dependency
SELECT create_distributed_table('function_prop_table', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE OR REPLACE FUNCTION func_6(param_1 function_prop_table)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table})
localhost | 57638 | t | (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table})
(2 rows)
-- Views are not supported
CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table;
CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
WARNING: Citus can't distribute functions having dependency on unsupported object of type "view"
DETAIL: Function will be created only locally
CREATE OR REPLACE FUNCTION func_8(param_1 int)
RETURNS function_prop_view
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
WARNING: Citus can't distribute functions having dependency on unsupported object of type "view"
DETAIL: Function will be created only locally
-- Check within transaction
BEGIN;
CREATE TYPE type_in_transaction AS (a int, b int);
CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
-- Functions created within a transaction are not distributed
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)
COMMIT;
-- Show that recreating it outside a transaction distributes the function and its dependencies
CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(type,{function_propagation_schema.type_in_transaction},{})
(1 row)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (type,{function_propagation_schema.type_in_transaction},{})
localhost | 57638 | t | (type,{function_propagation_schema.type_in_transaction},{})
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction})
localhost | 57638 | t | (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction})
(2 rows)
-- Test for SQL function with unsupported object in function body
CREATE TABLE table_in_sql_body(id int);
CREATE FUNCTION max_of_table()
RETURNS int
LANGUAGE SQL AS
$$
SELECT max(id) FROM table_in_sql_body
$$;
-- Show that only the function has propagated, since the table is not resolved as a dependency
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regclass::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,max_of_table}",{})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,max_of_table}",{})
localhost | 57638 | t | (function,"{function_propagation_schema,max_of_table}",{})
(2 rows)
-- Check an extension-owned table
CREATE TABLE extension_owned_table(a int);
SELECT run_command_on_workers($$
CREATE TABLE function_propagation_schema.extension_owned_table(a int);
$$
);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE TABLE")
(localhost,57638,t,"CREATE TABLE")
(2 rows)
CREATE EXTENSION seg;
ALTER EXTENSION seg ADD TABLE extension_owned_table;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
SELECT run_command_on_workers($$
ALTER EXTENSION seg ADD TABLE function_propagation_schema.extension_owned_table;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"ALTER EXTENSION")
(localhost,57638,t,"ALTER EXTENSION")
(2 rows)
CREATE OR REPLACE FUNCTION func_for_ext_check(param_1 extension_owned_table)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
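Unlike the earlier cases, the output above does not include a pg_dist_object check for func_for_ext_check. A follow-up query in the same style as the rest of this file (an illustrative addition, not part of the original test) would be:
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_for_ext_check'::regproc::oid;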
RESET search_path;
SET client_min_messages TO WARNING;
DROP SCHEMA function_propagation_schema CASCADE;

View File

@ -6,7 +6,9 @@ CREATE SCHEMA dist_schema;
CREATE TABLE dist_schema.dist_table (id int);
CREATE SCHEMA another_dist_schema;
CREATE TABLE another_dist_schema.dist_table (id int);
SET citus.enable_ddl_propagation TO off;
CREATE SCHEMA non_dist_schema;
SET citus.enable_ddl_propagation TO on;
-- create roles on all nodes
SELECT run_command_on_coordinator_and_workers('CREATE USER role_1');
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
@ -193,24 +195,9 @@ SET citus.enable_alter_role_propagation TO ON;
ALTER ROLE role_1 NOSUPERUSER;
SET citus.enable_alter_role_propagation TO OFF;
DROP TABLE dist_schema.dist_table, another_dist_schema.dist_table;
SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema');
run_command_on_coordinator_and_workers
---------------------------------------------------------------------
(1 row)
SELECT run_command_on_coordinator_and_workers('DROP SCHEMA another_dist_schema');
run_command_on_coordinator_and_workers
---------------------------------------------------------------------
(1 row)
SELECT run_command_on_coordinator_and_workers('DROP SCHEMA non_dist_schema');
run_command_on_coordinator_and_workers
---------------------------------------------------------------------
(1 row)
DROP SCHEMA dist_schema;
DROP SCHEMA another_dist_schema;
DROP SCHEMA non_dist_schema;
-- test if the grantors are propagated correctly
-- first remove one of the worker nodes
SET citus.shard_replication_factor TO 1;
@ -319,12 +306,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER
\c - - - :master_port
DROP TABLE grantor_schema.grantor_table;
SELECT run_command_on_coordinator_and_workers('DROP SCHEMA grantor_schema CASCADE');
run_command_on_coordinator_and_workers
---------------------------------------------------------------------
(1 row)
DROP SCHEMA grantor_schema CASCADE;
-- test distributing the schema with another user
CREATE SCHEMA dist_schema;
GRANT ALL ON SCHEMA dist_schema TO role_1 WITH GRANT OPTION;
@ -352,12 +334,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY
\c - - - :master_port
DROP TABLE dist_schema.dist_table;
SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema CASCADE');
run_command_on_coordinator_and_workers
---------------------------------------------------------------------
(1 row)
DROP SCHEMA dist_schema CASCADE;
-- test grants on public schema
-- first remove one of the worker nodes
SET citus.shard_replication_factor TO 1;

View File

@ -15,16 +15,16 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s3-begin:
BEGIN;
BEGIN;
step s1-alter-table:
ALTER TABLE test_table ADD COLUMN x INT;
step s2-sleep:
SELECT pg_sleep(0.5);
SELECT pg_sleep(0.5);
pg_sleep
---------------------------------------------------------------------
@ -32,7 +32,7 @@ pg_sleep
(1 row)
step s2-view-dist:
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC;
query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
@ -42,7 +42,7 @@ query |query_hostname |query_hostport|d
(1 row)
step s3-view-worker:
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
@ -61,13 +61,13 @@ SELECT worker_apply_shard_ddl_command (1300001, 'public', '
(4 rows)
step s2-rollback:
ROLLBACK;
ROLLBACK;
step s1-commit:
COMMIT;
step s3-rollback:
ROLLBACK;
ROLLBACK;
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-insert s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
@ -85,16 +85,16 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s3-begin:
BEGIN;
BEGIN;
step s1-insert:
INSERT INTO test_table VALUES (100, 100);
INSERT INTO test_table VALUES (100, 100);
step s2-sleep:
SELECT pg_sleep(0.5);
SELECT pg_sleep(0.5);
pg_sleep
---------------------------------------------------------------------
@ -102,17 +102,17 @@ pg_sleep
(1 row)
step s2-view-dist:
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC;
query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
INSERT INTO test_table VALUES (100, 100);
INSERT INTO test_table VALUES (100, 100);
|coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression
(1 row)
step s3-view-worker:
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
@ -120,13 +120,13 @@ INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)|local
(1 row)
step s2-rollback:
ROLLBACK;
ROLLBACK;
step s1-commit:
COMMIT;
step s3-rollback:
ROLLBACK;
ROLLBACK;
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
@ -144,10 +144,10 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s3-begin:
BEGIN;
BEGIN;
step s1-select:
SELECT count(*) FROM test_table;
@ -158,7 +158,7 @@ count
(1 row)
step s2-sleep:
SELECT pg_sleep(0.5);
SELECT pg_sleep(0.5);
pg_sleep
---------------------------------------------------------------------
@ -166,7 +166,7 @@ pg_sleep
(1 row)
step s2-view-dist:
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC;
query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
@ -176,7 +176,7 @@ query |query_hostname |query_hostport|distribute
(1 row)
step s3-view-worker:
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
@ -187,13 +187,13 @@ SELECT count(*) AS count FROM public.test_table_1300011 test_table WHERE true|lo
(4 rows)
step s2-rollback:
ROLLBACK;
ROLLBACK;
step s1-commit:
COMMIT;
step s3-rollback:
ROLLBACK;
ROLLBACK;
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select-router s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
@ -211,10 +211,10 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s3-begin:
BEGIN;
BEGIN;
step s1-select-router:
SELECT count(*) FROM test_table WHERE column1 = 55;
@ -225,7 +225,7 @@ count
(1 row)
step s2-sleep:
SELECT pg_sleep(0.5);
SELECT pg_sleep(0.5);
pg_sleep
---------------------------------------------------------------------
@ -233,7 +233,7 @@ pg_sleep
(1 row)
step s2-view-dist:
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC;
query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
@ -243,7 +243,7 @@ query |query_hostname |query_
(1 row)
step s3-view-worker:
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
@ -251,11 +251,11 @@ SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column
(1 row)
step s2-rollback:
ROLLBACK;
ROLLBACK;
step s1-commit:
COMMIT;
step s3-rollback:
ROLLBACK;
ROLLBACK;

@ -94,7 +94,8 @@ step s1-verify-current-xact-is-on-worker:
get_current_transaction_id() as xact,
run_command_on_workers($$
SELECT row(initiator_node_identifier, transaction_number)
FROM get_all_active_transactions();
FROM get_all_active_transactions()
WHERE transaction_number != 0;
$$) as remote
ORDER BY remote.nodeport ASC;

@ -1,6 +1,6 @@
Parsed test spec with 3 sessions
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -164,22 +164,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -187,7 +171,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -357,22 +341,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -380,7 +348,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -550,22 +518,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -573,7 +525,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -739,22 +691,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -762,7 +698,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -934,22 +870,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -957,7 +877,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -1129,22 +1049,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -1152,7 +1056,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s1-add-worker s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s1-add-worker s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -1324,22 +1228,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -1347,7 +1235,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -1534,22 +1422,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -1557,7 +1429,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s3-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s3-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -1753,22 +1625,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -1776,7 +1632,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -1932,22 +1788,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -1955,7 +1795,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -2110,22 +1950,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -2133,7 +1957,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -2309,22 +2133,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -2332,7 +2140,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -2508,22 +2316,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -2531,7 +2323,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -2714,22 +2506,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------
@ -2737,7 +2513,7 @@ master_remove_node
(2 rows)
starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas
starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
?column?
---------------------------------------------------------------------
1
@ -2922,22 +2698,6 @@ step s3-drop-coordinator-schemas:
DROP SCHEMA IF EXISTS myschema CASCADE;
DROP SCHEMA IF EXISTS myschema2 CASCADE;
step s3-drop-worker-schemas:
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$);
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
master_remove_node
---------------------------------------------------------------------

@ -28,7 +28,7 @@ step s1-print:
count
---------------------------------------------------------------------
4
7
(1 row)
extname|extversion|nspname
@ -89,7 +89,7 @@ step s1-print:
count
---------------------------------------------------------------------
4
7
(1 row)
extname|extversion|nspname
@ -158,7 +158,7 @@ step s1-print:
count
---------------------------------------------------------------------
3
6
(1 row)
extname|extversion|nspname
@ -214,7 +214,7 @@ step s1-print:
count
---------------------------------------------------------------------
5
7
(1 row)
extname|extversion|nspname
@ -275,7 +275,7 @@ step s1-print:
count
---------------------------------------------------------------------
4
6
(1 row)
extname|extversion|nspname

@ -35,8 +35,8 @@ step s2-begin-insert:
step s3-as-admin:
-- Admin should be able to see all transactions
SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions();
SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0;
SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0;
count
---------------------------------------------------------------------
@ -51,8 +51,8 @@ count
step s3-as-user-1:
-- User should only be able to see its own transactions
SET ROLE test_user_1;
SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions();
SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0;
SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0;
count
---------------------------------------------------------------------
@ -67,8 +67,8 @@ count
step s3-as-readonly:
-- Other user should not see transactions
SET ROLE test_readonly;
SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions();
SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0;
SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0;
count
---------------------------------------------------------------------
@ -83,8 +83,8 @@ count
step s3-as-monitor:
-- Monitor should see all transactions
SET ROLE test_monitor;
SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions();
SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0;
SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0;
count
---------------------------------------------------------------------

@ -0,0 +1,145 @@
Parsed test spec with 2 sessions
starting permutation: s1-start-session-level-connection s1-worker-begin s1-worker-select s2-coordinator-citus_dist_stat_activity s2-coordinator-citus_worker_stat_activity s1-worker-commit s1-stop-session-level-connection
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s1-worker-begin:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s1-worker-select:
SELECT run_commands_on_session_level_connection_to_node('SET citus.enable_local_execution TO off; SET citus.force_max_query_parallelization TO ON; SELECT * FROM dist_table');
run_commands_on_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s2-coordinator-citus_dist_stat_activity:
SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' and query NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
?column?
---------------------------------------------------------------------
t
(1 row)
step s2-coordinator-citus_worker_stat_activity:
SELECT query FROM citus_worker_stat_activity() WHERE global_pid IN (
SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'
)
ORDER BY 1;
query
---------------------------------------------------------------------
SELECT a, b FROM public.dist_table_12345000 dist_table WHERE true
SELECT a, b FROM public.dist_table_12345001 dist_table WHERE true
SELECT a, b FROM public.dist_table_12345002 dist_table WHERE true
SELECT a, b FROM public.dist_table_12345003 dist_table WHERE true
(4 rows)
step s1-worker-commit:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s1-stop-session-level-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-coordinator-begin s1-coordinator-select s2-coordinator-citus_dist_stat_activity s2-coordinator-citus_worker_stat_activity s2-coordinator-get_all_active_transactions s2-coordinator-get_global_active_transactions s1-coordinator-commit
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-coordinator-begin:
BEGIN;
step s1-coordinator-select:
SET citus.enable_local_execution TO off;
SET citus.force_max_query_parallelization TO ON;
SELECT * FROM dist_table;
a|b
---------------------------------------------------------------------
(0 rows)
step s2-coordinator-citus_dist_stat_activity:
SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' and query NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
?column?
---------------------------------------------------------------------
t
(1 row)
step s2-coordinator-citus_worker_stat_activity:
SELECT query FROM citus_worker_stat_activity() WHERE global_pid IN (
SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'
)
ORDER BY 1;
query
---------------------------------------------------------------------
SELECT a, b FROM public.dist_table_12345000 dist_table WHERE true
SELECT a, b FROM public.dist_table_12345001 dist_table WHERE true
SELECT a, b FROM public.dist_table_12345002 dist_table WHERE true
SELECT a, b FROM public.dist_table_12345003 dist_table WHERE true
(4 rows)
step s2-coordinator-get_all_active_transactions:
SELECT count(*) FROM get_all_active_transactions() WHERE global_pid IN (
SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'
);
count
---------------------------------------------------------------------
1
(1 row)
step s2-coordinator-get_global_active_transactions:
SELECT count(*) FROM get_global_active_transactions() WHERE global_pid IN (
SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'
)
AND transaction_number != 0;
count
---------------------------------------------------------------------
5
(1 row)
step s1-coordinator-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

@ -0,0 +1,716 @@
unused step name: s3-debug
Parsed test spec with 3 sessions
starting permutation: s3-compare-snapshot
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-start-metadata-sync-to-same-node s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-start-metadata-sync-to-same-node:
SELECT start_metadata_sync_to_node('localhost', 57638);
<waiting ...>
step s1-commit:
COMMIT;
step s2-start-metadata-sync-to-same-node: <... completed>
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-start-metadata-sync-to-another-node s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-start-metadata-sync-to-another-node:
SELECT start_metadata_sync_to_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;
step s2-start-metadata-sync-to-another-node: <... completed>
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-alter-table s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-alter-table:
ALTER TABLE dist_table ADD COLUMN z int;
<waiting ...>
step s1-commit:
COMMIT;
step s2-alter-table: <... completed>
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-drop-table s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-drop-table:
DROP TABLE dist_table;
<waiting ...>
step s1-commit:
COMMIT;
step s2-drop-table: <... completed>
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-create-dist-table s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-create-dist-table:
CREATE TABLE new_dist_table(id int, data int);
SELECT create_distributed_table('new_dist_table', 'id');
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-dist-table: <... completed>
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-create-ref-table s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-create-ref-table:
CREATE TABLE new_ref_table(id int, data int);
SELECT create_reference_table('new_ref_table');
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-ref-table: <... completed>
create_reference_table
---------------------------------------------------------------------
(1 row)
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-attach-partition s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-attach-partition:
ALTER TABLE dist_partitioned_table ATTACH PARTITION dist_partitioned_table_p1 FOR VALUES FROM (1) TO (9);
<waiting ...>
step s1-commit:
COMMIT;
step s2-attach-partition: <... completed>
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s2-attach-partition s1-begin s2-begin s1-start-metadata-sync s2-detach-partition s1-commit s2-commit s3-compare-snapshot
step s2-attach-partition:
ALTER TABLE dist_partitioned_table ATTACH PARTITION dist_partitioned_table_p1 FOR VALUES FROM (1) TO (9);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-detach-partition:
ALTER TABLE dist_partitioned_table DETACH PARTITION dist_partitioned_table_p1;
<waiting ...>
step s1-commit:
COMMIT;
step s2-detach-partition: <... completed>
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s2-attach-partition s1-begin s2-begin s1-start-metadata-sync s2-create-partition-of s1-commit s2-commit s3-compare-snapshot
step s2-attach-partition:
ALTER TABLE dist_partitioned_table ATTACH PARTITION dist_partitioned_table_p1 FOR VALUES FROM (1) TO (9);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-create-partition-of:
CREATE TABLE dist_partitioned_table_p2 PARTITION OF dist_partitioned_table FOR VALUES FROM (10) TO (20);
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-partition-of: <... completed>
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-add-fk s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-add-fk:
ALTER TABLE dist_table ADD CONSTRAINT y_fk FOREIGN KEY (y) REFERENCES ref_table(y);
<waiting ...>
step s1-commit:
COMMIT;
step s2-add-fk: <... completed>
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s2-add-fk s1-begin s2-begin s1-start-metadata-sync s2-drop-fk s1-commit s2-commit s3-compare-snapshot
step s2-add-fk:
ALTER TABLE dist_table ADD CONSTRAINT y_fk FOREIGN KEY (y) REFERENCES ref_table(y);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-drop-fk:
ALTER TABLE dist_table DROP CONSTRAINT y_fk;
<waiting ...>
step s1-commit:
COMMIT;
step s2-drop-fk: <... completed>
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-create-type s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-create-type:
CREATE TYPE my_type AS (a int, b int);
step s1-commit:
COMMIT;
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)
starting permutation: s1-begin s2-begin s1-start-metadata-sync s2-create-dist-func s1-commit s2-commit s3-compare-snapshot
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-start-metadata-sync:
SELECT start_metadata_sync_to_node('localhost', 57638);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
step s2-create-dist-func:
CREATE FUNCTION squares(int) RETURNS SETOF RECORD
AS $$ SELECT i, i * i FROM generate_series(1, $1) i $$
LANGUAGE SQL;
SELECT create_distributed_function('squares(int)');
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-dist-func: <... completed>
create_distributed_function
---------------------------------------------------------------------
(1 row)
step s2-commit:
COMMIT;
step s3-compare-snapshot:
SELECT count(*) = 0 AS same_metadata_in_workers
FROM
(
(
SELECT unnest(activate_node_snapshot())
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
)
UNION
(
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT activate_node_snapshot()$$)
EXCEPT
SELECT unnest(activate_node_snapshot())
)
) AS foo;
same_metadata_in_workers
---------------------------------------------------------------------
t
(1 row)

@ -83,14 +83,17 @@ pg_sleep
(1 row)
step s2-view-dist:
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' AND query NOT ILIKE '%pg_isolation_test_session_is_blocked%' AND query NOT ILIKE '%BEGIN%' ORDER BY query DESC;
query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
SELECT check_distributed_deadlocks();
|coordinator_host| 57636| | 0|idle |Client |ClientRead|postgres|regression
update ref_table set a = a + 1;
|coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression
(1 row)
|coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression
(2 rows)
step s2-view-worker:
SELECT query, query_hostname, query_hostport, distributed_query_host_name,
@ -101,7 +104,7 @@ step s2-view-worker:
query NOT ILIKE '%dump_local_wait_edges%'
ORDER BY query, query_hostport DESC;
query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression
UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression
@ -136,8 +139,8 @@ step s1-update-ref-table:
step s2-active-transactions:
-- Admin should be able to see all transactions
SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions();
SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0;
SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0;
count
---------------------------------------------------------------------

@ -1602,6 +1602,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
(1 row)
DROP SCHEMA local_dist_join_mixed CASCADE;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed schema is run. To make sure subsequent commands see the schema correctly we need to make sure to use only one connection for all future commands
NOTICE: drop cascades to 7 other objects
DETAIL: drop cascades to table distributed
drop cascades to table reference

Some files were not shown because too many files have changed in this diff.