Merge branch 'master' into fix/subscript-jsonb

pull/5692/head
Onur Tirtir 2022-02-22 01:25:12 +03:00 committed by GitHub
commit 8e607328a0
207 changed files with 7184 additions and 1288 deletions

View File

@@ -131,8 +131,6 @@ cd build
cmake ..
make -j5
sudo make install
-# Optionally, you might instead want to use `sudo make install-all`
-# since `multi_extension` regression test would fail due to missing downgrade scripts.
cd ../..
git clone https://github.com/citusdata/tools.git

View File

@@ -156,9 +156,9 @@ git merge "community/$PR_BRANCH"
familiar with the change.
5. You should rerun the `check-merge-to-enterprise` check on
`community/$PR_BRANCH`. You can use re-run from failed option in circle CI.
-6. You can now merge the PR on enterprise. Be sure to NOT use "squash and merge",
+6. You can now merge the PR on community. Be sure to NOT use "squash and merge",
but instead use the regular "merge commit" mode.
-7. You can now merge the PR on community. Be sure to NOT use "squash and merge",
+7. You can now merge the PR on enterprise. Be sure to NOT use "squash and merge",
but instead use the regular "merge commit" mode.
The subsequent PRs on community will be able to pass the

View File

@@ -66,10 +66,10 @@ fi
git merge --abort
# If we have a conflict on enterprise merge on the master branch, we have a problem.
-# Provide an error message to indicate that enterprise merge is needed.
+# Provide an error message to indicate that enterprise merge is needed to fix this check.
if [[ $PR_BRANCH = master ]]; then
-echo "ERROR: Master branch has merge conlicts with enterprise-master."
-echo "Try re-running this job if you merged community PR before enterprise PR. Otherwise conflicts need to be resolved as a separate PR on enterprise."
+echo "ERROR: Master branch has merge conflicts with enterprise-master."
+echo "Try re-running this CI job after merging your changes into enterprise-master."
exit 1
fi

View File

@@ -24,10 +24,12 @@
#include "catalog/pg_am.h"
#include "catalog/pg_publication.h"
#include "catalog/pg_trigger.h"
+#include "catalog/pg_extension.h"
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
+#include "commands/extension.h"
#include "executor/executor.h"
#include "nodes/makefuncs.h"
#include "optimizer/plancat.h"
@@ -154,6 +156,20 @@ static void ColumnarReadMissingRowsIntoIndex(TableScanDesc scan, Relation indexR
static ItemPointerData TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort,
ItemPointer targetItemPointer);
+/* functions for CheckCitusColumnarVersion */
+static bool CheckAvailableVersionColumnar(int elevel);
+static bool CheckInstalledVersionColumnar(int elevel);
+static char * AvailableExtensionVersionColumnar(void);
+static char * InstalledExtensionVersionColumnar(void);
+static bool CitusColumnarHasBeenLoadedInternal(void);
+static bool CitusColumnarHasBeenLoaded(void);
+static bool CheckCitusColumnarVersion(int elevel);
+static bool MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion);
+/* global variables for CheckCitusColumnarVersion */
+static bool extensionLoadedColumnar = false;
+static bool EnableVersionChecksColumnar = true;
+static bool citusVersionKnownCompatibleColumnar = false;
/* Custom tuple slot ops used for columnar. Initialized in columnar_tableam_init(). */
static TupleTableSlotOps TTSOpsColumnar;
@@ -171,7 +187,7 @@ columnar_beginscan(Relation relation, Snapshot snapshot,
ParallelTableScanDesc parallel_scan,
uint32 flags)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
int natts = relation->rd_att->natts;
@@ -194,6 +210,7 @@ columnar_beginscan_extended(Relation relation, Snapshot snapshot,
ParallelTableScanDesc parallel_scan,
uint32 flags, Bitmapset *attr_needed, List *scanQual)
{
+CheckCitusColumnarVersion(ERROR);
Oid relfilenode = relation->rd_node.relNode;
/*
@@ -418,7 +435,7 @@ columnar_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
static IndexFetchTableData *
columnar_index_fetch_begin(Relation rel)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
Oid relfilenode = rel->rd_node.relNode;
if (PendingWritesInUpperTransactions(relfilenode, GetCurrentSubTransactionId()))
@@ -643,7 +660,7 @@ static bool
columnar_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
Snapshot snapshot)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
uint64 rowNumber = tid_to_row_number(slot->tts_tid);
StripeMetadata *stripeMetadata = FindStripeByRowNumber(rel, rowNumber, snapshot);
@@ -656,7 +673,7 @@ static TransactionId
columnar_index_delete_tuples(Relation rel,
TM_IndexDeleteOp *delstate)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
/*
* XXX: We didn't bother implementing index_delete_tuple for neither of
@@ -717,7 +734,7 @@ static void
columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
int options, BulkInsertState bistate)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
/*
* columnar_init_write_state allocates the write state in a longer
@@ -765,7 +782,7 @@ static void
columnar_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
CommandId cid, int options, BulkInsertState bistate)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
ColumnarWriteState *writeState = columnar_init_write_state(relation,
RelationGetDescr(relation),
@@ -841,7 +858,7 @@ columnar_relation_set_new_filenode(Relation rel,
TransactionId *freezeXid,
MultiXactId *minmulti)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
if (persistence == RELPERSISTENCE_UNLOGGED)
{
@@ -878,8 +895,7 @@ columnar_relation_set_new_filenode(Relation rel,
static void
columnar_relation_nontransactional_truncate(Relation rel)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
RelFileNode relfilenode = rel->rd_node;
NonTransactionDropWriteState(relfilenode.relNode);
@@ -926,7 +942,7 @@ columnar_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
double *tups_vacuumed,
double *tups_recently_dead)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
TupleDesc sourceDesc = RelationGetDescr(OldHeap);
TupleDesc targetDesc = RelationGetDescr(NewHeap);
@@ -1024,7 +1040,7 @@ static void
columnar_vacuum_rel(Relation rel, VacuumParams *params,
BufferAccessStrategy bstrategy)
{
-if (!CheckCitusVersion(WARNING))
+if (!CheckCitusColumnarVersion(WARNING))
{
/*
* Skip if the extension catalogs are not up-to-date, but avoid
@@ -1342,7 +1358,7 @@ columnar_index_build_range_scan(Relation columnarRelation,
void *callback_state,
TableScanDesc scan)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
if (start_blockno != 0 || numblocks != InvalidBlockNumber)
{
@@ -1592,7 +1608,7 @@ columnar_index_validate_scan(Relation columnarRelation,
ValidateIndexState *
validateIndexState)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
ColumnarReportTotalVirtualBlocks(columnarRelation, snapshot,
PROGRESS_SCAN_BLOCKS_TOTAL);
@@ -1764,7 +1780,7 @@ TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetIt
static uint64
columnar_relation_size(Relation rel, ForkNumber forkNumber)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
uint64 nblocks = 0;
@@ -1791,7 +1807,7 @@ columnar_relation_size(Relation rel, ForkNumber forkNumber)
static bool
columnar_relation_needs_toast_table(Relation rel)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
return false;
}
@@ -1802,8 +1818,7 @@ columnar_estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples,
double *allvisfrac)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
RelationOpenSmgr(rel);
*pages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM);
*tuples = ColumnarTableRowCount(rel);
@@ -1910,6 +1925,15 @@ columnar_tableam_init()
TTSOpsColumnar = TTSOpsVirtual;
TTSOpsColumnar.copy_heap_tuple = ColumnarSlotCopyHeapTuple;
+DefineCustomBoolVariable(
+"columnar.enable_version_checks",
+gettext_noop("Enables Version Check for Columnar"),
+NULL,
+&EnableVersionChecksColumnar,
+true,
+PGC_USERSET,
+GUC_NO_SHOW_ALL,
+NULL, NULL, NULL);
}
@@ -1968,7 +1992,7 @@ ColumnarTableDropHook(Oid relid)
if (IsColumnarTableAmTable(relid))
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
/*
* Drop metadata. No need to drop storage here since for
@@ -2093,8 +2117,7 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
if (rel->rd_tableam == GetColumnarTableAmRoutine())
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
if (!ColumnarSupportsIndexAM(indexStmt->accessMethod))
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -2316,7 +2339,7 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_set);
Datum
alter_columnar_table_set(PG_FUNCTION_ARGS)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
Oid relationId = PG_GETARG_OID(0);
@@ -2440,7 +2463,7 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_reset);
Datum
alter_columnar_table_reset(PG_FUNCTION_ARGS)
{
-CheckCitusVersion(ERROR);
+CheckCitusColumnarVersion(ERROR);
Oid relationId = PG_GETARG_OID(0);
@@ -2579,3 +2602,318 @@ downgrade_columnar_storage(PG_FUNCTION_ARGS)
table_close(rel, AccessExclusiveLock);
PG_RETURN_VOID();
}
/*
* Code to check the Citus Version, helps remove dependency from Citus
*/
/*
* CitusColumnarHasBeenLoaded returns true if the citus extension has been created
* in the current database and the extension script has been executed. Otherwise,
* it returns false. The result is cached as this is called very frequently.
*/
bool
CitusColumnarHasBeenLoaded(void)
{
if (!extensionLoadedColumnar || creating_extension)
{
/*
* Refresh if we have not determined whether the extension has been
* loaded yet, or in case of ALTER EXTENSION since we want to treat
* Citus as "not loaded" during ALTER EXTENSION citus.
*/
bool extensionLoaded = CitusColumnarHasBeenLoadedInternal();
extensionLoadedColumnar = extensionLoaded;
}
return extensionLoadedColumnar;
}
/*
* CitusColumnarHasBeenLoadedInternal returns true if the citus extension has been created
* in the current database and the extension script has been executed. Otherwise,
* it returns false.
*/
static bool
CitusColumnarHasBeenLoadedInternal(void)
{
if (IsBinaryUpgrade)
{
/* never use Citus logic during pg_upgrade */
return false;
}
Oid citusExtensionOid = get_extension_oid("citus", true);
if (citusExtensionOid == InvalidOid)
{
/* Citus extension does not exist yet */
return false;
}
if (creating_extension && CurrentExtensionObject == citusExtensionOid)
{
/*
* We do not use Citus hooks during CREATE/ALTER EXTENSION citus
* since the objects used by the C code might be not be there yet.
*/
return false;
}
/* citus extension exists and has been created */
return true;
}
/*
* CheckCitusColumnarVersion checks whether there is a version mismatch between the
* available version and the loaded version or between the installed version
* and the loaded version. Returns true if compatible, false otherwise.
*
* As a side effect, this function also sets citusVersionKnownCompatible_Columnar global
* variable to true which reduces version check cost of next calls.
*/
bool
CheckCitusColumnarVersion(int elevel)
{
if (citusVersionKnownCompatibleColumnar ||
!CitusColumnarHasBeenLoaded() ||
!EnableVersionChecksColumnar)
{
return true;
}
if (CheckAvailableVersionColumnar(elevel) && CheckInstalledVersionColumnar(elevel))
{
citusVersionKnownCompatibleColumnar = true;
return true;
}
else
{
return false;
}
}
/*
* CheckAvailableVersion compares CITUS_EXTENSIONVERSION and the currently
* available version from the citus.control file. If they are not compatible,
* this function logs an error with the specified elevel and returns false,
* otherwise it returns true.
*/
bool
CheckAvailableVersionColumnar(int elevel)
{
if (!EnableVersionChecksColumnar)
{
return true;
}
char *availableVersion = AvailableExtensionVersionColumnar();
if (!MajorVersionsCompatibleColumnar(availableVersion, CITUS_EXTENSIONVERSION))
{
ereport(elevel, (errmsg("loaded Citus library version differs from latest "
"available extension version"),
errdetail("Loaded library requires %s, but the latest control "
"file specifies %s.", CITUS_MAJORVERSION,
availableVersion),
errhint("Restart the database to load the latest Citus "
"library.")));
pfree(availableVersion);
return false;
}
pfree(availableVersion);
return true;
}
/*
* CheckInstalledVersion compares CITUS_EXTENSIONVERSION and the
* extension's current version from the pg_extension catalog table. If they
* are not compatible, this function logs an error with the specified elevel,
* otherwise it returns true.
*/
static bool
CheckInstalledVersionColumnar(int elevel)
{
Assert(CitusColumnarHasBeenLoaded());
Assert(EnableVersionChecksColumnar);
char *installedVersion = InstalledExtensionVersionColumnar();
if (!MajorVersionsCompatibleColumnar(installedVersion, CITUS_EXTENSIONVERSION))
{
ereport(elevel, (errmsg("loaded Citus library version differs from installed "
"extension version"),
errdetail("Loaded library requires %s, but the installed "
"extension version is %s.", CITUS_MAJORVERSION,
installedVersion),
errhint("Run ALTER EXTENSION citus UPDATE and try again.")));
pfree(installedVersion);
return false;
}
pfree(installedVersion);
return true;
}
/*
* MajorVersionsCompatible checks whether both versions are compatible. They
* are if major and minor version numbers match, the schema version is
* ignored. Returns true if compatible, false otherwise.
*/
bool
MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion)
{
const char schemaVersionSeparator = '-';
char *leftSeperatorPosition = strchr(leftVersion, schemaVersionSeparator);
char *rightSeperatorPosition = strchr(rightVersion, schemaVersionSeparator);
int leftComparisionLimit = 0;
int rightComparisionLimit = 0;
if (leftSeperatorPosition != NULL)
{
leftComparisionLimit = leftSeperatorPosition - leftVersion;
}
else
{
leftComparisionLimit = strlen(leftVersion);
}
if (rightSeperatorPosition != NULL)
{
rightComparisionLimit = rightSeperatorPosition - rightVersion;
}
else
{
rightComparisionLimit = strlen(leftVersion);
}
/* we can error out early if hypens are not in the same position */
if (leftComparisionLimit != rightComparisionLimit)
{
return false;
}
return strncmp(leftVersion, rightVersion, leftComparisionLimit) == 0;
}
/*
* AvailableExtensionVersion returns the Citus version from citus.control file. It also
* saves the result, thus consecutive calls to CitusExtensionAvailableVersion will
* not read the citus.control file again.
*/
static char *
AvailableExtensionVersionColumnar(void)
{
LOCAL_FCINFO(fcinfo, 0);
FmgrInfo flinfo;
bool goForward = true;
bool doCopy = false;
char *availableExtensionVersion;
EState *estate = CreateExecutorState();
ReturnSetInfo *extensionsResultSet = makeNode(ReturnSetInfo);
extensionsResultSet->econtext = GetPerTupleExprContext(estate);
extensionsResultSet->allowedModes = SFRM_Materialize;
fmgr_info(F_PG_AVAILABLE_EXTENSIONS, &flinfo);
InitFunctionCallInfoData(*fcinfo, &flinfo, 0, InvalidOid, NULL,
(Node *) extensionsResultSet);
/* pg_available_extensions returns result set containing all available extensions */
(*pg_available_extensions)(fcinfo);
TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat(
extensionsResultSet->setDesc,
&TTSOpsMinimalTuple);
bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward,
doCopy,
tupleTableSlot);
while (hasTuple)
{
bool isNull = false;
Datum extensionNameDatum = slot_getattr(tupleTableSlot, 1, &isNull);
char *extensionName = NameStr(*DatumGetName(extensionNameDatum));
if (strcmp(extensionName, "citus") == 0)
{
Datum availableVersion = slot_getattr(tupleTableSlot, 2, &isNull);
availableExtensionVersion = text_to_cstring(DatumGetTextPP(availableVersion));
ExecClearTuple(tupleTableSlot);
ExecDropSingleTupleTableSlot(tupleTableSlot);
return availableExtensionVersion;
}
ExecClearTuple(tupleTableSlot);
hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward,
doCopy, tupleTableSlot);
}
ExecDropSingleTupleTableSlot(tupleTableSlot);
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension is not found")));
}
/*
* InstalledExtensionVersion returns the Citus version in PostgreSQL pg_extension table.
*/
static char *
InstalledExtensionVersionColumnar(void)
{
ScanKeyData entry[1];
char *installedExtensionVersion = NULL;
Relation relation = table_open(ExtensionRelationId, AccessShareLock);
ScanKeyInit(&entry[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ,
CStringGetDatum("citus"));
SysScanDesc scandesc = systable_beginscan(relation, ExtensionNameIndexId, true,
NULL, 1, entry);
HeapTuple extensionTuple = systable_getnext(scandesc);
/* We assume that there can be at most one matching tuple */
if (HeapTupleIsValid(extensionTuple))
{
int extensionIndex = Anum_pg_extension_extversion;
TupleDesc tupleDescriptor = RelationGetDescr(relation);
bool isNull = false;
Datum installedVersion = heap_getattr(extensionTuple, extensionIndex,
tupleDescriptor, &isNull);
if (isNull)
{
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension version is null")));
}
installedExtensionVersion = text_to_cstring(DatumGetTextPP(installedVersion));
}
else
{
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension is not loaded")));
}
systable_endscan(scandesc);
table_close(relation, AccessShareLock);
return installedExtensionVersion;
}
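
As an aside, the compatibility rule that MajorVersionsCompatibleColumnar above implements (two version strings are treated as compatible when the parts before the '-' schema-version separator are identical) can be shown with a small standalone sketch. The helper and the sample version strings below are illustrative only; this is not the extension code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative reimplementation of the comparison rule: compare the two
 * version strings only up to the '-' schema-version separator.
 */
static bool
VersionsCompatible(const char *left, const char *right)
{
	const char *leftSep = strchr(left, '-');
	const char *rightSep = strchr(right, '-');
	size_t leftLimit = leftSep ? (size_t) (leftSep - left) : strlen(left);
	size_t rightLimit = rightSep ? (size_t) (rightSep - right) : strlen(right);

	if (leftLimit != rightLimit)
	{
		return false;
	}
	return strncmp(left, right, leftLimit) == 0;
}

int
main(void)
{
	/* schema versions differ but major.minor matches => compatible (prints 1) */
	printf("%d\n", VersionsCompatible("10.2-4", "10.2-1"));
	/* minor versions differ => not compatible (prints 0) */
	printf("%d\n", VersionsCompatible("10.2-4", "10.1-1"));
	return 0;
}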

View File

@@ -1059,7 +1059,8 @@ CreateTableConversion(TableConversionParameters *params)
}
relation_close(relation, NoLock);
con->distributionKey =
-BuildDistributionKeyFromColumnName(relation, con->distributionColumn);
+BuildDistributionKeyFromColumnName(con->relationId, con->distributionColumn,
+NoLock);
con->originalAccessMethod = NULL;
if (!PartitionedTable(con->relationId) && !IsForeignTable(con->relationId))
@@ -1175,6 +1176,9 @@ CreateDistributedTableLike(TableConversionState *con)
newShardCount = con->shardCount;
}
+char *distributionColumnName =
+ColumnToColumnName(con->newRelationId, (Node *) newDistributionKey);
Oid originalRelationId = con->relationId;
if (con->originalDistributionKey != NULL && PartitionTable(originalRelationId))
{
@@ -1190,16 +1194,13 @@ CreateDistributedTableLike(TableConversionState *con)
*/
Oid parentRelationId = PartitionParentOid(originalRelationId);
Var *parentDistKey = DistPartitionKeyOrError(parentRelationId);
-char *parentDistKeyColumnName =
-ColumnToColumnName(parentRelationId, nodeToString(parentDistKey));
-newDistributionKey =
-FindColumnWithNameOnTargetRelation(parentRelationId, parentDistKeyColumnName,
-con->newRelationId);
+distributionColumnName =
+ColumnToColumnName(parentRelationId, (Node *) parentDistKey);
}
char partitionMethod = PartitionMethod(con->relationId);
-CreateDistributedTable(con->newRelationId, newDistributionKey, partitionMethod,
+CreateDistributedTable(con->newRelationId, distributionColumnName, partitionMethod,
newShardCount, true, newColocateWith, false);
}

View File

@ -0,0 +1,127 @@
/*-------------------------------------------------------------------------
*
* citus_global_signal.c
* Commands for Citus' overriden versions of pg_cancel_backend
* and pg_terminate_backend statements.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "distributed/backend_data.h"
#include "distributed/metadata_cache.h"
#include "distributed/worker_manager.h"
#include "lib/stringinfo.h"
#include "signal.h"
static bool CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig);
PG_FUNCTION_INFO_V1(pg_cancel_backend);
PG_FUNCTION_INFO_V1(pg_terminate_backend);
/*
* pg_cancel_backend overrides the Postgres' pg_cancel_backend to cancel
* a query with a global pid so a query can be cancelled from another node.
*
* To cancel a query that is on another node, a pg_cancel_backend command is sent
* to that node. This new command is sent with pid instead of global pid, so original
* pg_cancel_backend function is used.
*/
Datum
pg_cancel_backend(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
uint64 pid = PG_GETARG_INT64(0);
int sig = SIGINT;
uint64 timeout = 0;
bool success = CitusSignalBackend(pid, timeout, sig);
PG_RETURN_BOOL(success);
}
/*
* pg_terminate_backend overrides the Postgres' pg_terminate_backend to terminate
* a query with a global pid so a query can be terminated from another node.
*
* To terminate a query that is on another node, a pg_terminate_backend command is sent
* to that node. This new command is sent with pid instead of global pid, so original
* pg_terminate_backend function is used.
*/
Datum
pg_terminate_backend(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
uint64 pid = PG_GETARG_INT64(0);
uint64 timeout = PG_GETARG_INT64(1);
int sig = SIGTERM;
bool success = CitusSignalBackend(pid, timeout, sig);
PG_RETURN_BOOL(success);
}
/*
* CitusSignalBackend gets a global pid and and ends the original query with the global pid
* that might have started in another node by connecting to that node and running either
* pg_cancel_backend or pg_terminate_backend based on the withTerminate argument.
*/
static bool
CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig)
{
Assert((sig == SIGINT) || (sig == SIGTERM));
#if PG_VERSION_NUM < PG_VERSION_14
if (timeout != 0)
{
elog(ERROR, "timeout parameter is only supported on Postgres 14 or later");
}
#endif
int nodeId = ExtractNodeIdFromGlobalPID(globalPID);
int processId = ExtractProcessIdFromGlobalPID(globalPID);
WorkerNode *workerNode = FindNodeWithNodeId(nodeId);
StringInfo cancelQuery = makeStringInfo();
if (sig == SIGINT)
{
appendStringInfo(cancelQuery, "SELECT pg_cancel_backend(%d::integer)", processId);
}
else
{
#if PG_VERSION_NUM >= PG_VERSION_14
appendStringInfo(cancelQuery,
"SELECT pg_terminate_backend(%d::integer, %lu::bigint)",
processId, timeout);
#else
appendStringInfo(cancelQuery, "SELECT pg_terminate_backend(%d::integer)",
processId);
#endif
}
StringInfo queryResult = makeStringInfo();
bool reportResultError = true;
bool success = ExecuteRemoteQueryOrCommand(workerNode->workerName,
workerNode->workerPort, cancelQuery->data,
queryResult, reportResultError);
if (success && queryResult && strcmp(queryResult->data, "f") == 0)
{
success = false;
}
return success;
}
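
For reference, the shape of the command that CitusSignalBackend sends to the worker owning the backend can be sketched standalone. The pid and timeout values below are made-up examples; the format strings simply mirror the ones used above.

#include <stdio.h>

/*
 * Standalone sketch of the remote command built by CitusSignalBackend:
 * SIGINT maps to pg_cancel_backend, SIGTERM to pg_terminate_backend
 * (the two-argument form with a timeout is used on PostgreSQL 14+).
 */
int
main(void)
{
	int processId = 12345;     /* example worker-local pid */
	unsigned long timeout = 0; /* example timeout in milliseconds */
	char command[128];

	/* cancellation (SIGINT) */
	snprintf(command, sizeof(command),
			 "SELECT pg_cancel_backend(%d::integer)", processId);
	printf("%s\n", command);

	/* termination (SIGTERM), two-argument form used on PG 14+ */
	snprintf(command, sizeof(command),
			 "SELECT pg_terminate_backend(%d::integer, %lu::bigint)",
			 processId, timeout);
	printf("%s\n", command);

	return 0;
}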

View File

@@ -37,7 +37,7 @@ static char * CreateCollationDDLInternal(Oid collationId, Oid *collowner,
char **quotedCollationName);
static List * FilterNameListForDistributedCollations(List *objects, bool missing_ok,
List **addresses);
+static bool ShouldPropagateDefineCollationStmt(void);
/*
* GetCreateCollationDDLInternal returns a CREATE COLLATE sql string for the
@@ -519,6 +519,26 @@ DefineCollationStmtObjectAddress(Node *node, bool missing_ok)
}
+/*
+ * PreprocessDefineCollationStmt executed before the collation has been
+ * created locally to ensure that if the collation create statement will
+ * be propagated, the node is a coordinator node
+ */
+List *
+PreprocessDefineCollationStmt(Node *node, const char *queryString,
+ProcessUtilityContext processUtilityContext)
+{
+Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION);
+if (ShouldPropagateDefineCollationStmt())
+{
+EnsureCoordinator();
+}
+return NIL;
+}
/*
* PostprocessDefineCollationStmt executed after the collation has been
* created locally and before we create it on the worker nodes.
@@ -531,16 +551,7 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString)
{
Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION);
-if (!ShouldPropagate())
-{
-return NIL;
-}
-/*
-* If the create collation command is a part of a multi-statement transaction,
-* do not propagate it
-*/
-if (IsMultiStatementTransaction())
+if (!ShouldPropagateDefineCollationStmt())
{
return NIL;
}
@@ -548,13 +559,38 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString)
ObjectAddress collationAddress =
DefineCollationStmtObjectAddress(node, false);
-if (IsObjectDistributed(&collationAddress))
-{
-EnsureCoordinator();
-}
EnsureDependenciesExistOnAllNodes(&collationAddress);
-return NodeDDLTaskList(NON_COORDINATOR_NODES, CreateCollationDDLsIdempotent(
+/* to prevent recursion with mx we disable ddl propagation */
+List *commands = list_make1(DISABLE_DDL_PROPAGATION);
+commands = list_concat(commands, CreateCollationDDLsIdempotent(
collationAddress.objectId));
+commands = lappend(commands, ENABLE_DDL_PROPAGATION);
+return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
+/*
+ * ShouldPropagateDefineCollationStmt checks if collation define
+ * statement should be propagated. Don't propagate if:
+ * - metadata syncing if off
+ * - statement is part of a multi stmt transaction and the multi shard connection
+ * type is not sequential
+ */
+static bool
+ShouldPropagateDefineCollationStmt()
+{
+if (!ShouldPropagate())
+{
+return false;
+}
+if (IsMultiStatementTransaction() &&
+MultiShardConnectionType != SEQUENTIAL_CONNECTION)
+{
+return false;
+}
+return true;
}

View File

@@ -159,30 +159,14 @@ master_create_distributed_table(PG_FUNCTION_ARGS)
char *colocateWithTableName = NULL;
bool viaDeprecatedAPI = true;
-/*
-* Lock target relation with an exclusive lock - there's no way to make
-* sense of this table until we've committed, and we don't want multiple
-* backends manipulating this relation.
-*/
-Relation relation = try_relation_open(relationId, ExclusiveLock);
-if (relation == NULL)
-{
-ereport(ERROR, (errmsg("could not create distributed table: "
-"relation does not exist")));
-}
char *distributionColumnName = text_to_cstring(distributionColumnText);
-Var *distributionColumn = BuildDistributionKeyFromColumnName(relation,
-distributionColumnName);
-Assert(distributionColumn != NULL);
+Assert(distributionColumnName != NULL);
char distributionMethod = LookupDistributionMethod(distributionMethodOid);
-CreateDistributedTable(relationId, distributionColumn, distributionMethod,
+CreateDistributedTable(relationId, distributionColumnName, distributionMethod,
ShardCount, false, colocateWithTableName, viaDeprecatedAPI);
-relation_close(relation, NoLock);
PG_RETURN_VOID();
}
@@ -249,9 +233,8 @@ create_distributed_table(PG_FUNCTION_ARGS)
relation_close(relation, NoLock);
char *distributionColumnName = text_to_cstring(distributionColumnText);
-Var *distributionColumn = BuildDistributionKeyFromColumnName(relation,
-distributionColumnName);
-Assert(distributionColumn != NULL);
+Assert(distributionColumnName != NULL);
char distributionMethod = LookupDistributionMethod(distributionMethodOid);
if (shardCount < 1 || shardCount > MAX_SHARD_COUNT)
@@ -261,7 +244,7 @@ create_distributed_table(PG_FUNCTION_ARGS)
shardCount, MAX_SHARD_COUNT)));
}
-CreateDistributedTable(relationId, distributionColumn, distributionMethod,
+CreateDistributedTable(relationId, distributionColumnName, distributionMethod,
shardCount, shardCountIsStrict, colocateWithTableName,
viaDeprecatedAPI);
@@ -281,7 +264,7 @@ create_reference_table(PG_FUNCTION_ARGS)
Oid relationId = PG_GETARG_OID(0);
char *colocateWithTableName = NULL;
-Var *distributionColumn = NULL;
+char *distributionColumnName = NULL;
bool viaDeprecatedAPI = false;
@@ -317,7 +300,7 @@ create_reference_table(PG_FUNCTION_ARGS)
errdetail("There are no active worker nodes.")));
}
-CreateDistributedTable(relationId, distributionColumn, DISTRIBUTE_BY_NONE,
+CreateDistributedTable(relationId, distributionColumnName, DISTRIBUTE_BY_NONE,
ShardCount, false, colocateWithTableName, viaDeprecatedAPI);
PG_RETURN_VOID();
}
@@ -385,9 +368,10 @@ EnsureRelationExists(Oid relationId)
* day, once we deprecate master_create_distribute_table completely.
*/
void
-CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributionMethod,
-int shardCount, bool shardCountIsStrict,
-char *colocateWithTableName, bool viaDeprecatedAPI)
+CreateDistributedTable(Oid relationId, char *distributionColumnName,
+char distributionMethod, int shardCount,
+bool shardCountIsStrict, char *colocateWithTableName,
+bool viaDeprecatedAPI)
{
/*
* EnsureTableNotDistributed errors out when relation is a citus table but
@@ -443,6 +427,8 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio
DropFKeysRelationInvolvedWithTableType(relationId, INCLUDE_LOCAL_TABLES);
}
+LockRelationOid(relationId, ExclusiveLock);
/*
* Ensure that the sequences used in column defaults of the table
* have proper types
@@ -463,22 +449,9 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio
colocateWithTableName,
viaDeprecatedAPI);
-/*
-* Due to dropping columns, the parent's distribution key may not match the
-* partition's distribution key. The input distributionColumn belongs to
-* the parent. That's why we override the distribution column of partitions
-* here. See issue #5123 for details.
-*/
-if (PartitionTable(relationId))
-{
-Oid parentRelationId = PartitionParentOid(relationId);
-char *distributionColumnName =
-ColumnToColumnName(parentRelationId, nodeToString(distributionColumn));
-distributionColumn =
-FindColumnWithNameOnTargetRelation(parentRelationId, distributionColumnName,
-relationId);
-}
+Var *distributionColumn = BuildDistributionKeyFromColumnName(relationId,
+distributionColumnName,
+ExclusiveLock);
/*
* ColocationIdForNewTable assumes caller acquires lock on relationId. In our case,
@@ -567,7 +540,7 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio
foreach_oid(partitionRelationId, partitionList)
{
-CreateDistributedTable(partitionRelationId, distributionColumn,
+CreateDistributedTable(partitionRelationId, distributionColumnName,
distributionMethod, shardCount, false,
parentRelationName, viaDeprecatedAPI);
}

View File

@@ -241,6 +241,17 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
return NIL;
}
+/*
+ * Indices are created separately, however, they do show up in the dependency
+ * list for a table since they will have potentially their own dependencies.
+ * The commands will be added to both shards and metadata tables via the table
+ * creation commands.
+ */
+if (relKind == RELKIND_INDEX)
+{
+return NIL;
+}
if (relKind == RELKIND_RELATION || relKind == RELKIND_PARTITIONED_TABLE ||
relKind == RELKIND_FOREIGN_TABLE)
{
@@ -317,6 +328,11 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
return DDLCommands;
}
+case OCLASS_TSCONFIG:
+{
+return CreateTextSearchConfigDDLCommandsIdempotent(dependency);
+}
case OCLASS_TYPE:
{
return CreateTypeDDLCommandsIdempotent(dependency);

View File

@@ -175,7 +175,7 @@ static DistributeObjectOps Any_CreateFunction = {
.preprocess = PreprocessCreateFunctionStmt,
.postprocess = PostprocessCreateFunctionStmt,
.address = CreateFunctionStmtObjectAddress,
-.markDistributed = false,
+.markDistributed = true,
};
static DistributeObjectOps Any_CreatePolicy = {
.deparse = NULL,
@@ -276,7 +276,7 @@ static DistributeObjectOps Collation_AlterOwner = {
static DistributeObjectOps Collation_Define = {
.deparse = NULL,
.qualify = NULL,
-.preprocess = NULL,
+.preprocess = PreprocessDefineCollationStmt,
.postprocess = PostprocessDefineCollationStmt,
.address = DefineCollationStmtObjectAddress,
.markDistributed = true,
@@ -505,6 +505,62 @@ static DistributeObjectOps Sequence_Rename = {
.address = RenameSequenceStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_Alter = {
.deparse = DeparseAlterTextSearchConfigurationStmt,
.qualify = QualifyAlterTextSearchConfigurationStmt,
.preprocess = PreprocessAlterTextSearchConfigurationStmt,
.postprocess = NULL,
.address = AlterTextSearchConfigurationStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_AlterObjectSchema = {
.deparse = DeparseAlterTextSearchConfigurationSchemaStmt,
.qualify = QualifyAlterTextSearchConfigurationSchemaStmt,
.preprocess = PreprocessAlterTextSearchConfigurationSchemaStmt,
.postprocess = PostprocessAlterTextSearchConfigurationSchemaStmt,
.address = AlterTextSearchConfigurationSchemaStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_AlterOwner = {
.deparse = DeparseAlterTextSearchConfigurationOwnerStmt,
.qualify = QualifyAlterTextSearchConfigurationOwnerStmt,
.preprocess = PreprocessAlterTextSearchConfigurationOwnerStmt,
.postprocess = PostprocessAlterTextSearchConfigurationOwnerStmt,
.address = AlterTextSearchConfigurationOwnerObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_Comment = {
.deparse = DeparseTextSearchConfigurationCommentStmt,
.qualify = QualifyTextSearchConfigurationCommentStmt,
.preprocess = PreprocessTextSearchConfigurationCommentStmt,
.postprocess = NULL,
.address = TextSearchConfigurationCommentObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_Define = {
.deparse = DeparseCreateTextSearchStmt,
.qualify = NULL,
.preprocess = NULL,
.postprocess = PostprocessCreateTextSearchConfigurationStmt,
.address = CreateTextSearchConfigurationObjectAddress,
.markDistributed = true,
};
static DistributeObjectOps TextSearchConfig_Drop = {
.deparse = DeparseDropTextSearchConfigurationStmt,
.qualify = QualifyDropTextSearchConfigurationStmt,
.preprocess = PreprocessDropTextSearchConfigurationStmt,
.postprocess = NULL,
.address = NULL,
.markDistributed = false,
};
static DistributeObjectOps TextSearchConfig_Rename = {
.deparse = DeparseRenameTextSearchConfigurationStmt,
.qualify = QualifyRenameTextSearchConfigurationStmt,
.preprocess = PreprocessRenameTextSearchConfigurationStmt,
.postprocess = NULL,
.address = RenameTextSearchConfigurationStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps Trigger_AlterObjectDepends = {
.deparse = NULL,
.qualify = NULL,
@@ -811,6 +867,11 @@ GetDistributeObjectOps(Node *node)
return &Table_AlterObjectSchema;
}
+case OBJECT_TSCONFIGURATION:
+{
+return &TextSearchConfig_AlterObjectSchema;
+}
case OBJECT_TYPE:
{
return &Type_AlterObjectSchema;
@@ -868,6 +929,11 @@ GetDistributeObjectOps(Node *node)
return &Statistics_AlterOwner;
}
+case OBJECT_TSCONFIGURATION:
+{
+return &TextSearchConfig_AlterOwner;
+}
case OBJECT_TYPE:
{
return &Type_AlterOwner;
@@ -949,11 +1015,33 @@ GetDistributeObjectOps(Node *node)
return &Any_AlterTableMoveAll;
}
+case T_AlterTSConfigurationStmt:
+{
+return &TextSearchConfig_Alter;
+}
case T_ClusterStmt:
{
return &Any_Cluster;
}
+case T_CommentStmt:
+{
+CommentStmt *stmt = castNode(CommentStmt, node);
+switch (stmt->objtype)
+{
+case OBJECT_TSCONFIGURATION:
+{
+return &TextSearchConfig_Comment;
+}
+default:
+{
+return &NoDistributeOps;
+}
+}
+}
case T_CompositeTypeStmt:
{
return &Any_CompositeType;
@@ -1014,6 +1102,11 @@ GetDistributeObjectOps(Node *node)
return &Collation_Define;
}
+case OBJECT_TSCONFIGURATION:
+{
+return &TextSearchConfig_Define;
+}
default:
{
return &NoDistributeOps;
@@ -1091,6 +1184,11 @@ GetDistributeObjectOps(Node *node)
return &Table_Drop;
}
+case OBJECT_TSCONFIGURATION:
+{
+return &TextSearchConfig_Drop;
+}
case OBJECT_TYPE:
{
return &Type_Drop;
@@ -1190,6 +1288,11 @@ GetDistributeObjectOps(Node *node)
return &Statistics_Rename;
}
+case OBJECT_TSCONFIGURATION:
+{
+return &TextSearchConfig_Rename;
+}
case OBJECT_TYPE:
{
return &Type_Rename;

View File

@@ -25,6 +25,7 @@
#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/pg_aggregate.h"
+#include "catalog/dependency.h"
#include "catalog/namespace.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
@@ -38,6 +39,7 @@
#include "distributed/listutils.h"
#include "distributed/maintenanced.h"
#include "distributed/metadata_utility.h"
+#include "distributed/metadata/dependency.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata/pg_dist_object.h"
@@ -80,6 +82,7 @@ static void EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid
static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt);
static bool ShouldPropagateAlterFunction(const ObjectAddress *address);
static bool ShouldAddFunctionSignature(FunctionParameterMode mode);
+static ObjectAddress * GetUndistributableDependency(ObjectAddress *functionAddress);
static ObjectAddress FunctionToObjectAddress(ObjectType objectType,
ObjectWithArgs *objectWithArgs,
bool missing_ok);
@@ -759,7 +762,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
/*
* GetFunctionDDLCommand returns the complete "CREATE OR REPLACE FUNCTION ..." statement for
-* the specified function followed by "ALTER FUNCTION .. SET OWNER ..".
+* the specified function.
*
* useCreateOrReplace is ignored for non-aggregate functions.
*/
@@ -1170,46 +1173,23 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
/*
* ShouldPropagateCreateFunction tests if we need to propagate a CREATE FUNCTION
-* statement. We only propagate replace's of distributed functions to keep the function on
-* the workers in sync with the one on the coordinator.
+* statement.
*/
static bool
ShouldPropagateCreateFunction(CreateFunctionStmt *stmt)
{
-if (creating_extension)
+if (!ShouldPropagate())
{
-/*
-* extensions should be created separately on the workers, functions cascading
-* from an extension should therefore not be propagated.
-*/
-return false;
-}
-if (!EnableMetadataSync)
-{
-/*
-* we are configured to disable object propagation, should not propagate anything
-*/
-return false;
-}
-if (!stmt->replace)
-{
-/*
-* Since we only care for a replace of distributed functions if the statement is
-* not a replace we are going to ignore.
-*/
return false;
}
/*
-* Even though its a replace we should accept an non-existing function, it will just
-* not be distributed
+* If the create command is a part of a multi-statement transaction that is not in
+* sequential mode, don't propagate.
*/
-ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, true);
-if (!IsObjectDistributed(&address))
+if (IsMultiStatementTransaction() &&
+MultiShardConnectionType != SEQUENTIAL_CONNECTION)
{
-/* do not propagate alter function for non-distributed functions */
return false;
}
@@ -1253,12 +1233,10 @@ ShouldPropagateAlterFunction(const ObjectAddress *address)
/*
* PreprocessCreateFunctionStmt is called during the planning phase for CREATE [OR REPLACE]
-* FUNCTION. We primarily care for the replace variant of this statement to keep
-* distributed functions in sync. We bail via a check on ShouldPropagateCreateFunction
-* which checks for the OR REPLACE modifier.
+* FUNCTION before it is created on the local node internally.
*
* Since we use pg_get_functiondef to get the ddl command we actually do not do any
-* planning here, instead we defer the plan creation to the processing step.
+* planning here, instead we defer the plan creation to the postprocessing step.
*
* Instead we do our basic housekeeping where we make sure we are on the coordinator and
* can propagate the function in sequential mode.
@@ -1279,7 +1257,7 @@ PreprocessCreateFunctionStmt(Node *node, const char *queryString,
EnsureSequentialMode(OBJECT_FUNCTION);
/*
-* ddl jobs will be generated during the Processing phase as we need the function to
+* ddl jobs will be generated during the postprocessing phase as we need the function to
* be updated in the catalog to get its sql representation
*/
return NIL;
@@ -1290,6 +1268,11 @@ PreprocessCreateFunctionStmt(Node *node, const char *queryString,
* PostprocessCreateFunctionStmt actually creates the plan we need to execute for function
* propagation. This is the downside of using pg_get_functiondef to get the sql statement.
*
+* If function depends on any non-distributed relation (except sequence and composite type),
+* Citus can not distribute it. In order to not to prevent users from creating local
+* functions on the coordinator WARNING message will be sent to the customer about the case
+* instead of erroring out.
+*
* Besides creating the plan we also make sure all (new) dependencies of the function are
* created on all nodes.
*/
@@ -1303,18 +1286,113 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString)
return NIL;
}
-ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
-EnsureDependenciesExistOnAllNodes(&address);
-List *commands = list_make4(DISABLE_DDL_PROPAGATION,
-GetFunctionDDLCommand(address.objectId, true),
-GetFunctionAlterOwnerCommand(address.objectId),
-ENABLE_DDL_PROPAGATION);
+ObjectAddress functionAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
+if (IsObjectAddressOwnedByExtension(&functionAddress, NULL))
+{
+return NIL;
+}
/*
* This check should have been valid for all objects not only for functions. Though,
* we do this limited check for now as functions are more likely to be used with
* such dependencies, and we want to scope it for now.
*/
ObjectAddress *undistributableDependency = GetUndistributableDependency(
&functionAddress);
if (undistributableDependency != NULL)
{
if (SupportedDependencyByCitus(undistributableDependency))
{
/*
* Citus can't distribute some relations as dependency, although those
* types as supported by Citus. So we can use get_rel_name directly
*/
RangeVar *functionRangeVar = makeRangeVarFromNameList(stmt->funcname);
char *functionName = functionRangeVar->relname;
char *dependentRelationName =
get_rel_name(undistributableDependency->objectId);
ereport(WARNING, (errmsg("Citus can't distribute function \"%s\" having "
"dependency on non-distributed relation \"%s\"",
functionName, dependentRelationName),
errdetail("Function will be created only locally"),
errhint("To distribute function, distribute dependent "
"relations first. Then, re-create the function")));
}
else
{
char *objectType = NULL;
#if PG_VERSION_NUM >= PG_VERSION_14
objectType = getObjectTypeDescription(undistributableDependency, false);
#else
objectType = getObjectTypeDescription(undistributableDependency);
#endif
ereport(WARNING, (errmsg("Citus can't distribute functions having "
"dependency on unsupported object of type \"%s\"",
objectType),
errdetail("Function will be created only locally")));
}
return NIL;
}
EnsureDependenciesExistOnAllNodes(&functionAddress);
List *commands = list_make1(DISABLE_DDL_PROPAGATION);
commands = list_concat(commands, CreateFunctionDDLCommandsIdempotent(
&functionAddress));
commands = list_concat(commands, list_make1(ENABLE_DDL_PROPAGATION));
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* GetUndistributableDependency checks whether object has any non-distributable
* dependency. If any one found, it will be returned.
*/
static ObjectAddress *
GetUndistributableDependency(ObjectAddress *objectAddress)
{
List *dependencies = GetAllDependenciesForObject(objectAddress);
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
{
if (IsObjectDistributed(dependency))
{
continue;
}
if (!SupportedDependencyByCitus(dependency))
{
/*
* Since roles should be handled manually with Citus community, skip them.
*/
if (getObjectClass(dependency) != OCLASS_ROLE)
{
return dependency;
}
}
if (getObjectClass(dependency) == OCLASS_CLASS)
{
/*
* Citus can only distribute dependent non-distributed sequence
* and composite types.
*/
char relKind = get_rel_relkind(dependency->objectId);
if (relKind != RELKIND_SEQUENCE && relKind != RELKIND_COMPOSITE_TYPE)
{
return dependency;
}
}
}
return NULL;
}
/*
* CreateFunctionStmtObjectAddress returns the ObjectAddress for the subject of the
* CREATE [OR REPLACE] FUNCTION statement. If missing_ok is false it will error with the

View File

@@ -725,12 +725,6 @@ PostprocessIndexStmt(Node *node, const char *queryString)
{
IndexStmt *indexStmt = castNode(IndexStmt, node);
-/* we are only processing CONCURRENT index statements */
-if (!indexStmt->concurrent)
-{
-return NIL;
-}
/* this logic only applies to the coordinator */
if (!IsCoordinator())
{
@@ -747,14 +741,36 @@ PostprocessIndexStmt(Node *node, const char *queryString)
return NIL;
}
+Oid indexRelationId = get_relname_relid(indexStmt->idxname, schemaId);
+/* ensure dependencies of index exist on all nodes */
+ObjectAddress address = { 0 };
+ObjectAddressSet(address, RelationRelationId, indexRelationId);
+EnsureDependenciesExistOnAllNodes(&address);
+/* furtheron we are only processing CONCURRENT index statements */
+if (!indexStmt->concurrent)
+{
+return NIL;
+}
+/*
+ * EnsureDependenciesExistOnAllNodes could have distributed objects that are required
+ * by this index. During the propagation process an active snapshout might be left as
+ * a side effect of inserting the local tuples via SPI. To not leak a snapshot like
+ * that we will pop any snapshot if we have any right before we commit.
+ */
+if (ActiveSnapshotSet())
+{
+PopActiveSnapshot();
+}
/* commit the current transaction and start anew */
CommitTransactionCommand();
StartTransactionCommand();
/* get the affected relation and index */
Relation relation = table_openrv(indexStmt->relation, ShareUpdateExclusiveLock);
-Oid indexRelationId = get_relname_relid(indexStmt->idxname,
-schemaId);
Relation indexRelation = index_open(indexRelationId, RowExclusiveLock);
/* close relations but retain locks */

View File

@@ -86,13 +86,6 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString,
DropStmt *dropStatement = castNode(DropStmt, node);
Assert(dropStatement->removeType == OBJECT_SCHEMA);
-if (!ShouldPropagate())
-{
-return NIL;
-}
-EnsureCoordinator();
List *distributedSchemas = FilterDistributedSchemas(dropStatement->objects);
if (list_length(distributedSchemas) < 1)
@@ -100,6 +93,13 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString,
return NIL;
}
+if (!ShouldPropagate())
+{
+return NIL;
+}
+EnsureCoordinator();
EnsureSequentialMode(OBJECT_SCHEMA);
Value *schemaVal = NULL;

View File

@@ -378,6 +378,8 @@ PostprocessCreateTableStmtPartitionOf(CreateStmt *createStatement, const
}
Var *parentDistributionColumn = DistPartitionKeyOrError(parentRelationId);
+char *distributionColumnName =
+ColumnToColumnName(parentRelationId, (Node *) parentDistributionColumn);
char parentDistributionMethod = DISTRIBUTE_BY_HASH;
char *parentRelationName = generate_qualified_relation_name(parentRelationId);
bool viaDeprecatedAPI = false;
@@ -385,7 +387,7 @@ PostprocessCreateTableStmtPartitionOf(CreateStmt *createStatement, const
SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(parentRelationId,
relationId);
-CreateDistributedTable(relationId, parentDistributionColumn,
+CreateDistributedTable(relationId, distributionColumnName,
parentDistributionMethod, ShardCount, false,
parentRelationName, viaDeprecatedAPI);
}
@@ -573,13 +575,8 @@ static void
DistributePartitionUsingParent(Oid parentCitusRelationId, Oid partitionRelationId)
{
Var *distributionColumn = DistPartitionKeyOrError(parentCitusRelationId);
-char *distributionColumnName =
-ColumnToColumnName(parentCitusRelationId,
-nodeToString(distributionColumn));
-distributionColumn =
-FindColumnWithNameOnTargetRelation(parentCitusRelationId,
-distributionColumnName,
-partitionRelationId);
+char *distributionColumnName = ColumnToColumnName(parentCitusRelationId,
+(Node *) distributionColumn);
char distributionMethod = DISTRIBUTE_BY_HASH;
char *parentRelationName = generate_qualified_relation_name(parentCitusRelationId);
@@ -588,7 +585,7 @@ DistributePartitionUsingParent(Oid parentCitusRelationId, Oid partitionRelationI
SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(
parentCitusRelationId, partitionRelationId);
-CreateDistributedTable(partitionRelationId, distributionColumn,
+CreateDistributedTable(partitionRelationId, distributionColumnName,
distributionMethod, ShardCount, false,
parentRelationName, viaDeprecatedAPI);
}

View File

@ -0,0 +1,935 @@
/*-------------------------------------------------------------------------
*
* text_search.c
* Commands for creating and altering TEXT SEARCH objects
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/genam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
#include "catalog/objectaddress.h"
#include "catalog/pg_ts_config.h"
#include "catalog/pg_ts_config_map.h"
#include "catalog/pg_ts_dict.h"
#include "catalog/pg_ts_parser.h"
#include "commands/comment.h"
#include "commands/extension.h"
#include "fmgr.h"
#include "nodes/makefuncs.h"
#include "tsearch/ts_cache.h"
#include "tsearch/ts_public.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"
#include "distributed/multi_executor.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/worker_create_or_replace.h"
static List * GetDistributedTextSearchConfigurationNames(DropStmt *stmt);
static DefineStmt * GetTextSearchConfigDefineStmt(Oid tsconfigOid);
static List * GetTextSearchConfigCommentStmt(Oid tsconfigOid);
static List * get_ts_parser_namelist(Oid tsparserOid);
static List * GetTextSearchConfigMappingStmt(Oid tsconfigOid);
static List * GetTextSearchConfigOwnerStmts(Oid tsconfigOid);
static List * get_ts_dict_namelist(Oid tsdictOid);
static Oid get_ts_config_parser_oid(Oid tsconfigOid);
static char * get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype);
/*
* PostprocessCreateTextSearchConfigurationStmt is called after the TEXT SEARCH
* CONFIGURATION has been created locally.
*
* Contrary to many other objects a text search configuration is often created as a copy
* of an existing configuration. After the copy there is no relation to the configuration
* that has been copied. This prevents our normal approach of ensuring dependencies
* exist before forwarding a close resemblance of the statement the user executed.
*
* Instead we recreate the object based on what we find in our own catalog, hence the
* amount of work we perform in the postprocess function, contrary to other objects.
*/
List *
PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString)
{
DefineStmt *stmt = castNode(DefineStmt, node);
Assert(stmt->kind == OBJECT_TSCONFIGURATION);
if (!ShouldPropagate())
{
return NIL;
}
/*
* If the create command is a part of a multi-statement transaction that is not in
* sequential mode, don't propagate. Instead we will rely on backfilling.
*/
if (IsMultiStatementTransaction())
{
if (MultiShardConnectionType != SEQUENTIAL_CONNECTION)
{
return NIL;
}
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
EnsureDependenciesExistOnAllNodes(&address);
/*
* TEXT SEARCH CONFIGURATION objects are more complex, with their mappings and the
* possibility of copying from existing templates, so we require the idempotent
* recreation commands to be run for successful propagation.
*/
List *commands = CreateTextSearchConfigDDLCommandsIdempotent(&address);
commands = lcons(DISABLE_DDL_PROPAGATION, commands);
commands = lappend(commands, ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
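A hypothetical illustration of why this postprocess hook rebuilds the object from the catalog rather than forwarding the user's statement: a configuration created with COPY keeps no link to its template, so only the catalog state fully describes it afterwards. The configuration name below is illustrative only.

    CREATE TEXT SEARCH CONFIGURATION german_noaccent ( COPY = pg_catalog.german );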
List *
GetCreateTextSearchConfigStatements(const ObjectAddress *address)
{
Assert(address->classId == TSConfigRelationId);
List *stmts = NIL;
/* CREATE TEXT SEARCH CONFIGURATION ...*/
stmts = lappend(stmts, GetTextSearchConfigDefineStmt(address->objectId));
/* ALTER TEXT SEARCH CONFIGURATION ... OWNER TO ...*/
stmts = list_concat(stmts, GetTextSearchConfigOwnerStmts(address->objectId));
/* COMMENT ON TEXT SEARCH CONFIGURATION ... */
stmts = list_concat(stmts, GetTextSearchConfigCommentStmt(address->objectId));
/* ALTER TEXT SEARCH CONFIGURATION ... ADD MAPPING FOR ... WITH ... */
stmts = list_concat(stmts, GetTextSearchConfigMappingStmt(address->objectId));
return stmts;
}
/*
* CreateTextSearchConfigDDLCommandsIdempotent creates a list of ddl commands to recreate
* a TEXT SEARCH CONFIGURATION object in an idempotent manner on workers.
*/
List *
CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address)
{
List *stmts = GetCreateTextSearchConfigStatements(address);
List *sqls = DeparseTreeNodes(stmts);
return list_make1(WrapCreateOrReplaceList(sqls));
}
/*
* PreprocessDropTextSearchConfigurationStmt prepares the statements we need to send to
* the workers. After we have dropped the configurations locally they are also removed from
* pg_dist_object, so it is important to do all distribution checks before the change is
* made locally.
*/
List *
PreprocessDropTextSearchConfigurationStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
DropStmt *stmt = castNode(DropStmt, node);
Assert(stmt->removeType == OBJECT_TSCONFIGURATION);
if (!ShouldPropagate())
{
return NIL;
}
List *distributedObjects = GetDistributedTextSearchConfigurationNames(stmt);
if (list_length(distributedObjects) == 0)
{
/* no distributed objects to remove */
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
/*
* Temporarily replace the list of objects being dropped with only the list
* containing the distributed objects. After we have created the sql statement we
* restore the original list of objects to execute on locally.
*
* Because search paths on the coordinator and workers might not be in sync we fully
* qualify the list before deparsing. This is safe because qualification doesn't
* change the original names in place, but instead creates new ones.
*/
List *originalObjects = stmt->objects;
stmt->objects = distributedObjects;
QualifyTreeNode((Node *) stmt);
const char *dropStmtSql = DeparseTreeNode((Node *) stmt);
stmt->objects = originalObjects;
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) dropStmtSql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
* GetDistributedTextSearchConfigurationNames iterates over all text search configurations
* being dropped, and creates a list containing all configurations that are distributed.
*/
static List *
GetDistributedTextSearchConfigurationNames(DropStmt *stmt)
{
List *objName = NULL;
List *distributedObjects = NIL;
foreach_ptr(objName, stmt->objects)
{
Oid tsconfigOid = get_ts_config_oid(objName, stmt->missing_ok);
if (!OidIsValid(tsconfigOid))
{
/* skip missing configuration names, they can't be distributed */
continue;
}
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, tsconfigOid);
if (!IsObjectDistributed(&address))
{
continue;
}
distributedObjects = lappend(distributedObjects, objName);
}
return distributedObjects;
}
/*
* PreprocessAlterTextSearchConfigurationStmt verifies if the configuration being altered
* is distributed in the cluster. If that is the case it will prepare the list of commands
* to send to the workers to apply the same changes remotely.
*/
List *
PreprocessAlterTextSearchConfigurationStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
const char *alterStmtSql = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) alterStmtSql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
* PreprocessRenameTextSearchConfigurationStmt verifies if the configuration being altered
* is distributed in the cluster. If that is the case it will prepare the list of commands
* to send to the workers to apply the same changes remotely.
*/
List *
PreprocessRenameTextSearchConfigurationStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
char *ddlCommand = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) ddlCommand,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
* PreprocessAlterTextSearchConfigurationSchemaStmt verifies if the configuration being
* altered is distributed in the cluster. If that is the case it will prepare the list of
* commands to send to the workers to apply the same changes remotely.
*/
List *
PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString,
ProcessUtilityContext
processUtilityContext)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt,
stmt->missing_ok);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
* PostprocessAlterTextSearchConfigurationSchemaStmt is invoked after the schema has been
* changed locally. Since changing the schema could result in new dependencies being found
* for this object we re-ensure all the dependencies for the configuration do exist. This
* is solely to propagate the new schema (and all its dependencies) if it was not already
* distributed in the cluster.
*/
List *
PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt,
stmt->missing_ok);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
/* dependencies have changed (schema) let's ensure they exist */
EnsureDependenciesExistOnAllNodes(&address);
return NIL;
}
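A minimal sketch of the kind of statement the schema pre/postprocess pair above handles, with hypothetical names:

    ALTER TEXT SEARCH CONFIGURATION public.german_noaccent SET SCHEMA text_search;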
/*
* PreprocessTextSearchConfigurationCommentStmt propagates any comment on a distributed
* configuration to the workers. Since comments for configurations are prominently shown
* when listing all text search configurations, this is purely a cosmetic thing when
* running in MX.
*/
List *
PreprocessTextSearchConfigurationCommentStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
CommentStmt *stmt = castNode(CommentStmt, node);
Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
}
/*
* PreprocessAlterTextSearchConfigurationOwnerStmt verifies if the configuration being
* altered is distributed in the cluster. If that is the case it will prepare the list of
* commands to send to the workers to apply the same changes remotely.
*/
List *
PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString,
ProcessUtilityContext
processUtilityContext)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
EnsureSequentialMode(OBJECT_TSCONFIGURATION);
QualifyTreeNode((Node *) stmt);
char *sql = DeparseTreeNode((Node *) stmt);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* PostprocessAlterTextSearchConfigurationOwnerStmt is invoked after the owner has been
* changed locally. Since changing the owner could result in new dependencies being found
* for this object we re-ensure all the dependencies for the configuration do exist. This
* is solely to propagate the new owner (and all its dependencies) if it was not already
* distributed in the cluster.
*/
List *
PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
/* dependencies have changed (owner) let's ensure they exist */
EnsureDependenciesExistOnAllNodes(&address);
return NIL;
}
/*
* GetTextSearchConfigDefineStmt returns the DefineStmt for a TEXT SEARCH CONFIGURATION
* based on the configuration as defined in the catalog identified by tsconfigOid.
*
* This statement will only contain the parser, as all other properties for text search
* configurations are stored as mappings in a different catalog.
*/
static DefineStmt *
GetTextSearchConfigDefineStmt(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search configuration %u",
tsconfigOid);
}
Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup);
DefineStmt *stmt = makeNode(DefineStmt);
stmt->kind = OBJECT_TSCONFIGURATION;
stmt->defnames = get_ts_config_namelist(tsconfigOid);
List *parserNameList = get_ts_parser_namelist(config->cfgparser);
TypeName *parserTypeName = makeTypeNameFromNameList(parserNameList);
stmt->definition = list_make1(makeDefElem("parser", (Node *) parserTypeName, -1));
ReleaseSysCache(tup);
return stmt;
}
/*
* GetTextSearchConfigCommentStmt returns a list containing all entries to recreate a
* comment on the configuration identified by tsconfigOid. The list could be empty if
* there is no comment on a configuration.
*
* The reason for a list is for easy use when building a list of all statements to invoke
* to recreate the text search configuration. An empty list can easily be concatenated
* without inspection, contrary to a NULL pointer if we were to return the CommentStmt struct.
*/
static List *
GetTextSearchConfigCommentStmt(Oid tsconfigOid)
{
char *comment = GetComment(tsconfigOid, TSConfigRelationId, 0);
if (!comment)
{
return NIL;
}
CommentStmt *stmt = makeNode(CommentStmt);
stmt->objtype = OBJECT_TSCONFIGURATION;
stmt->object = (Node *) get_ts_config_namelist(tsconfigOid);
stmt->comment = comment;
return list_make1(stmt);
}
/*
* GetTextSearchConfigMappingStmt returns a list of all mappings from token_types to
* dictionaries configured on a text search configuration identified by tsconfigOid.
*
* Many mappings can exist on a configuration which all require their own statement to
* recreate.
*/
static List *
GetTextSearchConfigMappingStmt(Oid tsconfigOid)
{
ScanKeyData mapskey = { 0 };
/* mapcfg = tsconfigOid */
ScanKeyInit(&mapskey,
Anum_pg_ts_config_map_mapcfg,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(tsconfigOid));
Relation maprel = table_open(TSConfigMapRelationId, AccessShareLock);
Relation mapidx = index_open(TSConfigMapIndexId, AccessShareLock);
SysScanDesc mapscan = systable_beginscan_ordered(maprel, mapidx, NULL, 1, &mapskey);
List *stmts = NIL;
AlterTSConfigurationStmt *stmt = NULL;
/*
* We iterate the config mappings in index order, filtered by mapcfg. This means we
* get equal maptokentype's in one run. By comparing the current tokentype to the last
* we know when we can create a new stmt and append the previously constructed one to
* the list.
*/
int lastTokType = -1;
/*
* We read all mappings filtered by config id, hence we only need to load the name
* once and can reuse for every statement.
*/
List *configName = get_ts_config_namelist(tsconfigOid);
Oid parserOid = get_ts_config_parser_oid(tsconfigOid);
HeapTuple maptup = NULL;
while ((maptup = systable_getnext_ordered(mapscan, ForwardScanDirection)) != NULL)
{
Form_pg_ts_config_map cfgmap = (Form_pg_ts_config_map) GETSTRUCT(maptup);
if (lastTokType != cfgmap->maptokentype)
{
/* creating a new statement, appending the previous one (if existing) */
if (stmt != NULL)
{
stmts = lappend(stmts, stmt);
}
stmt = makeNode(AlterTSConfigurationStmt);
stmt->cfgname = configName;
stmt->kind = ALTER_TSCONFIG_ADD_MAPPING;
stmt->tokentype = list_make1(makeString(
get_ts_parser_tokentype_name(parserOid,
cfgmap->
maptokentype)));
lastTokType = cfgmap->maptokentype;
}
stmt->dicts = lappend(stmt->dicts, get_ts_dict_namelist(cfgmap->mapdict));
}
/*
* If we have run at least 1 iteration above we still have the last stmt not added to the
* stmts list.
*/
if (stmt != NULL)
{
stmts = lappend(stmts, stmt);
stmt = NULL;
}
systable_endscan_ordered(mapscan);
index_close(mapidx, NoLock);
table_close(maprel, NoLock);
return stmts;
}
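The mapping statements reconstructed above deparse to one command per token type, listing every dictionary mapped for that token type after WITH. A sketch with hypothetical names:

    ALTER TEXT SEARCH CONFIGURATION public.german_noaccent
        ADD MAPPING FOR asciiword WITH pg_catalog.german_stem, pg_catalog.simple;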
/*
* GetTextSearchConfigOwnerStmts returns a potentially empty list of statements to change
* the ownership of a TEXT SEARCH CONFIGURATION object.
*
* The list is for convenience when building a full list of statements to recreate the
* configuration.
*/
static List *
GetTextSearchConfigOwnerStmts(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search configuration %u",
tsconfigOid);
}
Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup);
AlterOwnerStmt *stmt = makeNode(AlterOwnerStmt);
stmt->objectType = OBJECT_TSCONFIGURATION;
stmt->object = (Node *) get_ts_config_namelist(tsconfigOid);
stmt->newowner = GetRoleSpecObjectForUser(config->cfgowner);
ReleaseSysCache(tup);
return list_make1(stmt);
}
/*
* get_ts_config_namelist based on the tsconfigOid this function creates the namelist that
* identifies the configuration in a fully qualified manner, regardless of the schema
* existing on the search_path.
*/
List *
get_ts_config_namelist(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search configuration %u",
tsconfigOid);
}
Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup);
char *schema = get_namespace_name(config->cfgnamespace);
char *configName = pstrdup(NameStr(config->cfgname));
List *names = list_make2(makeString(schema), makeString(configName));
ReleaseSysCache(tup);
return names;
}
/*
* get_ts_dict_namelist based on the tsdictOid this function creates the namelist that
* identifies the dictionary in a fully qualified manner, regardless of the schema
* existing on the search_path.
*/
static List *
get_ts_dict_namelist(Oid tsdictOid)
{
HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search dictionary %u", tsdictOid);
}
Form_pg_ts_dict dict = (Form_pg_ts_dict) GETSTRUCT(tup);
char *schema = get_namespace_name(dict->dictnamespace);
char *dictName = pstrdup(NameStr(dict->dictname));
List *names = list_make2(makeString(schema), makeString(dictName));
ReleaseSysCache(tup);
return names;
}
/*
* get_ts_config_parser_oid based on the tsconfigOid this function returns the Oid of the
* parser used in the configuration.
*/
static Oid
get_ts_config_parser_oid(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search configuration %u", tsconfigOid);
}
Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup);
Oid parserOid = config->cfgparser;
ReleaseSysCache(tup);
return parserOid;
}
/*
* get_ts_parser_tokentype_name returns the name of the token as known to the parser by
* its tokentype identifier. The parser used to resolve the token name is identified by
* parserOid and should be the same that emitted the tokentype to begin with.
*/
static char *
get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype)
{
TSParserCacheEntry *parserCache = lookup_ts_parser_cache(parserOid);
if (!OidIsValid(parserCache->lextypeOid))
{
elog(ERROR, "method lextype isn't defined for text search parser %u", parserOid);
}
/* take lextypes from parser */
LexDescr *tokenlist = (LexDescr *) DatumGetPointer(
OidFunctionCall1(parserCache->lextypeOid, Int32GetDatum(0)));
/* and find the one with lexid = tokentype */
int tokenIndex = 0;
while (tokenlist && tokenlist[tokenIndex].lexid)
{
if (tokenlist[tokenIndex].lexid == tokentype)
{
return pstrdup(tokenlist[tokenIndex].alias);
}
tokenIndex++;
}
/* we haven't found the token */
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("token type \"%d\" does not exist in parser", tokentype)));
}
/*
* get_ts_parser_namelist based on the tsparserOid this function creates the namelist that
* identifies the parser in a fully qualified manner, regardless of the schema existing
* on the search_path.
*/
static List *
get_ts_parser_namelist(Oid tsparserOid)
{
HeapTuple tup = SearchSysCache1(TSPARSEROID, ObjectIdGetDatum(tsparserOid));
if (!HeapTupleIsValid(tup)) /* should not happen */
{
elog(ERROR, "cache lookup failed for text search parser %u",
tsparserOid);
}
Form_pg_ts_parser parser = (Form_pg_ts_parser) GETSTRUCT(tup);
char *schema = get_namespace_name(parser->prsnamespace);
char *parserName = pstrdup(NameStr(parser->prsname));
List *names = list_make2(makeString(schema), makeString(parserName));
ReleaseSysCache(tup);
return names;
}
/*
* CreateTextSearchConfigurationObjectAddress resolves the ObjectAddress for the object
* being created. If missing_ok is false the function will error, explaining to the user
* the text search configuration described in the statement doesn't exist.
*/
ObjectAddress
CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok)
{
DefineStmt *stmt = castNode(DefineStmt, node);
Assert(stmt->kind == OBJECT_TSCONFIGURATION);
Oid objid = get_ts_config_oid(stmt->defnames, missing_ok);
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, objid);
return address;
}
/*
* RenameTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT
* SEARCH CONFIGURATION being renamed. Optionally errors if the configuration does not
* exist based on the missing_ok flag passed in by the caller.
*/
ObjectAddress
RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok);
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, objid);
return address;
}
/*
* AlterTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT
* SEARCH CONFIGURATION being altered. Optionally errors if the configuration does not
* exist based on the missing_ok flag passed in by the caller.
*/
ObjectAddress
AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
{
AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node);
Oid objid = get_ts_config_oid(stmt->cfgname, missing_ok);
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, objid);
return address;
}
/*
* AlterTextSearchConfigurationSchemaStmtObjectAddress resolves the ObjectAddress for the
* TEXT SEARCH CONFIGURATION being moved to a different schema. Optionally errors if the
* configuration does not exist based on the missing_ok flag passed in by the caller.
*
* This can be called either before or after the schema move has been executed, hence
* the triple checking before the error might be thrown. Errors for non-existing schemas
* in edge cases will be raised by postgres while executing the move.
*/
ObjectAddress
AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
Oid objid = get_ts_config_oid(castNode(List, stmt->object), true);
if (!OidIsValid(objid))
{
/*
* couldn't find the text search configuration, might have already been moved to
* the new schema, we construct a new qualified name that uses the new schema to
* search in.
*/
char *schemaname = NULL;
char *config_name = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaname, &config_name);
char *newSchemaName = stmt->newschema;
List *names = list_make2(makeString(newSchemaName), makeString(config_name));
objid = get_ts_config_oid(names, true);
if (!missing_ok && !OidIsValid(objid))
{
/*
* if the text search config id is still invalid we couldn't find it, error
* with the same message postgres would error with if missing_ok is false
* (not ok to miss)
*/
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("text search configuration \"%s\" does not exist",
NameListToString(castNode(List, stmt->object)))));
}
}
ObjectAddress sequenceAddress = { 0 };
ObjectAddressSet(sequenceAddress, TSConfigRelationId, objid);
return sequenceAddress;
}
/*
* TextSearchConfigurationCommentObjectAddress resolves the ObjectAddress for the TEXT
* SEARCH CONFIGURATION on which the comment is placed. Optionally errors if the
* configuration does not exist based on the missing_ok flag passed in by the caller.
*/
ObjectAddress
TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok)
{
CommentStmt *stmt = castNode(CommentStmt, node);
Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok);
ObjectAddress address = { 0 };
ObjectAddressSet(address, TSConfigRelationId, objid);
return address;
}
/*
* AlterTextSearchConfigurationOwnerObjectAddress resolves the ObjectAddress for the TEXT
* SEARCH CONFIGURATION for which the owner is changed. Optionally errors if the
* configuration does not exist based on the missing_ok flag passed in by the caller.
*/
ObjectAddress
AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Relation relation = NULL;
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
return get_object_address(stmt->objectType, stmt->object, &relation, AccessShareLock,
missing_ok);
}
/*
* GenerateBackupNameForTextSearchConfiguration generates a safe name, not already in use,
* that can be used to rename an existing TEXT SEARCH CONFIGURATION so that a
* configuration with a specific name can be created, even if this would not have been
* possible due to name collisions.
*/
char *
GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address)
{
Assert(address->classId == TSConfigRelationId);
List *names = get_ts_config_namelist(address->objectId);
RangeVar *rel = makeRangeVarFromNameList(names);
char *newName = palloc0(NAMEDATALEN);
char suffix[NAMEDATALEN] = { 0 };
char *baseName = rel->relname;
int baseLength = strlen(baseName);
int count = 0;
while (true)
{
int suffixLength = SafeSnprintf(suffix, NAMEDATALEN - 1, "(citus_backup_%d)",
count);
/* trim the base name at the end to leave space for the suffix and trailing \0 */
baseLength = Min(baseLength, NAMEDATALEN - suffixLength - 1);
/* clear newName before copying the potentially trimmed baseName and suffix */
memset(newName, 0, NAMEDATALEN);
strncpy_s(newName, NAMEDATALEN, baseName, baseLength);
strncpy_s(newName + baseLength, NAMEDATALEN - baseLength, suffix,
suffixLength);
rel->relname = newName;
List *newNameList = MakeNameListFromRangeVar(rel);
Oid tsconfigOid = get_ts_config_oid(newNameList, true);
if (!OidIsValid(tsconfigOid))
{
return newName;
}
count++;
}
}
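For instance, assuming an existing configuration named my_config, the loop above tries candidate names of the following shape until one is unused:

    my_config(citus_backup_0)
    my_config(citus_backup_1)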

View File

@ -267,13 +267,17 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement)
ErrorIfIllegallyChangingKnownShard(relationId); ErrorIfIllegallyChangingKnownShard(relationId);
if (IsCitusTable(relationId) && IsForeignTable(relationId)) /*
* We allow truncating foreign tables that are added to metadata
* only on the coordinator, as user mappings are not propagated.
*/
if (IsForeignTable(relationId) &&
IsCitusTableType(relationId, CITUS_LOCAL_TABLE) &&
!IsCoordinator())
{ {
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("truncating distributed foreign tables is " errmsg("truncating foreign tables that are added to metadata "
"currently unsupported"), "can only be excuted on the coordinator")));
errhint("Consider undistributing table before TRUNCATE, "
"and then distribute or add to metadata again")));
} }
} }
} }
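A sketch of the behaviour the adjusted check enforces, assuming a foreign table that was added to Citus metadata via citus_add_local_table_to_metadata (the table name is hypothetical):

    -- on the coordinator
    SELECT citus_add_local_table_to_metadata('my_foreign_table');
    TRUNCATE my_foreign_table;   -- allowed here
    -- running the same TRUNCATE from a worker now raises the error above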

View File

@ -957,6 +957,20 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress)
return NIL; return NIL;
} }
HeapTuple tup = SearchSysCacheCopy1(TYPEOID, ObjectIdGetDatum(typeAddress->objectId));
if (!HeapTupleIsValid(tup))
{
elog(ERROR, "cache lookup failed for type %u", typeAddress->objectId);
}
/* Don't send any command if the type is a table's row type */
Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
if (typTup->typtype == TYPTYPE_COMPOSITE &&
get_rel_relkind(typTup->typrelid) != RELKIND_COMPOSITE_TYPE)
{
return NIL;
}
Node *stmt = CreateTypeStmtByObjectAddress(typeAddress); Node *stmt = CreateTypeStmtByObjectAddress(typeAddress);
/* capture ddl command for recreation and wrap in create if not exists construct */ /* capture ddl command for recreation and wrap in create if not exists construct */

View File

@ -17,6 +17,7 @@
#include "distributed/commands.h" #include "distributed/commands.h"
#include "distributed/deparser.h" #include "distributed/deparser.h"
#include "distributed/listutils.h"
/* /*
* DeparseTreeNode aims to be the inverse of postgres' ParseTreeNode. Currently with * DeparseTreeNode aims to be the inverse of postgres' ParseTreeNode. Currently with
@ -35,3 +36,20 @@ DeparseTreeNode(Node *stmt)
return ops->deparse(stmt); return ops->deparse(stmt);
} }
/*
* DeparseTreeNodes deparses all stmts in the list from the statement datastructure into
* sql statements.
*/
List *
DeparseTreeNodes(List *stmts)
{
List *sqls = NIL;
Node *stmt = NULL;
foreach_ptr(stmt, stmts)
{
sqls = lappend(sqls, DeparseTreeNode(stmt));
}
return sqls;
}

View File

@ -0,0 +1,377 @@
/*-------------------------------------------------------------------------
*
* deparse_text_search.c
* All routines to deparse text search statements.
* This file contains all entry points specific for text search statement deparsing.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "catalog/namespace.h"
#include "utils/builtins.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
static void AppendDefElemList(StringInfo buf, List *defelms);
static void AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes);
static void AppendStringInfoDictnames(StringInfo buf, List *dicts);
/*
* DeparseCreateTextSearchStmt returns the sql for a DefineStmt defining a TEXT SEARCH
* CONFIGURATION
*
* Although the two arguments that can be passed in are mutually exclusive in the syntax,
* the deparser will produce syntactically correct output even if multiple definitions are provided.
*/
char *
DeparseCreateTextSearchStmt(Node *node)
{
DefineStmt *stmt = castNode(DefineStmt, node);
StringInfoData buf = { 0 };
initStringInfo(&buf);
const char *identifier = NameListToQuotedString(stmt->defnames);
appendStringInfo(&buf, "CREATE TEXT SEARCH CONFIGURATION %s ", identifier);
appendStringInfoString(&buf, "(");
AppendDefElemList(&buf, stmt->definition);
appendStringInfoString(&buf, ");");
return buf.data;
}
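The deparser above produces statements of roughly this shape (identifiers are illustrative; the parser definition comes from the catalog):

    CREATE TEXT SEARCH CONFIGURATION public.german_noaccent (parser = pg_catalog."default");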
/*
* AppendDefElemList specialization to append a comma separated list of definitions to a
* define statement.
*
* Currently only supports String and TypeName entries. Will error on others.
*/
static void
AppendDefElemList(StringInfo buf, List *defelems)
{
DefElem *defelem = NULL;
bool first = true;
foreach_ptr(defelem, defelems)
{
if (!first)
{
appendStringInfoString(buf, ", ");
}
first = false;
/* extract identifier from defelem */
const char *identifier = NULL;
switch (nodeTag(defelem->arg))
{
case T_String:
{
identifier = quote_identifier(strVal(defelem->arg));
break;
}
case T_TypeName:
{
TypeName *typeName = castNode(TypeName, defelem->arg);
identifier = NameListToQuotedString(typeName->names);
break;
}
default:
{
ereport(ERROR, (errmsg("unexpected argument during deparsing of "
"TEXT SEARCH CONFIGURATION definition")));
}
}
/* stringify */
appendStringInfo(buf, "%s = %s", defelem->defname, identifier);
}
}
/*
* DeparseDropTextSearchConfigurationStmt returns the sql representation for a DROP TEXT
* SEARCH CONFIGURATION ... statement. Supports dropping multiple configurations at once.
*/
char *
DeparseDropTextSearchConfigurationStmt(Node *node)
{
DropStmt *stmt = castNode(DropStmt, node);
Assert(stmt->removeType == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfoString(&buf, "DROP TEXT SEARCH CONFIGURATION ");
List *nameList = NIL;
bool first = true;
foreach_ptr(nameList, stmt->objects)
{
if (!first)
{
appendStringInfoString(&buf, ", ");
}
first = false;
appendStringInfoString(&buf, NameListToQuotedString(nameList));
}
if (stmt->behavior == DROP_CASCADE)
{
appendStringInfoString(&buf, " CASCADE");
}
appendStringInfoString(&buf, ";");
return buf.data;
}
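For example, dropping two distributed configurations at once would deparse to something like the following (hypothetical names):

    DROP TEXT SEARCH CONFIGURATION public.config_a, public.config_b CASCADE;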
/*
* DeparseRenameTextSearchConfigurationStmt returns the sql representation of an ALTER TEXT
* SEARCH CONFIGURATION ... RENAME TO ... statement.
*/
char *
DeparseRenameTextSearchConfigurationStmt(Node *node)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
char *identifier = NameListToQuotedString(castNode(List, stmt->object));
appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s RENAME TO %s;",
identifier, quote_identifier(stmt->newname));
return buf.data;
}
/*
* DeparseAlterTextSearchConfigurationStmt returns the sql representation of any generic
* ALTER TEXT SEARCH CONFIGURATION .... statement. The statements supported include:
* - ALTER TEXT SEARCH CONFIGURATION ... ADD MAPPING FOR [, ...] WITH [, ...]
* - ALTER TEXT SEARCH CONFIGURATION ... ALTER MAPPING FOR [, ...] WITH [, ...]
* - ALTER TEXT SEARCH CONFIGURATION ... ALTER MAPPING REPLACE ... WITH ...
* - ALTER TEXT SEARCH CONFIGURATION ... ALTER MAPPING FOR [, ...] REPLACE ... WITH ...
* - ALTER TEXT SEARCH CONFIGURATION ... DROP MAPPING [ IF EXISTS ] FOR ...
*/
char *
DeparseAlterTextSearchConfigurationStmt(Node *node)
{
AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node);
StringInfoData buf = { 0 };
initStringInfo(&buf);
char *identifier = NameListToQuotedString(castNode(List, stmt->cfgname));
appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s", identifier);
switch (stmt->kind)
{
case ALTER_TSCONFIG_ADD_MAPPING:
{
appendStringInfoString(&buf, " ADD MAPPING FOR ");
AppendStringInfoTokentypeList(&buf, stmt->tokentype);
appendStringInfoString(&buf, " WITH ");
AppendStringInfoDictnames(&buf, stmt->dicts);
break;
}
case ALTER_TSCONFIG_ALTER_MAPPING_FOR_TOKEN:
{
appendStringInfoString(&buf, " ALTER MAPPING FOR ");
AppendStringInfoTokentypeList(&buf, stmt->tokentype);
appendStringInfoString(&buf, " WITH ");
AppendStringInfoDictnames(&buf, stmt->dicts);
break;
}
case ALTER_TSCONFIG_REPLACE_DICT:
case ALTER_TSCONFIG_REPLACE_DICT_FOR_TOKEN:
{
appendStringInfoString(&buf, " ALTER MAPPING");
if (list_length(stmt->tokentype) > 0)
{
appendStringInfoString(&buf, " FOR ");
AppendStringInfoTokentypeList(&buf, stmt->tokentype);
}
if (list_length(stmt->dicts) != 2)
{
elog(ERROR, "unexpected number of dictionaries while deparsing ALTER "
"TEXT SEARCH CONFIGURATION ... ALTER MAPPING [FOR ...] REPLACE "
"statement.");
}
appendStringInfo(&buf, " REPLACE %s",
NameListToQuotedString(linitial(stmt->dicts)));
appendStringInfo(&buf, " WITH %s",
NameListToQuotedString(lsecond(stmt->dicts)));
break;
}
case ALTER_TSCONFIG_DROP_MAPPING:
{
appendStringInfoString(&buf, " DROP MAPPING");
if (stmt->missing_ok)
{
appendStringInfoString(&buf, " IF EXISTS");
}
appendStringInfoString(&buf, " FOR ");
AppendStringInfoTokentypeList(&buf, stmt->tokentype);
break;
}
default:
{
elog(ERROR, "unable to deparse unsupported ALTER TEXT SEARCH STATEMENT");
}
}
appendStringInfoString(&buf, ";");
return buf.data;
}
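As an illustration, the ALTER_TSCONFIG_REPLACE_DICT_FOR_TOKEN branch yields statements of this form (hypothetical names):

    ALTER TEXT SEARCH CONFIGURATION public.german_noaccent
        ALTER MAPPING FOR asciiword REPLACE pg_catalog.german_stem WITH pg_catalog.simple;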
/*
* DeparseAlterTextSearchConfigurationSchemaStmt returns the sql statement representing
* ALTER TEXT SEARCH CONFIGURATION ... SET SCHEMA ... statements.
*/
char *
DeparseAlterTextSearchConfigurationSchemaStmt(Node *node)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s SET SCHEMA %s;",
NameListToQuotedString(castNode(List, stmt->object)),
quote_identifier(stmt->newschema));
return buf.data;
}
/*
* DeparseTextSearchConfigurationCommentStmt returns the sql statement representing
* COMMENT ON TEXT SEARCH CONFIGURATION ... IS ...
*/
char *
DeparseTextSearchConfigurationCommentStmt(Node *node)
{
CommentStmt *stmt = castNode(CommentStmt, node);
Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfo(&buf, "COMMENT ON TEXT SEARCH CONFIGURATION %s IS ",
NameListToQuotedString(castNode(List, stmt->object)));
if (stmt->comment == NULL)
{
appendStringInfoString(&buf, "NULL");
}
else
{
appendStringInfoString(&buf, quote_literal_cstr(stmt->comment));
}
appendStringInfoString(&buf, ";");
return buf.data;
}
/*
* AppendStringInfoTokentypeList specializes in adding a comma separated list of
* token_type's to TEXT SEARCH CONFIGURATION commands
*/
static void
AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes)
{
Value *tokentype = NULL;
bool first = true;
foreach_ptr(tokentype, tokentypes)
{
if (nodeTag(tokentype) != T_String)
{
elog(ERROR,
"unexpected tokentype for deparsing in text search configuration");
}
if (!first)
{
appendStringInfoString(buf, ", ");
}
first = false;
appendStringInfoString(buf, strVal(tokentype));
}
}
/*
* AppendStringInfoDictnames specializes in appending a comma separated list of
* dictionaries to TEXT SEARCH CONFIGURATION commands.
*/
static void
AppendStringInfoDictnames(StringInfo buf, List *dicts)
{
List *dictNames = NIL;
bool first = true;
foreach_ptr(dictNames, dicts)
{
if (!first)
{
appendStringInfoString(buf, ", ");
}
first = false;
char *dictIdentifier = NameListToQuotedString(dictNames);
appendStringInfoString(buf, dictIdentifier);
}
}
/*
* DeparseAlterTextSearchConfigurationOwnerStmt returns the sql statement representing
* ALTER TEXT SEARCH CONFIGURATION ... OWNER TO ... commands.
*/
char *
DeparseAlterTextSearchConfigurationOwnerStmt(Node *node)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s OWNER TO %s;",
NameListToQuotedString(castNode(List, stmt->object)),
RoleSpecString(stmt->newowner, true));
return buf.data;
}

View File

@ -0,0 +1,278 @@
/*-------------------------------------------------------------------------
*
* qualify_text_search_stmts.c
* Functions specialized in fully qualifying all text search statements. These
* functions are dispatched from qualify.c
*
* Fully qualifying text search statements consists of adding the schema name
* to the subject of the statements as well as any other branch of the parsetree.
*
* The goal is that the deparser functions for these statements can
* serialize the statement without any external lookups.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/namespace.h"
#include "catalog/pg_ts_config.h"
#include "catalog/pg_ts_dict.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
static Oid get_ts_config_namespace(Oid tsconfigOid);
static Oid get_ts_dict_namespace(Oid tsdictOid);
/*
* QualifyDropTextSearchConfigurationStmt adds any missing schema names to text search
* configurations being dropped. All configurations are expected to exist before fully
* qualifying the statement. Errors will be raised for objects not existing. Non-existing
* objects are expected to not be distributed.
*/
void
QualifyDropTextSearchConfigurationStmt(Node *node)
{
DropStmt *stmt = castNode(DropStmt, node);
Assert(stmt->removeType == OBJECT_TSCONFIGURATION);
List *qualifiedObjects = NIL;
List *objName = NIL;
foreach_ptr(objName, stmt->objects)
{
char *schemaName = NULL;
char *tsconfigName = NULL;
DeconstructQualifiedName(objName, &schemaName, &tsconfigName);
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(objName, false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
objName = list_make2(makeString(schemaName),
makeString(tsconfigName));
}
qualifiedObjects = lappend(qualifiedObjects, objName);
}
stmt->objects = qualifiedObjects;
}
/*
* QualifyAlterTextSearchConfigurationStmt adds the schema name (if missing) to the name
* of the text search configurations, as well as the dictionaries referenced.
*/
void
QualifyAlterTextSearchConfigurationStmt(Node *node)
{
AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(stmt->cfgname, &schemaName, &objName);
/* fully qualify the cfgname being altered */
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(stmt->cfgname, false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->cfgname = list_make2(makeString(schemaName),
makeString(objName));
}
/* fully qualify the dicts */
bool useNewDicts = false;
List *dicts = NULL;
List *dictName = NIL;
foreach_ptr(dictName, stmt->dicts)
{
DeconstructQualifiedName(dictName, &schemaName, &objName);
/* fully qualify the dictionary name */
if (!schemaName)
{
Oid dictOid = get_ts_dict_oid(dictName, false);
Oid namespaceOid = get_ts_dict_namespace(dictOid);
schemaName = get_namespace_name(namespaceOid);
useNewDicts = true;
dictName = list_make2(makeString(schemaName), makeString(objName));
}
dicts = lappend(dicts, dictName);
}
if (useNewDicts)
{
/* swap original dicts with the new list */
stmt->dicts = dicts;
}
else
{
/* we don't use the new list, everything was already qualified, so free it */
list_free(dicts);
}
}
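A sketch of what qualification does here, assuming the unqualified names resolve to the public and pg_catalog schemas on the coordinator: a statement written as

    ALTER TEXT SEARCH CONFIGURATION my_config ALTER MAPPING FOR asciiword WITH german_stem;

is rewritten in the parse tree as if it had been

    ALTER TEXT SEARCH CONFIGURATION public.my_config ALTER MAPPING FOR asciiword WITH pg_catalog.german_stem;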
/*
* QualifyRenameTextSearchConfigurationStmt adds the schema name (if missing) to the
* configuration being renamed. The new name will be kept without a schema name since this
* command cannot be used to change the schema of a configuration.
*/
void
QualifyRenameTextSearchConfigurationStmt(Node *node)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_TSCONFIGURATION);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);
/* fully qualify the cfgname being altered */
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->object = (Node *) list_make2(makeString(schemaName),
makeString(objName));
}
}
/*
* QualifyAlterTextSearchConfigurationSchemaStmt adds the schema name (if missing) for the
* text search configuration being moved to a new schema.
*/
void
QualifyAlterTextSearchConfigurationSchemaStmt(Node *node)
{
AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->object = (Node *) list_make2(makeString(schemaName),
makeString(objName));
}
}
/*
* QualifyTextSearchConfigurationCommentStmt adds the schema name (if missing) to the
* configuration name on which the comment is created.
*/
void
QualifyTextSearchConfigurationCommentStmt(Node *node)
{
CommentStmt *stmt = castNode(CommentStmt, node);
Assert(stmt->objtype == OBJECT_TSCONFIGURATION);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->object = (Node *) list_make2(makeString(schemaName),
makeString(objName));
}
}
/*
* QualifyAlterTextSearchConfigurationOwnerStmt adds the schema name (if missing) to the
* configuration for which the owner is changing.
*/
void
QualifyAlterTextSearchConfigurationOwnerStmt(Node *node)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_TSCONFIGURATION);
char *schemaName = NULL;
char *objName = NULL;
DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);
if (!schemaName)
{
Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false);
Oid namespaceOid = get_ts_config_namespace(tsconfigOid);
schemaName = get_namespace_name(namespaceOid);
stmt->object = (Node *) list_make2(makeString(schemaName),
makeString(objName));
}
}
/*
* get_ts_config_namespace returns the oid of the namespace which is housing the text
* search configuration identified by tsconfigOid.
*/
static Oid
get_ts_config_namespace(Oid tsconfigOid)
{
HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid));
if (HeapTupleIsValid(tup))
{
Form_pg_ts_config cfgform = (Form_pg_ts_config) GETSTRUCT(tup);
Oid namespaceOid = cfgform->cfgnamespace;
ReleaseSysCache(tup);
return namespaceOid;
}
return InvalidOid;
}
/*
* get_ts_dict_namespace returns the oid of the namespace which is housing the text
* search dictionary identified by tsdictOid.
*/
static Oid
get_ts_dict_namespace(Oid tsdictOid)
{
HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid));
if (HeapTupleIsValid(tup))
{
Form_pg_ts_dict cfgform = (Form_pg_ts_dict) GETSTRUCT(tup);
Oid namespaceOid = cfgform->dictnamespace;
ReleaseSysCache(tup);
return namespaceOid;
}
return InvalidOid;
}

View File

@ -237,16 +237,17 @@ CitusExecutorRun(QueryDesc *queryDesc,
* transactions. * transactions.
*/ */
CitusTableCacheFlushInvalidatedEntries(); CitusTableCacheFlushInvalidatedEntries();
InTopLevelDelegatedFunctionCall = false;
}
/* /*
* Within a 2PC, when a function is delegated to a remote node, we pin * Within a 2PC, when a function is delegated to a remote node, we pin
* the distribution argument as the shard key for all the SQL in the * the distribution argument as the shard key for all the SQL in the
* function's block. The restriction is imposed to not to access other * function's block. The restriction is imposed to not to access other
* nodes from the current node and violate the transactional integrity * nodes from the current node, and violate the transactional integrity
* of the 2PC. Now that the query is ending, reset the shard key to NULL. * of the 2PC. Now that the query is ending, reset the shard key to NULL.
*/ */
ResetAllowedShardKeyValue(); CheckAndResetAllowedShardKeyValueIfNeeded();
}
} }
PG_CATCH(); PG_CATCH();
{ {
@ -260,12 +261,14 @@ CitusExecutorRun(QueryDesc *queryDesc,
if (ExecutorLevel == 0 && PlannerLevel == 0) if (ExecutorLevel == 0 && PlannerLevel == 0)
{ {
InTopLevelDelegatedFunctionCall = false;
}
/* /*
* In case of an exception, reset the pinned shard-key, for more * In case of an exception, reset the pinned shard-key, for more
* details see the function header. * details see the function header.
*/ */
ResetAllowedShardKeyValue(); CheckAndResetAllowedShardKeyValueIfNeeded();
}
PG_RE_THROW(); PG_RE_THROW();
} }
@ -770,6 +773,11 @@ GetObjectTypeString(ObjectType objType)
return "schema"; return "schema";
} }
case OBJECT_TSCONFIGURATION:
{
return "text search configuration";
}
case OBJECT_TYPE: case OBJECT_TYPE:
{ {
return "type"; return "type";

View File

@ -124,6 +124,7 @@ typedef struct ViewDependencyNode
static List * GetRelationSequenceDependencyList(Oid relationId); static List * GetRelationSequenceDependencyList(Oid relationId);
static List * GetRelationTriggerFunctionDependencyList(Oid relationId); static List * GetRelationTriggerFunctionDependencyList(Oid relationId);
static List * GetRelationStatsSchemaDependencyList(Oid relationId); static List * GetRelationStatsSchemaDependencyList(Oid relationId);
static List * GetRelationIndicesDependencyList(Oid relationId);
static DependencyDefinition * CreateObjectAddressDependencyDef(Oid classId, Oid objectId); static DependencyDefinition * CreateObjectAddressDependencyDef(Oid classId, Oid objectId);
static List * CreateObjectAddressDependencyDefList(Oid classId, List *objectIdList); static List * CreateObjectAddressDependencyDefList(Oid classId, List *objectIdList);
static ObjectAddress DependencyDefinitionObjectAddress(DependencyDefinition *definition); static ObjectAddress DependencyDefinitionObjectAddress(DependencyDefinition *definition);
@ -155,6 +156,8 @@ static bool FollowAllSupportedDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition); DependencyDefinition *definition);
static bool FollowNewSupportedDependencies(ObjectAddressCollector *collector, static bool FollowNewSupportedDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition); DependencyDefinition *definition);
static bool FollowAllDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition);
static void ApplyAddToDependencyList(ObjectAddressCollector *collector, static void ApplyAddToDependencyList(ObjectAddressCollector *collector,
DependencyDefinition *definition); DependencyDefinition *definition);
static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector, static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector,
@ -211,15 +214,42 @@ GetDependenciesForObject(const ObjectAddress *target)
/* /*
* GetAllDependenciesForObject returns a list of all the ObjectAddresses to be * GetAllSupportedDependenciesForObject returns a list of all the ObjectAddresses to be
* created in order before the target object could safely be created on a * created in order before the target object could safely be created on a worker, if all
* worker. As a caller, you probably need GetDependenciesForObject() which * dependent objects are distributable. As a caller, you probably need to use
* eliminates already distributed objects from the returned list. * GetDependenciesForObject() which eliminates already distributed objects from the returned
* list.
* *
* Some of the object might already be created on a worker. It should be created * Some of the object might already be created on a worker. It should be created
* in an idempotent way. * in an idempotent way.
*/ */
List * List *
GetAllSupportedDependenciesForObject(const ObjectAddress *target)
{
ObjectAddressCollector collector = { 0 };
InitObjectAddressCollector(&collector);
RecurseObjectDependencies(*target,
&ExpandCitusSupportedTypes,
&FollowAllSupportedDependencies,
&ApplyAddToDependencyList,
&collector);
return collector.dependencyList;
}
/*
* GetAllDependenciesForObject returns a list of all the dependent objects of the given
* object irrespective of whether the dependent object is supported by Citus or not, if
* the object can be found as a dependency with RecurseObjectDependencies and
* ExpandCitusSupportedTypes.
*
* This function will be used to provide meaningful error messages if any dependent
* object for a given object is not supported. If you want to create dependencies for
* an object, you probably need to use GetDependenciesForObject().
*/
List *
GetAllDependenciesForObject(const ObjectAddress *target) GetAllDependenciesForObject(const ObjectAddress *target)
{ {
ObjectAddressCollector collector = { 0 }; ObjectAddressCollector collector = { 0 };
@ -227,7 +257,7 @@ GetAllDependenciesForObject(const ObjectAddress *target)
RecurseObjectDependencies(*target, RecurseObjectDependencies(*target,
&ExpandCitusSupportedTypes, &ExpandCitusSupportedTypes,
&FollowAllSupportedDependencies, &FollowAllDependencies,
&ApplyAddToDependencyList, &ApplyAddToDependencyList,
&collector); &collector);
@ -639,6 +669,11 @@ SupportedDependencyByCitus(const ObjectAddress *address)
return true; return true;
} }
case OCLASS_TSCONFIG:
{
return true;
}
case OCLASS_TYPE: case OCLASS_TYPE:
{ {
switch (get_typtype(address->objectId)) switch (get_typtype(address->objectId))
@ -686,7 +721,8 @@ SupportedDependencyByCitus(const ObjectAddress *address)
relKind == RELKIND_RELATION || relKind == RELKIND_RELATION ||
relKind == RELKIND_PARTITIONED_TABLE || relKind == RELKIND_PARTITIONED_TABLE ||
relKind == RELKIND_FOREIGN_TABLE || relKind == RELKIND_FOREIGN_TABLE ||
relKind == RELKIND_SEQUENCE) relKind == RELKIND_SEQUENCE ||
relKind == RELKIND_INDEX)
{ {
return true; return true;
} }
@ -896,10 +932,61 @@ FollowAllSupportedDependencies(ObjectAddressCollector *collector,
/* /*
* ApplyAddToDependencyList is an apply function for RecurseObjectDependencies that will collect * FollowAllDependencies applies filters on pg_depend entries to follow the dependency
* all the ObjectAddresses for pg_depend entries to the context. The context here is * tree of objects in depth first order. We will visit all objects irrespective of whether it is
* assumed to be a (ObjectAddressCollector *) to the location where all ObjectAddresses * supported by Citus or not.
* will be collected. */
static bool
FollowAllDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition)
{
if (definition->mode == DependencyPgDepend)
{
/*
* For dependencies found in pg_depend:
*
* Follow only normal and extension dependencies. The latter is used to reach the
* extensions; the objects that directly depend on the extension are eliminated
* during the "apply" phase.
*
* Other dependencies are internal dependencies and managed by postgres.
*/
if (definition->data.pg_depend.deptype != DEPENDENCY_NORMAL &&
definition->data.pg_depend.deptype != DEPENDENCY_EXTENSION)
{
return false;
}
}
/* rest of the tests are to see if we want to follow the actual dependency */
ObjectAddress address = DependencyDefinitionObjectAddress(definition);
/*
* If the object is already in our dependency list we do not have to follow any
* further
*/
if (IsObjectAddressCollected(address, collector))
{
return false;
}
if (CitusExtensionObject(&address))
{
/* following citus extension could complicate role management */
return false;
}
return true;
}
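In pg_depend terms, DEPENDENCY_NORMAL and DEPENDENCY_EXTENSION are the 'n' and 'e' deptype values; a rough SQL sketch of the same filter for a relation (my_table is a made-up name):
SELECT refclassid::regclass AS referenced_catalog, refobjid, deptype
FROM pg_depend
WHERE classid = 'pg_class'::regclass
  AND objid = 'my_table'::regclass
  AND deptype IN ('n', 'e');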
/*
* ApplyAddToDependencyList is an apply function for RecurseObjectDependencies that will
* collect all the ObjectAddresses for pg_depend entries into the context, unless the
* object is owned by an extension.
*
* The context here is assumed to be a (ObjectAddressCollector *) to the location where
* all ObjectAddresses will be collected.
*/ */
static void static void
ApplyAddToDependencyList(ObjectAddressCollector *collector, ApplyAddToDependencyList(ObjectAddressCollector *collector,
@ -1005,6 +1092,17 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
List *sequenceDependencyList = GetRelationSequenceDependencyList(relationId); List *sequenceDependencyList = GetRelationSequenceDependencyList(relationId);
result = list_concat(result, sequenceDependencyList); result = list_concat(result, sequenceDependencyList);
/*
* Tables could have indexes. Indexes themselves could have dependencies that
* need to be propagated, e.g. TEXT SEARCH CONFIGURATIONS. Here we add the
* addresses of all indices to the list of objects to visit, so as to make sure
* we create all objects required by the indices before we create the table,
* including its indices.
*/
List *indexDependencyList = GetRelationIndicesDependencyList(relationId);
result = list_concat(result, indexDependencyList);
} }
default: default:
@ -1048,6 +1146,28 @@ GetRelationStatsSchemaDependencyList(Oid relationId)
} }
/*
* CollectIndexOids implements PGIndexProcessor to create a list of all index oids
*/
static void
CollectIndexOids(Form_pg_index formPgIndex, List **oids, int flags)
{
*oids = lappend_oid(*oids, formPgIndex->indexrelid);
}
/*
* GetRelationIndicesDependencyList creates a list of ObjectAddressDependencies for the
* indexes on a given relation.
*/
static List *
GetRelationIndicesDependencyList(Oid relationId)
{
List *indexIds = ExecuteFunctionOnEachTableIndex(relationId, CollectIndexOids, 0);
return CreateObjectAddressDependencyDefList(RelationRelationId, indexIds);
}
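The scenario this covers, roughly: an index on a distributed table can reference another object such as a text search configuration, which must exist on the workers before the table and its indexes are created there. A hedged sketch with made-up names, assuming a Citus cluster:
CREATE TEXT SEARCH CONFIGURATION my_text_config ( COPY = english );
CREATE TABLE documents (id bigint, body text);
CREATE INDEX documents_body_idx
  ON documents USING gin (to_tsvector('my_text_config', body));
SELECT create_distributed_table('documents', 'id');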
/* /*
* GetRelationTriggerFunctionDependencyList returns a list of DependencyDefinition * GetRelationTriggerFunctionDependencyList returns a list of DependencyDefinition
* objects for the functions that triggers of the relation with relationId depends. * objects for the functions that triggers of the relation with relationId depends.

View File

@ -405,6 +405,21 @@ GetDistributedObjectAddressList(void)
} }
/*
* GetRoleSpecObjectForUser creates a RoleSpec object for the given roleOid.
*/
RoleSpec *
GetRoleSpecObjectForUser(Oid roleOid)
{
RoleSpec *roleSpec = makeNode(RoleSpec);
roleSpec->roletype = OidIsValid(roleOid) ? ROLESPEC_CSTRING : ROLESPEC_PUBLIC;
roleSpec->rolename = OidIsValid(roleOid) ? GetUserNameFromId(roleOid, false) : NULL;
roleSpec->location = -1;
return roleSpec;
}
/* /*
* UpdateDistributedObjectColocationId gets an old and a new colocationId * UpdateDistributedObjectColocationId gets an old and a new colocationId
* and updates the colocationId of all tuples in citus.pg_dist_object which * and updates the colocationId of all tuples in citus.pg_dist_object which

View File

@ -31,6 +31,7 @@
#include "commands/dbcommands.h" #include "commands/dbcommands.h"
#include "commands/extension.h" #include "commands/extension.h"
#include "commands/trigger.h" #include "commands/trigger.h"
#include "distributed/backend_data.h"
#include "distributed/colocation_utils.h" #include "distributed/colocation_utils.h"
#include "distributed/connection_management.h" #include "distributed/connection_management.h"
#include "distributed/citus_ruleutils.h" #include "distributed/citus_ruleutils.h"
@ -3666,9 +3667,10 @@ GetLocalNodeId(void)
/*
* This is expected if the coordinator is not added to the metadata.
* We'll return GLOBAL_PID_NODE_ID_FOR_NODES_NOT_IN_METADATA for this case and
* for all cases so views can function almost normally.
*/
*/ */
nodeId = 0; nodeId = GLOBAL_PID_NODE_ID_FOR_NODES_NOT_IN_METADATA;
} }
LocalNodeId = nodeId; LocalNodeId = nodeId;

View File

@ -102,7 +102,6 @@ static GrantStmt * GenerateGrantStmtForRights(ObjectType objectType,
bool withGrantOption); bool withGrantOption);
static List * GetObjectsForGrantStmt(ObjectType objectType, Oid objectId); static List * GetObjectsForGrantStmt(ObjectType objectType, Oid objectId);
static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission); static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission);
static RoleSpec * GetRoleSpecObjectForGrantStmt(Oid roleOid);
static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid, static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid,
AclItem *aclItem); AclItem *aclItem);
static void SetLocalEnableMetadataSync(bool state); static void SetLocalEnableMetadataSync(bool state);
@ -1023,7 +1022,6 @@ DistributionCreateCommand(CitusTableCacheEntry *cacheEntry)
StringInfo insertDistributionCommand = makeStringInfo(); StringInfo insertDistributionCommand = makeStringInfo();
Oid relationId = cacheEntry->relationId; Oid relationId = cacheEntry->relationId;
char distributionMethod = cacheEntry->partitionMethod; char distributionMethod = cacheEntry->partitionMethod;
char *partitionKeyString = cacheEntry->partitionKeyString;
char *qualifiedRelationName = char *qualifiedRelationName =
generate_qualified_relation_name(relationId); generate_qualified_relation_name(relationId);
uint32 colocationId = cacheEntry->colocationId; uint32 colocationId = cacheEntry->colocationId;
@ -1037,7 +1035,7 @@ DistributionCreateCommand(CitusTableCacheEntry *cacheEntry)
else else
{ {
char *partitionKeyColumnName = char *partitionKeyColumnName =
ColumnToColumnName(relationId, partitionKeyString); ColumnToColumnName(relationId, (Node *) cacheEntry->partitionColumn);
appendStringInfo(tablePartitionKeyNameString, "%s", appendStringInfo(tablePartitionKeyNameString, "%s",
quote_literal_cstr(partitionKeyColumnName)); quote_literal_cstr(partitionKeyColumnName));
} }
@ -1782,7 +1780,7 @@ GenerateGrantStmtForRights(ObjectType objectType,
stmt->objtype = objectType; stmt->objtype = objectType;
stmt->objects = GetObjectsForGrantStmt(objectType, objectId); stmt->objects = GetObjectsForGrantStmt(objectType, objectId);
stmt->privileges = list_make1(GetAccessPrivObjectForGrantStmt(permission)); stmt->privileges = list_make1(GetAccessPrivObjectForGrantStmt(permission));
stmt->grantees = list_make1(GetRoleSpecObjectForGrantStmt(roleOid)); stmt->grantees = list_make1(GetRoleSpecObjectForUser(roleOid));
stmt->grant_option = withGrantOption; stmt->grant_option = withGrantOption;
return stmt; return stmt;
@ -1831,22 +1829,6 @@ GetAccessPrivObjectForGrantStmt(char *permission)
} }
/*
* GetRoleSpecObjectForGrantStmt creates a RoleSpec object for the given roleOid.
* It will be used when creating GrantStmt objects.
*/
static RoleSpec *
GetRoleSpecObjectForGrantStmt(Oid roleOid)
{
RoleSpec *roleSpec = makeNode(RoleSpec);
roleSpec->roletype = OidIsValid(roleOid) ? ROLESPEC_CSTRING : ROLESPEC_PUBLIC;
roleSpec->rolename = OidIsValid(roleOid) ? GetUserNameFromId(roleOid, false) : NULL;
roleSpec->location = -1;
return roleSpec;
}
/* /*
* SetLocalEnableMetadataSync sets the enable_metadata_sync locally * SetLocalEnableMetadataSync sets the enable_metadata_sync locally
*/ */
@ -2462,12 +2444,10 @@ citus_internal_add_partition_metadata(PG_FUNCTION_ARGS)
distributionColumnText = PG_GETARG_TEXT_P(2); distributionColumnText = PG_GETARG_TEXT_P(2);
distributionColumnString = text_to_cstring(distributionColumnText); distributionColumnString = text_to_cstring(distributionColumnText);
Relation relation = relation_open(relationId, AccessShareLock);
distributionColumnVar = distributionColumnVar =
BuildDistributionKeyFromColumnName(relation, distributionColumnString); BuildDistributionKeyFromColumnName(relationId, distributionColumnString,
AccessShareLock);
Assert(distributionColumnVar != NULL); Assert(distributionColumnVar != NULL);
relation_close(relation, NoLock);
} }
if (!ShouldSkipMetadataChecks()) if (!ShouldSkipMetadataChecks())

View File

@ -1539,6 +1539,31 @@ FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePort)
} }
/*
* FindNodeWithNodeId searches pg_dist_node and returns the node with the nodeId.
* If the node cannot be found, this function errors out.
*/
WorkerNode *
FindNodeWithNodeId(int nodeId)
{
List *workerList = ActiveReadableNodeList();
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerList)
{
if (workerNode->nodeId == nodeId)
{
return workerNode;
}
}
/* there isn't any node with nodeId in pg_dist_node */
elog(ERROR, "worker node with node id %d could not be found", nodeId);
return NULL;
}
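A SQL approximation of the same lookup against the metadata (node id 2 is an arbitrary example value):
SELECT nodeid, nodename, nodeport
FROM pg_dist_node
WHERE nodeid = 2;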
/* /*
* ReadDistNode iterates over pg_dist_node table, converts each row * ReadDistNode iterates over pg_dist_node table, converts each row
* into it's memory representation (i.e., WorkerNode) and adds them into * into it's memory representation (i.e., WorkerNode) and adds them into

View File

@ -410,6 +410,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
case OBJECT_FUNCTION: case OBJECT_FUNCTION:
case OBJECT_PROCEDURE: case OBJECT_PROCEDURE:
case OBJECT_AGGREGATE: case OBJECT_AGGREGATE:
case OBJECT_TSCONFIGURATION:
case OBJECT_TYPE: case OBJECT_TYPE:
case OBJECT_FOREIGN_SERVER: case OBJECT_FOREIGN_SERVER:
case OBJECT_SEQUENCE: case OBJECT_SEQUENCE:

View File

@ -14,6 +14,7 @@
#include "access/htup_details.h" #include "access/htup_details.h"
#include "catalog/pg_type.h" #include "catalog/pg_type.h"
#include "distributed/backend_data.h"
#include "distributed/connection_management.h" #include "distributed/connection_management.h"
#include "distributed/metadata_cache.h" #include "distributed/metadata_cache.h"
#include "distributed/multi_client_executor.h" #include "distributed/multi_client_executor.h"
@ -50,8 +51,6 @@ static void ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray,
bool *statusArray, bool *statusArray,
StringInfo *resultStringArray, StringInfo *resultStringArray,
int commandCount); int commandCount);
static bool ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort,
char *queryString, StringInfo queryResult);
static Tuplestorestate * CreateTupleStore(TupleDesc tupleDescriptor, static Tuplestorestate * CreateTupleStore(TupleDesc tupleDescriptor,
StringInfo *nodeNameArray, int *nodePortArray, StringInfo *nodeNameArray, int *nodePortArray,
bool *statusArray, bool *statusArray,
@ -474,9 +473,10 @@ ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray,
int32 nodePort = nodePortArray[commandIndex]; int32 nodePort = nodePortArray[commandIndex];
char *queryString = commandStringArray[commandIndex]->data; char *queryString = commandStringArray[commandIndex]->data;
StringInfo queryResultString = resultStringArray[commandIndex]; StringInfo queryResultString = resultStringArray[commandIndex];
bool reportResultError = false;
bool success = ExecuteRemoteQueryOrCommand(nodeName, nodePort, queryString, bool success = ExecuteRemoteQueryOrCommand(nodeName, nodePort, queryString,
queryResultString); queryResultString, reportResultError);
statusArray[commandIndex] = success; statusArray[commandIndex] = success;
@ -491,9 +491,9 @@ ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray,
* (success/failure), and query result. The query is expected to return a single * (success/failure), and query result. The query is expected to return a single
* target containing zero or one rows. * target containing zero or one rows.
*/ */
static bool bool
ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString, ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString,
StringInfo queryResultString) StringInfo queryResultString, bool reportResultError)
{ {
int connectionFlags = FORCE_NEW_CONNECTION; int connectionFlags = FORCE_NEW_CONNECTION;
MultiConnection *connection = MultiConnection *connection =
@ -517,6 +517,11 @@ ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString,
PGresult *queryResult = GetRemoteCommandResult(connection, raiseInterrupts); PGresult *queryResult = GetRemoteCommandResult(connection, raiseInterrupts);
bool success = EvaluateQueryResult(connection, queryResult, queryResultString); bool success = EvaluateQueryResult(connection, queryResult, queryResultString);
if (!success && reportResultError)
{
ReportResultError(connection, queryResult, ERROR);
}
PQclear(queryResult); PQclear(queryResult);
/* close the connection */ /* close the connection */

View File

@ -723,6 +723,16 @@ FunctionInFromClause(List *fromlist, Query *query)
static void static void
EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId) EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId)
{ {
/*
* If the distribution key is already set, the key is fixed until
* the force-delegation function returns. All nested force-delegation
* functions must use the same key.
*/
if (AllowedDistributionColumnValue.isActive)
{
return;
}
/* /*
* The saved distribution argument needs to persist through the life
* of the query, both during the planning (where we save) and execution
* of the query, both during the planning (where we save) and execution * of the query, both during the planning (where we save) and execution
@ -734,6 +744,7 @@ EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId)
colocationId)); colocationId));
AllowedDistributionColumnValue.distributionColumnValue = copyObject(distArgument); AllowedDistributionColumnValue.distributionColumnValue = copyObject(distArgument);
AllowedDistributionColumnValue.colocationId = colocationId; AllowedDistributionColumnValue.colocationId = colocationId;
AllowedDistributionColumnValue.executorLevel = ExecutorLevel;
AllowedDistributionColumnValue.isActive = true; AllowedDistributionColumnValue.isActive = true;
MemoryContextSwitchTo(oldcontext); MemoryContextSwitchTo(oldcontext);
} }
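For context, the pinned distribution argument comes from forced function delegation; a sketch of marking a function for it, assuming the force_delegation option of create_distributed_function and made-up object names:
-- assumes a table 'orders' distributed on customer_id and a function
-- process_order(p_customer_id bigint) defined on the coordinator
SELECT create_distributed_function(
  'process_order(bigint)',
  distribution_arg_name := 'p_customer_id',
  colocate_with := 'orders',
  force_delegation := true);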
@ -747,15 +758,22 @@ EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId)
* the 2PC. Reset the distribution argument value once the function ends. * the 2PC. Reset the distribution argument value once the function ends.
*/ */
void void
ResetAllowedShardKeyValue(void) CheckAndResetAllowedShardKeyValueIfNeeded(void)
{ {
if (AllowedDistributionColumnValue.isActive) /*
* If no distribution argument is pinned or the pinned argument was
* set by a nested-executor from upper level, nothing to reset.
*/
if (!AllowedDistributionColumnValue.isActive ||
ExecutorLevel > AllowedDistributionColumnValue.executorLevel)
{ {
pfree(AllowedDistributionColumnValue.distributionColumnValue); return;
AllowedDistributionColumnValue.isActive = false;
} }
InTopLevelDelegatedFunctionCall = false; Assert(ExecutorLevel == AllowedDistributionColumnValue.executorLevel);
pfree(AllowedDistributionColumnValue.distributionColumnValue);
AllowedDistributionColumnValue.isActive = false;
AllowedDistributionColumnValue.executorLevel = 0;
} }
@ -767,6 +785,7 @@ bool
IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId) IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId)
{ {
Assert(AllowedDistributionColumnValue.isActive); Assert(AllowedDistributionColumnValue.isActive);
Assert(ExecutorLevel > AllowedDistributionColumnValue.executorLevel);
ereport(DEBUG4, errmsg("Comparing saved:%s with Shard key: %s colocationid:%d:%d", ereport(DEBUG4, errmsg("Comparing saved:%s with Shard key: %s colocationid:%d:%d",
pretty_format_node_dump( pretty_format_node_dump(

View File

@ -1018,9 +1018,9 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
StringInfo errorHint = makeStringInfo(); StringInfo errorHint = makeStringInfo();
CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry( CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(
distributedTableId); distributedTableId);
char *partitionKeyString = cacheEntry->partitionKeyString; char *partitionColumnName =
char *partitionColumnName = ColumnToColumnName(distributedTableId, ColumnToColumnName(distributedTableId,
partitionKeyString); (Node *) cacheEntry->partitionColumn);
appendStringInfo(errorHint, "Consider using an equality filter on " appendStringInfo(errorHint, "Consider using an equality filter on "
"partition column \"%s\" to target a single shard.", "partition column \"%s\" to target a single shard.",
@ -3058,8 +3058,8 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
if (prunedShardIntervalCount != 1) if (prunedShardIntervalCount != 1)
{ {
char *partitionKeyString = cacheEntry->partitionKeyString; char *partitionKeyString = cacheEntry->partitionKeyString;
char *partitionColumnName = ColumnToColumnName(distributedTableId, char *partitionColumnName =
partitionKeyString); ColumnToColumnName(distributedTableId, stringToNode(partitionKeyString));
StringInfo errorMessage = makeStringInfo(); StringInfo errorMessage = makeStringInfo();
StringInfo errorHint = makeStringInfo(); StringInfo errorHint = makeStringInfo();
const char *targetCountType = NULL; const char *targetCountType = NULL;

View File

@ -513,6 +513,9 @@ CitusCleanupConnectionsAtExit(int code, Datum arg)
* are already given away. * are already given away.
*/ */
DeallocateReservedConnections(); DeallocateReservedConnections();
/* we don't want any monitoring view/udf to show already exited backends */
UnSetGlobalPID();
} }

View File

@ -17,7 +17,13 @@
#include "udfs/get_all_active_transactions/11.0-1.sql" #include "udfs/get_all_active_transactions/11.0-1.sql"
#include "udfs/get_global_active_transactions/11.0-1.sql" #include "udfs/get_global_active_transactions/11.0-1.sql"
#include "udfs/citus_internal_local_blocked_processes/11.0-1.sql"
#include "udfs/citus_internal_global_blocked_processes/11.0-1.sql"
#include "udfs/citus_worker_stat_activity/11.0-1.sql" #include "udfs/citus_worker_stat_activity/11.0-1.sql"
#include "udfs/worker_create_or_replace_object/11.0-1.sql"
#include "udfs/citus_isolation_test_session_is_blocked/11.0-1.sql"
#include "udfs/citus_blocking_pids/11.0-1.sql"
CREATE VIEW citus.citus_worker_stat_activity AS CREATE VIEW citus.citus_worker_stat_activity AS
SELECT * FROM pg_catalog.citus_worker_stat_activity(); SELECT * FROM pg_catalog.citus_worker_stat_activity();
@ -34,6 +40,9 @@ GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC;
-- we have to recreate this view because recreated citus_dist_stat_activity that this view depends -- we have to recreate this view because recreated citus_dist_stat_activity that this view depends
#include "udfs/citus_lock_waits/11.0-1.sql" #include "udfs/citus_lock_waits/11.0-1.sql"
#include "udfs/pg_cancel_backend/11.0-1.sql"
#include "udfs/pg_terminate_backend/11.0-1.sql"
DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text); DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text);
DROP FUNCTION pg_catalog.master_get_table_metadata(text); DROP FUNCTION pg_catalog.master_get_table_metadata(text);
DROP FUNCTION pg_catalog.master_append_table_to_shard(bigint, text, text, integer); DROP FUNCTION pg_catalog.master_append_table_to_shard(bigint, text, text, integer);

View File

@ -21,13 +21,7 @@ ALTER FUNCTION citus.restore_isolation_tester_func SET SCHEMA citus_internal;
GRANT USAGE ON SCHEMA citus TO public; GRANT USAGE ON SCHEMA citus TO public;
#include "udfs/pg_dist_shard_placement_trigger_func/9.0-1.sql" #include "udfs/pg_dist_shard_placement_trigger_func/9.0-1.sql"
#include "udfs/worker_create_or_replace_object/9.0-1.sql"
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object';
CREATE OR REPLACE FUNCTION pg_catalog.master_unmark_object_distributed(classid oid, objid oid, objsubid int) CREATE OR REPLACE FUNCTION pg_catalog.master_unmark_object_distributed(classid oid, objid oid, objsubid int)
RETURNS void RETURNS void

View File

@ -113,6 +113,9 @@ CREATE FUNCTION get_global_active_transactions(OUT datid oid, OUT process_id int
RESET search_path; RESET search_path;
DROP FUNCTION citus_internal_local_blocked_processes CASCADE;
DROP FUNCTION citus_internal_global_blocked_processes CASCADE;
DROP FUNCTION pg_catalog.citus_dist_stat_activity CASCADE; DROP FUNCTION pg_catalog.citus_dist_stat_activity CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
@ -139,6 +142,162 @@ ALTER VIEW citus.citus_dist_stat_activity SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC; GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC;
SET search_path = 'pg_catalog'; SET search_path = 'pg_catalog';
DROP FUNCTION citus_worker_stat_activity CASCADE;
CREATE OR REPLACE FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$citus_worker_stat_activity$$;
COMMENT ON FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
IS 'returns distributed transaction activity on shards of distributed tables';
DROP FUNCTION pg_catalog.worker_create_or_replace_object(text[]);
#include "../udfs/worker_create_or_replace_object/9.0-1.sql"
DROP FUNCTION IF EXISTS pg_catalog.pg_cancel_backend(bigint) CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.pg_terminate_backend(bigint, bigint) CASCADE;
DROP FUNCTION pg_catalog.dump_local_wait_edges CASCADE;
CREATE FUNCTION pg_catalog.dump_local_wait_edges(
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$dump_local_wait_edges$$;
COMMENT ON FUNCTION pg_catalog.dump_local_wait_edges()
IS 'returns all local lock wait chains, that start from distributed transactions';
DROP FUNCTION pg_catalog.dump_global_wait_edges CASCADE;
CREATE FUNCTION pg_catalog.dump_global_wait_edges(
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE 'c' STRICT
AS $$MODULE_PATHNAME$$, $$dump_global_wait_edges$$;
COMMENT ON FUNCTION pg_catalog.dump_global_wait_edges()
IS 'returns a global list of blocked transactions originating from this node';
DROP FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]);
CREATE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[])
RETURNS boolean AS $$
DECLARE
mBlockedTransactionNum int8;
workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id');
coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id');
BEGIN
IF pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN
RETURN true;
END IF;
-- pg says we're not blocked locally; check whether we're blocked globally.
-- Note that the worker process may be blocked or waiting for a lock. So we need to
-- get the transaction number for both of them. The following IF provides the transaction
-- number when the worker process is waiting for another session.
IF EXISTS (SELECT transaction_number FROM get_global_active_transactions()
WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN
SELECT transaction_number INTO mBlockedTransactionNum FROM get_global_active_transactions()
WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId;
ELSE
-- Check whether transactions initiated from the coordinator get locked
SELECT transaction_number INTO mBlockedTransactionNum
FROM get_all_active_transactions() WHERE process_id = pBlockedPid;
END IF;
RETURN EXISTS (
SELECT 1 FROM dump_global_wait_edges()
WHERE waiting_transaction_num = mBlockedTransactionNum
) OR EXISTS (
-- Check on the workers if any logical replication job spawned by the
-- current PID is blocked, by checking its application name.
-- Query is heavily based on: https://wiki.postgresql.org/wiki/Lock_Monitoring
SELECT result FROM run_command_on_workers($two$
SELECT blocked_activity.application_name AS blocked_application
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks
ON blocking_locks.locktype = blocked_locks.locktype
AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE
AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%'
$two$) where result='citus_shard_move_subscription_' || pBlockedPid);
END;
$$ LANGUAGE plpgsql;
REVOKE ALL ON FUNCTION citus_isolation_test_session_is_blocked(integer,integer[]) FROM PUBLIC;
DROP FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer);
CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer)
RETURNS int4[] AS $$
DECLARE
mLocalBlockingPids int4[];
mRemoteBlockingPids int4[];
mLocalTransactionNum int8;
BEGIN
SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids;
IF (array_length(mLocalBlockingPids, 1) > 0) THEN
RETURN mLocalBlockingPids;
END IF;
-- pg says we're not blocked locally; check whether we're blocked globally.
SELECT transaction_number INTO mLocalTransactionNum
FROM get_all_active_transactions() WHERE process_id = pBlockedPid;
SELECT array_agg(process_id) INTO mRemoteBlockingPids FROM (
WITH activeTransactions AS (
SELECT process_id, transaction_number FROM get_all_active_transactions()
), blockingTransactions AS (
SELECT blocking_transaction_num AS txn_num FROM dump_global_wait_edges()
WHERE waiting_transaction_num = mLocalTransactionNum
)
SELECT activeTransactions.process_id FROM activeTransactions, blockingTransactions
WHERE activeTransactions.transaction_number = blockingTransactions.txn_num
) AS sub;
RETURN mRemoteBlockingPids;
END;
$$ LANGUAGE plpgsql;
REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC;
CREATE VIEW citus.citus_worker_stat_activity AS
SELECT * FROM pg_catalog.citus_worker_stat_activity();
ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC;
-- we have to recreate this view because we drop citus_dist_stat_activity that this view depends -- we have to recreate this view because we drop citus_dist_stat_activity that this view depends
CREATE VIEW citus.citus_lock_waits AS CREATE VIEW citus.citus_lock_waits AS
@ -183,29 +342,4 @@ JOIN
ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC;
DROP FUNCTION citus_worker_stat_activity CASCADE;
CREATE OR REPLACE FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$citus_worker_stat_activity$$;
COMMENT ON FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
IS 'returns distributed transaction activity on shards of distributed tables';
CREATE VIEW citus.citus_worker_stat_activity AS
SELECT * FROM pg_catalog.citus_worker_stat_activity();
ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC;
RESET search_path; RESET search_path;

View File

@ -0,0 +1,34 @@
DROP FUNCTION pg_catalog.citus_blocking_pids;
CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer)
RETURNS int4[] AS $$
DECLARE
mLocalBlockingPids int4[];
mRemoteBlockingPids int4[];
mLocalGlobalPid int8;
BEGIN
SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids;
IF (array_length(mLocalBlockingPids, 1) > 0) THEN
RETURN mLocalBlockingPids;
END IF;
-- pg says we're not blocked locally; check whether we're blocked globally.
SELECT global_pid INTO mLocalGlobalPid
FROM get_all_active_transactions() WHERE process_id = pBlockedPid;
SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM (
WITH activeTransactions AS (
SELECT global_pid FROM get_all_active_transactions()
), blockingTransactions AS (
SELECT blocking_global_pid FROM citus_internal_global_blocked_processes()
WHERE waiting_global_pid = mLocalGlobalPid
)
SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions
WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid
) AS sub;
RETURN mRemoteBlockingPids;
END;
$$ LANGUAGE plpgsql;
REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC;
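Usage mirrors pg_blocking_pids, but looks across the whole cluster; for example, a session could inspect what is blocking it with:
SELECT pg_catalog.citus_blocking_pids(pg_backend_pid());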

View File

@ -0,0 +1,34 @@
DROP FUNCTION pg_catalog.citus_blocking_pids;
CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer)
RETURNS int4[] AS $$
DECLARE
mLocalBlockingPids int4[];
mRemoteBlockingPids int4[];
mLocalGlobalPid int8;
BEGIN
SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids;
IF (array_length(mLocalBlockingPids, 1) > 0) THEN
RETURN mLocalBlockingPids;
END IF;
-- pg says we're not blocked locally; check whether we're blocked globally.
SELECT global_pid INTO mLocalGlobalPid
FROM get_all_active_transactions() WHERE process_id = pBlockedPid;
SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM (
WITH activeTransactions AS (
SELECT global_pid FROM get_all_active_transactions()
), blockingTransactions AS (
SELECT blocking_global_pid FROM citus_internal_global_blocked_processes()
WHERE waiting_global_pid = mLocalGlobalPid
)
SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions
WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid
) AS sub;
RETURN mRemoteBlockingPids;
END;
$$ LANGUAGE plpgsql;
REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC;

View File

@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes()
IS 'returns a global list of blocked backends originating from this node';
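A minimal usage sketch, selecting only a few of the OUT columns defined above:
SELECT waiting_global_pid, blocking_global_pid, blocking_transaction_waiting
FROM pg_catalog.citus_internal_global_blocked_processes();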

View File

@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes()
IS 'returns a global list of blocked backends originating from this node';

View File

@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes()
IS 'returns all local lock wait chains, that start from any citus backend';

View File

@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes()
IS 'returns all local lock wait chains, that start from any citus backend';

View File

@ -0,0 +1,56 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[])
RETURNS boolean AS $$
DECLARE
mBlockedGlobalPid int8;
workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id');
coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id');
BEGIN
IF pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN
RETURN true;
END IF;
-- pg says we're not blocked locally; check whether we're blocked globally.
-- Note that the worker process may be blocked or waiting for a lock. So we need to
-- get the transaction number for both of them. The following IF provides the transaction
-- number when the worker process is waiting for another session.
IF EXISTS (SELECT 1 FROM get_global_active_transactions()
WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN
SELECT global_pid INTO mBlockedGlobalPid FROM get_global_active_transactions()
WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId;
ELSE
-- Check whether transactions initiated from the coordinator get locked
SELECT global_pid INTO mBlockedGlobalPid
FROM get_all_active_transactions() WHERE process_id = pBlockedPid;
END IF;
RETURN EXISTS (
SELECT 1 FROM citus_internal_global_blocked_processes()
WHERE waiting_global_pid = mBlockedGlobalPid
) OR EXISTS (
-- Check on the workers if any logical replication job spawned by the
-- current PID is blocked, by checking its application name.
-- Query is heavily based on: https://wiki.postgresql.org/wiki/Lock_Monitoring
SELECT result FROM run_command_on_workers($two$
SELECT blocked_activity.application_name AS blocked_application
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks
ON blocking_locks.locktype = blocked_locks.locktype
AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE
AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%'
$two$) where result='citus_shard_move_subscription_' || pBlockedPid);
END;
$$ LANGUAGE plpgsql;
REVOKE ALL ON FUNCTION citus_isolation_test_session_is_blocked(integer,integer[]) FROM PUBLIC;

View File

@ -1,7 +1,7 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]) CREATE OR REPLACE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[])
RETURNS boolean AS $$ RETURNS boolean AS $$
DECLARE DECLARE
mBlockedTransactionNum int8; mBlockedGlobalPid int8;
workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id'); workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id');
coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id'); coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id');
BEGIN BEGIN
@ -13,19 +13,19 @@ RETURNS boolean AS $$
-- Note that worker process may be blocked or waiting for a lock. So we need to -- Note that worker process may be blocked or waiting for a lock. So we need to
-- get transaction number for both of them. Following IF provides the transaction -- get transaction number for both of them. Following IF provides the transaction
-- number when the worker process waiting for other session. -- number when the worker process waiting for other session.
IF EXISTS (SELECT transaction_number FROM get_global_active_transactions() IF EXISTS (SELECT 1 FROM get_global_active_transactions()
WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN
SELECT transaction_number INTO mBlockedTransactionNum FROM get_global_active_transactions() SELECT global_pid INTO mBlockedGlobalPid FROM get_global_active_transactions()
WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId; WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId;
ELSE ELSE
-- Check whether transactions initiated from the coordinator get locked -- Check whether transactions initiated from the coordinator get locked
SELECT transaction_number INTO mBlockedTransactionNum SELECT global_pid INTO mBlockedGlobalPid
FROM get_all_active_transactions() WHERE process_id = pBlockedPid; FROM get_all_active_transactions() WHERE process_id = pBlockedPid;
END IF; END IF;
RETURN EXISTS ( RETURN EXISTS (
SELECT 1 FROM dump_global_wait_edges() SELECT 1 FROM citus_internal_global_blocked_processes()
WHERE waiting_transaction_num = mBlockedTransactionNum WHERE waiting_global_pid = mBlockedGlobalPid
) OR EXISTS ( ) OR EXISTS (
-- Check on the workers if any logical replication job spawned by the -- Check on the workers if any logical replication job spawned by the
-- current PID is blocked, by checking it's application name -- current PID is blocked, by checking it's application name

View File

@ -8,7 +8,7 @@ citus_dist_stat_activity AS
), ),
unique_global_wait_edges AS unique_global_wait_edges AS
( (
SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() SELECT DISTINCT ON(waiting_global_pid, blocking_global_pid) * FROM citus_internal_global_blocked_processes()
), ),
citus_dist_stat_activity_with_node_id AS citus_dist_stat_activity_with_node_id AS
( (
@ -21,6 +21,8 @@ citus_dist_stat_activity_with_node_id AS
citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport
) )
SELECT SELECT
waiting.global_pid as waiting_gpid,
blocking.global_pid as blocking_gpid,
waiting.pid AS waiting_pid, waiting.pid AS waiting_pid,
blocking.pid AS blocking_pid, blocking.pid AS blocking_pid,
waiting.query AS blocked_statement, waiting.query AS blocked_statement,
@ -34,9 +36,9 @@ SELECT
FROM FROM
unique_global_wait_edges unique_global_wait_edges
JOIN JOIN
citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_global_pid = waiting.global_pid)
JOIN JOIN
citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_global_pid = blocking.global_pid);
ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC;
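With the view keyed on global PIDs, a quick look at cluster-wide blocking reduces to something like:
SELECT waiting_gpid, blocking_gpid, blocked_statement
FROM citus_lock_waits;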

View File

@ -8,7 +8,7 @@ citus_dist_stat_activity AS
), ),
unique_global_wait_edges AS unique_global_wait_edges AS
( (
SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() SELECT DISTINCT ON(waiting_global_pid, blocking_global_pid) * FROM citus_internal_global_blocked_processes()
), ),
citus_dist_stat_activity_with_node_id AS citus_dist_stat_activity_with_node_id AS
( (
@ -21,6 +21,8 @@ citus_dist_stat_activity_with_node_id AS
citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport
) )
SELECT SELECT
waiting.global_pid as waiting_gpid,
blocking.global_pid as blocking_gpid,
waiting.pid AS waiting_pid, waiting.pid AS waiting_pid,
blocking.pid AS blocking_pid, blocking.pid AS blocking_pid,
waiting.query AS blocked_statement, waiting.query AS blocked_statement,
@ -34,9 +36,9 @@ SELECT
FROM FROM
unique_global_wait_edges unique_global_wait_edges
JOIN JOIN
citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_global_pid = waiting.global_pid)
JOIN JOIN
citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_global_pid = blocking.global_pid);
ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC;

View File

@ -0,0 +1,9 @@
DROP FUNCTION IF EXISTS pg_catalog.pg_cancel_backend(global_pid bigint) CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.pg_cancel_backend(global_pid bigint)
RETURNS BOOL
LANGUAGE C
AS 'MODULE_PATHNAME', $$pg_cancel_backend$$;
COMMENT ON FUNCTION pg_catalog.pg_cancel_backend(global_pid bigint)
IS 'cancels a Citus query which might be on any node in the Citus cluster';
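A hedged usage sketch, feeding it a global PID taken from citus_lock_waits:
SELECT pg_cancel_backend(blocking_gpid)
FROM citus_lock_waits;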

View File

@ -0,0 +1,9 @@
DROP FUNCTION IF EXISTS pg_catalog.pg_cancel_backend(global_pid bigint) CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.pg_cancel_backend(global_pid bigint)
RETURNS BOOL
LANGUAGE C
AS 'MODULE_PATHNAME', $$pg_cancel_backend$$;
COMMENT ON FUNCTION pg_catalog.pg_cancel_backend(global_pid bigint)
IS 'cancels a Citus query which might be on any node in the Citus cluster';

View File

@ -0,0 +1,9 @@
DROP FUNCTION IF EXISTS pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint) CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint DEFAULT 0)
RETURNS BOOL
LANGUAGE C
AS 'MODULE_PATHNAME', $$pg_terminate_backend$$;
COMMENT ON FUNCTION pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint)
IS 'terminates a Citus query which might be on any node in the Citus cluster';
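A usage sketch with a hypothetical global PID (node 2, local pid 12345, assuming the ten-digit node-id multiplier); the timeout argument presumably follows PostgreSQL's pg_terminate_backend semantics (milliseconds to wait, 0 means do not wait):
SELECT pg_terminate_backend(20000012345::bigint, 0);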

View File

@ -0,0 +1,9 @@
DROP FUNCTION IF EXISTS pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint) CASCADE;
CREATE OR REPLACE FUNCTION pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint DEFAULT 0)
RETURNS BOOL
LANGUAGE C
AS 'MODULE_PATHNAME', $$pg_terminate_backend$$;
COMMENT ON FUNCTION pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint)
IS 'terminates a Citus query which might be on any node in the Citus cluster';

View File

@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object';
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statements text[])
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object_array$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statements text[])
IS 'takes a list of sql statements; before executing these it will check if the object already exists in that exact state, otherwise it replaces that named object with the new object';
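A hedged sketch of calling the array form; the statement text is illustrative only:
SELECT pg_catalog.worker_create_or_replace_object(ARRAY[
  'CREATE TEXT SEARCH CONFIGURATION my_text_config ( PARSER = default )'
]::text[]);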

View File

@ -0,0 +1,6 @@
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object';

View File

@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text)
IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object';
CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statements text[])
RETURNS bool
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$worker_create_or_replace_object_array$$;
COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statements text[])
IS 'takes a list of sql statements; before executing these it will check if the object already exists in that exact state, otherwise it replaces that named object with the new object';

View File

@ -47,7 +47,7 @@ citus_get_all_dependencies_for_object(PG_FUNCTION_ARGS)
ObjectAddress address = { 0 }; ObjectAddress address = { 0 };
ObjectAddressSubSet(address, classid, objid, objsubid); ObjectAddressSubSet(address, classid, objid, objsubid);
List *dependencies = GetAllDependenciesForObject(&address); List *dependencies = GetAllSupportedDependenciesForObject(&address);
ObjectAddress *dependency = NULL; ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies) foreach_ptr(dependency, dependencies)
{ {

View File

@ -50,7 +50,10 @@ get_adjacency_list_wait_graph(PG_FUNCTION_ARGS)
bool isNulls[2]; bool isNulls[2];
Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);
WaitGraph *waitGraph = BuildGlobalWaitGraph();
/* distributed deadlock detection only considers distributed txs */
bool onlyDistributedTx = true;
WaitGraph *waitGraph = BuildGlobalWaitGraph(onlyDistributedTx);
HTAB *adjacencyList = BuildAdjacencyListsForWaitGraph(waitGraph); HTAB *adjacencyList = BuildAdjacencyListsForWaitGraph(waitGraph);
/* iterate on all nodes */ /* iterate on all nodes */

View File

@ -58,6 +58,7 @@ static int64 GetRemoteProcessId(void);
PG_FUNCTION_INFO_V1(start_session_level_connection_to_node); PG_FUNCTION_INFO_V1(start_session_level_connection_to_node);
PG_FUNCTION_INFO_V1(run_commands_on_session_level_connection_to_node); PG_FUNCTION_INFO_V1(run_commands_on_session_level_connection_to_node);
PG_FUNCTION_INFO_V1(stop_session_level_connection_to_node); PG_FUNCTION_INFO_V1(stop_session_level_connection_to_node);
PG_FUNCTION_INFO_V1(override_backend_data_command_originator);
/* /*
@ -119,6 +120,17 @@ start_session_level_connection_to_node(PG_FUNCTION_ARGS)
ExecuteCriticalRemoteCommand(singleConnection, setAppName); ExecuteCriticalRemoteCommand(singleConnection, setAppName);
/*
* We are hackily overriding the remote process's worker_query to be false
* such that the relevant observability UDFs work fine.
*/
StringInfo overrideBackendDataCommandOriginator = makeStringInfo();
appendStringInfo(overrideBackendDataCommandOriginator,
"SELECT override_backend_data_command_originator(true);");
ExecuteCriticalRemoteCommand(singleConnection,
overrideBackendDataCommandOriginator->data);
PG_RETURN_VOID(); PG_RETURN_VOID();
} }
@ -174,6 +186,23 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS)
} }
/*
* override_backend_data_command_originator is a wrapper around
* OverrideBackendDataDistributedCommandOriginator().
*/
Datum
override_backend_data_command_originator(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
bool distributedCommandOriginator = PG_GETARG_BOOL(0);
OverrideBackendDataDistributedCommandOriginator(distributedCommandOriginator);
PG_RETURN_VOID();
}
/* /*
* stop_session_level_connection_to_node closes the connection opened by the * stop_session_level_connection_to_node closes the connection opened by the
* start_session_level_connection_to_node and set the flag to false which * start_session_level_connection_to_node and set the flag to false which

View File

@ -33,6 +33,7 @@
#include "distributed/shared_connection_stats.h" #include "distributed/shared_connection_stats.h"
#include "distributed/transaction_identifier.h" #include "distributed/transaction_identifier.h"
#include "distributed/tuplestore.h" #include "distributed/tuplestore.h"
#include "distributed/worker_manager.h"
#include "nodes/execnodes.h" #include "nodes/execnodes.h"
#include "postmaster/autovacuum.h" /* to access autovacuum_max_workers */ #include "postmaster/autovacuum.h" /* to access autovacuum_max_workers */
#include "replication/walsender.h" #include "replication/walsender.h"
@ -47,6 +48,7 @@
#define GET_ACTIVE_TRANSACTION_QUERY "SELECT * FROM get_all_active_transactions();" #define GET_ACTIVE_TRANSACTION_QUERY "SELECT * FROM get_all_active_transactions();"
#define ACTIVE_TRANSACTION_COLUMN_COUNT 7 #define ACTIVE_TRANSACTION_COLUMN_COUNT 7
#define GLOBAL_PID_NODE_ID_MULTIPLIER 10000000000
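The multiplier suggests a global PID packs the node id above the ten-digit local pid; assuming that encoding, it can be decoded from SQL like so:
SELECT global_pid,
       global_pid / 10000000000 AS node_id,
       global_pid % 10000000000 AS local_pid
FROM get_all_active_transactions();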
/* /*
* Each backend's data reside in the shared memory * Each backend's data reside in the shared memory
@ -90,7 +92,6 @@ static BackendData *MyBackendData = NULL;
static void BackendManagementShmemInit(void); static void BackendManagementShmemInit(void);
static size_t BackendManagementShmemSize(void); static size_t BackendManagementShmemSize(void);
static void UnSetGlobalPID(void);
PG_FUNCTION_INFO_V1(assign_distributed_transaction_id); PG_FUNCTION_INFO_V1(assign_distributed_transaction_id);
@ -153,7 +154,6 @@ assign_distributed_transaction_id(PG_FUNCTION_ARGS)
MyBackendData->citusBackend.initiatorNodeIdentifier = MyBackendData->citusBackend.initiatorNodeIdentifier =
MyBackendData->transactionId.initiatorNodeIdentifier; MyBackendData->transactionId.initiatorNodeIdentifier;
MyBackendData->citusBackend.transactionOriginator = false;
SpinLockRelease(&MyBackendData->mutex); SpinLockRelease(&MyBackendData->mutex);
@ -411,15 +411,12 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
initiatorNodeIdentifier = currentBackend->citusBackend.initiatorNodeIdentifier; initiatorNodeIdentifier = currentBackend->citusBackend.initiatorNodeIdentifier;
/*
* We prefer to use worker_query instead of distributedCommandOriginator in
* the user facing functions since it's more intuitive. Thus,
* we negate the result before returning.
*/
bool coordinatorOriginatedQuery = bool distributedCommandOriginator =
currentBackend->citusBackend.transactionOriginator; currentBackend->distributedCommandOriginator;
transactionNumber = currentBackend->transactionId.transactionNumber; transactionNumber = currentBackend->transactionId.transactionNumber;
TimestampTz transactionIdTimestamp = currentBackend->transactionId.timestamp; TimestampTz transactionIdTimestamp = currentBackend->transactionId.timestamp;
@ -429,7 +426,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
values[0] = ObjectIdGetDatum(databaseId); values[0] = ObjectIdGetDatum(databaseId);
values[1] = Int32GetDatum(backendPid); values[1] = Int32GetDatum(backendPid);
values[2] = Int32GetDatum(initiatorNodeIdentifier); values[2] = Int32GetDatum(initiatorNodeIdentifier);
values[3] = !coordinatorOriginatedQuery; values[3] = !distributedCommandOriginator;
values[4] = UInt64GetDatum(transactionNumber); values[4] = UInt64GetDatum(transactionNumber);
values[5] = TimestampTzGetDatum(transactionIdTimestamp); values[5] = TimestampTzGetDatum(transactionIdTimestamp);
values[6] = UInt64GetDatum(currentBackend->globalPID); values[6] = UInt64GetDatum(currentBackend->globalPID);
@ -664,7 +661,6 @@ UnSetDistributedTransactionId(void)
MyBackendData->transactionId.timestamp = 0; MyBackendData->transactionId.timestamp = 0;
MyBackendData->citusBackend.initiatorNodeIdentifier = -1; MyBackendData->citusBackend.initiatorNodeIdentifier = -1;
MyBackendData->citusBackend.transactionOriginator = false;
SpinLockRelease(&MyBackendData->mutex); SpinLockRelease(&MyBackendData->mutex);
} }
@ -674,7 +670,7 @@ UnSetDistributedTransactionId(void)
/* /*
* UnSetGlobalPID resets the global pid for the current backend. * UnSetGlobalPID resets the global pid for the current backend.
*/ */
static void void
UnSetGlobalPID(void) UnSetGlobalPID(void)
{ {
/* backend does not exist if the extension is not created */ /* backend does not exist if the extension is not created */
@ -777,7 +773,6 @@ AssignDistributedTransactionId(void)
MyBackendData->transactionId.timestamp = currentTimestamp; MyBackendData->transactionId.timestamp = currentTimestamp;
MyBackendData->citusBackend.initiatorNodeIdentifier = localGroupId; MyBackendData->citusBackend.initiatorNodeIdentifier = localGroupId;
MyBackendData->citusBackend.transactionOriginator = true;
SpinLockRelease(&MyBackendData->mutex); SpinLockRelease(&MyBackendData->mutex);
} }
@ -799,7 +794,6 @@ MarkCitusInitiatedCoordinatorBackend(void)
SpinLockAcquire(&MyBackendData->mutex); SpinLockAcquire(&MyBackendData->mutex);
MyBackendData->citusBackend.initiatorNodeIdentifier = localGroupId; MyBackendData->citusBackend.initiatorNodeIdentifier = localGroupId;
MyBackendData->citusBackend.transactionOriginator = true;
SpinLockRelease(&MyBackendData->mutex); SpinLockRelease(&MyBackendData->mutex);
} }
@ -815,10 +809,12 @@ void
AssignGlobalPID(void) AssignGlobalPID(void)
{ {
uint64 globalPID = INVALID_CITUS_INTERNAL_BACKEND_GPID; uint64 globalPID = INVALID_CITUS_INTERNAL_BACKEND_GPID;
bool distributedCommandOriginator = false;
if (!IsCitusInternalBackend()) if (!IsCitusInternalBackend())
{ {
globalPID = GenerateGlobalPID(); globalPID = GenerateGlobalPID();
distributedCommandOriginator = true;
} }
else else
{ {
@ -827,6 +823,21 @@ AssignGlobalPID(void)
SpinLockAcquire(&MyBackendData->mutex); SpinLockAcquire(&MyBackendData->mutex);
MyBackendData->globalPID = globalPID; MyBackendData->globalPID = globalPID;
MyBackendData->distributedCommandOriginator = distributedCommandOriginator;
SpinLockRelease(&MyBackendData->mutex);
}
/*
* OverrideBackendDataDistributedCommandOriginator should only be used for isolation testing.
* See how it is used in the relevant functions.
*/
void
OverrideBackendDataDistributedCommandOriginator(bool distributedCommandOriginator)
{
SpinLockAcquire(&MyBackendData->mutex);
MyBackendData->distributedCommandOriginator =
distributedCommandOriginator;
SpinLockRelease(&MyBackendData->mutex); SpinLockRelease(&MyBackendData->mutex);
} }
@ -865,7 +876,7 @@ GenerateGlobalPID(void)
* node ids might cause overflow. But even for the applications that scale around 50 nodes every * node ids might cause overflow. But even for the applications that scale around 50 nodes every
* day it'd take about 100K years. So we are not worried. * day it'd take about 100K years. So we are not worried.
*/ */
return (((uint64) GetLocalNodeId()) * 10000000000) + getpid();
return (((uint64) GetLocalNodeId()) * GLOBAL_PID_NODE_ID_MULTIPLIER) + getpid();
} }
@ -908,6 +919,42 @@ ExtractGlobalPID(char *applicationName)
} }
/*
* ExtractNodeIdFromGlobalPID extracts the node id from the global pid.
* Global pid is constructed by multiplying node id with GLOBAL_PID_NODE_ID_MULTIPLIER
* and adding process id. So integer division of global pid by GLOBAL_PID_NODE_ID_MULTIPLIER
* gives us the node id.
*/
int
ExtractNodeIdFromGlobalPID(uint64 globalPID)
{
int nodeId = (int) (globalPID / GLOBAL_PID_NODE_ID_MULTIPLIER);
if (nodeId == GLOBAL_PID_NODE_ID_FOR_NODES_NOT_IN_METADATA)
{
ereport(ERROR, (errmsg("originator node of the query with the global pid "
"%lu is not in Citus' metadata", globalPID),
errhint("connect to the node directly run pg_cancel_backend(pid) "
"or pg_terminate_backend(pid)")));
}
return nodeId;
}
/*
* ExtractProcessIdFromGlobalPID extracts the process id from the global pid.
* Global pid is constructed by multiplying node id with GLOBAL_PID_NODE_ID_MULTIPLIER
* and adding process id. So global pid mod GLOBAL_PID_NODE_ID_MULTIPLIER gives us the
* process id.
*/
int
ExtractProcessIdFromGlobalPID(uint64 globalPID)
{
return (int) (globalPID % GLOBAL_PID_NODE_ID_MULTIPLIER);
}
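
The global pid scheme above is plain integer arithmetic, so it can be illustrated outside of Citus. A minimal standalone sketch, assuming the same 10^10 multiplier; encode_gpid and PID_MULTIPLIER are illustrative names, not the actual Citus symbols:

#include <stdint.h>
#include <stdio.h>

#define PID_MULTIPLIER UINT64_C(10000000000)

/* node id goes into the high digits, process id into the low ten digits */
static uint64_t
encode_gpid(int node_id, int pid)
{
	return ((uint64_t) node_id) * PID_MULTIPLIER + (uint64_t) pid;
}

int
main(void)
{
	uint64_t gpid = encode_gpid(3, 12345);

	/* integer division recovers the node id, modulo recovers the process id */
	printf("gpid=%llu node=%d pid=%d\n",
	       (unsigned long long) gpid,
	       (int) (gpid / PID_MULTIPLIER),
	       (int) (gpid % PID_MULTIPLIER));
	return 0;
}

Running this prints gpid=30000012345 node=3 pid=12345, which is exactly the split that ExtractNodeIdFromGlobalPID and ExtractProcessIdFromGlobalPID perform.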
/* /*
* CurrentDistributedTransactionNumber returns the transaction number of the * CurrentDistributedTransactionNumber returns the transaction number of the
* current distributed transaction. The caller must make sure a distributed * current distributed transaction. The caller must make sure a distributed

View File

@ -157,10 +157,9 @@ FROM \
WHERE \ WHERE \
backend_type = 'client backend' \ backend_type = 'client backend' \
AND \ AND \
pg_stat_activity.query NOT ILIKE '%stat_activity%' \
AND \
pg_stat_activity.application_name NOT SIMILAR TO 'citus_internal gpid=\\d+'; \
"
worker_query = False \
AND \
pg_stat_activity.query NOT ILIKE '%stat_activity%';"
#define CITUS_WORKER_STAT_ACTIVITY_QUERY \ #define CITUS_WORKER_STAT_ACTIVITY_QUERY \
"\ "\
@ -195,7 +194,7 @@ FROM \
get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_id) \ get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_id) \
ON pg_stat_activity.pid = dist_txs.process_id \ ON pg_stat_activity.pid = dist_txs.process_id \
WHERE \
pg_stat_activity.application_name SIMILAR TO 'citus_internal gpid=\\d+' \
AND \
pg_stat_activity.query NOT ILIKE '%stat_activity%';"
WHERE \
worker_query = True \
AND \
pg_stat_activity.query NOT ILIKE '%stat_activity%';"

View File

@ -119,7 +119,9 @@ CheckForDistributedDeadlocks(void)
return false; return false;
} }
WaitGraph *waitGraph = BuildGlobalWaitGraph(); /* distributed deadlock detection only considers distributed txs */
bool onlyDistributedTx = true;
WaitGraph *waitGraph = BuildGlobalWaitGraph(onlyDistributedTx);
HTAB *adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph); HTAB *adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph);
int edgeCount = waitGraph->edgeCount; int edgeCount = waitGraph->edgeCount;

View File

@ -47,7 +47,10 @@ typedef struct PROCStack
static void AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex); static void AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex);
static void ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo); static void ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo);
static WaitGraph * BuildLocalWaitGraph(void); static void AddWaitEdgeFromBlockedProcessResult(WaitGraph *waitGraph, PGresult *result,
int rowIndex);
static void ReturnBlockedProcessGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo);
static WaitGraph * BuildLocalWaitGraph(bool onlyDistributedTx);
static bool IsProcessWaitingForSafeOperations(PGPROC *proc); static bool IsProcessWaitingForSafeOperations(PGPROC *proc);
static void LockLockData(void); static void LockLockData(void);
static void UnlockLockData(void); static void UnlockLockData(void);
@ -62,10 +65,30 @@ static void AddProcToVisit(PROCStack *remaining, PGPROC *proc);
static bool IsSameLockGroup(PGPROC *leftProc, PGPROC *rightProc); static bool IsSameLockGroup(PGPROC *leftProc, PGPROC *rightProc);
static bool IsConflictingLockMask(int holdMask, int conflictMask); static bool IsConflictingLockMask(int holdMask, int conflictMask);
/*
* We almost have 2 sets of identical functions. The first set of functions
* (e.g., dump_wait_edges) is intended for distributed deadlock detection purposes.
*
* The second set of functions (e.g., citus_internal_local_blocked_processes) is
* intended for the citus_lock_waits view.
*
* The main difference is that the former functions only show processes that are blocked
* inside a distributed transaction (e.g., see AssignDistributedTransactionId()).
* The latter functions return a superset, where any blocked process is returned.
*
* We kept two different sets of functions for two reasons. First, deadlock detection
* is a performance-critical code path that runs very frequently, and we don't want to
* add any overhead to it. Secondly, to be able to do rolling upgrades, we cannot change
* the API of dump_global_wait_edges/dump_local_wait_edges such that they take a boolean
* parameter. If we do that, until all nodes are upgraded, the deadlock detection would fail,
* which is not acceptable.
*/
PG_FUNCTION_INFO_V1(dump_local_wait_edges); PG_FUNCTION_INFO_V1(dump_local_wait_edges);
PG_FUNCTION_INFO_V1(dump_global_wait_edges); PG_FUNCTION_INFO_V1(dump_global_wait_edges);
PG_FUNCTION_INFO_V1(citus_internal_local_blocked_processes);
PG_FUNCTION_INFO_V1(citus_internal_global_blocked_processes);
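
To make the onlyDistributedTx split concrete, here is a self-contained toy sketch of the filtering difference; the struct and helper below are invented for illustration and are not the Citus data structures. Deadlock detection keeps only edges whose backends are inside a distributed transaction, while the blocked-process variants keep every edge:

#include <stdbool.h>
#include <stdio.h>

typedef struct ToyEdge
{
	int waitingPid;
	int blockingPid;
	bool insideDistributedTx;   /* analogous to IsInDistributedTransaction() */
} ToyEdge;

static int
CollectEdges(const ToyEdge *edges, int count, bool onlyDistributedTx)
{
	int kept = 0;
	for (int i = 0; i < count; i++)
	{
		if (onlyDistributedTx && !edges[i].insideDistributedTx)
		{
			continue;   /* skipped by deadlock detection */
		}
		kept++;
	}
	return kept;
}

int
main(void)
{
	ToyEdge edges[] = {
		{ 100, 200, true },    /* blocked inside a distributed transaction */
		{ 300, 400, false },   /* plain local blocking, e.g. a session-level lock */
	};

	printf("deadlock detection sees %d edge(s)\n", CollectEdges(edges, 2, true));
	printf("citus_lock_waits sees %d edge(s)\n", CollectEdges(edges, 2, false));
	return 0;
}

With the sample edges this prints 1 edge for deadlock detection and 2 edges for the citus_lock_waits path.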
/* /*
* dump_global_wait_edges returns global wait edges for distributed transactions * dump_global_wait_edges returns global wait edges for distributed transactions
@ -74,7 +97,9 @@ PG_FUNCTION_INFO_V1(dump_global_wait_edges);
Datum Datum
dump_global_wait_edges(PG_FUNCTION_ARGS) dump_global_wait_edges(PG_FUNCTION_ARGS)
{ {
WaitGraph *waitGraph = BuildGlobalWaitGraph();
bool onlyDistributedTx = true;
WaitGraph *waitGraph = BuildGlobalWaitGraph(onlyDistributedTx);
ReturnWaitGraph(waitGraph, fcinfo); ReturnWaitGraph(waitGraph, fcinfo);
@ -82,20 +107,44 @@ dump_global_wait_edges(PG_FUNCTION_ARGS)
} }
/*
* citus_internal_global_blocked_processes returns global wait edges
* including all processes running on the cluster.
*/
Datum
citus_internal_global_blocked_processes(PG_FUNCTION_ARGS)
{
bool onlyDistributedTx = false;
WaitGraph *waitGraph = BuildGlobalWaitGraph(onlyDistributedTx);
ReturnBlockedProcessGraph(waitGraph, fcinfo);
return (Datum) 0;
}
/* /*
* BuildGlobalWaitGraph builds a wait graph for distributed transactions * BuildGlobalWaitGraph builds a wait graph for distributed transactions
* that originate from this node, including edges from all (other) worker * that originate from this node, including edges from all (other) worker
* nodes. * nodes.
*
*
* If onlyDistributedTx is true, we only return processes for which a distributed
* transaction has been assigned (e.g., AssignDistributedTransaction() or
* assign_distributed_transactions() has been called for the process). Distributed
* deadlock detection is only interested in these processes.
*/ */
WaitGraph * WaitGraph *
BuildGlobalWaitGraph(void) BuildGlobalWaitGraph(bool onlyDistributedTx)
{ {
List *workerNodeList = ActiveReadableNodeList(); List *workerNodeList = ActiveReadableNodeList();
char *nodeUser = CitusExtensionOwnerName(); char *nodeUser = CitusExtensionOwnerName();
List *connectionList = NIL; List *connectionList = NIL;
int32 localGroupId = GetLocalGroupId(); int32 localGroupId = GetLocalGroupId();
WaitGraph *waitGraph = BuildLocalWaitGraph();
/* deadlock detection is only interested in distributed transactions */
WaitGraph *waitGraph = BuildLocalWaitGraph(onlyDistributedTx);
/* open connections in parallel */ /* open connections in parallel */
WorkerNode *workerNode = NULL; WorkerNode *workerNode = NULL;
@ -124,9 +173,28 @@ BuildGlobalWaitGraph(void)
MultiConnection *connection = NULL; MultiConnection *connection = NULL;
foreach_ptr(connection, connectionList) foreach_ptr(connection, connectionList)
{ {
const char *command = "SELECT * FROM dump_local_wait_edges()";
int querySent = SendRemoteCommand(connection, command);
StringInfo queryString = makeStringInfo();
if (onlyDistributedTx)
{
appendStringInfo(queryString,
"SELECT waiting_pid, waiting_node_id, "
"waiting_transaction_num, waiting_transaction_stamp, "
"blocking_pid, blocking_node_id, blocking_transaction_num, "
"blocking_transaction_stamp, blocking_transaction_waiting "
"FROM dump_local_wait_edges()");
}
else
{
appendStringInfo(queryString,
"SELECT waiting_global_pid, waiting_pid, "
"waiting_node_id, waiting_transaction_num, waiting_transaction_stamp, "
"blocking_global_pid,blocking_pid, blocking_node_id, "
"blocking_transaction_num, blocking_transaction_stamp, blocking_transaction_waiting "
"FROM citus_internal_local_blocked_processes()");
}
int querySent = SendRemoteCommand(connection, queryString->data);
if (querySent == 0) if (querySent == 0)
{ {
ReportConnectionError(connection, WARNING); ReportConnectionError(connection, WARNING);
@ -148,17 +216,30 @@ BuildGlobalWaitGraph(void)
int64 rowCount = PQntuples(result); int64 rowCount = PQntuples(result);
int64 colCount = PQnfields(result); int64 colCount = PQnfields(result);
if (colCount != 9) if (onlyDistributedTx && colCount != 9)
{ {
ereport(WARNING, (errmsg("unexpected number of columns from " ereport(WARNING, (errmsg("unexpected number of columns from "
"dump_local_wait_edges"))); "dump_local_wait_edges")));
continue; continue;
} }
else if (!onlyDistributedTx && colCount != 11)
{
ereport(WARNING, (errmsg("unexpected number of columns from "
"citus_internal_local_blocked_processes")));
continue;
}
for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++) for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++)
{
if (onlyDistributedTx)
{ {
AddWaitEdgeFromResult(waitGraph, result, rowIndex); AddWaitEdgeFromResult(waitGraph, result, rowIndex);
} }
else
{
AddWaitEdgeFromBlockedProcessResult(waitGraph, result, rowIndex);
}
}
PQclear(result); PQclear(result);
ForgetResults(connection); ForgetResults(connection);
@ -177,10 +258,12 @@ AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex)
{ {
WaitEdge *waitEdge = AllocWaitEdge(waitGraph); WaitEdge *waitEdge = AllocWaitEdge(waitGraph);
waitEdge->waitingGPid = 0; /* not requested for deadlock detection */
waitEdge->waitingPid = ParseIntField(result, rowIndex, 0); waitEdge->waitingPid = ParseIntField(result, rowIndex, 0);
waitEdge->waitingNodeId = ParseIntField(result, rowIndex, 1); waitEdge->waitingNodeId = ParseIntField(result, rowIndex, 1);
waitEdge->waitingTransactionNum = ParseIntField(result, rowIndex, 2); waitEdge->waitingTransactionNum = ParseIntField(result, rowIndex, 2);
waitEdge->waitingTransactionStamp = ParseTimestampTzField(result, rowIndex, 3); waitEdge->waitingTransactionStamp = ParseTimestampTzField(result, rowIndex, 3);
waitEdge->blockingGPid = 0; /* not requested for deadlock detection */
waitEdge->blockingPid = ParseIntField(result, rowIndex, 4); waitEdge->blockingPid = ParseIntField(result, rowIndex, 4);
waitEdge->blockingNodeId = ParseIntField(result, rowIndex, 5); waitEdge->blockingNodeId = ParseIntField(result, rowIndex, 5);
waitEdge->blockingTransactionNum = ParseIntField(result, rowIndex, 6); waitEdge->blockingTransactionNum = ParseIntField(result, rowIndex, 6);
@ -189,6 +272,29 @@ AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex)
} }
/*
* AddWaitEdgeFromBlockedProcessResult adds an edge to the wait graph that
* is read from a PGresult.
*/
static void
AddWaitEdgeFromBlockedProcessResult(WaitGraph *waitGraph, PGresult *result, int rowIndex)
{
WaitEdge *waitEdge = AllocWaitEdge(waitGraph);
waitEdge->waitingGPid = ParseIntField(result, rowIndex, 0);
waitEdge->waitingPid = ParseIntField(result, rowIndex, 1);
waitEdge->waitingNodeId = ParseIntField(result, rowIndex, 2);
waitEdge->waitingTransactionNum = ParseIntField(result, rowIndex, 3);
waitEdge->waitingTransactionStamp = ParseTimestampTzField(result, rowIndex, 4);
waitEdge->blockingGPid = ParseIntField(result, rowIndex, 5);
waitEdge->blockingPid = ParseIntField(result, rowIndex, 6);
waitEdge->blockingNodeId = ParseIntField(result, rowIndex, 7);
waitEdge->blockingTransactionNum = ParseIntField(result, rowIndex, 8);
waitEdge->blockingTransactionStamp = ParseTimestampTzField(result, rowIndex, 9);
waitEdge->isBlockingXactWaiting = ParseBoolField(result, rowIndex, 10);
}
/* /*
* ParseIntField parses a int64 from a remote result or returns 0 if the * ParseIntField parses a int64 from a remote result or returns 0 if the
* result is NULL. * result is NULL.
@ -256,13 +362,31 @@ ParseTimestampTzField(PGresult *result, int rowIndex, int colIndex)
Datum Datum
dump_local_wait_edges(PG_FUNCTION_ARGS) dump_local_wait_edges(PG_FUNCTION_ARGS)
{ {
WaitGraph *waitGraph = BuildLocalWaitGraph();
bool onlyDistributedTx = true;
WaitGraph *waitGraph = BuildLocalWaitGraph(onlyDistributedTx);
ReturnWaitGraph(waitGraph, fcinfo); ReturnWaitGraph(waitGraph, fcinfo);
return (Datum) 0; return (Datum) 0;
} }
/*
* citus_internal_local_blocked_processes returns global wait edges
* including all processes running on the node.
*/
Datum
citus_internal_local_blocked_processes(PG_FUNCTION_ARGS)
{
bool onlyDistributedTx = false;
WaitGraph *waitGraph = BuildLocalWaitGraph(onlyDistributedTx);
ReturnBlockedProcessGraph(waitGraph, fcinfo);
return (Datum) 0;
}
/* /*
* ReturnWaitGraph returns a wait graph for a set returning function. * ReturnWaitGraph returns a wait graph for a set returning function.
*/ */
@ -325,12 +449,83 @@ ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo)
} }
/*
* ReturnBlockedProcessGraph returns a wait graph for a set returning function.
*/
static void
ReturnBlockedProcessGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo)
{
TupleDesc tupleDesc;
Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDesc);
/*
* Columns:
* 00: waiting_global_pid
* 01: waiting_pid
* 02: waiting_node_id
* 03: waiting_transaction_num
* 04: waiting_transaction_stamp
* 05: blocking_global_pid
* 06: blocking_pid
* 07: blocking_node_id
* 08: blocking_transaction_num
* 09: blocking_transaction_stamp
* 10: blocking_transaction_waiting
*/
for (size_t curEdgeNum = 0; curEdgeNum < waitGraph->edgeCount; curEdgeNum++)
{
Datum values[11];
bool nulls[11];
WaitEdge *curEdge = &waitGraph->edges[curEdgeNum];
memset(values, 0, sizeof(values));
memset(nulls, 0, sizeof(nulls));
values[0] = UInt64GetDatum(curEdge->waitingGPid);
values[1] = Int32GetDatum(curEdge->waitingPid);
values[2] = Int32GetDatum(curEdge->waitingNodeId);
if (curEdge->waitingTransactionNum != 0)
{
values[3] = Int64GetDatum(curEdge->waitingTransactionNum);
values[4] = TimestampTzGetDatum(curEdge->waitingTransactionStamp);
}
else
{
nulls[3] = true;
nulls[4] = true;
}
values[5] = UInt64GetDatum(curEdge->blockingGPid);
values[6] = Int32GetDatum(curEdge->blockingPid);
values[7] = Int32GetDatum(curEdge->blockingNodeId);
if (curEdge->blockingTransactionNum != 0)
{
values[8] = Int64GetDatum(curEdge->blockingTransactionNum);
values[9] = TimestampTzGetDatum(curEdge->blockingTransactionStamp);
}
else
{
nulls[8] = true;
nulls[9] = true;
}
values[10] = BoolGetDatum(curEdge->isBlockingXactWaiting);
tuplestore_putvalues(tupleStore, tupleDesc, values, nulls);
}
}
/* /*
* BuildLocalWaitGraph builds a wait graph for distributed transactions * BuildLocalWaitGraph builds a wait graph for distributed transactions
* that originate from the local node. * that originate from the local node.
*
* If onlyDistributedTx is true, we only return processes for which a distributed
* transaction has been assigned (e.g., AssignDistributedTransaction() or
* assign_distributed_transactions() has been called for the process). Distributed
* deadlock detection is only interested in these processes.
*/ */
static WaitGraph * static WaitGraph *
BuildLocalWaitGraph(void) BuildLocalWaitGraph(bool onlyDistributedTx)
{ {
PROCStack remaining; PROCStack remaining;
int totalProcs = TotalProcCount(); int totalProcs = TotalProcCount();
@ -379,7 +574,8 @@ BuildLocalWaitGraph(void)
* care about distributed transactions for the purpose of distributed * care about distributed transactions for the purpose of distributed
* deadlock detection. * deadlock detection.
*/ */
if (!IsInDistributedTransaction(&currentBackendData)) if (onlyDistributedTx &&
!IsInDistributedTransaction(&currentBackendData))
{ {
continue; continue;
} }
@ -627,6 +823,7 @@ AddWaitEdge(WaitGraph *waitGraph, PGPROC *waitingProc, PGPROC *blockingProc,
} }
curEdge->waitingPid = waitingProc->pid; curEdge->waitingPid = waitingProc->pid;
curEdge->waitingGPid = waitingBackendData.globalPID;
if (IsInDistributedTransaction(&waitingBackendData)) if (IsInDistributedTransaction(&waitingBackendData))
{ {
@ -645,6 +842,7 @@ AddWaitEdge(WaitGraph *waitGraph, PGPROC *waitingProc, PGPROC *blockingProc,
} }
curEdge->blockingPid = blockingProc->pid; curEdge->blockingPid = blockingProc->pid;
curEdge->blockingGPid = blockingBackendData.globalPID;
if (IsInDistributedTransaction(&blockingBackendData)) if (IsInDistributedTransaction(&blockingBackendData))
{ {

View File

@ -557,7 +557,8 @@ ResetGlobalVariables()
MetadataSyncOnCommit = false; MetadataSyncOnCommit = false;
InTopLevelDelegatedFunctionCall = false; InTopLevelDelegatedFunctionCall = false;
ResetWorkerErrorIndication(); ResetWorkerErrorIndication();
AllowedDistributionColumnValue.isActive = false;
memset(&AllowedDistributionColumnValue, 0,
sizeof(AllowedDistributionColumn));
} }

View File

@ -56,15 +56,12 @@ column_name_to_column(PG_FUNCTION_ARGS)
text *columnText = PG_GETARG_TEXT_P(1); text *columnText = PG_GETARG_TEXT_P(1);
char *columnName = text_to_cstring(columnText); char *columnName = text_to_cstring(columnText);
Relation relation = relation_open(relationId, AccessShareLock);
Var *column = BuildDistributionKeyFromColumnName(relation, columnName);
Var *column = BuildDistributionKeyFromColumnName(relationId, columnName,
AccessShareLock);
Assert(column != NULL); Assert(column != NULL);
char *columnNodeString = nodeToString(column); char *columnNodeString = nodeToString(column);
text *columnNodeText = cstring_to_text(columnNodeString); text *columnNodeText = cstring_to_text(columnNodeString);
relation_close(relation, AccessShareLock);
PG_RETURN_TEXT_P(columnNodeText); PG_RETURN_TEXT_P(columnNodeText);
} }
@ -81,13 +78,10 @@ column_name_to_column_id(PG_FUNCTION_ARGS)
Oid distributedTableId = PG_GETARG_OID(0); Oid distributedTableId = PG_GETARG_OID(0);
char *columnName = PG_GETARG_CSTRING(1); char *columnName = PG_GETARG_CSTRING(1);
Relation relation = relation_open(distributedTableId, AccessExclusiveLock);
Var *column = BuildDistributionKeyFromColumnName(relation, columnName);
Var *column = BuildDistributionKeyFromColumnName(distributedTableId, columnName,
AccessExclusiveLock);
Assert(column != NULL); Assert(column != NULL);
relation_close(relation, NoLock);
PG_RETURN_INT16((int16) column->varattno); PG_RETURN_INT16((int16) column->varattno);
} }
@ -107,8 +101,9 @@ column_to_column_name(PG_FUNCTION_ARGS)
text *columnNodeText = PG_GETARG_TEXT_P(1); text *columnNodeText = PG_GETARG_TEXT_P(1);
char *columnNodeString = text_to_cstring(columnNodeText); char *columnNodeString = text_to_cstring(columnNodeText);
Node *columnNode = stringToNode(columnNodeString);
char *columnName = ColumnToColumnName(relationId, columnNodeString); char *columnName = ColumnToColumnName(relationId, columnNode);
text *columnText = cstring_to_text(columnName); text *columnText = cstring_to_text(columnName);
@ -116,53 +111,6 @@ column_to_column_name(PG_FUNCTION_ARGS)
} }
/*
* FindColumnWithNameOnTargetRelation gets a source table and
* column name. The function returns the the column with the
* same name on the target table.
*
* Note that due to dropping columns, the parent's distribution key may not
* match the partition's distribution key. See issue #5123.
*
* The function throws error if the input or output is not valid or does
* not exist.
*/
Var *
FindColumnWithNameOnTargetRelation(Oid sourceRelationId, char *sourceColumnName,
Oid targetRelationId)
{
if (sourceColumnName == NULL || sourceColumnName[0] == '\0')
{
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("cannot find the given column on table \"%s\"",
generate_qualified_relation_name(sourceRelationId))));
}
AttrNumber attributeNumberOnTarget = get_attnum(targetRelationId, sourceColumnName);
if (attributeNumberOnTarget == InvalidAttrNumber)
{
ereport(ERROR, (errmsg("Column \"%s\" does not exist on "
"relation \"%s\"", sourceColumnName,
get_rel_name(targetRelationId))));
}
Index varNo = 1;
Oid targetTypeId = InvalidOid;
int32 targetTypMod = 0;
Oid targetCollation = InvalidOid;
Index varlevelsup = 0;
/* this function throws error in case anything goes wrong */
get_atttypetypmodcoll(targetRelationId, attributeNumberOnTarget,
&targetTypeId, &targetTypMod, &targetCollation);
Var *targetColumn =
makeVar(varNo, attributeNumberOnTarget, targetTypeId, targetTypMod,
targetCollation, varlevelsup);
return targetColumn;
}
/* /*
* BuildDistributionKeyFromColumnName builds a simple distribution key consisting * BuildDistributionKeyFromColumnName builds a simple distribution key consisting
* only out of a reference to the column of name columnName. Errors out if the * only out of a reference to the column of name columnName. Errors out if the
@ -173,9 +121,18 @@ FindColumnWithNameOnTargetRelation(Oid sourceRelationId, char *sourceColumnName,
* corresponds to reference tables. * corresponds to reference tables.
*/ */
Var * Var *
BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnName) BuildDistributionKeyFromColumnName(Oid relationId, char *columnName, LOCKMODE lockMode)
{ {
char *tableName = RelationGetRelationName(distributedRelation);
Relation relation = try_relation_open(relationId, ExclusiveLock);
if (relation == NULL)
{
ereport(ERROR, (errmsg("relation does not exist")));
}
relation_close(relation, NoLock);
char *tableName = get_rel_name(relationId);
/* short circuit for reference tables */ /* short circuit for reference tables */
if (columnName == NULL) if (columnName == NULL)
@ -187,8 +144,7 @@ BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnNam
truncate_identifier(columnName, strlen(columnName), true); truncate_identifier(columnName, strlen(columnName), true);
/* lookup column definition */ /* lookup column definition */
HeapTuple columnTuple = SearchSysCacheAttName(RelationGetRelid(distributedRelation),
columnName);
HeapTuple columnTuple = SearchSysCacheAttName(relationId, columnName);
if (!HeapTupleIsValid(columnTuple)) if (!HeapTupleIsValid(columnTuple))
{ {
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN),
@ -218,15 +174,13 @@ BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnNam
/* /*
* ColumnToColumnName returns the human-readable name of a column given a * ColumnToColumnName returns the human-readable name of a column given a
* relation identifier and the column's internal textual (Var) representation. * relation identifier and the column's internal (Var) representation.
* This function will raise an ERROR if no such column can be found or if the * This function will raise an ERROR if no such column can be found or if the
* provided Var refers to a system column. * provided Var refers to a system column.
*/ */
char * char *
ColumnToColumnName(Oid relationId, char *columnNodeString) ColumnToColumnName(Oid relationId, Node *columnNode)
{ {
Node *columnNode = stringToNode(columnNodeString);
if (columnNode == NULL || !IsA(columnNode, Var)) if (columnNode == NULL || !IsA(columnNode, Var))
{ {
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),

View File

@ -1012,8 +1012,8 @@ CitusRangeVarCallbackForLockTable(const RangeVar *rangeVar, Oid relationId,
return; return;
} }
/* we only allow tables and views to be locked */
if (!RegularTable(relationId))
/* we only allow tables, views and foreign tables to be locked */
if (!RegularTable(relationId) && !IsForeignTable(relationId))
{ {
ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a table", rangeVar->relname))); errmsg("\"%s\" is not a table", rangeVar->relname)));

View File

@ -13,8 +13,10 @@
#include "catalog/dependency.h" #include "catalog/dependency.h"
#include "catalog/pg_collation.h" #include "catalog/pg_collation.h"
#include "catalog/pg_proc.h" #include "catalog/pg_proc.h"
#include "catalog/pg_ts_config.h"
#include "catalog/pg_type.h" #include "catalog/pg_type.h"
#include "fmgr.h" #include "fmgr.h"
#include "funcapi.h"
#include "nodes/makefuncs.h" #include "nodes/makefuncs.h"
#include "nodes/nodes.h" #include "nodes/nodes.h"
#include "parser/parse_type.h" #include "parser/parse_type.h"
@ -28,13 +30,17 @@
#include "distributed/commands.h" #include "distributed/commands.h"
#include "distributed/commands/utility_hook.h" #include "distributed/commands/utility_hook.h"
#include "distributed/deparser.h" #include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/metadata/distobject.h" #include "distributed/metadata/distobject.h"
#include "distributed/worker_create_or_replace.h" #include "distributed/worker_create_or_replace.h"
#include "distributed/worker_protocol.h" #include "distributed/worker_protocol.h"
static const char * CreateStmtByObjectAddress(const ObjectAddress *address); static List * CreateStmtListByObjectAddress(const ObjectAddress *address);
static bool CompareStringList(List *list1, List *list2);
PG_FUNCTION_INFO_V1(worker_create_or_replace_object); PG_FUNCTION_INFO_V1(worker_create_or_replace_object);
PG_FUNCTION_INFO_V1(worker_create_or_replace_object_array);
static bool WorkerCreateOrReplaceObject(List *sqlStatements);
/* /*
@ -51,6 +57,37 @@ WrapCreateOrReplace(const char *sql)
} }
/*
* WrapCreateOrReplaceList takes a list of sql commands and wraps it in a call to citus'
* udf to create or replace the existing object based on its create commands.
*/
char *
WrapCreateOrReplaceList(List *sqls)
{
StringInfoData textArrayLitteral = { 0 };
initStringInfo(&textArrayLitteral);
appendStringInfoString(&textArrayLitteral, "ARRAY[");
const char *sql = NULL;
bool first = true;
foreach_ptr(sql, sqls)
{
if (!first)
{
appendStringInfoString(&textArrayLitteral, ", ");
}
appendStringInfoString(&textArrayLitteral, quote_literal_cstr(sql));
first = false;
}
appendStringInfoString(&textArrayLitteral, "]::text[]");
StringInfoData buf = { 0 };
initStringInfo(&buf);
appendStringInfo(&buf, CREATE_OR_REPLACE_COMMAND, textArrayLitteral.data);
return buf.data;
}
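
Assuming CREATE_OR_REPLACE_COMMAND still expands to the SELECT worker_create_or_replace_object(%s); template, WrapCreateOrReplaceList effectively turns a list of DDL strings into one UDF call taking a text[] argument. A simplified standalone sketch of the resulting string follows; the real code quotes through quote_literal_cstr and builds into a StringInfo, whereas this version assumes no embedded quotes:

#include <stdio.h>

int
main(void)
{
	const char *sqls[] = { "CREATE TYPE pair AS (a int, b int)",
	                       "ALTER TYPE pair OWNER TO postgres" };
	int sqlCount = 2;

	printf("SELECT worker_create_or_replace_object(ARRAY[");
	for (int i = 0; i < sqlCount; i++)
	{
		/* simplified literal quoting; assumes no embedded quotes */
		printf("%s'%s'", i > 0 ? ", " : "", sqls[i]);
	}
	printf("]::text[]);\n");
	return 0;
}

For the two sample statements this prints SELECT worker_create_or_replace_object(ARRAY['CREATE TYPE pair AS (a int, b int)', 'ALTER TYPE pair OWNER TO postgres']::text[]);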
/* /*
* worker_create_or_replace_object(statement text) * worker_create_or_replace_object(statement text)
* *
@ -73,35 +110,102 @@ Datum
worker_create_or_replace_object(PG_FUNCTION_ARGS) worker_create_or_replace_object(PG_FUNCTION_ARGS)
{ {
text *sqlStatementText = PG_GETARG_TEXT_P(0); text *sqlStatementText = PG_GETARG_TEXT_P(0);
const char *sqlStatement = text_to_cstring(sqlStatementText); char *sqlStatement = text_to_cstring(sqlStatementText);
Node *parseTree = ParseTreeNode(sqlStatement); List *sqlStatements = list_make1(sqlStatement);
PG_RETURN_BOOL(WorkerCreateOrReplaceObject(sqlStatements));
}
/*
 * since going to the drop statement might require some resolving we will do a check
 * if the type actually exists instead of adding the IF EXISTS keyword to the
 * statement.
 */
/*
 * worker_create_or_replace_object(statements text[])
 *
 * function is called, by the coordinator, with a CREATE statement for an object. This
 * function implements the CREATE ... IF NOT EXISTS functionality for objects that do not
 * have this functionality or where their implementation is not sufficient.
 *
 * Besides checking if an object of said name exists, it tries to compare the object to be
 * created with the one in the local catalog. If there is a difference, the one in the local
 * catalog will be renamed, after which the statement can be executed on this worker to
 * create the object. If more statements are provided, all are compared in order with the
 * statements generated on the worker. This works assuming a) both citus versions are the
 * same, b) the objects are exactly the same.
 *
 * Renaming has two purposes
 * - free the identifier for creation
 * - non-destructive if there is data stored that would be destroyed if the object was
 *   used in a table on this node, eg. types. If the type would be dropped with a cascade
 *   it would drop any column holding user data for this type.
 */
Datum
worker_create_or_replace_object_array(PG_FUNCTION_ARGS)
{
List *sqlStatements = NIL;
Datum *textArray = NULL;
int length = 0;
deconstruct_array(PG_GETARG_ARRAYTYPE_P(0), TEXTOID, -1, false, 'i', &textArray,
NULL, &length);
for (int i = 0; i < length; i++)
{
sqlStatements = lappend(sqlStatements, TextDatumGetCString(textArray[i]));
}
if (list_length(sqlStatements) < 1)
{
ereport(ERROR, (errmsg("expected atleast 1 statement to be provided")));
}
PG_RETURN_BOOL(WorkerCreateOrReplaceObject(sqlStatements));
}
/*
* WorkerCreateOrReplaceObject implements the logic used by both variants of
* worker_create_or_replace_object to either create the object or coming to the conclusion
* the object already exists in the correct state.
*
* Returns true if the object has been created, false if it was already in the exact state
* it was asked for.
*/
static bool
WorkerCreateOrReplaceObject(List *sqlStatements)
{
/*
* To check which object we are changing we find the object address from the first
* statement passed into the UDF. Later we will check if all object addresses are the
* same.
*
* Although many of the objects will only have one statement in this call, more
* complex objects might come with a list of statements. We assume they all are on the
* same subject.
*/
Node *parseTree = ParseTreeNode(linitial(sqlStatements));
ObjectAddress address = GetObjectAddressFromParseTree(parseTree, true); ObjectAddress address = GetObjectAddressFromParseTree(parseTree, true);
if (ObjectExists(&address)) if (ObjectExists(&address))
{
const char *localSqlStatement = CreateStmtByObjectAddress(&address);
if (strcmp(sqlStatement, localSqlStatement) == 0)
{ {
/*
 * TODO string compare is a poor man's comparison, but calling equal on the
 * parsetree's returns false because there is extra information list character
 * position of some sort
 */
/*
 * parseTree sent by the coordinator is the same as we would create for our
 * object, therefore we can omit the create statement locally and not create
 * the object as it already exists.
 *
 * We let the coordinator know we didn't create the object.
 */
PG_RETURN_BOOL(false);
/*
 * Object with name from statement is already found locally, check if states are
 * identical. If objects differ we will rename the old object (non-destructively)
 * as to make room to create the new object according to the spec sent.
 */
/*
 * Based on the local catalog we generate the list of commands we would send to
 * recreate our version of the object. This we can compare to what the coordinator
 * sent us. If they match we don't do anything.
 */
List *localSqlStatements = CreateStmtListByObjectAddress(&address);
if (CompareStringList(sqlStatements, localSqlStatements))
{
/*
 * statements sent by the coordinator are the same as we would create for our
 * object, therefore we can omit the statements locally and not create the
 * object as it already exists in the correct shape.
 *
 * We let the coordinator know we didn't create the object.
 */
return false;
} }
char *newName = GenerateBackupNameForCollision(&address); char *newName = GenerateBackupNameForCollision(&address);
@ -113,12 +217,47 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS)
NULL, None_Receiver, NULL); NULL, None_Receiver, NULL);
} }
/* apply create statement locally */
/* apply all statements locally */
char *sqlStatement = NULL;
foreach_ptr(sqlStatement, sqlStatements)
{
parseTree = ParseTreeNode(sqlStatement);
ProcessUtilityParseTree(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL, ProcessUtilityParseTree(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL,
None_Receiver, NULL); None_Receiver, NULL);
/* TODO verify all statements are about exactly 1 subject, mostly a sanity check
* to prevent unintentional use of this UDF, needs to come after the local
* execution to be able to actually resolve the ObjectAddress of the newly created
* object */
}
/* type has been created */ /* type has been created */
PG_RETURN_BOOL(true); return true;
}
static bool
CompareStringList(List *list1, List *list2)
{
if (list_length(list1) != list_length(list2))
{
return false;
}
ListCell *cell1 = NULL;
ListCell *cell2 = NULL;
forboth(cell1, list1, cell2, list2)
{
const char *str1 = lfirst(cell1);
const char *str2 = lfirst(cell2);
if (strcmp(str1, str2) != 0)
{
return false;
}
}
return true;
} }
@ -130,24 +269,38 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS)
* therefore you cannot equal this tree against parsed statement. Instead it can be * therefore you cannot equal this tree against parsed statement. Instead it can be
* deparsed to do a string comparison. * deparsed to do a string comparison.
*/ */
static const char *
CreateStmtByObjectAddress(const ObjectAddress *address)
static List *
CreateStmtListByObjectAddress(const ObjectAddress *address)
{ {
switch (getObjectClass(address)) switch (getObjectClass(address))
{ {
case OCLASS_COLLATION: case OCLASS_COLLATION:
{ {
return CreateCollationDDL(address->objectId); return list_make1(CreateCollationDDL(address->objectId));
} }
case OCLASS_PROC: case OCLASS_PROC:
{ {
return GetFunctionDDLCommand(address->objectId, false); return list_make1(GetFunctionDDLCommand(address->objectId, false));
}
case OCLASS_TSCONFIG:
{
/*
* We do support TEXT SEARCH CONFIGURATION, however, we can't recreate the
* object in 1 command. Since the returned text is compared to the create
* statement sql we always want the sql to be different compared to the
* canonical creation sql we return here, hence we return an empty string, as
* that should never match the sql we have passed in for the creation.
*/
List *stmts = GetCreateTextSearchConfigStatements(address);
return DeparseTreeNodes(stmts);
} }
case OCLASS_TYPE: case OCLASS_TYPE:
{ {
return DeparseTreeNode(CreateTypeStmtByObjectAddress(address)); return list_make1(DeparseTreeNode(CreateTypeStmtByObjectAddress(address)));
} }
default: default:
@ -179,6 +332,11 @@ GenerateBackupNameForCollision(const ObjectAddress *address)
return GenerateBackupNameForProcCollision(address); return GenerateBackupNameForProcCollision(address);
} }
case OCLASS_TSCONFIG:
{
return GenerateBackupNameForTextSearchConfiguration(address);
}
case OCLASS_TYPE: case OCLASS_TYPE:
{ {
return GenerateBackupNameForTypeCollision(address); return GenerateBackupNameForTypeCollision(address);
@ -256,6 +414,25 @@ CreateRenameTypeStmt(const ObjectAddress *address, char *newName)
} }
/*
* CreateRenameTextSearchStmt creates a rename statement for a text search configuration
* based on its ObjectAddress. The rename statement will rename the existing object on its
* address to the value provided in newName.
*/
static RenameStmt *
CreateRenameTextSearchStmt(const ObjectAddress *address, char *newName)
{
Assert(address->classId == TSConfigRelationId);
RenameStmt *stmt = makeNode(RenameStmt);
stmt->renameType = OBJECT_TSCONFIGURATION;
stmt->object = (Node *) get_ts_config_namelist(address->objectId);
stmt->newname = newName;
return stmt;
}
/* /*
* CreateRenameTypeStmt creates a rename statement for a type based on its ObjectAddress. * CreateRenameTypeStmt creates a rename statement for a type based on its ObjectAddress.
* The rename statement will rename the existing object on its address to the value * The rename statement will rename the existing object on its address to the value
@ -325,6 +502,11 @@ CreateRenameStatement(const ObjectAddress *address, char *newName)
return CreateRenameProcStmt(address, newName); return CreateRenameProcStmt(address, newName);
} }
case OCLASS_TSCONFIG:
{
return CreateRenameTextSearchStmt(address, newName);
}
case OCLASS_TYPE: case OCLASS_TYPE:
{ {
return CreateRenameTypeStmt(address, newName); return CreateRenameTypeStmt(address, newName);

View File

@ -50,7 +50,6 @@ typedef struct ColumnarScanDescData *ColumnarScanDesc;
const TableAmRoutine * GetColumnarTableAmRoutine(void); const TableAmRoutine * GetColumnarTableAmRoutine(void);
extern void columnar_tableam_init(void); extern void columnar_tableam_init(void);
extern bool CheckCitusVersion(int elevel);
extern TableScanDesc columnar_beginscan_extended(Relation relation, Snapshot snapshot, extern TableScanDesc columnar_beginscan_extended(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key, int nkeys, ScanKey key,
ParallelTableScanDesc parallel_scan, ParallelTableScanDesc parallel_scan,

View File

@ -29,7 +29,6 @@
typedef struct CitusInitiatedBackend typedef struct CitusInitiatedBackend
{ {
int initiatorNodeIdentifier; int initiatorNodeIdentifier;
bool transactionOriginator;
} CitusInitiatedBackend; } CitusInitiatedBackend;
@ -51,6 +50,7 @@ typedef struct BackendData
slock_t mutex; slock_t mutex;
bool cancelledDueToDeadlock; bool cancelledDueToDeadlock;
uint64 globalPID; uint64 globalPID;
bool distributedCommandOriginator;
CitusInitiatedBackend citusBackend; CitusInitiatedBackend citusBackend;
DistributedTransactionId transactionId; DistributedTransactionId transactionId;
} BackendData; } BackendData;
@ -62,11 +62,16 @@ extern void InitializeBackendData(void);
extern void LockBackendSharedMemory(LWLockMode lockMode); extern void LockBackendSharedMemory(LWLockMode lockMode);
extern void UnlockBackendSharedMemory(void); extern void UnlockBackendSharedMemory(void);
extern void UnSetDistributedTransactionId(void); extern void UnSetDistributedTransactionId(void);
extern void UnSetGlobalPID(void);
extern void AssignDistributedTransactionId(void); extern void AssignDistributedTransactionId(void);
extern void MarkCitusInitiatedCoordinatorBackend(void); extern void MarkCitusInitiatedCoordinatorBackend(void);
extern void AssignGlobalPID(void); extern void AssignGlobalPID(void);
extern uint64 GetGlobalPID(void); extern uint64 GetGlobalPID(void);
extern void OverrideBackendDataDistributedCommandOriginator(bool
distributedCommandOriginator);
extern uint64 ExtractGlobalPID(char *applicationName); extern uint64 ExtractGlobalPID(char *applicationName);
extern int ExtractNodeIdFromGlobalPID(uint64 globalPID);
extern int ExtractProcessIdFromGlobalPID(uint64 globalPID);
extern void GetBackendDataForProc(PGPROC *proc, BackendData *result); extern void GetBackendDataForProc(PGPROC *proc, BackendData *result);
extern void CancelTransactionDueToDeadlock(PGPROC *proc); extern void CancelTransactionDueToDeadlock(PGPROC *proc);
extern bool MyBackendGotCancelledDueToDeadlock(bool clearState); extern bool MyBackendGotCancelledDueToDeadlock(bool clearState);
@ -77,6 +82,11 @@ extern int GetAllActiveClientBackendCount(void);
extern void IncrementClientBackendCounter(void); extern void IncrementClientBackendCounter(void);
extern void DecrementClientBackendCounter(void); extern void DecrementClientBackendCounter(void);
extern bool ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort,
char *queryString, StringInfo queryResultString,
bool reportResultError);
#define INVALID_CITUS_INTERNAL_BACKEND_GPID 0 #define INVALID_CITUS_INTERNAL_BACKEND_GPID 0
#define GLOBAL_PID_NODE_ID_FOR_NODES_NOT_IN_METADATA 99999999
#endif /* BACKEND_DATA_H */ #endif /* BACKEND_DATA_H */

View File

@ -151,6 +151,8 @@ extern ObjectAddress AlterCollationSchemaStmtObjectAddress(Node *stmt,
extern List * PostprocessAlterCollationSchemaStmt(Node *stmt, const char *queryString); extern List * PostprocessAlterCollationSchemaStmt(Node *stmt, const char *queryString);
extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *address); extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *address);
extern ObjectAddress DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok); extern ObjectAddress DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok);
extern List * PreprocessDefineCollationStmt(Node *stmt, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern List * PostprocessDefineCollationStmt(Node *stmt, const char *queryString); extern List * PostprocessDefineCollationStmt(Node *stmt, const char *queryString);
/* database.c - forward declarations */ /* database.c - forward declarations */
@ -465,6 +467,54 @@ extern Oid GetSequenceOid(Oid relationId, AttrNumber attnum);
extern bool ConstrTypeUsesIndex(ConstrType constrType); extern bool ConstrTypeUsesIndex(ConstrType constrType);
/* text_search.c - forward declarations */
extern List * PostprocessCreateTextSearchConfigurationStmt(Node *node,
const char *queryString);
extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address);
extern List * CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address);
extern List * PreprocessDropTextSearchConfigurationStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PreprocessAlterTextSearchConfigurationStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PreprocessRenameTextSearchConfigurationStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node,
const char *queryString);
extern List * PreprocessTextSearchConfigurationCommentStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node,
const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node,
const char *queryString);
extern ObjectAddress CreateTextSearchConfigurationObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress RenameTextSearchConfigurationStmtObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress AlterTextSearchConfigurationStmtObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress TextSearchConfigurationCommentObjectAddress(Node *node,
bool missing_ok);
extern ObjectAddress AlterTextSearchConfigurationOwnerObjectAddress(Node *node,
bool missing_ok);
extern char * GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address);
extern List * get_ts_config_namelist(Oid tsconfigOid);
/* truncate.c - forward declarations */ /* truncate.c - forward declarations */
extern void PreprocessTruncateStatement(TruncateStmt *truncateStatement); extern void PreprocessTruncateStatement(TruncateStmt *truncateStatement);

View File

@ -31,6 +31,7 @@ extern void AssertObjectTypeIsFunctional(ObjectType type);
extern void QualifyTreeNode(Node *stmt); extern void QualifyTreeNode(Node *stmt);
extern char * DeparseTreeNode(Node *stmt); extern char * DeparseTreeNode(Node *stmt);
extern List * DeparseTreeNodes(List *stmts);
/* forward declarations for deparse_attribute_stmts.c */ /* forward declarations for deparse_attribute_stmts.c */
extern char * DeparseRenameAttributeStmt(Node *); extern char * DeparseRenameAttributeStmt(Node *);
@ -59,6 +60,15 @@ extern char * DeparseAlterTableStmt(Node *node);
extern void QualifyAlterTableSchemaStmt(Node *stmt); extern void QualifyAlterTableSchemaStmt(Node *stmt);
/* forward declarations for deparse_text_search.c */
extern char * DeparseCreateTextSearchStmt(Node *node);
extern char * DeparseDropTextSearchConfigurationStmt(Node *node);
extern char * DeparseRenameTextSearchConfigurationStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationSchemaStmt(Node *node);
extern char * DeparseTextSearchConfigurationCommentStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationOwnerStmt(Node *node);
/* forward declarations for deparse_schema_stmts.c */ /* forward declarations for deparse_schema_stmts.c */
extern char * DeparseCreateSchemaStmt(Node *node); extern char * DeparseCreateSchemaStmt(Node *node);
extern char * DeparseDropSchemaStmt(Node *node); extern char * DeparseDropSchemaStmt(Node *node);
@ -140,6 +150,14 @@ extern char * DeparseAlterExtensionStmt(Node *stmt);
/* forward declarations for deparse_database_stmts.c */ /* forward declarations for deparse_database_stmts.c */
extern char * DeparseAlterDatabaseOwnerStmt(Node *node); extern char * DeparseAlterDatabaseOwnerStmt(Node *node);
/* forward declarations for deparse_text_search_stmts.c */
extern void QualifyDropTextSearchConfigurationStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationStmt(Node *node);
extern void QualifyRenameTextSearchConfigurationStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationSchemaStmt(Node *node);
extern void QualifyTextSearchConfigurationCommentStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationOwnerStmt(Node *node);
/* forward declarations for deparse_sequence_stmts.c */ /* forward declarations for deparse_sequence_stmts.c */
extern char * DeparseDropSequenceStmt(Node *node); extern char * DeparseDropSequenceStmt(Node *node);
extern char * DeparseRenameSequenceStmt(Node *node); extern char * DeparseRenameSequenceStmt(Node *node);

View File

@ -19,11 +19,9 @@
/* Remaining metadata utility functions */ /* Remaining metadata utility functions */
extern Var * FindColumnWithNameOnTargetRelation(Oid sourceRelationId,
char *sourceColumnName,
Oid targetRelationId);
extern Var * BuildDistributionKeyFromColumnName(Relation distributedRelation,
char *columnName);
extern char * ColumnToColumnName(Oid relationId, char *columnNodeString);
extern Var * BuildDistributionKeyFromColumnName(Oid relationId,
char *columnName,
LOCKMODE lockMode);
extern char * ColumnToColumnName(Oid relationId, Node *columnNode);
#endif /* DISTRIBUTION_COLUMN_H */ #endif /* DISTRIBUTION_COLUMN_H */

View File

@ -23,7 +23,7 @@ extern bool InTopLevelDelegatedFunctionCall;
extern bool InDelegatedProcedureCall; extern bool InDelegatedProcedureCall;
PlannedStmt * TryToDelegateFunctionCall(DistributedPlanningContext *planContext); PlannedStmt * TryToDelegateFunctionCall(DistributedPlanningContext *planContext);
extern void ResetAllowedShardKeyValue(void); extern void CheckAndResetAllowedShardKeyValueIfNeeded(void);
extern bool IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId); extern bool IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId);
#endif /* FUNCTION_CALL_DELEGATION_H */ #endif /* FUNCTION_CALL_DELEGATION_H */

View File

@ -31,11 +31,13 @@
*/ */
typedef struct WaitEdge typedef struct WaitEdge
{ {
uint64 waitingGPid;
int waitingPid; int waitingPid;
int waitingNodeId; int waitingNodeId;
int64 waitingTransactionNum; int64 waitingTransactionNum;
TimestampTz waitingTransactionStamp; TimestampTz waitingTransactionStamp;
uint64 blockingGPid;
int blockingPid; int blockingPid;
int blockingNodeId; int blockingNodeId;
int64 blockingTransactionNum; int64 blockingTransactionNum;
@ -58,7 +60,7 @@ typedef struct WaitGraph
} WaitGraph; } WaitGraph;
extern WaitGraph * BuildGlobalWaitGraph(void); extern WaitGraph * BuildGlobalWaitGraph(bool onlyDistributedTx);
extern bool IsProcessWaitingForLock(PGPROC *proc); extern bool IsProcessWaitingForLock(PGPROC *proc);
extern bool IsInDistributedTransaction(BackendData *backendData); extern bool IsInDistributedTransaction(BackendData *backendData);
extern TimestampTz ParseTimestampTzField(PGresult *result, int rowIndex, int colIndex); extern TimestampTz ParseTimestampTzField(PGresult *result, int rowIndex, int colIndex);

View File

@ -19,6 +19,7 @@
extern List * GetUniqueDependenciesList(List *objectAddressesList); extern List * GetUniqueDependenciesList(List *objectAddressesList);
extern List * GetDependenciesForObject(const ObjectAddress *target); extern List * GetDependenciesForObject(const ObjectAddress *target);
extern List * GetAllSupportedDependenciesForObject(const ObjectAddress *target);
extern List * GetAllDependenciesForObject(const ObjectAddress *target); extern List * GetAllDependenciesForObject(const ObjectAddress *target);
extern List * OrderObjectAddressListInDependencyOrder(List *objectAddressList); extern List * OrderObjectAddressListInDependencyOrder(List *objectAddressList);
extern bool SupportedDependencyByCitus(const ObjectAddress *address); extern bool SupportedDependencyByCitus(const ObjectAddress *address);

View File

@ -30,8 +30,8 @@ extern bool IsObjectAddressOwnedByExtension(const ObjectAddress *target,
ObjectAddress *extensionAddress); ObjectAddress *extensionAddress);
extern ObjectAddress PgGetObjectAddress(char *ttype, ArrayType *namearr, extern ObjectAddress PgGetObjectAddress(char *ttype, ArrayType *namearr,
ArrayType *argsarr); ArrayType *argsarr);
extern List * GetDistributedObjectAddressList(void); extern List * GetDistributedObjectAddressList(void);
extern RoleSpec * GetRoleSpecObjectForUser(Oid roleOid);
extern void UpdateDistributedObjectColocationId(uint32 oldColocationId, uint32 extern void UpdateDistributedObjectColocationId(uint32 oldColocationId, uint32
newColocationId); newColocationId);
#endif /* CITUS_METADATA_DISTOBJECT_H */ #endif /* CITUS_METADATA_DISTOBJECT_H */

View File

@ -238,7 +238,7 @@ extern void DeleteShardRow(uint64 shardId);
extern void UpdateShardPlacementState(uint64 placementId, char shardState); extern void UpdateShardPlacementState(uint64 placementId, char shardState);
extern void UpdatePlacementGroupId(uint64 placementId, int groupId); extern void UpdatePlacementGroupId(uint64 placementId, int groupId);
extern void DeleteShardPlacementRow(uint64 placementId); extern void DeleteShardPlacementRow(uint64 placementId);
extern void CreateDistributedTable(Oid relationId, Var *distributionColumn, extern void CreateDistributedTable(Oid relationId, char *distributionColumnName,
char distributionMethod, int shardCount, char distributionMethod, int shardCount,
bool shardCountIsStrict, char *colocateWithTableName, bool shardCountIsStrict, char *colocateWithTableName,
bool viaDeprecatedAPI); bool viaDeprecatedAPI);

View File

@ -70,6 +70,9 @@ typedef struct AllowedDistributionColumn
Const *distributionColumnValue; Const *distributionColumnValue;
uint32 colocationId; uint32 colocationId;
bool isActive; bool isActive;
/* In nested executor, track the level at which value is set */
int executorLevel;
} AllowedDistributionColumn; } AllowedDistributionColumn;
/* /*

View File

@ -19,6 +19,7 @@
#define CREATE_OR_REPLACE_COMMAND "SELECT worker_create_or_replace_object(%s);" #define CREATE_OR_REPLACE_COMMAND "SELECT worker_create_or_replace_object(%s);"
extern char * WrapCreateOrReplace(const char *sql); extern char * WrapCreateOrReplace(const char *sql);
extern char * WrapCreateOrReplaceList(List *sqls);
extern char * GenerateBackupNameForCollision(const ObjectAddress *address); extern char * GenerateBackupNameForCollision(const ObjectAddress *address);
extern RenameStmt * CreateRenameStatement(const ObjectAddress *address, char *newName); extern RenameStmt * CreateRenameStatement(const ObjectAddress *address, char *newName);

View File

@ -86,6 +86,7 @@ extern List * ActiveReadableNodeList(void);
extern WorkerNode * FindWorkerNode(const char *nodeName, int32 nodePort);
extern WorkerNode * FindWorkerNodeOrError(const char *nodeName, int32 nodePort);
extern WorkerNode * FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePort);
extern WorkerNode * FindNodeWithNodeId(int nodeId);
extern List * ReadDistNode(bool includeNodesFromOtherClusters);
extern void EnsureCoordinator(void);
extern void InsertCoordinatorIfClusterEmpty(void);

View File

@ -256,3 +256,7 @@ s/CREATE TABLESPACE test_tablespace LOCATION.*/CREATE TABLESPACE test_tablespace
s/(.*absolute correlation \()([0,1]\.[0-9]+)(\) of var attribute [0-9]+ is smaller than.*)/\1X\.YZ\3/g
s/NOTICE: issuing WITH placement_data\(shardid, shardstate, shardlength, groupid, placementid\) AS \(VALUES \([0-9]+, [0-9]+, [0-9]+, [0-9]+, [0-9]+\)\)/NOTICE: issuing WITH placement_data\(shardid, shardstate, shardlength, groupid, placementid\) AS \(VALUES \(xxxxxx, xxxxxx, xxxxxx, xxxxxx, xxxxxx\)\)/g
# global_pid when pg_cancel_backend is sent to workers
s/pg_cancel_backend\('[0-9]+'::bigint\)/pg_cancel_backend('xxxxx'::bigint)/g
s/issuing SELECT pg_cancel_backend\([0-9]+::integer\)/issuing SELECT pg_cancel_backend(xxxxx::integer)/g

View File

@ -875,6 +875,7 @@ BEGIN
RETURN $1 * $1;
END;
$function$;
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION square_func(int)
RETURNS int
LANGUAGE plpgsql
@ -883,6 +884,7 @@ BEGIN
RETURN $1 * $1;
END;
$function$;
RESET citus.enable_metadata_sync;
SELECT const_function(1), string_agg(a::character, ',') FROM t1;
NOTICE: stable_fn called
CONTEXT: PL/pgSQL function const_function(integer) line XX at RAISE

View File

@ -724,6 +724,58 @@ $$);
(localhost,57638,t,0)
(2 rows)
-- verify that partitioned citus local tables with dropped columns can be distributed. issue: #5577
CREATE TABLE parent_dropped_col(a int, eventtime date) PARTITION BY RANGE ( eventtime);
SELECT citus_add_local_table_to_metadata('parent_dropped_col');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
ALTER TABLE parent_dropped_col DROP column a;
CREATE TABLE parent_dropped_col_1 PARTITION OF parent_dropped_col for VALUES FROM ('2000-01-01') TO ('2001-01-01');
SELECT create_distributed_table('parent_dropped_col', 'eventtime');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- another example to test
CREATE TABLE parent_dropped_col_2(
col_to_drop_0 text,
col_to_drop_1 text,
col_to_drop_2 date,
col_to_drop_3 inet,
col_to_drop_4 date,
measureid integer,
eventdatetime date,
measure_data jsonb,
PRIMARY KEY (measureid, eventdatetime, measure_data))
PARTITION BY RANGE(eventdatetime);
select citus_add_local_table_to_metadata('parent_dropped_col_2');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
ALTER TABLE parent_dropped_col_2 DROP COLUMN col_to_drop_1;
CREATE TABLE parent_dropped_col_2_2000 PARTITION OF parent_dropped_col_2 FOR VALUES FROM ('2000-01-01') TO ('2001-01-01');
SELECT create_distributed_table('parent_dropped_col_2', 'measureid');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- verify that the partitioned tables are distributed with the correct distribution column
SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition
WHERE logicalrelid IN ('parent_dropped_col'::regclass, 'parent_dropped_col_2'::regclass)
ORDER BY logicalrelid;
logicalrelid | partmethod | partkey
---------------------------------------------------------------------
parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}
parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location -1}
(2 rows)
-- cleanup at exit
set client_min_messages to error;
DROP SCHEMA citus_local_tables_mx CASCADE;
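The hunk above adds coverage for distributing a partitioned Citus local table after one of its columns has been dropped (issue #5577). A condensed sketch of that scenario, run on the coordinator with illustrative object names, looks like this:
-- sketch only: table and partition names are illustrative, not part of the test suite
CREATE TABLE events (col_to_drop int, eventtime date) PARTITION BY RANGE (eventtime);
SELECT citus_add_local_table_to_metadata('events');
ALTER TABLE events DROP COLUMN col_to_drop;
CREATE TABLE events_2000 PARTITION OF events FOR VALUES FROM ('2000-01-01') TO ('2001-01-01');
-- the new test asserts this succeeds and that pg_dist_partition records eventtime as the distribution column
SELECT create_distributed_table('events', 'eventtime');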

View File

@ -58,6 +58,7 @@ CREATE TABLE postgres_local_table(a int, b int);
-- We shouldn't use LIMIT in INSERT SELECT queries to make the test faster as
-- LIMIT would force planner to wrap SELECT query in an intermediate result and
-- this might reduce the coverage of the test cases.
SET citus.enable_metadata_sync TO OFF;
CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$
BEGIN
SET client_min_messages to ERROR;
@ -74,6 +75,7 @@ CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$
RESET client_min_messages;
END;
$$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
---------------------------------------------------------------------
---- SELECT ----
---------------------------------------------------------------------

View File

@ -523,6 +523,7 @@ BEGIN
RETURN trunc(random() * (end_int-start_int) + start_int);
END;
$$ LANGUAGE 'plpgsql' STRICT;
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE PROCEDURE coordinator_evaluation.test_procedure(int)
LANGUAGE plpgsql
AS $procedure$
@ -532,6 +533,7 @@ BEGIN
PERFORM DISTINCT value FROM coordinator_evaluation_table_2 WHERE key = filterKey;
END;
$procedure$;
RESET citus.enable_metadata_sync;
-- we couldn't find a meaningful query to write for this
-- however this query fails before https://github.com/citusdata/citus/pull/3454
SET client_min_messages TO DEBUG2;
@ -564,6 +566,7 @@ BEGIN
INSERT INTO coordinator_evaluation_table_2 VALUES (filterKey, filterKey);
END;
$procedure$;
DEBUG: switching to sequential query execution mode
RESET citus.log_remote_commands ;
RESET client_min_messages;
-- these calls would INSERT key = 101, so test if insert succeeded

View File

@ -833,11 +833,13 @@ EXECUTE router_with_only_function;
SET citus.log_local_commands TO ON;
SET search_path TO coordinator_evaluation_combinations_modify;
-- returns 2 on the worker
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION get_constant_stable()
RETURNS INT AS $$
BEGIN
RETURN 2;
END; $$ language plpgsql STABLE;
RESET citus.enable_metadata_sync;
-- all local values
INSERT INTO user_info_data (user_id, u_data) VALUES
(3, '(''test3'', 3)'), (4, '(''test4'', 4)'), (7, '(''test7'', 7)'),

View File

@ -898,9 +898,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in
-- a helper function which return true if the coordinated
-- trannsaction uses 2PC
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC()
RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus',
$$coordinated_transaction_should_use_2PC$$;
RESET citus.enable_metadata_sync;
-- a local SELECT followed by remote SELECTs
-- does not trigger 2PC
BEGIN;

View File

@ -163,3 +163,19 @@ SELECT run_command_on_workers($$DROP USER collationuser;$$);
(localhost,57638,t,"DROP ROLE")
(2 rows)
\c - - - :worker_1_port
-- test creating a collation on a worker
CREATE COLLATION another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk');
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
-- test if creating a collation on a worker on a local
-- schema raises the right error
SET citus.enable_ddl_propagation TO off;
CREATE SCHEMA collation_creation_on_worker;
SET citus.enable_ddl_propagation TO on;
CREATE COLLATION collation_creation_on_worker.another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk');
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
SET citus.enable_ddl_propagation TO off;
DROP SCHEMA collation_creation_on_worker;
SET citus.enable_ddl_propagation TO on;
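The hunk above verifies that CREATE COLLATION is rejected on a worker node and that the error hints at running the command on the coordinator instead. For reference, a minimal sketch of the accepted path, assuming a coordinator session and an ICU-enabled build (the collation name is illustrative):
-- sketch only: run on the coordinator, where the DDL is allowed and propagated
CREATE COLLATION german_phonebook_example (provider = icu, locale = 'de-u-co-phonebk');
DROP COLLATION german_phonebook_example;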

View File

@ -3,6 +3,7 @@
CREATE SCHEMA proc_conflict;
\c - - - :worker_1_port
SET search_path TO proc_conflict;
SET citus.enable_metadata_sync TO OFF;
CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$
BEGIN
RETURN state * 2 + i;
@ -12,6 +13,7 @@ CREATE AGGREGATE existing_agg(int) (
SFUNC = existing_func,
STYPE = int
);
RESET citus.enable_metadata_sync;
\c - - - :master_port
SET search_path TO proc_conflict;
CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$
@ -62,6 +64,7 @@ DROP AGGREGATE existing_agg(int) CASCADE;
DROP FUNCTION existing_func(int, int) CASCADE;
\c - - - :worker_1_port
SET search_path TO proc_conflict;
SET citus.enable_metadata_sync TO OFF;
CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$
BEGIN
RETURN state * 3 + i;
@ -71,6 +74,7 @@ CREATE AGGREGATE existing_agg(int) (
SFUNC = existing_func,
STYPE = int
);
RESET citus.enable_metadata_sync;
\c - - - :master_port
SET search_path TO proc_conflict;
CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$

View File

@ -228,7 +228,10 @@ BEGIN
DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT modify_fast_path_plpsql(1,1);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -241,6 +244,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(2,2);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -253,6 +257,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(3,3);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -265,6 +270,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(4,4);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -277,6 +283,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(5,5);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -289,6 +296,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(6,6);
DEBUG: function does not have co-located tables
DEBUG: Deferred pruning for a fast-path router query
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement
@ -301,6 +309,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem
(1 row)
SELECT modify_fast_path_plpsql(6,6);
DEBUG: function does not have co-located tables
modify_fast_path_plpsql
---------------------------------------------------------------------

View File

@ -239,7 +239,7 @@ ERROR: node group 0 does not have a secondary node
-- should work this time
\c -reuse-previous=off regression - - :master_port
SET search_path TO single_node;
SELECT 1 FROM master_add_node('localhost', :follower_master_port, groupid => 0, noderole => 'secondary');
SELECT 1 FROM master_add_node('localhost', :follower_master_port, groupid => 0, noderole => 'secondary', nodecluster => 'second-cluster');
?column?
---------------------------------------------------------------------
1
@ -251,7 +251,7 @@ SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhavesha
1
(1 row)
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'"
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'"
SET search_path TO single_node;
SELECT * FROM test WHERE x = 1;
x | y
@ -387,6 +387,40 @@ SELECT * FROM columnar_test ORDER BY 1,2;
1 | 8
(6 rows)
\c -reuse-previous=off regression - - :master_port
SET citus.shard_replication_factor TO 1;
SET search_path TO single_node;
CREATE TABLE dist_table (a INT, b INT);
SELECT create_distributed_table ('dist_table', 'a', shard_count:=4);
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO dist_table VALUES (1, 1);
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'"
SET search_path TO single_node;
SELECT * FROM dist_table;
a | b
---------------------------------------------------------------------
1 | 1
(1 row)
SELECT global_pid AS follower_coordinator_gpid FROM get_all_active_transactions() WHERE process_id = pg_backend_pid() \gset
SELECT pg_typeof(:follower_coordinator_gpid);
pg_typeof
---------------------------------------------------------------------
bigint
(1 row)
SELECT pg_cancel_backend(:follower_coordinator_gpid);
ERROR: canceling statement due to user request
SET citus.log_remote_commands TO ON;
SELECT pg_cancel_backend(:follower_coordinator_gpid) FROM dist_table WHERE a = 1;
NOTICE: executing the command locally: SELECT pg_cancel_backend('xxxxx'::bigint) AS pg_cancel_backend FROM single_node.dist_table_102008 dist_table WHERE (a OPERATOR(pg_catalog.=) 1)
NOTICE: issuing SELECT pg_cancel_backend(xxxxx::integer)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
ERROR: canceling statement due to user request
-- Cleanup
\c -reuse-previous=off regression - - :master_port
SET search_path TO single_node;
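The hunk above exercises pg_cancel_backend() with a Citus global PID obtained on the follower coordinator. The lookup pattern the test relies on, shown as a stand-alone sketch (the psql variable name is illustrative):
-- sketch only: fetch the global PID of the current backend and cancel it
SELECT global_pid AS my_gpid
FROM get_all_active_transactions()
WHERE process_id = pg_backend_pid() \gset
SELECT pg_cancel_backend(:my_gpid);
-- cancelling your own backend raises: ERROR: canceling statement due to user request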

View File

@ -293,6 +293,8 @@ BEGIN
RETURN ret_val;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION func_calls_forcepush_func()
RETURNS NUMERIC AS $$
DECLARE incremented_val NUMERIC;
@ -302,6 +304,8 @@ BEGIN
RETURN incremented_val;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('func_calls_forcepush_func()');
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
@ -354,12 +358,16 @@ PL/pgSQL function func_calls_forcepush_func() line XX at SQL statement
101
(1 row)
-- Block distributing that function as distributing it causes
-- different test output on PG 14.
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION get_val()
RETURNS INT AS $$
BEGIN
RETURN 100::INT;
END;
$$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
--
-- UDF calling another UDF in a FROM clause
-- fn()
@ -377,7 +385,10 @@ BEGIN
RETURN incremented_val;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT func_calls_forcepush_func_infrom();
DEBUG: function does not have co-located tables
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT inner_force_delegation_function FROM inner_force_delegation_function(add_val + 100)"
PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement
@ -395,6 +406,7 @@ PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement
BEGIN;
SELECT func_calls_forcepush_func_infrom();
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT inner_force_delegation_function FROM inner_force_delegation_function(add_val + 100)"
PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement
@ -428,7 +440,10 @@ BEGIN
RETURN incremented_val;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT func_calls_forcepush_func_intarget();
DEBUG: function does not have co-located tables
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT inner_force_delegation_function(100 + 100) OFFSET 0"
PL/pgSQL function func_calls_forcepush_func_intarget() line XX at SQL statement
@ -446,6 +461,7 @@ PL/pgSQL function func_calls_forcepush_func_intarget() line XX at SQL statement
BEGIN;
SELECT func_calls_forcepush_func_intarget();
DEBUG: not pushing down function calls in a multi-statement transaction
NOTICE: inner_force_delegation_function():201
DETAIL: from localhost:xxxxx
CONTEXT: SQL statement "SELECT inner_force_delegation_function(100 + 100) OFFSET 0"
@ -473,6 +489,8 @@ BEGIN
END if;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('test_recursive(int)', '$1', colocate_with := 'test_nested', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
@ -544,6 +562,8 @@ BEGIN
RETURN x + y;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'test_non_constant(int,bigint)',
'$1',
@ -610,6 +630,8 @@ BEGIN
INSERT INTO emp VALUES (empname, 33);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION outer_emp()
RETURNS void
AS $$
@ -618,6 +640,8 @@ BEGIN
PERFORM inner_emp('hello');
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('inner_emp(text)','empname', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
@ -627,6 +651,7 @@ DETAIL: A command for a distributed function is run. To make sure subsequent co
(1 row)
SELECT outer_emp();
DEBUG: function does not have co-located tables
DEBUG: Skipping pushdown of function from a PL/PgSQL simple expression
CONTEXT: SQL statement "SELECT inner_emp('hello')"
PL/pgSQL function outer_emp() line XX at PERFORM
@ -650,6 +675,8 @@ BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown SELECT(a+1);
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_select_data(int)', 'a',
colocate_with := 'test_forcepushdown',
@ -725,6 +752,8 @@ BEGIN
SELECT intcol FROM forcepushdown_schema.test_forcepushdown_noncolocate;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_select_data_nonlocal(int)', 'a',
colocate_with := 'test_forcepushdown',
@ -803,6 +832,8 @@ BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown_char VALUES (a);
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_data_char(char)', 'a',
colocate_with := 'test_forcepushdown_char',
@ -821,6 +852,8 @@ BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown_varchar VALUES (a);
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_data_varchar(varchar)', 'a',
colocate_with := 'test_forcepushdown_varchar',
@ -839,6 +872,8 @@ BEGIN
INSERT INTO forcepushdown_schema.test_forcepushdown_text VALUES (a);
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_data_text(text)', 'a',
colocate_with := 'test_forcepushdown_text',
@ -947,6 +982,8 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'select_data(int)', 'a',
colocate_with := 'test_subquery',
@ -969,6 +1006,8 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'select_data_noncolocate(int)', 'a',
colocate_with := 'test_subquery',
@ -990,6 +1029,8 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_select_data_cte1(int)', 'a',
colocate_with := 'test_subquery',
@ -1011,6 +1052,8 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_select_data_cte2(int)', 'a',
colocate_with := 'test_subquery',
@ -1033,6 +1076,8 @@ BEGIN
RAISE NOTICE 'Result: %', var;
END;
$fn$;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'insert_data_cte_nondist(int)', 'a',
colocate_with := 'test_subquery',
@ -1203,6 +1248,8 @@ BEGIN
RETURN x + y;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('test_prepare(int,int)','x',force_delegation :=true, colocate_with := 'table_test_prepare');
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
@ -1223,9 +1270,12 @@ BEGIN
PERFORM 1, 1 + a FROM test_prepare(x + 1, y + 1) a;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
-- First 5 get delegated and succeeds
BEGIN;
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1244,6 +1294,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1262,6 +1313,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1280,6 +1332,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1298,6 +1351,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1324,6 +1378,7 @@ SELECT COUNT(*) FROM table_test_prepare;
-- 6th execution will be generic plan and should get delegated
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1342,6 +1397,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
(1 row)
SELECT outer_test_prepare(1,1);
DEBUG: not pushing down function calls in a multi-statement transaction
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1362,6 +1418,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
END;
-- Fails as expected
SELECT outer_test_prepare(1,2);
DEBUG: function does not have co-located tables
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)"
PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM
@ -1397,6 +1454,8 @@ BEGIN
RETURN x;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('test(int)', 'x',
colocate_with := 'test_perform', force_delegation := true);
DEBUG: switching to sequential query execution mode
@ -1418,10 +1477,471 @@ NOTICE: INPUT 3
CONTEXT: PL/pgSQL function test(integer) line XX at RAISE
SQL statement "SELECT test(3)"
PL/pgSQL function inline_code_block line XX at PERFORM
CREATE TABLE testnested_table (x int, y int);
SELECT create_distributed_table('testnested_table','x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE OR REPLACE FUNCTION inner_fn(x int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
-- Non-force function calling force-delegation function
CREATE OR REPLACE FUNCTION outer_local_fn()
RETURNS void
AS $$
DECLARE
BEGIN
PERFORM 1 FROM inner_fn(1);
INSERT INTO forcepushdown_schema.testnested_table VALUES (2,3);
PERFORM 1 FROM inner_fn(4);
INSERT INTO forcepushdown_schema.testnested_table VALUES (5,6);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('inner_fn(int)','x',
colocate_with:='testnested_table', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT outer_local_fn();
DEBUG: function does not have co-located tables
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT 1 FROM inner_fn(1)"
PL/pgSQL function outer_local_fn() line XX at PERFORM
DEBUG: pushing down the function call
CONTEXT: SQL statement "SELECT 1 FROM inner_fn(1)"
PL/pgSQL function outer_local_fn() line XX at PERFORM
DEBUG: pushing down function call in a multi-statement transaction
CONTEXT: SQL statement "SELECT 1 FROM inner_fn(4)"
PL/pgSQL function outer_local_fn() line XX at PERFORM
DEBUG: pushing down the function call
CONTEXT: SQL statement "SELECT 1 FROM inner_fn(4)"
PL/pgSQL function outer_local_fn() line XX at PERFORM
outer_local_fn
---------------------------------------------------------------------
(1 row)
-- Rows from 1-6 should appear
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
1 | 1
2 | 3
4 | 4
5 | 6
(4 rows)
BEGIN;
SELECT outer_local_fn();
DEBUG: not pushing down function calls in a multi-statement transaction
outer_local_fn
---------------------------------------------------------------------
(1 row)
END;
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
1 | 1
1 | 1
2 | 3
2 | 3
4 | 4
4 | 4
5 | 6
5 | 6
(8 rows)
DROP FUNCTION inner_fn(int);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
DROP FUNCTION outer_local_fn();
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
TRUNCATE TABLE testnested_table;
CREATE OR REPLACE FUNCTION inner_fn(x int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
-- Force-delegation function calling non-force function
CREATE OR REPLACE FUNCTION outer_fn(y int, z int)
RETURNS void
AS $$
DECLARE
BEGIN
PERFORM 1 FROM forcepushdown_schema.inner_fn(y);
INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y);
PERFORM 1 FROM forcepushdown_schema.inner_fn(z);
INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function('inner_fn(int)','x',
colocate_with:='testnested_table', force_delegation := false);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function('outer_fn(int, int)','y',
colocate_with:='testnested_table', force_delegation := true);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT outer_fn(1, 2);
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x)"
PL/pgSQL function forcepushdown_schema.inner_fn(integer) line XX at SQL statement
SQL statement "SELECT 1 FROM forcepushdown_schema.inner_fn(z)"
PL/pgSQL function forcepushdown_schema.outer_fn(integer,integer) line XX at PERFORM
while executing command on localhost:xxxxx
BEGIN;
SELECT outer_fn(1, 2);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x)"
PL/pgSQL function forcepushdown_schema.inner_fn(integer) line XX at SQL statement
SQL statement "SELECT 1 FROM forcepushdown_schema.inner_fn(z)"
PL/pgSQL function forcepushdown_schema.outer_fn(integer,integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
-- No rows
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
(0 rows)
-- Force-delegation function calling force-delegation function
CREATE OR REPLACE FUNCTION force_push_inner(y int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_outer(x int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
PERFORM forcepushdown_schema.force_push_inner(x+1) LIMIT 1;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'force_push_outer(int)', 'x',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function(
'force_push_inner(int)', 'y',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
-- Keys 7,8,9,14 fall on one node and 15 on a different node
-- Function gets delegated to node with shard-key = 7 and inner function
-- will not be delegated but inserts shard-key = 8 locally
SELECT force_push_outer(7);
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM
while executing command on localhost:xxxxx
BEGIN;
-- Function gets delegated to node with shard-key = 8 and inner function
-- will not be delegated but inserts shard-key = 9 locally
SELECT force_push_outer(8);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
BEGIN;
-- Function gets delegated to node with shard-key = 14 and inner function
-- will not be delegated but fails to insert shard-key = 15 remotely
SELECT force_push_outer(14);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
(0 rows)
--
-- Function-1() --> function-2() --> function-3()
--
CREATE OR REPLACE FUNCTION force_push_1(x int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
PERFORM forcepushdown_schema.force_push_2(x+1) LIMIT 1;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_2(y int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y);
PERFORM forcepushdown_schema.force_push_3(y+1) LIMIT 1;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_3(z int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'force_push_1(int)', 'x',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function(
'force_push_2(int)', 'y',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function(
'force_push_3(int)', 'z',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
TRUNCATE TABLE testnested_table;
BEGIN;
-- All local inserts
SELECT force_push_1(7);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_2(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_2(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_1(integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
BEGIN;
-- Local(shard-keys 13, 15) + remote insert (shard-key 14)
SELECT force_push_1(13);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)"
PL/pgSQL function forcepushdown_schema.force_push_2(integer) line XX at SQL statement
SQL statement "SELECT forcepushdown_schema.force_push_2(x+1) LIMIT 1"
PL/pgSQL function forcepushdown_schema.force_push_1(integer) line XX at PERFORM
while executing command on localhost:xxxxx
END;
SELECT * FROM testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
(0 rows)
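-- A hedged alternative to the failing pattern above: if the nested calls genuinely
-- need different shard keys, the inner functions can be registered without forced
-- delegation, so their calls are planned as ordinary distributed queries instead of
-- being pinned to the outer call's target node. Sketch only; whether re-registering
-- an existing function this way fits a given setup depends on that setup:
SELECT create_distributed_function(
'force_push_3(int)', 'z',
colocate_with := 'testnested_table',
force_delegation := false
);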
TRUNCATE TABLE testnested_table;
CREATE OR REPLACE FUNCTION force_push_inner(y int)
RETURNS void
AS $$
DECLARE
BEGIN
INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_outer(x int)
RETURNS void
AS $$
DECLARE
BEGIN
PERFORM FROM forcepushdown_schema.force_push_inner(x);
INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
SELECT create_distributed_function(
'force_push_inner(int)', 'y',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_function(
'force_push_outer(int)', 'x',
colocate_with := 'testnested_table',
force_delegation := true
);
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
BEGIN;
SELECT force_push_outer(7);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown
HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false)
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1)"
PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at SQL statement
while executing command on localhost:xxxxx
END;
TABLE testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
(0 rows)
CREATE OR REPLACE FUNCTION force_push_inner(y int)
RETURNS void
AS $$
DECLARE
BEGIN
RAISE NOTICE '%', y;
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
CREATE OR REPLACE FUNCTION force_push_outer(x int)
RETURNS void
AS $$
DECLARE
BEGIN
PERFORM FROM forcepushdown_schema.force_push_inner(x+1);
INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x);
END;
$$ LANGUAGE plpgsql;
DEBUG: switching to sequential query execution mode
DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands
BEGIN;
SELECT force_push_outer(9);
DEBUG: pushing down function call in a multi-statement transaction
DEBUG: pushing down the function call
NOTICE: 10
DETAIL: from localhost:xxxxx
force_push_outer
---------------------------------------------------------------------
(1 row)
END;
TABLE testnested_table ORDER BY 1;
x | y
---------------------------------------------------------------------
9 | 9
(1 row)
RESET client_min_messages; RESET client_min_messages;
SET citus.log_remote_commands TO off; SET citus.log_remote_commands TO off;
DROP SCHEMA forcepushdown_schema CASCADE; DROP SCHEMA forcepushdown_schema CASCADE;
NOTICE: drop cascades to 38 other objects NOTICE: drop cascades to 46 other objects
DETAIL: drop cascades to table test_forcepushdown DETAIL: drop cascades to table test_forcepushdown
drop cascades to table test_forcepushdown_noncolocate drop cascades to table test_forcepushdown_noncolocate
drop cascades to function insert_data(integer) drop cascades to function insert_data(integer)
@ -1460,3 +1980,11 @@ drop cascades to function test_prepare(integer,integer)
drop cascades to function outer_test_prepare(integer,integer) drop cascades to function outer_test_prepare(integer,integer)
drop cascades to table test_perform drop cascades to table test_perform
drop cascades to function test(integer) drop cascades to function test(integer)
drop cascades to table testnested_table
drop cascades to function inner_fn(integer)
drop cascades to function outer_fn(integer,integer)
drop cascades to function force_push_inner(integer)
drop cascades to function force_push_outer(integer)
drop cascades to function force_push_1(integer)
drop cascades to function force_push_2(integer)
drop cascades to function force_push_3(integer)
View File
@ -0,0 +1,321 @@
CREATE SCHEMA function_propagation_schema;
SET search_path TO 'function_propagation_schema';
-- Check whether supported dependencies can be distributed while propagating functions
-- Check types
SET citus.enable_metadata_sync TO OFF;
CREATE TYPE function_prop_type AS (a int, b int);
RESET citus.enable_metadata_sync;
CREATE OR REPLACE FUNCTION func_1(param_1 function_prop_type)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
-- Check that all dependent objects and the function itself are marked as distributed on all nodes
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(schema,{function_propagation_schema},{})
(1 row)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(type,{function_propagation_schema.function_prop_type},{})
(1 row)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (schema,{function_propagation_schema},{})
localhost | 57638 | t | (schema,{function_propagation_schema},{})
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (type,{function_propagation_schema.function_prop_type},{})
localhost | 57638 | t | (type,{function_propagation_schema.function_prop_type},{})
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type})
localhost | 57638 | t | (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type})
(2 rows)
SET citus.enable_metadata_sync TO OFF;
CREATE TYPE function_prop_type_2 AS (a int, b int);
RESET citus.enable_metadata_sync;
CREATE OR REPLACE FUNCTION func_2(param_1 int)
RETURNS function_prop_type_2
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(type,{function_propagation_schema.function_prop_type_2},{})
(1 row)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_2}",{integer})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (type,{function_propagation_schema.function_prop_type_2},{})
localhost | 57638 | t | (type,{function_propagation_schema.function_prop_type_2},{})
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_2}",{integer})
localhost | 57638 | t | (function,"{function_propagation_schema,func_2}",{integer})
(2 rows)
-- Have a separate check for type created in transaction
BEGIN;
CREATE TYPE function_prop_type_3 AS (a int, b int);
COMMIT;
-- Objects referenced only in the function body are not resolved as dependencies
CREATE OR REPLACE FUNCTION func_3(param_1 int)
RETURNS int
LANGUAGE plpgsql AS
$$
DECLARE
internal_param1 function_prop_type_3;
BEGIN
return 1;
END;
$$;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_3'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_3}",{integer})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_3}",{integer})
localhost | 57638 | t | (function,"{function_propagation_schema,func_3}",{integer})
(2 rows)
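-- A hedged illustration (hypothetical function name): dependency resolution walks the
-- function signature, so a type used only inside DECLARE is ignored as shown above,
-- while referencing it in the argument list is what would pull function_prop_type_3
-- into pg_dist_object:
CREATE OR REPLACE FUNCTION func_3_with_signature_dep(param_1 function_prop_type_3)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;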
-- Check table
CREATE TABLE function_prop_table(a int, b int);
-- A non-distributed table is not distributed as a dependency
CREATE OR REPLACE FUNCTION func_4(param_1 function_prop_table)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
WARNING: Citus can't distribute function "func_4" having dependency on non-distributed relation "function_prop_table"
DETAIL: Function will be created only locally
HINT: To distribute function, distribute dependent relations first. Then, re-create the function
CREATE OR REPLACE FUNCTION func_5(param_1 int)
RETURNS function_prop_table
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
WARNING: Citus can't distribute function "func_5" having dependency on non-distributed relation "function_prop_table"
DETAIL: Function will be created only locally
HINT: To distribute function, distribute dependent relations first. Then, re-create the function
-- Functions can be created with distributed table dependency
SELECT create_distributed_table('function_prop_table', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE OR REPLACE FUNCTION func_6(param_1 function_prop_table)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table})
localhost | 57638 | t | (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table})
(2 rows)
-- Views are not supported
CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table;
CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
WARNING: Citus can't distribute functions having dependency on unsupported object of type "view"
DETAIL: Function will be created only locally
CREATE OR REPLACE FUNCTION func_8(param_1 int)
RETURNS function_prop_view
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
WARNING: Citus can't distribute functions having dependency on unsupported object of type "view"
DETAIL: Function will be created only locally
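-- A hedged workaround sketch (hypothetical function name): since a view cannot be
-- resolved as a distributable dependency, typing the parameter with the distributed
-- base table keeps the function distributable while the view stays local-only:
CREATE OR REPLACE FUNCTION func_7_on_base_table(param_1 function_prop_table)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
-- depend on the distributed table directly instead of the view built on top of it
return param_1.a;
END;
$$;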
-- Check within transaction
BEGIN;
CREATE TYPE type_in_transaction AS (a int, b int);
CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
-- Functions created within a transaction are not distributed
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)
COMMIT;
-- Show that recreating it outside a transaction distributes the function and its dependencies
CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(type,{function_propagation_schema.type_in_transaction},{})
(1 row)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (type,{function_propagation_schema.type_in_transaction},{})
localhost | 57638 | t | (type,{function_propagation_schema.type_in_transaction},{})
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction})
localhost | 57638 | t | (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction})
(2 rows)
-- Test for SQL function with unsupported object in function body
CREATE TABLE table_in_sql_body(id int);
CREATE FUNCTION max_of_table()
RETURNS int
LANGUAGE SQL AS
$$
SELECT max(id) FROM table_in_sql_body
$$;
-- Show that only the function has propagated, since the table is not resolved as a dependency
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regclass::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid;
pg_identify_object_as_address
---------------------------------------------------------------------
(function,"{function_propagation_schema,max_of_table}",{})
(1 row)
SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid;$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (function,"{function_propagation_schema,max_of_table}",{})
localhost | 57638 | t | (function,"{function_propagation_schema,max_of_table}",{})
(2 rows)
-- Check extension owned table
CREATE TABLE extension_owned_table(a int);
SELECT run_command_on_workers($$
CREATE TABLE function_propagation_schema.extension_owned_table(a int);
$$
);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE TABLE")
(localhost,57638,t,"CREATE TABLE")
(2 rows)
CREATE EXTENSION seg;
ALTER EXTENSION seg ADD TABLE extension_owned_table;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
SELECT run_command_on_workers($$
ALTER EXTENSION seg ADD TABLE function_propagation_schema.extension_owned_table;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"ALTER EXTENSION")
(localhost,57638,t,"ALTER EXTENSION")
(2 rows)
CREATE OR REPLACE FUNCTION func_for_ext_check(param_1 extension_owned_table)
RETURNS int
LANGUAGE plpgsql AS
$$
BEGIN
return 1;
END;
$$;
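-- Unlike the non-distributed-table case earlier, no warning is raised here. A hedged
-- way to double-check what was recorded for this function is the same pg_dist_object
-- lookup used throughout this file:
SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_for_ext_check'::regproc::oid;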
RESET search_path;
SET client_min_messages TO WARNING;
DROP SCHEMA function_propagation_schema CASCADE;
View File
@ -0,0 +1,83 @@
CREATE SCHEMA global_cancel;
SET search_path TO global_cancel;
SET citus.next_shard_id TO 56789000;
CREATE TABLE dist_table (a INT, b INT);
SELECT create_distributed_table ('dist_table', 'a', shard_count:=4);
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO dist_table VALUES (1, 1);
SELECT global_pid AS coordinator_gpid FROM get_all_active_transactions() WHERE process_id = pg_backend_pid() \gset
SELECT pg_typeof(:coordinator_gpid);
pg_typeof
---------------------------------------------------------------------
bigint
(1 row)
SELECT pg_cancel_backend(:coordinator_gpid);
ERROR: canceling statement due to user request
SET citus.log_remote_commands TO ON;
SELECT pg_cancel_backend(:coordinator_gpid) FROM dist_table WHERE a = 1;
NOTICE: issuing SELECT pg_cancel_backend('xxxxx'::bigint) AS pg_cancel_backend FROM global_cancel.dist_table_56789000 dist_table WHERE (a OPERATOR(pg_catalog.=) 1)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
ERROR: canceling statement due to user request
BEGIN;
SELECT pg_cancel_backend(:coordinator_gpid) FROM dist_table WHERE a = 1;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT pg_cancel_backend('xxxxx'::bigint) AS pg_cancel_backend FROM global_cancel.dist_table_56789000 dist_table WHERE (a OPERATOR(pg_catalog.=) 1)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
ERROR: canceling statement due to user request
END;
SET citus.log_remote_commands TO OFF;
SELECT global_pid AS maintenance_daemon_gpid
FROM pg_stat_activity psa JOIN get_all_active_transactions() gaat ON psa.pid = gaat.process_id
WHERE application_name = 'Citus Maintenance Daemon' \gset
SET client_min_messages TO ERROR;
CREATE USER global_cancel_user;
SELECT 1 FROM run_command_on_workers('CREATE USER global_cancel_user');
?column?
---------------------------------------------------------------------
1
1
(2 rows)
RESET client_min_messages;
SET ROLE global_cancel_user;
SELECT pg_typeof(:maintenance_daemon_gpid);
pg_typeof
---------------------------------------------------------------------
bigint
(1 row)
SELECT pg_cancel_backend(:maintenance_daemon_gpid);
ERROR: must be a superuser to cancel superuser query
CONTEXT: while executing command on localhost:xxxxx
SELECT pg_terminate_backend(:maintenance_daemon_gpid);
ERROR: must be a superuser to terminate superuser process
CONTEXT: while executing command on localhost:xxxxx
RESET ROLE;
SELECT nodeid AS coordinator_node_id FROM pg_dist_node WHERE nodeport = :master_port \gset
SET client_min_messages TO DEBUG;
-- 10000000000 is the node id multiplier for global pid
SELECT pg_cancel_backend(10000000000 * :coordinator_node_id + 0);
DEBUG: PID 0 is not a PostgreSQL server process
DETAIL: from localhost:xxxxx
pg_cancel_backend
---------------------------------------------------------------------
f
(1 row)
SELECT pg_terminate_backend(10000000000 * :coordinator_node_id + 0);
DEBUG: PID 0 is not a PostgreSQL server process
DETAIL: from localhost:xxxxx
pg_terminate_backend
---------------------------------------------------------------------
f
(1 row)
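-- Putting the multiplier to use: a global pid is nodeid * 10000000000 plus the
-- backend's local pid, which is why pid 0 above maps to no real server process.
-- A hedged sketch, where 1234 stands in for a hypothetical backend pid:
SELECT pg_cancel_backend(10000000000 * :coordinator_node_id + 1234);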
RESET client_min_messages;
DROP SCHEMA global_cancel CASCADE;
NOTICE: drop cascades to table dist_table
View File
@ -27,7 +27,7 @@ step s2-update-ref-table:
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1');
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -112,7 +112,7 @@ step s2-update-ref-table:
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1');
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -208,7 +208,7 @@ step s2-update-dist-table:
SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5');
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -304,7 +304,7 @@ step s2-update-ref-table:
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1');
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -400,7 +400,7 @@ step s2-update-ref-table:
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1');
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -501,7 +501,7 @@ run_commands_on_session_level_connection_to_node
(1 row) (1 row)
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement|current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement|current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -590,7 +590,7 @@ step s2-update-ref-table:
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1');
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -691,7 +691,7 @@ run_commands_on_session_level_connection_to_node
(1 row) (1 row)
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement|current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement|current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -785,7 +785,7 @@ run_commands_on_session_level_connection_to_node
(1 row) (1 row)
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement|current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement|current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -874,7 +874,7 @@ step s2-update-ref-table:
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1');
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -957,7 +957,7 @@ step s1-alter-table:
ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id);
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1003,7 +1003,7 @@ step s2-update-on-the-coordinator:
UPDATE tt1 SET value_1 = 4; UPDATE tt1 SET value_1 = 4;
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1069,7 +1069,7 @@ step s4-update-dist-table:
SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5');
<waiting ...> <waiting ...>
step s3-select-distributed-waiting-queries: step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1119,3 +1119,134 @@ restore_isolation_tester_func
(1 row) (1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table-id-1 s2-start-session-level-connection s2-update-dist-table-id-1 s3-select-distributed-waiting-queries s1-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
start_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s1-begin-on-worker:
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
run_commands_on_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s1-update-dist-table-id-1:
SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4 WHERE user_id = 1');
run_commands_on_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s2-update-dist-table-id-1:
SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4 WHERE user_id = 1');
<waiting ...>
step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
---------------------------------------------------------------------
UPDATE tt1 SET value_1 = 4 WHERE user_id = 1|UPDATE tt1 SET value_1 = 4 WHERE user_id = 1|localhost |localhost | 57638| 57637
(1 row)
step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
run_commands_on_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s2-update-dist-table-id-1: <... completed>
run_commands_on_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-update-ref-table-from-coordinator s2-start-session-level-connection s2-update-ref-table s3-select-distributed-waiting-queries s1-commit s2-stop-connection
step s1-begin:
BEGIN;
step s1-update-ref-table-from-coordinator:
UPDATE ref_table SET value_1 = 15;
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
start_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s2-update-ref-table:
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1');
<waiting ...>
step s3-select-distributed-waiting-queries:
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%';
blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port
---------------------------------------------------------------------
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1|
UPDATE ref_table SET value_1 = 15;
|localhost |coordinator_host | 57638| 57636
(1 row)
step s1-commit:
COMMIT;
step s2-update-ref-table: <... completed>
run_commands_on_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
step s2-stop-connection:
SELECT stop_session_level_connection_to_node();
stop_session_level_connection_to_node
---------------------------------------------------------------------
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
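-- The filter added throughout this file only hides the helper UDF's own statements
-- from the expected output; when inspecting lock waits by hand, a plain query against
-- the view is usually enough. A minimal sketch:
SELECT blocked_statement, current_statement_in_blocking_process, blocking_node_name, blocking_node_port FROM citus_lock_waits;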
View File
@ -148,7 +148,8 @@ step s1-move-placement-back:
SET client_min_messages to NOTICE; SET client_min_messages to NOTICE;
SHOW log_error_verbosity; SHOW log_error_verbosity;
SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637); SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637);
<waiting ...>
step s1-move-placement-back: <... completed>
log_error_verbosity log_error_verbosity
--------------------------------------------------------------------- ---------------------------------------------------------------------
verbose verbose
View File
@ -101,13 +101,14 @@ step s2-view-worker:
FROM citus_worker_stat_activity FROM citus_worker_stat_activity
WHERE query NOT ILIKE '%pg_prepared_xacts%' AND WHERE query NOT ILIKE '%pg_prepared_xacts%' AND
query NOT ILIKE '%COMMIT%' AND query NOT ILIKE '%COMMIT%' AND
query NOT ILIKE '%dump_local_wait_edges%' query NOT ILIKE '%dump_local_%' AND
query NOT ILIKE '%citus_internal_local_blocked_processes%'
ORDER BY query, query_hostport DESC; ORDER BY query, query_hostport DESC;
query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname
--------------------------------------------------------------------- ---------------------------------------------------------------------
UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression UPDATE public.ref_table_1500777 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression
UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression UPDATE public.ref_table_1500777 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression
(2 rows) (2 rows)
step s2-end: step s2-end:
View File
@ -62,6 +62,7 @@ SET citus.log_local_commands TO ON;
-- returns true if the distribution key filter -- returns true if the distribution key filter
-- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard
-- placement which is local to this node -- placement which is local to this node
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$
DECLARE shard_is_local BOOLEAN := FALSE; DECLARE shard_is_local BOOLEAN := FALSE;
@ -84,6 +85,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R
RETURN shard_is_local; RETURN shard_is_local;
END; END;
$$ LANGUAGE plpgsql; $$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
-- pick some example values that reside on the shards locally and remote -- pick some example values that reside on the shards locally and remote
-- distribution key values of 1,6, 500 and 701 are LOCAL to shards, -- distribution key values of 1,6, 500 and 701 are LOCAL to shards,
-- we'll use these values in the tests -- we'll use these values in the tests
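-- The recurring change in this and the following files wraps test-only helper
-- functions in SET citus.enable_metadata_sync TO OFF ... RESET citus.enable_metadata_sync,
-- so the helpers are created locally and not propagated as distributed objects.
-- A minimal sketch of the wrapper around a hypothetical helper:
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION local_only_helper() RETURNS int AS $$
BEGIN
RETURN 1;
END;
$$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;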
View File
@ -94,6 +94,7 @@ SET search_path TO local_shard_execution;
-- returns true if the distribution key filter -- returns true if the distribution key filter
-- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard
-- placement which is local to this node -- placement which is local to this node
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$
DECLARE shard_is_local BOOLEAN := FALSE; DECLARE shard_is_local BOOLEAN := FALSE;
@ -116,6 +117,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R
RETURN shard_is_local; RETURN shard_is_local;
END; END;
$$ LANGUAGE plpgsql; $$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
-- test case for issue #3556 -- test case for issue #3556
SET citus.log_intermediate_results TO TRUE; SET citus.log_intermediate_results TO TRUE;
SET client_min_messages TO DEBUG1; SET client_min_messages TO DEBUG1;
@ -801,6 +803,7 @@ BEGIN;
ERROR: VACUUM cannot run inside a transaction block ERROR: VACUUM cannot run inside a transaction block
ROLLBACK; ROLLBACK;
-- make sure that functions can use local execution -- make sure that functions can use local execution
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ CREATE OR REPLACE PROCEDURE only_local_execution() AS $$
DECLARE cnt INT; DECLARE cnt INT;
BEGIN BEGIN
@ -896,6 +899,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$
SELECT count(*) INTO cnt FROM distributed_table; SELECT count(*) INTO cnt FROM distributed_table;
END; END;
$$ LANGUAGE plpgsql; $$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
CALL local_execution_followed_by_dist(); CALL local_execution_followed_by_dist();
NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text
CONTEXT: SQL statement "INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29'" CONTEXT: SQL statement "INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29'"
View File
@ -770,6 +770,7 @@ NOTICE: executing the command locally: INSERT INTO local_shard_execution_replic
ERROR: VACUUM cannot run inside a transaction block ERROR: VACUUM cannot run inside a transaction block
ROLLBACK; ROLLBACK;
-- make sure that functions can use local execution -- make sure that functions can use local execution
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ CREATE OR REPLACE PROCEDURE only_local_execution() AS $$
DECLARE cnt INT; DECLARE cnt INT;
BEGIN BEGIN
@ -865,6 +866,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$
SELECT count(*) INTO cnt FROM distributed_table; SELECT count(*) INTO cnt FROM distributed_table;
END; END;
$$ LANGUAGE plpgsql; $$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
CALL local_execution_followed_by_dist(); CALL local_execution_followed_by_dist();
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text
CONTEXT: SQL statement "INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29'" CONTEXT: SQL statement "INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29'"
View File
@ -987,6 +987,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
ERROR: Node with group id 123123123 for shard placement xxxxx does not exist ERROR: Node with group id 123123123 for shard placement xxxxx does not exist
ROLLBACK; ROLLBACK;
-- create a volatile function that returns the local node id -- create a volatile function that returns the local node id
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION get_node_id() CREATE OR REPLACE FUNCTION get_node_id()
RETURNS INT AS $$ RETURNS INT AS $$
DECLARE localGroupId int; DECLARE localGroupId int;
@ -999,6 +1000,7 @@ BEGIN
nodeport = 57637 AND nodename = 'localhost' AND isactive AND nodecluster = 'default'; nodeport = 57637 AND nodename = 'localhost' AND isactive AND nodecluster = 'default';
RETURN localGroupId; RETURN localGroupId;
END; $$ language plpgsql; END; $$ language plpgsql;
RESET citus.enable_metadata_sync;
-- fails because we ingest more placements for the same shards to the same worker node -- fails because we ingest more placements for the same shards to the same worker node
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
View File
@ -7,6 +7,7 @@
-- reference and hash-distributed version of orders, customer and part tables. -- reference and hash-distributed version of orders, customer and part tables.
SET citus.next_shard_id TO 360000; SET citus.next_shard_id TO 360000;
-- this function is dropped in Citus10, added here for tests -- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text, distribution_column text,
distribution_method citus.distribution_type) distribution_method citus.distribution_type)
@ -23,6 +24,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex
RETURNS void RETURNS void
AS 'citus', $$master_create_worker_shards$$ AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT; LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
CREATE TABLE lineitem ( CREATE TABLE lineitem (
l_orderkey bigint not null, l_orderkey bigint not null,
l_partkey integer not null, l_partkey integer not null,
View File
@ -689,17 +689,7 @@ SELECT create_distributed_function('func_custom_param(intpair)');
(1 row)
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param;
$cmd$);
INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_custom_param(function_tests.intpair) RENAME TO func_with_custom_param;
CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE
deparse_and_run_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"ALTER FUNCTION")
(localhost,57638,t,"ALTER FUNCTION")
(2 rows)
-- a function that returns TABLE
CREATE FUNCTION func_returns_table(IN count INT)
RETURNS TABLE (x INT, y INT)
@ -713,17 +703,7 @@ SELECT create_distributed_function('func_returns_table(INT)');
(1 row)
RESET citus.enable_metadata_sync;
SELECT deparse_and_run_on_workers($cmd$
ALTER FUNCTION func_returns_table ROWS 100;
$cmd$);
INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_returns_table(integer) ROWS 100.000000;
CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE
deparse_and_run_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"ALTER FUNCTION")
(localhost,57638,t,"ALTER FUNCTION")
(2 rows)
-- clear objects
SET client_min_messages TO WARNING; -- suppress cascading objects dropping
DROP SCHEMA "CiTuS.TeeN" CASCADE;


@ -220,7 +220,7 @@ SELECT column_to_column_name('pg_dist_node'::regclass,'{FROMEXPR :fromlist ({RAN
ERROR: not a valid column
-- test column_name_to_column with illegal arguments
SELECT column_name_to_column(1204127312,'');
ERROR: could not open relation with OID 1204127312
ERROR: relation does not exist
SELECT column_name_to_column('customers','notacolumn');
ERROR: column "notacolumn" of relation "customers" does not exist
-- make one huge shard and manually inspect shard row


@ -77,6 +77,8 @@ END
$func$ LANGUAGE plpgsql;
CREATE SCHEMA test;
:create_function_test_maintenance_worker
WARNING: Citus can't distribute functions having dependency on unsupported object of type "view"
DETAIL: Function will be created only locally
-- check maintenance daemon is started
SELECT datname, current_database(),
usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
@ -107,6 +109,7 @@ DROP EXTENSION citus;
-- these tests switch between citus versions and call ddl's that require pg_dist_object to be created
SET citus.enable_metadata_sync TO 'false';
SET citus.enable_version_checks TO 'false';
SET columnar.enable_version_checks TO 'false';
CREATE EXTENSION citus VERSION '8.0-1';
ALTER EXTENSION citus UPDATE TO '8.0-2';
ALTER EXTENSION citus UPDATE TO '8.0-3';
@ -757,6 +760,7 @@ SELECT * FROM multi_extension.print_extension_changes();
\set VERBOSITY terse
CREATE TABLE columnar_table(a INT, b INT) USING columnar;
SET citus.enable_version_checks TO ON;
SET columnar.enable_version_checks TO ON;
-- all should throw an error due to version mismatch
VACUUM FULL columnar_table;
ERROR: loaded Citus library version differs from installed extension version
@ -785,6 +789,7 @@ CREATE TABLE new_columnar_table (a int) USING columnar;
ERROR: loaded Citus library version differs from installed extension version
-- do cleanup for the rest of the tests
SET citus.enable_version_checks TO OFF;
SET columnar.enable_version_checks TO OFF;
DROP TABLE columnar_table;
RESET columnar.enable_custom_scan;
\set VERBOSITY default
@ -1007,13 +1012,18 @@ SELECT * FROM multi_extension.print_extension_changes();
| function citus_check_connection_to_node(text,integer) boolean
| function citus_disable_node(text,integer,boolean) void
| function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) void
| function citus_internal_global_blocked_processes() SETOF record
| function citus_internal_local_blocked_processes() SETOF record
| function citus_run_local_command(text) void
| function citus_shard_indexes_on_worker() SETOF record
| function citus_shards_on_worker() SETOF record
| function create_distributed_function(regprocedure,text,text,boolean) void
| function pg_cancel_backend(bigint) boolean
| function pg_terminate_backend(bigint,bigint) boolean
| function worker_create_or_replace_object(text[]) boolean
| function worker_drop_sequence_dependency(text) void
| function worker_drop_shell_table(text) void
(15 rows)
(20 rows)
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
@ -1040,6 +1050,7 @@ ORDER BY 1, 2;
-- see incompatible version errors out
RESET citus.enable_version_checks;
RESET columnar.enable_version_checks;
DROP EXTENSION citus;
CREATE EXTENSION citus VERSION '8.0-1';
ERROR: specified version incompatible with loaded Citus library
@ -1047,8 +1058,10 @@ DETAIL: Loaded library requires 11.0, but 8.0-1 was specified.
HINT: If a newer library is present, restart the database and try the command again.
-- Test non-distributed queries work even in version mismatch
SET citus.enable_version_checks TO 'false';
SET columnar.enable_version_checks TO 'false';
CREATE EXTENSION citus VERSION '8.1-1';
SET citus.enable_version_checks TO 'true';
SET columnar.enable_version_checks TO 'true';
-- Test CREATE TABLE
CREATE TABLE version_mismatch_table(column1 int);
-- Test COPY
@ -1098,15 +1111,18 @@ $function$;
ERROR: cannot change return type of existing function
HINT: Use DROP FUNCTION relation_is_a_known_shard(regclass) first.
SET citus.enable_version_checks TO 'false';
SET columnar.enable_version_checks TO 'false';
-- This will fail because of previous function declaration
ALTER EXTENSION citus UPDATE TO '8.1-1';
NOTICE: version "8.1-1" of extension "citus" is already installed
-- We can DROP problematic function and continue ALTER EXTENSION even when version checks are on
SET citus.enable_version_checks TO 'true';
SET columnar.enable_version_checks TO 'true';
DROP FUNCTION pg_catalog.relation_is_a_known_shard(regclass);
ERROR: cannot drop function relation_is_a_known_shard(regclass) because extension citus requires it
HINT: You can drop extension citus instead.
SET citus.enable_version_checks TO 'false';
SET columnar.enable_version_checks TO 'false';
ALTER EXTENSION citus UPDATE TO '8.1-1';
NOTICE: version "8.1-1" of extension "citus" is already installed
-- Test updating to the latest version without specifying the version number
@ -1119,8 +1135,10 @@ CREATE EXTENSION citus;
\c - - - :worker_1_port
DROP EXTENSION citus;
SET citus.enable_version_checks TO 'false';
SET columnar.enable_version_checks TO 'false';
CREATE EXTENSION citus VERSION '8.0-1';
SET citus.enable_version_checks TO 'true';
SET columnar.enable_version_checks TO 'true';
-- during ALTER EXTENSION, we should invalidate the cache
ALTER EXTENSION citus UPDATE;
-- if cache is invalidated succesfull, this \d should work without any problem
@ -1197,6 +1215,8 @@ HINT: You can manually create a database and its extensions on workers.
CREATE EXTENSION citus;
CREATE SCHEMA test;
:create_function_test_maintenance_worker
WARNING: Citus can't distribute functions having dependency on unsupported object of type "view"
DETAIL: Function will be created only locally
-- see that the daemon started
SELECT datname, current_database(),
usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
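
Throughout the multi_extension hunks above, every existing SET/RESET of citus.enable_version_checks gains a matching SET/RESET of the new columnar.enable_version_checks GUC, so version-mismatch tests disable and re-enable checks for both extensions together. A minimal sketch of that pattern, assuming a Citus build that ships the columnar GUC; the GUC names and version string come from the diff, everything else is illustrative:

-- illustrative only: disable both version checks before installing an old Citus version,
-- then turn them back on so the version-mismatch errors can be exercised
SET citus.enable_version_checks TO 'false';
SET columnar.enable_version_checks TO 'false';
CREATE EXTENSION citus VERSION '8.0-1';
SET citus.enable_version_checks TO 'true';
SET columnar.enable_version_checks TO 'true';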

Some files were not shown because too many files have changed in this diff.