Merge branch 'main' into eag/mvtest-workflow

eag/mvtest-workflow
eaydingol 2025-12-12 16:39:23 +03:00 committed by GitHub
commit dd1620bf81
137 changed files with 7725 additions and 6399 deletions

View File

@@ -113,10 +113,10 @@ FROM base AS uncrustify-builder
 RUN sudo apt update && sudo apt install -y cmake tree
 WORKDIR /uncrustify
-RUN curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.68.1.tar.gz | tar xz
+RUN curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.82.0.tar.gz | tar xz
-WORKDIR /uncrustify/uncrustify-uncrustify-0.68.1/
+WORKDIR /uncrustify/uncrustify-uncrustify-0.82.0/
 RUN mkdir build
-WORKDIR /uncrustify/uncrustify-uncrustify-0.68.1/build/
+WORKDIR /uncrustify/uncrustify-uncrustify-0.82.0/build/
 RUN cmake ..
 RUN MAKEFLAGS="-j $(nproc)" make -s

View File

@@ -30,9 +30,9 @@ jobs:
   fail_test_image_name: "ghcr.io/citusdata/failtester"
   pgupgrade_image_name: "ghcr.io/citusdata/pgupgradetester"
   style_checker_image_name: "ghcr.io/citusdata/stylechecker"
-  style_checker_tools_version: "0.8.18"
+  style_checker_tools_version: "0.8.33"
   sql_snapshot_pg_version: "17.6"
-  image_suffix: "-va20872f"
+  image_suffix: "-ve4d3aa0"
   pg15_version: '{ "major": "15", "full": "15.14" }'
   pg16_version: '{ "major": "16", "full": "16.10" }'
   pg17_version: '{ "major": "17", "full": "17.6" }'

View File

@@ -11,9 +11,9 @@ tool. This tool uses `uncrustify` under the hood.
 ```bash
 # Uncrustify changes the way it formats code every release a bit. To make sure
-# everyone formats consistently we use version 0.68.1:
+# everyone formats consistently we use version 0.82.0:
-curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.68.1.tar.gz | tar xz
+curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.82.0.tar.gz | tar xz
-cd uncrustify-uncrustify-0.68.1/
+cd uncrustify-uncrustify-0.82.0/
 mkdir build
 cd build
 cmake ..

View File

@@ -1330,10 +1330,10 @@ GetHighestUsedAddress(Relation rel)
 Oid
 ColumnarRelationId(Oid relid, RelFileLocator relfilelocator)
 {
-    return OidIsValid(relid) ? relid : RelidByRelfilenumber(RelationTablespace_compat(
-                                                                relfilelocator),
-                                                            RelationPhysicalIdentifierNumber_compat(
-                                                                relfilelocator));
+    return OidIsValid(relid) ? relid : RelidByRelfilenumber(RelationTablespace_compat
+                                                                (relfilelocator),
+                                                            RelationPhysicalIdentifierNumber_compat
+                                                                (relfilelocator));
 }

View File

@@ -758,8 +758,10 @@ SnapshotMightSeeUnflushedStripes(Snapshot snapshot)
         }
         default:
+        {
             return false;
+        }
     }
 }
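Many hunks in this commit repeat the same mechanical pattern, which appears to come from the uncrustify 0.82.0 / style-checker bump seen above: `default:` labels in switch statements get their own brace-enclosed block. A minimal C sketch of the resulting style (illustrative only, not code from the commit; the function name is made up):

```c
/* Illustrative sketch of the braced-case style the reformatting hunks converge on. */
static bool
IsSingleton(int count)
{
	switch (count)
	{
		case 1:
		{
			return true;
		}
		default:
		{
			return false;
		}
	}
}
```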

View File

@@ -547,7 +547,8 @@ ColumnarStorageTruncate(Relation rel, uint64 newDataReservation)
     if (!ColumnarLogicalOffsetIsValid(newDataReservation))
     {
         elog(ERROR,
-             "attempted to truncate relation %d to invalid logical offset: " UINT64_FORMAT,
+             "attempted to truncate relation %d to "
+             "invalid logical offset: " UINT64_FORMAT,
              rel->rd_id, newDataReservation);
     }

View File

@@ -2410,10 +2410,11 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
         }
         default:
+        {
             /* FALL THROUGH */
             break;
         }
+    }
 
     if (columnarOptions != NIL && columnarRangeVar == NULL)
     {

View File

@@ -185,8 +185,8 @@ typedef struct TableConversionState
 static TableConversionReturn * AlterDistributedTable(TableConversionParameters *params);
-static TableConversionReturn * AlterTableSetAccessMethod(
-    TableConversionParameters *params);
+static TableConversionReturn * AlterTableSetAccessMethod(TableConversionParameters *
+                                                         params);
 static TableConversionReturn * ConvertTable(TableConversionState *con);
 static TableConversionReturn * ConvertTableInternal(TableConversionState *con);
 static bool SwitchToSequentialAndLocalExecutionIfShardNameTooLong(char *relationName,
@@ -215,7 +215,7 @@ static char * CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequece
 static void ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid);
 static char * CreateMaterializedViewDDLCommand(Oid matViewOid);
 static char * GetAccessMethodForMatViewIfExists(Oid viewOid);
-static bool WillRecreateForeignKeyToReferenceTable(Oid relationId,
+static bool WillRecreateFKeyToReferenceTable(Oid relationId,
                                                    CascadeToColocatedOption cascadeOption);
 static void WarningsForDroppingForeignKeysWithDistributedTables(Oid relationId);
 static void ErrorIfUnsupportedCascadeObjects(Oid relationId);
@@ -505,7 +505,8 @@ UndistributeTable(TableConversionParameters *params)
     if (!params->bypassTenantCheck && IsTenantSchema(schemaId) &&
         IsCitusTableType(params->relationId, SINGLE_SHARD_DISTRIBUTED))
     {
-        EnsureUndistributeTenantTableSafe(params->relationId,
+        EnsureUndistributeTenantTableSafe(
+            params->relationId,
             TenantOperationNames[TENANT_UNDISTRIBUTE_TABLE]);
     }
@@ -577,7 +578,7 @@ AlterDistributedTable(TableConversionParameters *params)
     TableConversionState *con = CreateTableConversion(params);
     CheckAlterDistributedTableConversionParameters(con);
-    if (WillRecreateForeignKeyToReferenceTable(con->relationId, con->cascadeToColocated))
+    if (WillRecreateFKeyToReferenceTable(con->relationId, con->cascadeToColocated))
     {
         ereport(DEBUG1, (errmsg("setting multi shard modify mode to sequential")));
         SetLocalMultiShardModifyModeToSequential();
@@ -1927,14 +1928,10 @@ GetNonGeneratedStoredColumnNameList(Oid relationId)
     for (int columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++)
     {
         Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
-        if (currentColumn->attisdropped)
-        {
-            /* skip dropped columns */
-            continue;
-        }
-        if (currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED)
+        if (IsDroppedOrGenerated(currentColumn))
         {
+            /* skip dropped or generated columns */
             continue;
         }
@@ -2197,12 +2194,12 @@ GetAccessMethodForMatViewIfExists(Oid viewOid)
 /*
- * WillRecreateForeignKeyToReferenceTable checks if the table of relationId has any foreign
+ * WillRecreateFKeyToReferenceTable checks if the table of relationId has any foreign
  * key to a reference table, if conversion will be cascaded to colocated table this function
  * also checks if any of the colocated tables have a foreign key to a reference table too
  */
 bool
-WillRecreateForeignKeyToReferenceTable(Oid relationId,
+WillRecreateFKeyToReferenceTable(Oid relationId,
                                        CascadeToColocatedOption cascadeOption)
 {
     if (cascadeOption == CASCADE_TO_COLOCATED_NO ||

View File

@@ -522,7 +522,7 @@ ExecuteCascadeOperationForRelationIdList(List *relationIdList,
  * with the flag InTableTypeConversionFunctionCall set to true.
  */
 void
-ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandList)
+ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCmdList)
 {
     bool oldValue = InTableTypeConversionFunctionCall;
     InTableTypeConversionFunctionCall = true;
@@ -531,7 +531,7 @@ ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandL
     PG_TRY();
     {
         char *utilityCommand = NULL;
-        foreach_declared_ptr(utilityCommand, utilityCommandList)
+        foreach_declared_ptr(utilityCommand, utilityCmdList)
         {
             /*
              * CREATE MATERIALIZED VIEW commands need to be parsed/transformed,
@@ -566,10 +566,10 @@ ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandL
  * ExecuteAndLogUtilityCommand function for each of them.
  */
 void
-ExecuteAndLogUtilityCommandList(List *utilityCommandList)
+ExecuteAndLogUtilityCommandList(List *utilityCmdList)
 {
     char *utilityCommand = NULL;
-    foreach_declared_ptr(utilityCommand, utilityCommandList)
+    foreach_declared_ptr(utilityCommand, utilityCmdList)
     {
         ExecuteAndLogUtilityCommand(utilityCommand);
     }

View File

@@ -64,8 +64,8 @@ PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryS
         return NIL;
     }
-    if (ops->qualify && DistOpsValidityState(stmt, ops) ==
-        ShouldQualifyAfterLocalCreation)
+    if (ops->qualify &&
+        DistOpsValidityState(stmt, ops) == ShouldQualifyAfterLocalCreation)
     {
         /* qualify the statement after local creation */
         ops->qualify(stmt);

View File

@@ -175,8 +175,9 @@ static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationD
 static int numeric_typmod_scale(int32 typmod);
 static bool is_valid_numeric_typmod(int32 typmod);
-static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
-                                                        Var *distributionColumn);
+static void DistributionColumnIsGeneratedCheck(TupleDesc relationDesc,
+                                               Var *distributionColumn,
+                                               const char *relationName);
 static bool CanUseExclusiveConnections(Oid relationId, bool localTableEmpty);
 static uint64 DoCopyFromLocalTableIntoShards(Relation distributedRelation,
                                              DestReceiver *copyDest,
@@ -701,7 +702,8 @@ EnsureColocateWithTableIsValid(Oid relationId, char distributionMethod,
      * given table. We should make those checks after local table conversion by acquiring locks to
      * the relation because the distribution column can be modified in that period.
      */
-    Oid distributionColumnType = ColumnTypeIdForRelationColumnName(relationId,
+    Oid distributionColumnType = ColumnTypeIdForRelationColumnName(
+        relationId,
         distributionColumnName);
     text *colocateWithTableNameText = cstring_to_text(colocateWithTableName);
@@ -1107,8 +1109,8 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
                  DistributedTableParams *distributedTableParams)
 {
     if ((tableType == HASH_DISTRIBUTED || tableType == APPEND_DISTRIBUTED ||
-         tableType == RANGE_DISTRIBUTED || tableType == SINGLE_SHARD_DISTRIBUTED) !=
-        (distributedTableParams != NULL))
+         tableType == SINGLE_SHARD_DISTRIBUTED ||
+         tableType == RANGE_DISTRIBUTED) != (distributedTableParams != NULL))
     {
         ereport(ERROR, (errmsg("distributed table params must be provided "
                                "when creating a distributed table and must "
@@ -2103,13 +2105,10 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
     /* verify target relation is not distributed by a generated stored column
      */
-    if (distributionMethod != DISTRIBUTE_BY_NONE &&
-        DistributionColumnUsesGeneratedStoredColumn(relationDesc, distributionColumn))
+    if (distributionMethod != DISTRIBUTE_BY_NONE)
     {
-        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                        errmsg("cannot distribute relation: %s", relationName),
-                        errdetail("Distribution column must not use GENERATED ALWAYS "
-                                  "AS (...) STORED.")));
+        DistributionColumnIsGeneratedCheck(relationDesc, distributionColumn,
+                                           relationName);
     }
     /* verify target relation is not distributed by a column of type numeric with negative scale */
@@ -2829,9 +2828,7 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
         Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
         char *columnName = NameStr(currentColumn->attname);
-        if (currentColumn->attisdropped ||
-            currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
-            )
+        if (IsDroppedOrGenerated(currentColumn))
         {
             continue;
         }
@@ -2893,22 +2890,43 @@ DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
 /*
- * DistributionColumnUsesGeneratedStoredColumn returns whether a given relation uses
- * GENERATED ALWAYS AS (...) STORED on distribution column
+ * DistributionColumnIsGeneratedCheck throws an error if a given relation uses
+ * GENERATED ALWAYS AS (...) STORED | VIRTUAL on distribution column
  */
-static bool
-DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
-                                            Var *distributionColumn)
+static void
+DistributionColumnIsGeneratedCheck(TupleDesc relationDesc,
                                    Var *distributionColumn,
+                                   const char *relationName)
 {
     Form_pg_attribute attributeForm = TupleDescAttr(relationDesc,
                                                     distributionColumn->varattno - 1);
-    if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
+    switch (attributeForm->attgenerated)
     {
-        return true;
+        case ATTRIBUTE_GENERATED_STORED:
+        {
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                            errmsg("cannot distribute relation: %s", relationName),
+                            errdetail("Distribution column must not use GENERATED ALWAYS "
+                                      "AS (...) STORED.")));
+            break;
+        }
+#if PG_VERSION_NUM >= PG_VERSION_18
+        case ATTRIBUTE_GENERATED_VIRTUAL:
+        {
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                            errmsg("cannot distribute relation: %s", relationName),
+                            errdetail("Distribution column must not use GENERATED ALWAYS "
+                                      "AS (...) VIRTUAL.")));
+            break;
+        }
+#endif
+        default:
+        {
+            break;
+        }
     }
-    return false;
 }
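For context on the switch above: `pg_attribute.attgenerated` is a single character, `'\0'` for a plain column, `'s'` (ATTRIBUTE_GENERATED_STORED), and on PostgreSQL 18+ `'v'` (ATTRIBUTE_GENERATED_VIRTUAL). A hedged sketch of that mapping, assuming the usual PostgreSQL backend headers; the helper name `GeneratedKindLabel` is not from the commit:

```c
/*
 * Hypothetical helper, for illustration only: maps pg_attribute.attgenerated
 * to a readable label. The character values mirror the cases handled by
 * DistributionColumnIsGeneratedCheck() in the hunk above.
 */
static const char *
GeneratedKindLabel(char attgenerated)
{
	switch (attgenerated)
	{
		case '\0':
		{
			return "not generated";
		}
		case 's':                   /* ATTRIBUTE_GENERATED_STORED */
		{
			return "generated stored";
		}
		case 'v':                   /* ATTRIBUTE_GENERATED_VIRTUAL, PostgreSQL 18+ */
		{
			return "generated virtual";
		}
		default:
		{
			return "unknown";
		}
	}
}
```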

View File

@@ -2165,9 +2165,11 @@ GetDistributeObjectOps(Node *node)
                 }
                 default:
+                {
                     return &Any_SecLabel;
+                }
             }
         }
         case T_RenameStmt:
         {

View File

@@ -107,9 +107,9 @@ static void DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid
                                                             char *colocateWithTableName,
                                                             const ObjectAddress *
                                                             functionAddress);
-static void DistributeFunctionColocatedWithSingleShardTable(const
-                                                            ObjectAddress *functionAddress,
-                                                            text *colocateWithText);
+static void DistributeFunctionColocatedWithSingleShardTable(const ObjectAddress *
+                                                            functionAddress, text *
+                                                            colocateWithText);
 static void DistributeFunctionColocatedWithReferenceTable(const
                                                           ObjectAddress *functionAddress);
 static List * FilterDistributedFunctions(GrantStmt *grantStmt);
@@ -1896,8 +1896,10 @@ ShouldAddFunctionSignature(FunctionParameterMode mode)
         }
         default:
+        {
             return true;
+        }
     }
 }

View File

@@ -96,6 +96,7 @@ PreprocessGrantStmt(Node *node, const char *queryString,
             {
                 appendStringInfo(&privsString, "%s", priv->priv_name);
             }
+
             /*
              * ALL can only be set alone.
              * And ALL is not added as a keyword in priv_name by parser, but
@@ -108,6 +109,7 @@ PreprocessGrantStmt(Node *node, const char *queryString,
                 /* this is used for column level only */
                 appendStringInfo(&privsString, "ALL");
             }
+
             /*
              * Instead of relying only on the syntax check done by Postgres and
              * adding an assert here, add a default ERROR if ALL is not first
@@ -227,8 +229,8 @@ CollectGrantTableIdList(GrantStmt *grantStmt)
     bool grantOnTableCommand = (grantStmt->targtype == ACL_TARGET_OBJECT &&
                                 grantStmt->objtype == OBJECT_TABLE);
-    bool grantAllTablesOnSchemaCommand = (grantStmt->targtype ==
-                                          ACL_TARGET_ALL_IN_SCHEMA &&
+    bool grantAllTablesOnSchemaCommand = (grantStmt->targtype == ACL_TARGET_ALL_IN_SCHEMA
+                                          &&
                                           grantStmt->objtype == OBJECT_TABLE);
     /* we are only interested in table level grants */

View File

@@ -64,8 +64,8 @@ static int GetNumberOfIndexParameters(IndexStmt *createIndexStatement);
 static bool IndexAlreadyExists(IndexStmt *createIndexStatement);
 static Oid CreateIndexStmtGetIndexId(IndexStmt *createIndexStatement);
 static Oid CreateIndexStmtGetSchemaId(IndexStmt *createIndexStatement);
-static void SwitchToSequentialAndLocalExecutionIfIndexNameTooLong(
-    IndexStmt *createIndexStatement);
+static void SwitchToSequentialAndLocalExecutionIfIndexNameTooLong(IndexStmt *
+                                                                  createIndexStatement);
 static char * GenerateLongestShardPartitionIndexName(IndexStmt *createIndexStatement);
 static char * GenerateDefaultIndexName(IndexStmt *createIndexStatement);
 static List * GenerateIndexParameters(IndexStmt *createIndexStatement);

View File

@@ -350,7 +350,6 @@ static void LogLocalCopyToRelationExecution(uint64 shardId);
 static void LogLocalCopyToFileExecution(uint64 shardId);
 static void ErrorIfMergeInCopy(CopyStmt *copyStatement);
-
 /* exports for SQL callable functions */
 PG_FUNCTION_INFO_V1(citus_text_send_as_jsonb);
@@ -484,9 +483,7 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag)
         Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
         char *columnName = NameStr(currentColumn->attname);
-        if (currentColumn->attisdropped ||
-            currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
-            )
+        if (IsDroppedOrGenerated(currentColumn))
         {
             continue;
         }
@@ -804,9 +801,7 @@ CanUseBinaryCopyFormat(TupleDesc tupleDescription)
     {
         Form_pg_attribute currentColumn = TupleDescAttr(tupleDescription, columnIndex);
-        if (currentColumn->attisdropped ||
-            currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
-            )
+        if (IsDroppedOrGenerated(currentColumn))
         {
             continue;
         }
@@ -1277,8 +1272,10 @@ ConversionPathForTypes(Oid inputType, Oid destType, CopyCoercionData *result)
         }
         default:
+        {
             Assert(false); /* there are no other options for this enum */
         }
+    }
 }
@@ -1316,9 +1313,7 @@ TypeArrayFromTupleDescriptor(TupleDesc tupleDescriptor)
     for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
     {
         Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, columnIndex);
-        if (attr->attisdropped ||
-            attr->attgenerated == ATTRIBUTE_GENERATED_STORED
-            )
+        if (IsDroppedOrGenerated(attr))
         {
             typeArray[columnIndex] = InvalidOid;
         }
@@ -1486,9 +1481,7 @@ AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor,
             value = CoerceColumnValue(value, &columnCoercionPaths[columnIndex]);
         }
-        if (currentColumn->attisdropped ||
-            currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
-            )
+        if (IsDroppedOrGenerated(currentColumn))
         {
             continue;
         }
@@ -1607,9 +1600,7 @@ AvailableColumnCount(TupleDesc tupleDescriptor)
     {
         Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
-        if (!currentColumn->attisdropped &&
-            currentColumn->attgenerated != ATTRIBUTE_GENERATED_STORED
-            )
+        if (!IsDroppedOrGenerated(currentColumn))
         {
             columnCount++;
         }
@@ -2869,8 +2860,8 @@ ErrorIfCopyHasOnErrorLogVerbosity(CopyStmt *copyStatement)
     {
         if (strcmp(option->defname, "on_error") == 0)
         {
-            ereport(ERROR, (errmsg(
-                                "Citus does not support COPY FROM with ON_ERROR option.")));
+            ereport(ERROR, (errmsg("Citus does not support "
+                                   "COPY FROM with ON_ERROR option.")));
         }
         else if (strcmp(option->defname, "log_verbosity") == 0)
         {
@@ -2887,8 +2878,8 @@ ErrorIfCopyHasOnErrorLogVerbosity(CopyStmt *copyStatement)
              */
            if (log_verbosity)
            {
-               ereport(ERROR, (errmsg(
-                                   "Citus does not support COPY FROM with LOG_VERBOSITY option.")));
+               ereport(ERROR, (errmsg("Citus does not support "
+                                      "COPY FROM with LOG_VERBOSITY option.")));
            }
 #endif
     }
@@ -3999,3 +3990,20 @@ UnclaimCopyConnections(List *connectionStateList)
         UnclaimConnection(connectionState->connection);
     }
 }
+
+
+/*
+ * IsDroppedOrGenerated - helper function for determining if an attribute is
+ * dropped or generated. Used by COPY and Citus DDL to skip such columns.
+ */
+inline bool
+IsDroppedOrGenerated(Form_pg_attribute attr)
+{
+    /*
+     * If the "is dropped" flag is true or the generated column flag
+     * is not the default nul character (in which case its value is 's'
+     * for ATTRIBUTE_GENERATED_STORED or possibly 'v' with PG18+ for
+     * ATTRIBUTE_GENERATED_VIRTUAL) then return true.
+     */
+    return attr->attisdropped || (attr->attgenerated != '\0');
+}
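The conversions in this file all reduce to the same caller pattern: iterate a tuple descriptor and skip dropped or generated attributes through the new helper. A minimal sketch of such a caller, assuming the PostgreSQL/Citus headers are in scope; the function name `CountCopyableColumns` is illustrative, not from the commit:

```c
/*
 * Sketch of the caller pattern used throughout this commit: walk the tuple
 * descriptor and skip dropped or generated attributes via IsDroppedOrGenerated().
 */
static int
CountCopyableColumns(TupleDesc tupleDescriptor)
{
	int copyableColumnCount = 0;

	for (int columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++)
	{
		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);

		/* dropped columns and GENERATED (stored, or virtual on PG18+) columns are skipped */
		if (IsDroppedOrGenerated(currentColumn))
		{
			continue;
		}

		copyableColumnCount++;
	}

	return copyableColumnCount;
}
```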

View File

@@ -196,6 +196,27 @@ BuildCreatePublicationStmt(Oid publicationId)
                                             -1);
     createPubStmt->options = lappend(createPubStmt->options, pubViaRootOption);
 
+    /* WITH (publish_generated_columns = ...) option (PG18+) */
+#if PG_VERSION_NUM >= PG_VERSION_18
+    if (publicationForm->pubgencols == 's') /* stored */
+    {
+        DefElem *pubGenColsOption =
+            makeDefElem("publish_generated_columns",
+                        (Node *) makeString("stored"),
+                        -1);
+        createPubStmt->options =
+            lappend(createPubStmt->options, pubGenColsOption);
+    }
+    else if (publicationForm->pubgencols != 'n') /* 'n' = none (default) */
+    {
+        ereport(ERROR,
+                (errmsg("unexpected pubgencols value '%c' for publication %u",
+                        publicationForm->pubgencols, publicationId)));
+    }
+#endif
+
     /* WITH (publish = 'insert, update, delete, truncate') option */
     List *publishList = NIL;
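The new block reads `pg_publication.pubgencols` directly. A hedged sketch of the char-to-option mapping it implements, assuming the PostgreSQL headers are in scope; the helper name `PubGenColsOptionValue` is hypothetical and not part of the commit:

```c
/*
 * Hypothetical helper (not in the commit) capturing the mapping used above:
 * pubgencols 's' deparses to publish_generated_columns = stored, while 'n'
 * (none, the default) emits no option at all. Anything else is treated as
 * unexpected, matching the ereport() in the hunk.
 */
static const char *
PubGenColsOptionValue(char pubgencols)
{
	if (pubgencols == 's')
	{
		return "stored";
	}
	if (pubgencols == 'n')
	{
		return NULL;            /* option omitted for the default */
	}

	elog(ERROR, "unexpected pubgencols value '%c'", pubgencols);
	return NULL;                /* unreachable; keeps compilers quiet */
}
```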

View File

@@ -149,7 +149,7 @@ PreprocessRenameStmt(Node *node, const char *renameCommand,
         }
         default:
+        {
             /*
              * Nodes that are not supported by Citus: we pass-through to the
              * main PostgreSQL executor. Any Citus-supported RenameStmt
@@ -157,6 +157,7 @@ PreprocessRenameStmt(Node *node, const char *renameCommand,
              */
             return NIL;
         }
+    }
     bool isCitusRelation = IsCitusTable(tableRelationId);
     if (!isCitusRelation)

View File

@@ -177,8 +177,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,
     {
         Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
-        if (attributeForm->attisdropped ||
-            attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
+        if (IsDroppedOrGenerated(attributeForm))
         {
             /* skip dropped columns and columns with GENERATED AS ALWAYS expressions */
             continue;
@@ -463,8 +462,8 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString,
     if (IsAnyObjectDistributed(addresses) || SequenceUsedInDistributedTable(address,
                                                                             DEPENDENCY_INTERNAL))
     {
-        ereport(ERROR, (errmsg(
-                            "Altering a distributed sequence is currently not supported.")));
+        ereport(ERROR, (errmsg("Altering a distributed sequence "
+                               "is currently not supported.")));
     }
     /*
@@ -992,8 +991,8 @@ FilterDistributedSequences(GrantStmt *stmt)
 {
     bool grantOnSequenceCommand = (stmt->targtype == ACL_TARGET_OBJECT &&
                                    stmt->objtype == OBJECT_SEQUENCE);
-    bool grantOnAllSequencesInSchemaCommand = (stmt->targtype ==
-                                               ACL_TARGET_ALL_IN_SCHEMA &&
+    bool grantOnAllSequencesInSchemaCommand = (stmt->targtype == ACL_TARGET_ALL_IN_SCHEMA
+                                               &&
                                                stmt->objtype == OBJECT_SEQUENCE);
     /* we are only interested in sequence level grants */
@@ -1034,10 +1033,9 @@ FilterDistributedSequences(GrantStmt *stmt)
          */
         if (list_member_oid(namespaceOidList, namespaceOid))
         {
-            RangeVar *distributedSequence = makeRangeVar(get_namespace_name(
-                                                             namespaceOid),
-                                                         get_rel_name(
-                                                             sequenceAddress->objectId),
+            RangeVar *distributedSequence = makeRangeVar(
+                get_namespace_name(namespaceOid),
+                get_rel_name(sequenceAddress->objectId),
                                                          -1);
             grantSequenceList = lappend(grantSequenceList, distributedSequence);
         }

View File

@@ -237,8 +237,10 @@ AcquireCitusAdvisoryObjectClassLockGetOid(ObjectClass objectClass,
         }
         default:
+        {
             elog(ERROR, "unsupported object class: %d", objectClass);
         }
+    }
 }
@@ -270,6 +272,8 @@ AcquireCitusAdvisoryObjectClassLockCheckPrivileges(ObjectClass objectClass, Oid
         }
         default:
+        {
             elog(ERROR, "unsupported object class: %d", objectClass);
         }
+    }
 }

View File

@@ -81,23 +81,23 @@ static void ErrorIfAttachCitusTableToPgLocalTable(Oid parentRelationId,
                                                   Oid partitionRelationId);
 static bool DeparserSupportsAlterTableAddColumn(AlterTableStmt *alterTableStatement,
                                                 AlterTableCmd *addColumnSubCommand);
-static bool ATDefinesFKeyBetweenPostgresAndCitusLocalOrRef(
-    AlterTableStmt *alterTableStatement);
+static bool ATDefinesFKeyBetweenPostgresAndCitusLocalOrRef(AlterTableStmt *
+                                                           alterTableStatement);
 static bool ShouldMarkConnectedRelationsNotAutoConverted(Oid leftRelationId,
                                                          Oid rightRelationId);
 static bool RelationIdListContainsCitusTableType(List *relationIdList,
                                                  CitusTableType citusTableType);
 static bool RelationIdListContainsPostgresTable(List *relationIdList);
-static void ConvertPostgresLocalTablesToCitusLocalTables(
-    AlterTableStmt *alterTableStatement);
+static void ConvertPostgresLocalTablesToCitusLocalTables(AlterTableStmt *
+                                                         alterTableStatement);
 static bool RangeVarListHasLocalRelationConvertedByUser(List *relationRangeVarList,
                                                         AlterTableStmt *
                                                         alterTableStatement);
 static int CompareRangeVarsByOid(const void *leftElement, const void *rightElement);
-static List * GetAlterTableAddFKeyRightRelationIdList(
-    AlterTableStmt *alterTableStatement);
+static List * GetAlterTableAddFKeyRightRelationIdList(AlterTableStmt *
+                                                      alterTableStatement);
-static List * GetAlterTableAddFKeyRightRelationRangeVarList(
-    AlterTableStmt *alterTableStatement);
+static List * GetAlterTableAddFKeyRightRelationRangeVarList(AlterTableStmt *
+                                                            alterTableStatement);
 static List * GetAlterTableAddFKeyConstraintList(AlterTableStmt *alterTableStatement);
 static List * GetAlterTableCommandFKeyConstraintList(AlterTableCmd *command);
 static List * GetRangeVarListFromFKeyConstraintList(List *fKeyConstraintList);
@@ -1352,6 +1352,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
                                                     constraint);
             }
         }
+
         /*
          * When constraint->indexname is not NULL we are handling an
          * ADD {PRIMARY KEY, UNIQUE} USING INDEX command. In this case
@@ -1532,6 +1533,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
             }
         }
     }
+
     /*
      * We check for ALTER COLUMN .. SET/DROP DEFAULT
      * we should not propagate anything to shards
@@ -2181,8 +2183,10 @@ AlterTableCommandTypeIsTrigger(AlterTableType alterTableType)
         }
         default:
+        {
             return false;
+        }
     }
 }
@@ -2719,6 +2723,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
             }
         }
     }
+
     /*
      * We check for ALTER COLUMN .. SET DEFAULT nextval('user_defined_seq')
      * we should make sure that the type of the column that uses
@@ -2815,6 +2820,7 @@ FixAlterTableStmtIndexNames(AlterTableStmt *alterTableStatement)
         FixPartitionShardIndexNames(relationId, parentIndexOid);
     }
+
     /*
      * If this is an ALTER TABLE .. ATTACH PARTITION command
      * we have wrong index names generated on indexes of shards of
@@ -3425,8 +3431,8 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
             if (commandList->length > 1 ||
                 columnConstraints->length > 1)
             {
-                ereport(ERROR, (errcode(
-                                    ERRCODE_FEATURE_NOT_SUPPORTED),
+                ereport(ERROR,
+                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                 errmsg(
                                     "cannot execute ADD COLUMN .. DEFAULT nextval('..')"
                                     " command with other subcommands/constraints"),
@@ -3440,8 +3446,8 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
              */
            if (!TableEmpty(relationId))
            {
-               ereport(ERROR, (errcode(
-                                   ERRCODE_FEATURE_NOT_SUPPORTED),
+               ereport(ERROR,
+                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                errmsg(
                                    "cannot add a column involving DEFAULT nextval('..') "
                                    "because the table is not empty"),

View File

@@ -1297,7 +1297,8 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
                          "partial failure, potentially leading to an inconsistent "
                          "state.\nIf the problematic command is a CREATE operation, "
                          "consider using the 'IF EXISTS' syntax to drop the object,"
-                         "\nif applicable, and then re-attempt the original command.")));
+                         "\nif applicable, and then re-attempt "
+                         "the original command.")));
     }
     PG_RE_THROW();

View File

@ -48,21 +48,27 @@ typedef struct CitusVacuumParams
#endif #endif
} CitusVacuumParams; } CitusVacuumParams;
/*
* Information we track per VACUUM/ANALYZE target relation.
*/
typedef struct CitusVacuumRelation
{
VacuumRelation *vacuumRelation;
Oid relationId;
} CitusVacuumRelation;
/* Local functions forward declarations for processing distributed table commands */ /* Local functions forward declarations for processing distributed table commands */
static bool IsDistributedVacuumStmt(List *vacuumRelationIdList); static bool IsDistributedVacuumStmt(List *vacuumRelationList);
static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams,
List *vacuumColumnList); List *vacuumColumnList);
static char * DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams); static char * DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams);
static char * DeparseVacuumColumnNames(List *columnNameList); static char * DeparseVacuumColumnNames(List *columnNameList);
static List * VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex); static void ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationList,
static List * ExtractVacuumTargetRels(VacuumStmt *vacuumStmt);
static void ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
CitusVacuumParams vacuumParams); CitusVacuumParams vacuumParams);
static void ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt, static void ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt,
CitusVacuumParams vacuumParams); CitusVacuumParams vacuumParams);
static CitusVacuumParams VacuumStmtParams(VacuumStmt *vacstmt); static CitusVacuumParams VacuumStmtParams(VacuumStmt *vacstmt);
static List * VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams static List * VacuumRelationList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams);
vacuumParams);
/* /*
* PostprocessVacuumStmt processes vacuum statements that may need propagation to * PostprocessVacuumStmt processes vacuum statements that may need propagation to
@ -97,7 +103,7 @@ PostprocessVacuumStmt(Node *node, const char *vacuumCommand)
* when no table is specified propagate the command as it is; * when no table is specified propagate the command as it is;
* otherwise, only propagate when there is at least 1 citus table * otherwise, only propagate when there is at least 1 citus table
*/ */
List *relationIdList = VacuumRelationIdList(vacuumStmt, vacuumParams); List *vacuumRelationList = VacuumRelationList(vacuumStmt, vacuumParams);
if (list_length(vacuumStmt->rels) == 0) if (list_length(vacuumStmt->rels) == 0)
{ {
@ -105,11 +111,11 @@ PostprocessVacuumStmt(Node *node, const char *vacuumCommand)
ExecuteUnqualifiedVacuumTasks(vacuumStmt, vacuumParams); ExecuteUnqualifiedVacuumTasks(vacuumStmt, vacuumParams);
} }
else if (IsDistributedVacuumStmt(relationIdList)) else if (IsDistributedVacuumStmt(vacuumRelationList))
{ {
/* there is at least 1 citus table specified */ /* there is at least 1 citus table specified */
ExecuteVacuumOnDistributedTables(vacuumStmt, relationIdList, ExecuteVacuumOnDistributedTables(vacuumStmt, vacuumRelationList,
vacuumParams); vacuumParams);
} }
@ -120,39 +126,58 @@ PostprocessVacuumStmt(Node *node, const char *vacuumCommand)
/* /*
* VacuumRelationIdList returns the oid of the relations in the given vacuum statement. * VacuumRelationList returns the list of relations in the given vacuum statement,
* along with their resolved Oids (if they can be locked).
*/ */
static List * static List *
VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams) VacuumRelationList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
{ {
LOCKMODE lockMode = (vacuumParams.options & VACOPT_FULL) ? AccessExclusiveLock : LOCKMODE lockMode = (vacuumParams.options & VACOPT_FULL) ? AccessExclusiveLock :
ShareUpdateExclusiveLock; ShareUpdateExclusiveLock;
bool skipLocked = (vacuumParams.options & VACOPT_SKIP_LOCKED); bool skipLocked = (vacuumParams.options & VACOPT_SKIP_LOCKED);
List *vacuumRelationList = ExtractVacuumTargetRels(vacuumStmt); List *relationList = NIL;
List *relationIdList = NIL; VacuumRelation *vacuumRelation = NULL;
foreach_declared_ptr(vacuumRelation, vacuumStmt->rels)
RangeVar *vacuumRelation = NULL;
foreach_declared_ptr(vacuumRelation, vacuumRelationList)
{ {
Oid relationId = InvalidOid;
/* /*
* If skip_locked option is enabled, we are skipping that relation * If skip_locked option is enabled, we are skipping that relation
* if the lock for it is currently not available; else, we get the lock. * if the lock for it is currently not available; otherwise, we get the lock.
*/ */
Oid relationId = RangeVarGetRelidExtended(vacuumRelation, if (vacuumRelation->relation)
{
relationId = RangeVarGetRelidExtended(vacuumRelation->relation,
lockMode, lockMode,
skipLocked ? RVR_SKIP_LOCKED : 0, NULL, skipLocked ? RVR_SKIP_LOCKED : 0, NULL,
NULL); NULL);
}
else if (OidIsValid(vacuumRelation->oid))
{
/* fall back to the Oid directly when provided */
if (!skipLocked || ConditionalLockRelationOid(vacuumRelation->oid, lockMode))
{
if (!skipLocked)
{
LockRelationOid(vacuumRelation->oid, lockMode);
}
relationId = vacuumRelation->oid;
}
}
if (OidIsValid(relationId)) if (OidIsValid(relationId))
{ {
relationIdList = lappend_oid(relationIdList, relationId); CitusVacuumRelation *relation = palloc(sizeof(CitusVacuumRelation));
relation->vacuumRelation = vacuumRelation;
relation->relationId = relationId;
relationList = lappend(relationList, relation);
} }
} }
return relationIdList; return relationList;
} }
@ -161,12 +186,13 @@ VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
* otherwise, it returns false. * otherwise, it returns false.
*/ */
static bool static bool
IsDistributedVacuumStmt(List *vacuumRelationIdList) IsDistributedVacuumStmt(List *vacuumRelationList)
{ {
Oid relationId = InvalidOid; CitusVacuumRelation *vacuumRelation = NULL;
foreach_declared_oid(relationId, vacuumRelationIdList) foreach_declared_ptr(vacuumRelation, vacuumRelationList)
{ {
if (OidIsValid(relationId) && IsCitusTable(relationId)) if (OidIsValid(vacuumRelation->relationId) &&
IsCitusTable(vacuumRelation->relationId))
{ {
return true; return true;
} }
@ -181,24 +207,31 @@ IsDistributedVacuumStmt(List *vacuumRelationIdList)
* if they are citus tables. * if they are citus tables.
*/ */
static void static void
ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList, ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationList,
CitusVacuumParams vacuumParams) CitusVacuumParams vacuumParams)
{ {
int relationIndex = 0; CitusVacuumRelation *vacuumRelationEntry = NULL;
foreach_declared_ptr(vacuumRelationEntry, relationList)
Oid relationId = InvalidOid;
foreach_declared_oid(relationId, relationIdList)
{ {
Oid relationId = vacuumRelationEntry->relationId;
VacuumRelation *vacuumRelation = vacuumRelationEntry->vacuumRelation;
RangeVar *relation = vacuumRelation->relation;
if (relation != NULL && !relation->inh)
{
/* ONLY specified, so don't recurse to shard placements */
continue;
}
if (IsCitusTable(relationId)) if (IsCitusTable(relationId))
{ {
List *vacuumColumnList = VacuumColumnList(vacuumStmt, relationIndex); List *vacuumColumnList = vacuumRelation->va_cols;
List *taskList = VacuumTaskList(relationId, vacuumParams, vacuumColumnList); List *taskList = VacuumTaskList(relationId, vacuumParams, vacuumColumnList);
/* local execution is not implemented for VACUUM commands */ /* local execution is not implemented for VACUUM commands */
bool localExecutionSupported = false; bool localExecutionSupported = false;
ExecuteUtilityTaskList(taskList, localExecutionSupported); ExecuteUtilityTaskList(taskList, localExecutionSupported);
} }
relationIndex++;
} }
} }
@ -484,39 +517,6 @@ DeparseVacuumColumnNames(List *columnNameList)
} }
/*
* VacuumColumnList returns list of columns from relation
* in the vacuum statement at specified relationIndex.
*/
static List *
VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex)
{
VacuumRelation *vacuumRelation = (VacuumRelation *) list_nth(vacuumStmt->rels,
relationIndex);
return vacuumRelation->va_cols;
}
/*
* ExtractVacuumTargetRels returns list of target
* relations from vacuum statement.
*/
static List *
ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
{
List *vacuumList = NIL;
VacuumRelation *vacuumRelation = NULL;
foreach_declared_ptr(vacuumRelation, vacuumStmt->rels)
{
vacuumList = lappend(vacuumList, vacuumRelation->relation);
}
return vacuumList;
}
/* /*
* VacuumStmtParams returns a CitusVacuumParams based on the supplied VacuumStmt. * VacuumStmtParams returns a CitusVacuumParams based on the supplied VacuumStmt.
*/ */

View File

@@ -475,8 +475,8 @@ FindAvailableConnection(dlist_head *connections, uint32 flags)
         if (flags & OUTSIDE_TRANSACTION)
         {
             /* don't return connections that are used in transactions */
-            if (connection->remoteTransaction.transactionState !=
-                REMOTE_TRANS_NOT_STARTED)
+            if (connection->
+                remoteTransaction.transactionState != REMOTE_TRANS_NOT_STARTED)
             {
                 continue;
             }

View File

@@ -191,8 +191,8 @@ static HTAB *ConnectionShardHash;
 static MultiConnection * FindPlacementListConnection(int flags, List *placementAccessList,
                                                      const char *userName);
-static ConnectionPlacementHashEntry * FindOrCreatePlacementEntry(
-    ShardPlacement *placement);
+static ConnectionPlacementHashEntry * FindOrCreatePlacementEntry(ShardPlacement *
                                                                  placement);
 static bool CanUseExistingConnection(uint32 flags, const char *userName,
                                      ConnectionReference *placementConnection);
 static bool ConnectionAccessedDifferentPlacement(MultiConnection *connection,

View File

@@ -675,7 +675,8 @@ SharedConnectionStatsShmemInit(void)
         ConnectionStatsSharedState->sharedConnectionHashTrancheId = LWLockNewTrancheId();
         ConnectionStatsSharedState->sharedConnectionHashTrancheName =
             "Shared Connection Tracking Hash Tranche";
-        LWLockRegisterTranche(ConnectionStatsSharedState->sharedConnectionHashTrancheId,
+        LWLockRegisterTranche(
+            ConnectionStatsSharedState->sharedConnectionHashTrancheId,
                               ConnectionStatsSharedState->sharedConnectionHashTrancheName);
         LWLockInitialize(&ConnectionStatsSharedState->sharedConnectionHashLock,

View File

@@ -471,6 +471,13 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
                 appendStringInfo(&buffer, " GENERATED ALWAYS AS (%s) STORED",
                                  defaultString);
             }
+#if PG_VERSION_NUM >= PG_VERSION_18
+            else if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
+            {
+                appendStringInfo(&buffer, " GENERATED ALWAYS AS (%s) VIRTUAL",
+                                 defaultString);
+            }
+#endif
             else
             {
                 Oid seqOid = GetSequenceOid(tableRelationId, defaultValue->adnum);
@@ -547,6 +554,13 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
             appendStringInfoString(&buffer, "(");
             appendStringInfoString(&buffer, checkString);
             appendStringInfoString(&buffer, ")");
+
+#if PG_VERSION_NUM >= PG_VERSION_18
+            if (!checkConstraint->ccenforced)
+            {
+                appendStringInfoString(&buffer, " NOT ENFORCED");
+            }
+#endif
         }
         /* close create table's outer parentheses */

View File

@@ -28,7 +28,8 @@ static void AppendCreateExtensionStmtOptions(StringInfo buf, List *options);
 static void AppendDropExtensionStmt(StringInfo buf, DropStmt *stmt);
 static void AppendExtensionNameList(StringInfo buf, List *objects);
 static void AppendAlterExtensionSchemaStmt(StringInfo buf,
-                                           AlterObjectSchemaStmt *alterExtensionSchemaStmt);
+                                           AlterObjectSchemaStmt *
+                                           alterExtensionSchemaStmt);
 static void AppendAlterExtensionStmt(StringInfo buf,
                                      AlterExtensionStmt *alterExtensionStmt);

View File

@@ -290,8 +290,10 @@ GetDefElemActionString(DefElemAction action)
         }
         default:
+        {
             return "";
+        }
     }
 }

View File

@@ -118,9 +118,11 @@ ObjectTypeToKeyword(ObjectType objtype)
         }
         default:
+        {
             elog(ERROR, "Unknown object type: %d", objtype);
             return NULL;
+        }
     }
 }

View File

@@ -242,8 +242,8 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
         {
             ereport(ERROR,
                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                     errmsg(
-                         "only simple column references are allowed in CREATE STATISTICS")));
+                     errmsg("only simple column references are allowed "
+                            "in CREATE STATISTICS")));
         }
         const char *columnName = quote_identifier(column->name);

View File

@@ -536,9 +536,11 @@ GeneratedWhenStr(char generatedWhen)
         }
         default:
+        {
             ereport(ERROR, (errmsg("unrecognized generated_when: %d",
                                    generatedWhen)));
         }
+    }
 }
@@ -649,13 +651,18 @@ AppendAlterTableCmdAddColumn(StringInfo buf, AlterTableCmd *alterTableCmd,
     }
     else if (constraint->contype == CONSTR_GENERATED)
     {
-        char attgenerated = 's';
-        appendStringInfo(buf, " GENERATED %s AS (%s) STORED",
+        char attgenerated = ATTRIBUTE_GENERATED_STORED;
+#if PG_VERSION_NUM >= PG_VERSION_18
+        attgenerated = constraint->generated_kind;
+#endif
+        appendStringInfo(buf, " GENERATED %s AS (%s) %s",
                          GeneratedWhenStr(constraint->generated_when),
                          DeparseRawExprForColumnDefault(relationId, typeOid, typmod,
                                                         columnDefinition->colname,
                                                         attgenerated,
-                                                        constraint->raw_expr));
+                                                        constraint->raw_expr),
+                         (attgenerated == ATTRIBUTE_GENERATED_STORED ? "STORED" :
+                          "VIRTUAL"));
     }
     else if (constraint->contype == CONSTR_CHECK ||
              constraint->contype == CONSTR_PRIMARY ||

View File

@ -642,11 +642,11 @@ static DistributedExecution * CreateDistributedExecution(RowModifyLevel modLevel
xactProperties, xactProperties,
List *jobIdList, List *jobIdList,
bool localExecutionSupported); bool localExecutionSupported);
static TransactionProperties DecideTransactionPropertiesForTaskList(RowModifyLevel static TransactionProperties DecideTaskListTransactionProperties(RowModifyLevel
modLevel, modLevel,
List *taskList, List *taskList,
bool bool
exludeFromTransaction); excludeFromTransaction);
static void StartDistributedExecution(DistributedExecution *execution); static void StartDistributedExecution(DistributedExecution *execution);
static void RunLocalExecution(CitusScanState *scanState, DistributedExecution *execution); static void RunLocalExecution(CitusScanState *scanState, DistributedExecution *execution);
static void RunDistributedExecution(DistributedExecution *execution); static void RunDistributedExecution(DistributedExecution *execution);
@ -711,8 +711,8 @@ static void PlacementExecutionReady(TaskPlacementExecution *placementExecution);
static TaskExecutionState TaskExecutionStateMachine(ShardCommandExecution * static TaskExecutionState TaskExecutionStateMachine(ShardCommandExecution *
shardCommandExecution); shardCommandExecution);
static int GetEventSetSize(List *sessionList); static int GetEventSetSize(List *sessionList);
static bool ProcessSessionsWithFailedWaitEventSetOperations( static bool ProcessSessionsWithFailedWaitEventSetOperations(DistributedExecution *
DistributedExecution *execution); execution);
static bool HasIncompleteConnectionEstablishment(DistributedExecution *execution); static bool HasIncompleteConnectionEstablishment(DistributedExecution *execution);
static void RebuildWaitEventSet(DistributedExecution *execution); static void RebuildWaitEventSet(DistributedExecution *execution);
static void RebuildWaitEventSetForSessions(DistributedExecution *execution); static void RebuildWaitEventSetForSessions(DistributedExecution *execution);
@ -842,7 +842,7 @@ AdaptiveExecutor(CitusScanState *scanState)
bool excludeFromXact = false; bool excludeFromXact = false;
TransactionProperties xactProperties = DecideTransactionPropertiesForTaskList( TransactionProperties xactProperties = DecideTaskListTransactionProperties(
distributedPlan->modLevel, taskList, excludeFromXact); distributedPlan->modLevel, taskList, excludeFromXact);
/* /*
@ -941,7 +941,7 @@ ExecuteUtilityTaskList(List *utilityTaskList, bool localExecutionSupported)
modLevel, utilityTaskList, MaxAdaptiveExecutorPoolSize, localExecutionSupported modLevel, utilityTaskList, MaxAdaptiveExecutorPoolSize, localExecutionSupported
); );
executionParams->xactProperties = executionParams->xactProperties =
DecideTransactionPropertiesForTaskList(modLevel, utilityTaskList, false); DecideTaskListTransactionProperties(modLevel, utilityTaskList, false);
executionParams->isUtilityCommand = true; executionParams->isUtilityCommand = true;
return ExecuteTaskListExtended(executionParams); return ExecuteTaskListExtended(executionParams);
@ -963,7 +963,7 @@ ExecuteUtilityTaskListExtended(List *utilityTaskList, int poolSize,
bool excludeFromXact = false; bool excludeFromXact = false;
executionParams->xactProperties = executionParams->xactProperties =
DecideTransactionPropertiesForTaskList(modLevel, utilityTaskList, DecideTaskListTransactionProperties(modLevel, utilityTaskList,
excludeFromXact); excludeFromXact);
executionParams->isUtilityCommand = true; executionParams->isUtilityCommand = true;
@ -984,7 +984,7 @@ ExecuteTaskList(RowModifyLevel modLevel, List *taskList)
); );
bool excludeFromXact = false; bool excludeFromXact = false;
executionParams->xactProperties = DecideTransactionPropertiesForTaskList( executionParams->xactProperties = DecideTaskListTransactionProperties(
modLevel, taskList, excludeFromXact); modLevel, taskList, excludeFromXact);
return ExecuteTaskListExtended(executionParams); return ExecuteTaskListExtended(executionParams);
@ -1010,7 +1010,7 @@ ExecuteTaskListOutsideTransaction(RowModifyLevel modLevel, List *taskList,
modLevel, taskList, targetPoolSize, localExecutionSupported modLevel, taskList, targetPoolSize, localExecutionSupported
); );
executionParams->xactProperties = DecideTransactionPropertiesForTaskList( executionParams->xactProperties = DecideTaskListTransactionProperties(
modLevel, taskList, true); modLevel, taskList, true);
return ExecuteTaskListExtended(executionParams); return ExecuteTaskListExtended(executionParams);
} }
@ -1032,7 +1032,7 @@ CreateDefaultExecutionParams(RowModifyLevel modLevel, List *taskList,
modLevel, taskList, targetPoolSize, localExecutionSupported modLevel, taskList, targetPoolSize, localExecutionSupported
); );
executionParams->xactProperties = DecideTransactionPropertiesForTaskList( executionParams->xactProperties = DecideTaskListTransactionProperties(
modLevel, taskList, false); modLevel, taskList, false);
executionParams->expectResults = expectResults; executionParams->expectResults = expectResults;
executionParams->tupleDestination = tupleDest; executionParams->tupleDestination = tupleDest;
@ -1252,7 +1252,7 @@ CreateDistributedExecution(RowModifyLevel modLevel, List *taskList,
/* /*
* DecideTransactionPropertiesForTaskList decides whether to use remote transaction * DecideTaskListTransactionProperties decides whether to use remote transaction
* blocks, whether to use 2PC for the given task list, and whether to error on any * blocks, whether to use 2PC for the given task list, and whether to error on any
* failure. * failure.
* *
@ -1260,8 +1260,8 @@ CreateDistributedExecution(RowModifyLevel modLevel, List *taskList,
* errorOnAnyFailure, but not the other way around) we keep them in the same place. * errorOnAnyFailure, but not the other way around) we keep them in the same place.
*/ */
static TransactionProperties static TransactionProperties
DecideTransactionPropertiesForTaskList(RowModifyLevel modLevel, List *taskList, bool DecideTaskListTransactionProperties(RowModifyLevel modLevel, List *taskList, bool
exludeFromTransaction) excludeFromTransaction)
{ {
TransactionProperties xactProperties; TransactionProperties xactProperties;
@ -1277,7 +1277,7 @@ DecideTransactionPropertiesForTaskList(RowModifyLevel modLevel, List *taskList,
return xactProperties; return xactProperties;
} }
if (exludeFromTransaction) if (excludeFromTransaction)
{ {
xactProperties.useRemoteTransactionBlocks = TRANSACTION_BLOCKS_DISALLOWED; xactProperties.useRemoteTransactionBlocks = TRANSACTION_BLOCKS_DISALLOWED;
return xactProperties; return xactProperties;
@ -2634,10 +2634,8 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
connectionFlags |= adaptiveConnectionManagementFlag; connectionFlags |= adaptiveConnectionManagementFlag;
/* open a new connection to the worker */ /* open a new connection to the worker */
MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags, MultiConnection *connection = StartNodeUserDatabaseConnection(
workerPool->nodeName, connectionFlags, workerPool->nodeName, workerPool->nodePort, NULL, NULL);
workerPool->nodePort,
NULL, NULL);
if (!connection) if (!connection)
{ {
/* connection can only be NULL for optional connections */ /* connection can only be NULL for optional connections */
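
Editorial note: the hunks above rename DecideTransactionPropertiesForTaskList to DecideTaskListTransactionProperties and fix the misspelled exludeFromTransaction parameter. As an illustration only (not part of this commit), here is a minimal standalone sketch of the decision shape the doc comment describes; apart from TransactionProperties, TRANSACTION_BLOCKS_DISALLOWED and excludeFromTransaction, which appear in the hunks, every type and field name below is an assumed stand-in rather than the real Citus definition.

#include <stdbool.h>

/* Simplified stand-ins; the real structs live in the Citus executor headers. */
typedef enum
{
    TRANSACTION_BLOCKS_ALLOWED,
    TRANSACTION_BLOCKS_DISALLOWED
} RemoteBlockPolicySketch;

typedef struct TransactionPropertiesSketch
{
    RemoteBlockPolicySketch useRemoteTransactionBlocks;
    bool requires2PC;
    bool errorOnAnyFailure;
} TransactionPropertiesSketch;

/*
 * Mirrors the renamed helper's contract: callers that pass
 * excludeFromTransaction = true (such as ExecuteTaskListOutsideTransaction)
 * get tasks that never join the coordinated transaction; otherwise the
 * modification level drives 2PC and error handling.
 */
static TransactionPropertiesSketch
DecideTaskListTransactionPropertiesSketch(bool isModification,
                                          bool excludeFromTransaction)
{
    TransactionPropertiesSketch props = { TRANSACTION_BLOCKS_ALLOWED, false, false };

    if (excludeFromTransaction)
    {
        props.useRemoteTransactionBlocks = TRANSACTION_BLOCKS_DISALLOWED;
        return props;
    }

    if (isModification)
    {
        /* multi-placement modifications want 2PC and strict error handling */
        props.requires2PC = true;
        props.errorOnAnyFailure = true;
    }

    return props;
}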

View File

@ -67,8 +67,8 @@ static void CitusPreExecScan(CitusScanState *scanState);
static bool ModifyJobNeedsEvaluation(Job *workerJob); static bool ModifyJobNeedsEvaluation(Job *workerJob);
static void RegenerateTaskForFasthPathQuery(Job *workerJob); static void RegenerateTaskForFasthPathQuery(Job *workerJob);
static void RegenerateTaskListForInsert(Job *workerJob); static void RegenerateTaskListForInsert(Job *workerJob);
static DistributedPlan * CopyDistributedPlanWithoutCache( static DistributedPlan * CopyDistributedPlanWithoutCache(DistributedPlan *
DistributedPlan *originalDistributedPlan); originalDistributedPlan);
static void CitusEndScan(CustomScanState *node); static void CitusEndScan(CustomScanState *node);
static void CitusReScan(CustomScanState *node); static void CitusReScan(CustomScanState *node);
static void EnsureForceDelegationDistributionKey(Job *job); static void EnsureForceDelegationDistributionKey(Job *job);

View File

@ -69,8 +69,8 @@ static List * WrapTasksForPartitioning(const char *resultIdPrefix,
bool binaryFormat); bool binaryFormat);
static List * ExecutePartitionTaskList(List *partitionTaskList, static List * ExecutePartitionTaskList(List *partitionTaskList,
CitusTableCacheEntry *targetRelation); CitusTableCacheEntry *targetRelation);
static PartitioningTupleDest * CreatePartitioningTupleDest( static PartitioningTupleDest * CreatePartitioningTupleDest(CitusTableCacheEntry *
CitusTableCacheEntry *targetRelation); targetRelation);
static void PartitioningTupleDestPutTuple(TupleDestination *self, Task *task, static void PartitioningTupleDestPutTuple(TupleDestination *self, Task *task,
int placementIndex, int queryNumber, int placementIndex, int queryNumber,
HeapTuple heapTuple, uint64 tupleLibpqSize); HeapTuple heapTuple, uint64 tupleLibpqSize);

View File

@ -66,7 +66,8 @@ static HTAB * ExecutePlanIntoColocatedIntermediateResults(Oid targetRelationId,
List *insertTargetList, List *insertTargetList,
PlannedStmt *selectPlan, PlannedStmt *selectPlan,
EState *executorState, EState *executorState,
char *intermediateResultIdPrefix); char *
intermediateResultIdPrefix);
static int PartitionColumnIndexFromColumnList(Oid relationId, List *columnNameList); static int PartitionColumnIndexFromColumnList(Oid relationId, List *columnNameList);
static void WrapTaskListForProjection(List *taskList, List *projectedTargetEntries); static void WrapTaskListForProjection(List *taskList, List *projectedTargetEntries);

View File

@ -38,11 +38,13 @@ static HTAB * ExecuteMergeSourcePlanIntoColocatedIntermediateResults(Oid targetR
sourceTargetList, sourceTargetList,
PlannedStmt * PlannedStmt *
sourcePlan, sourcePlan,
EState *executorState, EState *
executorState,
char * char *
intermediateResultIdPrefix, intermediateResultIdPrefix,
int int
partitionColumnIndex); partitionColumnIndex)
;
/* /*

View File

@ -180,9 +180,10 @@ static bool FollowExtAndInternalDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition); DependencyDefinition *definition);
static void ApplyAddToDependencyList(ObjectAddressCollector *collector, static void ApplyAddToDependencyList(ObjectAddressCollector *collector,
DependencyDefinition *definition); DependencyDefinition *definition);
static void ApplyAddCitusDependedObjectsToDependencyList( static void ApplyAddCitusDependedObjectsToDependencyList(ObjectAddressCollector *
ObjectAddressCollector *collector, collector,
DependencyDefinition *definition); DependencyDefinition *
definition);
static List * GetViewRuleReferenceDependencyList(Oid relationId); static List * GetViewRuleReferenceDependencyList(Oid relationId);
static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector, static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector,
ObjectAddress target); ObjectAddress target);

View File

@ -338,8 +338,8 @@ ShouldMarkRelationDistributed(Oid relationId)
bool ownedByExtension = IsTableOwnedByExtension(relationId); bool ownedByExtension = IsTableOwnedByExtension(relationId);
bool alreadyDistributed = IsObjectDistributed(relationAddress); bool alreadyDistributed = IsObjectDistributed(relationAddress);
bool hasUnsupportedDependency = bool hasUnsupportedDependency =
DeferErrorIfAnyObjectHasUnsupportedDependency(list_make1(relationAddress)) != DeferErrorIfAnyObjectHasUnsupportedDependency(
NULL; list_make1(relationAddress)) != NULL;
bool hasCircularDependency = bool hasCircularDependency =
DeferErrorIfCircularDependencyExists(relationAddress) != NULL; DeferErrorIfCircularDependencyExists(relationAddress) != NULL;

View File

@ -823,8 +823,8 @@ GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
/* SELECT SUM(worker_partitioned_...) FROM VALUES (...) */ /* SELECT SUM(worker_partitioned_...) FROM VALUES (...) */
char *subqueryForPartitionedShards = char *subqueryForPartitionedShards =
GenerateSizeQueryForRelationNameList(partitionedShardNames, GenerateSizeQueryForRelationNameList(partitionedShardNames,
GetWorkerPartitionedSizeUDFNameBySizeQueryType( GetWorkerPartitionedSizeUDFNameBySizeQueryType
sizeQueryType)); (sizeQueryType));
/* SELECT SUM(pg_..._size) FROM VALUES (...) */ /* SELECT SUM(pg_..._size) FROM VALUES (...) */
char *subqueryForNonPartitionedShards = char *subqueryForNonPartitionedShards =
@ -4266,10 +4266,9 @@ CancelTasksForJob(int64 jobid)
BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(jobid)); BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(jobid));
const bool indexOK = true; const bool indexOK = true;
SysScanDesc scanDescriptor = systable_beginscan(pgDistBackgroundTasks, SysScanDesc scanDescriptor = systable_beginscan(
DistBackgroundTaskJobIdTaskIdIndexId(), pgDistBackgroundTasks, DistBackgroundTaskJobIdTaskIdIndexId(),
indexOK, NULL, indexOK, NULL, lengthof(scanKey), scanKey);
lengthof(scanKey), scanKey);
List *runningTaskPids = NIL; List *runningTaskPids = NIL;
HeapTuple taskTuple = NULL; HeapTuple taskTuple = NULL;

View File

@ -76,7 +76,8 @@ static List * DropTaskList(Oid relationId, char *schemaName, char *relationName,
List *deletableShardIntervalList); List *deletableShardIntervalList);
static void ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement, static void ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement,
const char *shardRelationName, const char *shardRelationName,
const char *dropShardPlacementCommand); const char *
dropShardPlacementCommand);
static char * CreateDropShardPlacementCommand(const char *schemaName, static char * CreateDropShardPlacementCommand(const char *schemaName,
const char *shardRelationName, const char *shardRelationName,
char storageType); char storageType);

View File

@ -78,7 +78,8 @@ static void GatherIndexAndConstraintDefinitionListExcludingReplicaIdentity(Form_
indexForm, indexForm,
List ** List **
indexDDLEventList, indexDDLEventList,
int indexFlags); int
indexFlags);
static Datum WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor); static Datum WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor);
static char * CitusCreateAlterColumnarTableSet(char *qualifiedRelationName, static char * CitusCreateAlterColumnarTableSet(char *qualifiedRelationName,

View File

@ -939,8 +939,8 @@ TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, int nodePo
* because we don't want to open a transaction block on remote nodes as DROP * because we don't want to open a transaction block on remote nodes as DROP
* DATABASE commands cannot be run inside a transaction block. * DATABASE commands cannot be run inside a transaction block.
*/ */
if (ExecuteOptionalRemoteCommand(connection, commandString, NULL) != if (ExecuteOptionalRemoteCommand(
RESPONSE_OKAY) connection, commandString, NULL) != RESPONSE_OKAY)
{ {
executeCommand = false; executeCommand = false;
break; break;

View File

@ -131,14 +131,16 @@ static void UpdateDistributionColumnsForShardGroup(List *colocatedShardList,
uint32 colocationId); uint32 colocationId);
static void InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList, static void InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList,
List *workersForPlacementList); List *workersForPlacementList);
static void CreatePartitioningHierarchyForBlockingSplit( static void CreatePartitioningHierarchyForBlockingSplit(List *
List *shardGroupSplitIntervalListList, shardGroupSplitIntervalListList,
List *workersForPlacementList); List *workersForPlacementList);
static void CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList, static void CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList,
List *workersForPlacementList); List *workersForPlacementList);
static Task * CreateTaskForDDLCommandList(List *ddlCommandList, WorkerNode *workerNode); static Task * CreateTaskForDDLCommandList(List *ddlCommandList, WorkerNode *workerNode);
static StringInfo CreateSplitShardReplicationSetupUDF( static StringInfo CreateSplitShardReplicationSetupUDF(List *
List *sourceColocatedShardIntervalList, List *shardGroupSplitIntervalListList, sourceColocatedShardIntervalList,
List *
shardGroupSplitIntervalListList,
List *destinationWorkerNodesList, List *destinationWorkerNodesList,
DistributionColumnMap * DistributionColumnMap *
distributionColumnOverrides); distributionColumnOverrides);
@ -816,7 +818,7 @@ CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitIntervalListList,
ROW_MODIFY_NONE, ROW_MODIFY_NONE,
ddlTaskExecList, ddlTaskExecList,
MaxAdaptiveExecutorPoolSize, MaxAdaptiveExecutorPoolSize,
NULL /* jobIdList (ignored by API implementation) */); NULL /* jobIdList (ignored by API impl.) */);
} }
@ -883,7 +885,7 @@ DoSplitCopy(WorkerNode *sourceShardNode, List *sourceColocatedShardIntervalList,
ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, splitCopyTaskList, ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, splitCopyTaskList,
MaxAdaptiveExecutorPoolSize, MaxAdaptiveExecutorPoolSize,
NULL /* jobIdList (ignored by API implementation) */); NULL /* jobIdList (ignored by API impl.) */);
} }
@ -1880,8 +1882,9 @@ ExecuteSplitShardReplicationSetupUDF(WorkerNode *sourceWorkerNode,
ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE),
errmsg( errmsg(
"Failed to run worker_split_shard_replication_setup UDF. It should successfully execute " "Failed to run worker_split_shard_replication_setup UDF. "
" for splitting a shard in a non-blocking way. Please retry."))); "It should successfully execute for splitting a shard in "
"a non-blocking way. Please retry.")));
} }
/* Get replication slot information */ /* Get replication slot information */

View File

@ -2064,8 +2064,7 @@ CopyShardsToNode(WorkerNode *sourceNode, WorkerNode *targetNode, List *shardInte
ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, copyTaskList, ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, copyTaskList,
MaxAdaptiveExecutorPoolSize, MaxAdaptiveExecutorPoolSize,
NULL /* jobIdList (ignored by API implementation) */ NULL /* jobIdList (ignored by API impl.) */);
);
} }

View File

@ -471,8 +471,8 @@ WriteLocalTuple(TupleTableSlot *slot, ShardCopyDestReceiver *copyDest)
SetLocalExecutionStatus(LOCAL_EXECUTION_REQUIRED); SetLocalExecutionStatus(LOCAL_EXECUTION_REQUIRED);
bool isBinaryCopy = localCopyOutState->binary; bool isBinaryCopy = localCopyOutState->binary;
bool shouldAddBinaryHeaders = (isBinaryCopy && localCopyOutState->fe_msgbuf->len == bool shouldAddBinaryHeaders = (isBinaryCopy &&
0); localCopyOutState->fe_msgbuf->len == 0);
if (shouldAddBinaryHeaders) if (shouldAddBinaryHeaders)
{ {
AppendCopyBinaryHeaders(localCopyOutState); AppendCopyBinaryHeaders(localCopyOutState);

View File

@ -71,8 +71,8 @@ worker_split_copy(PG_FUNCTION_ARGS)
if (arrayHasNull) if (arrayHasNull)
{ {
ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg( errmsg("pg_catalog.split_copy_info array "
"pg_catalog.split_copy_info array cannot contain null values"))); "cannot contain null values")));
} }
const int slice_ndim = 0; const int slice_ndim = 0;

View File

@ -85,8 +85,8 @@ int PlannerLevel = 0;
static bool ListContainsDistributedTableRTE(List *rangeTableList, static bool ListContainsDistributedTableRTE(List *rangeTableList,
bool *maybeHasForeignDistributedTable); bool *maybeHasForeignDistributedTable);
static PlannedStmt * CreateDistributedPlannedStmt( static PlannedStmt * CreateDistributedPlannedStmt(DistributedPlanningContext *
DistributedPlanningContext *planContext); planContext);
static PlannedStmt * InlineCtesAndCreateDistributedPlannedStmt(uint64 planId, static PlannedStmt * InlineCtesAndCreateDistributedPlannedStmt(uint64 planId,
DistributedPlanningContext DistributedPlanningContext
*planContext); *planContext);
@ -125,12 +125,14 @@ static void AdjustReadIntermediateResultsCostInternal(RelOptInfo *relOptInfo,
Const *resultFormatConst); Const *resultFormatConst);
static List * OuterPlanParamsList(PlannerInfo *root); static List * OuterPlanParamsList(PlannerInfo *root);
static List * CopyPlanParamList(List *originalPlanParamList); static List * CopyPlanParamList(List *originalPlanParamList);
static PlannerRestrictionContext * CreateAndPushPlannerRestrictionContext( static void CreateAndPushPlannerRestrictionContext(DistributedPlanningContext *
FastPathRestrictionContext *fastPathContext); planContext,
FastPathRestrictionContext *
fastPathContext);
static PlannerRestrictionContext * CurrentPlannerRestrictionContext(void); static PlannerRestrictionContext * CurrentPlannerRestrictionContext(void);
static void PopPlannerRestrictionContext(void); static void PopPlannerRestrictionContext(void);
static void ResetPlannerRestrictionContext( static void ResetPlannerRestrictionContext(PlannerRestrictionContext *
PlannerRestrictionContext *plannerRestrictionContext); plannerRestrictionContext);
static PlannedStmt * PlanFastPathDistributedStmt(DistributedPlanningContext *planContext); static PlannedStmt * PlanFastPathDistributedStmt(DistributedPlanningContext *planContext);
static PlannedStmt * PlanDistributedStmt(DistributedPlanningContext *planContext, static PlannedStmt * PlanDistributedStmt(DistributedPlanningContext *planContext,
int rteIdCounter); int rteIdCounter);
@ -245,8 +247,8 @@ distributed_planner(Query *parse,
*/ */
HideCitusDependentObjectsOnQueriesOfPgMetaTables((Node *) parse, NULL); HideCitusDependentObjectsOnQueriesOfPgMetaTables((Node *) parse, NULL);
/* create a restriction context and put it at the end of context list */ /* create a restriction context and put it at the end of our plan context's context list */
planContext.plannerRestrictionContext = CreateAndPushPlannerRestrictionContext( CreateAndPushPlannerRestrictionContext(&planContext,
&fastPathContext); &fastPathContext);
/* /*
@ -281,6 +283,9 @@ distributed_planner(Query *parse,
Assert(saveNestLevel > 0); Assert(saveNestLevel > 0);
AtEOXact_GUC(true, saveNestLevel); AtEOXact_GUC(true, saveNestLevel);
} }
/* Detach the plan context from the current restriction context */
planContext.plannerRestrictionContext->planContext = NULL;
#endif #endif
needsDistributedPlanning = CheckPostPlanDistribution(&planContext, needsDistributedPlanning = CheckPostPlanDistribution(&planContext,
needsDistributedPlanning, needsDistributedPlanning,
@ -2033,6 +2038,32 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
lappend(relationRestrictionContext->relationRestrictionList, relationRestriction); lappend(relationRestrictionContext->relationRestrictionList, relationRestriction);
MemoryContextSwitchTo(oldMemoryContext); MemoryContextSwitchTo(oldMemoryContext);
#if PG_VERSION_NUM >= PG_VERSION_18
if (root->query_level == 1 && plannerRestrictionContext->planContext != NULL)
{
/* We're at the top query with a distributed context; see if Postgres
* has changed the query tree we passed to it in distributed_planner().
* This check was necessitated by PG commit 1e4351a, because in it the
* planner modifies a copy of the passed-in query tree, with the consequence
* that changes are not reflected back to the caller of standard_planner().
*/
Query *query = plannerRestrictionContext->planContext->query;
if (root->parse != query)
{
/*
* The Postgres planner has reconstructed the query tree, so the query
* tree our distributed context passed to standard_planner() is
* updated to track the new query tree.
*/
ereport(DEBUG4, (errmsg(
"Detected query reconstruction by Postgres planner, updating "
"planContext to track it")));
plannerRestrictionContext->planContext->query = root->parse;
}
}
#endif
} }
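
Editorial note on the PG18 hunk above: per the comment being added, PostgreSQL commit 1e4351a has the planner modify a copy of the query tree, so the tree Citus handed to standard_planner() can go stale; the hook therefore re-points the plan context at root->parse when the two differ. A standalone sketch of that pointer-identity check (illustration only; the structs are simplified stand-ins, not PlannerInfo, Query or DistributedPlanningContext):

typedef struct QuerySketch QuerySketch;      /* stands in for Query */

typedef struct PlanContextSketch
{
    QuerySketch *query;                      /* tree passed to standard_planner() */
} PlanContextSketch;

/*
 * If the planner rebuilt the query tree on a copy, follow the new tree so
 * later distributed-planning steps see what the planner actually used.
 */
static void
TrackReconstructedQueryTreeSketch(PlanContextSketch *planContext,
                                  QuerySketch *plannerParse)
{
    if (planContext != NULL && planContext->query != plannerParse)
    {
        planContext->query = plannerParse;
    }
}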
@ -2410,11 +2441,13 @@ CopyPlanParamList(List *originalPlanParamList)
* context with an empty relation restriction context and an empty join and * context with an empty relation restriction context and an empty join and
* a copy of the given fast path restriction context (if present). Finally, * a copy of the given fast path restriction context (if present). Finally,
* the planner restriction context is inserted to the beginning of the * the planner restriction context is inserted to the beginning of the
* global plannerRestrictionContextList and it is returned. * global plannerRestrictionContextList and, in PG18+, given a reference to
* its distributed plan context.
*/ */
static PlannerRestrictionContext * static void
CreateAndPushPlannerRestrictionContext( CreateAndPushPlannerRestrictionContext(DistributedPlanningContext *planContext,
FastPathRestrictionContext *fastPathRestrictionContext) FastPathRestrictionContext *
fastPathRestrictionContext)
{ {
PlannerRestrictionContext *plannerRestrictionContext = PlannerRestrictionContext *plannerRestrictionContext =
palloc0(sizeof(PlannerRestrictionContext)); palloc0(sizeof(PlannerRestrictionContext));
@ -2451,7 +2484,11 @@ CreateAndPushPlannerRestrictionContext(
plannerRestrictionContextList = lcons(plannerRestrictionContext, plannerRestrictionContextList = lcons(plannerRestrictionContext,
plannerRestrictionContextList); plannerRestrictionContextList);
return plannerRestrictionContext; planContext->plannerRestrictionContext = plannerRestrictionContext;
#if PG_VERSION_NUM >= PG_VERSION_18
plannerRestrictionContext->planContext = planContext;
#endif
} }
@ -2512,6 +2549,18 @@ CurrentPlannerRestrictionContext(void)
static void static void
PopPlannerRestrictionContext(void) PopPlannerRestrictionContext(void)
{ {
#if PG_VERSION_NUM >= PG_VERSION_18
/*
* PG18+: Clear the restriction context's planContext pointer; this is done
* by distributed_planner() when popping the context, but in case of error
* during standard_planner() we want to clean up here also.
*/
PlannerRestrictionContext *plannerRestrictionContext =
(PlannerRestrictionContext *) linitial(plannerRestrictionContextList);
plannerRestrictionContext->planContext = NULL;
#endif
plannerRestrictionContextList = list_delete_first(plannerRestrictionContextList); plannerRestrictionContextList = list_delete_first(plannerRestrictionContextList);
} }
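
Editorial note: taken together, the distributed_planner.c hunks above make CreateAndPushPlannerRestrictionContext() store the restriction context into the plan context (instead of returning it) and, on PG18+, keep a back-pointer the other way; distributed_planner() clears that back-pointer when planning ends, and PopPlannerRestrictionContext() clears it again so an error thrown inside standard_planner() cannot leave a dangling pointer. A standalone sketch of that push/pop discipline (illustration only; simplified stand-in types and a plain linked list in place of plannerRestrictionContextList):

#include <stddef.h>

typedef struct PlanCtxSketch PlanCtxSketch;

typedef struct RestrictionCtxSketch
{
    PlanCtxSketch *planContext;              /* PG18+ back-pointer */
    struct RestrictionCtxSketch *next;       /* stands in for the global list */
} RestrictionCtxSketch;

struct PlanCtxSketch
{
    RestrictionCtxSketch *plannerRestrictionContext;
};

static RestrictionCtxSketch *restrictionStackSketch = NULL;

static void
PushRestrictionCtxSketch(PlanCtxSketch *planContext, RestrictionCtxSketch *ctx)
{
    ctx->planContext = planContext;          /* back-pointer lives only while pushed */
    ctx->next = restrictionStackSketch;
    restrictionStackSketch = ctx;
    planContext->plannerRestrictionContext = ctx;
}

static void
PopRestrictionCtxSketch(void)
{
    if (restrictionStackSketch != NULL)
    {
        restrictionStackSketch->planContext = NULL;  /* safe on normal and error paths */
        restrictionStackSketch = restrictionStackSketch->next;
    }
}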

View File

@ -828,10 +828,11 @@ IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId)
Assert(AllowedDistributionColumnValue.isActive); Assert(AllowedDistributionColumnValue.isActive);
Assert(ExecutorLevel > AllowedDistributionColumnValue.executorLevel); Assert(ExecutorLevel > AllowedDistributionColumnValue.executorLevel);
ereport(DEBUG4, errmsg("Comparing saved:%s with Shard key: %s colocationid:%d:%d", ereport(DEBUG4, errmsg(
"Comparing saved:%s with Shard key: %s colocationid:%d:%d",
pretty_format_node_dump( pretty_format_node_dump(
nodeToString( nodeToString(AllowedDistributionColumnValue.
AllowedDistributionColumnValue.distributionColumnValue)), distributionColumnValue)),
pretty_format_node_dump(nodeToString(shardKey)), pretty_format_node_dump(nodeToString(shardKey)),
AllowedDistributionColumnValue.colocationId, colocationId)); AllowedDistributionColumnValue.colocationId, colocationId));

View File

@ -66,7 +66,8 @@ static bool InsertSelectHasRouterSelect(Query *originalQuery,
PlannerRestrictionContext * PlannerRestrictionContext *
plannerRestrictionContext); plannerRestrictionContext);
static Task * RouterModifyTaskForShardInterval(Query *originalQuery, static Task * RouterModifyTaskForShardInterval(Query *originalQuery,
CitusTableCacheEntry *targetTableCacheEntry, CitusTableCacheEntry *
targetTableCacheEntry,
ShardInterval *shardInterval, ShardInterval *shardInterval,
PlannerRestrictionContext * PlannerRestrictionContext *
plannerRestrictionContext, plannerRestrictionContext,
@ -1152,7 +1153,8 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte,
exprTypmod((Node *) newSubqueryTargetEntry->expr), exprTypmod((Node *) newSubqueryTargetEntry->expr),
exprCollation((Node *) newSubqueryTargetEntry->expr), exprCollation((Node *) newSubqueryTargetEntry->expr),
0); 0);
TargetEntry *newInsertTargetEntry = makeTargetEntry((Expr *) newInsertVar, TargetEntry *newInsertTargetEntry = makeTargetEntry(
(Expr *) newInsertVar,
originalAttrNo, originalAttrNo,
oldInsertTargetEntry->resname, oldInsertTargetEntry->resname,
oldInsertTargetEntry->resjunk); oldInsertTargetEntry->resjunk);

View File

@ -67,7 +67,8 @@ static DeferredErrorMessage * MergeQualAndTargetListFunctionsSupported(Oid
Query *query, Query *query,
Node *quals, Node *quals,
List *targetList, List *targetList,
CmdType commandType); CmdType
commandType);
static DistributedPlan * CreateRouterMergePlan(Oid targetRelationId, Query *originalQuery, static DistributedPlan * CreateRouterMergePlan(Oid targetRelationId, Query *originalQuery,
Query *query, Query *query,
@ -574,8 +575,8 @@ IsDistributionColumnInMergeSource(Expr *columnExpression, Query *query, bool
Var *distributionColumn = DistPartitionKey(relationId); Var *distributionColumn = DistPartitionKey(relationId);
/* not all distributed tables have partition column */ /* not all distributed tables have partition column */
if (distributionColumn != NULL && column->varattno == if (distributionColumn != NULL &&
distributionColumn->varattno) column->varattno == distributionColumn->varattno)
{ {
isDistributionColumn = true; isDistributionColumn = true;
} }
@ -1045,7 +1046,8 @@ DeferErrorIfTargetHasFalseClause(Oid targetRelationId,
PlannerRestrictionContext *plannerRestrictionContext) PlannerRestrictionContext *plannerRestrictionContext)
{ {
ListCell *restrictionCell = NULL; ListCell *restrictionCell = NULL;
foreach(restrictionCell, foreach(
restrictionCell,
plannerRestrictionContext->relationRestrictionContext->relationRestrictionList) plannerRestrictionContext->relationRestrictionContext->relationRestrictionList)
{ {
RelationRestriction *relationRestriction = RelationRestriction *relationRestriction =
@ -1078,7 +1080,8 @@ DeferErrorIfTargetHasFalseClause(Oid targetRelationId,
*/ */
static DeferredErrorMessage * static DeferredErrorMessage *
DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList, DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList,
PlannerRestrictionContext *plannerRestrictionContext, PlannerRestrictionContext *
plannerRestrictionContext,
Oid targetRelationId) Oid targetRelationId)
{ {
List *distTablesList = NIL; List *distTablesList = NIL;
@ -1115,8 +1118,8 @@ DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList,
if (list_length(distTablesList) > 0 && list_length(localTablesList) > 0) if (list_length(distTablesList) > 0 && list_length(localTablesList) > 0)
{ {
ereport(DEBUG1, (errmsg( ereport(DEBUG1, (errmsg("A mix of distributed and local table, "
"A mix of distributed and local table, try repartitioning"))); "try repartitioning")));
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"A mix of distributed and citus-local table, " "A mix of distributed and citus-local table, "
"routable query is not possible", NULL, NULL); "routable query is not possible", NULL, NULL);

View File

@ -271,7 +271,8 @@ static void AppendTargetEntryToGroupClause(TargetEntry *targetEntry,
static bool WorkerAggregateWalker(Node *node, static bool WorkerAggregateWalker(Node *node,
WorkerAggregateWalkerContext *walkerContext); WorkerAggregateWalkerContext *walkerContext);
static List * WorkerAggregateExpressionList(Aggref *originalAggregate, static List * WorkerAggregateExpressionList(Aggref *originalAggregate,
WorkerAggregateWalkerContext *walkerContextry); WorkerAggregateWalkerContext *
walkerContextry);
static AggregateType GetAggregateType(Aggref *aggregatExpression); static AggregateType GetAggregateType(Aggref *aggregatExpression);
static Oid AggregateArgumentType(Aggref *aggregate); static Oid AggregateArgumentType(Aggref *aggregate);
static Expr * FirstAggregateArgument(Aggref *aggregate); static Expr * FirstAggregateArgument(Aggref *aggregate);
@ -293,16 +294,17 @@ static Const * MakeIntegerConst(int32 integerValue);
/* Local functions forward declarations for aggregate expression checks */ /* Local functions forward declarations for aggregate expression checks */
static bool HasNonDistributableAggregates(MultiNode *logicalPlanNode); static bool HasNonDistributableAggregates(MultiNode *logicalPlanNode);
static bool CanPushDownExpression(Node *expression, static bool CanPushDownExpression(Node *expression,
const ExtendedOpNodeProperties *extendedOpNodeProperties); const ExtendedOpNodeProperties *
static DeferredErrorMessage * DeferErrorIfHasNonDistributableAggregates( extendedOpNodeProperties);
MultiNode *logicalPlanNode); static DeferredErrorMessage * DeferErrorIfHasNonDistributableAggregates(MultiNode *
static DeferredErrorMessage * DeferErrorIfUnsupportedArrayAggregate( logicalPlanNode);
Aggref *arrayAggregateExpression); static DeferredErrorMessage * DeferErrorIfUnsupportedArrayAggregate(Aggref *
arrayAggregateExpression);
static DeferredErrorMessage * DeferErrorIfUnsupportedJsonAggregate(AggregateType type, static DeferredErrorMessage * DeferErrorIfUnsupportedJsonAggregate(AggregateType type,
Aggref * Aggref *
aggregateExpression); aggregateExpression);
static DeferredErrorMessage * DeferErrorIfUnsupportedAggregateDistinct( static DeferredErrorMessage * DeferErrorIfUnsupportedAggregateDistinct(Aggref *
Aggref *aggregateExpression, aggregateExpression,
MultiNode * MultiNode *
logicalPlanNode); logicalPlanNode);
static Var * AggregateDistinctColumn(Aggref *aggregateExpression); static Var * AggregateDistinctColumn(Aggref *aggregateExpression);
@ -322,8 +324,8 @@ static bool HasOrderByAggregate(List *sortClauseList, List *targetList);
static bool HasOrderByNonCommutativeAggregate(List *sortClauseList, List *targetList); static bool HasOrderByNonCommutativeAggregate(List *sortClauseList, List *targetList);
static bool HasOrderByComplexExpression(List *sortClauseList, List *targetList); static bool HasOrderByComplexExpression(List *sortClauseList, List *targetList);
static bool HasOrderByHllType(List *sortClauseList, List *targetList); static bool HasOrderByHllType(List *sortClauseList, List *targetList);
static bool ShouldProcessDistinctOrderAndLimitForWorker( static bool ShouldProcessDistinctOrderAndLimitForWorker(ExtendedOpNodeProperties *
ExtendedOpNodeProperties *extendedOpNodeProperties, extendedOpNodeProperties,
bool pushingDownOriginalGrouping, bool pushingDownOriginalGrouping,
Node *havingQual); Node *havingQual);
static bool IsIndexInRange(const List *list, int index); static bool IsIndexInRange(const List *list, int index);
@ -5061,8 +5063,8 @@ HasOrderByHllType(List *sortClauseList, List *targetList)
* neither should ProcessLimitOrderByForWorkerQuery. * neither should ProcessLimitOrderByForWorkerQuery.
*/ */
static bool static bool
ShouldProcessDistinctOrderAndLimitForWorker( ShouldProcessDistinctOrderAndLimitForWorker(ExtendedOpNodeProperties *
ExtendedOpNodeProperties *extendedOpNodeProperties, extendedOpNodeProperties,
bool pushingDownOriginalGrouping, bool pushingDownOriginalGrouping,
Node *havingQual) Node *havingQual)
{ {

View File

@ -153,8 +153,8 @@ static String * MakeDummyColumnString(int dummyColumnId);
static List * BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError); static List * BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError);
static List * GroupInsertValuesByShardId(List *insertValuesList); static List * GroupInsertValuesByShardId(List *insertValuesList);
static List * ExtractInsertValuesList(Query *query, Var *partitionColumn); static List * ExtractInsertValuesList(Query *query, Var *partitionColumn);
static DeferredErrorMessage * DeferErrorIfUnsupportedRouterPlannableSelectQuery( static DeferredErrorMessage * DeferErrorIfUnsupportedRouterPlannableSelectQuery(Query *
Query *query); query);
static DeferredErrorMessage * ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree); static DeferredErrorMessage * ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree);
static DeferredErrorMessage * ErrorIfQueryHasCTEWithSearchClause(Query *queryTree); static DeferredErrorMessage * ErrorIfQueryHasCTEWithSearchClause(Query *queryTree);
static bool ContainsSearchClauseWalker(Node *node, void *context); static bool ContainsSearchClauseWalker(Node *node, void *context);
@ -855,7 +855,8 @@ DeferErrorIfUnsupportedLocalTableJoin(List *rangeTableList)
"Modifying local tables with remote local tables is " "Modifying local tables with remote local tables is "
"not supported.", "not supported.",
NULL, NULL,
"Consider wrapping remote local table to a CTE, or subquery"); "Consider wrapping remote local table to a CTE, "
"or subquery");
} }
return NULL; return NULL;
} }
@ -3151,8 +3152,8 @@ TargetShardIntervalForFastPathQuery(Query *query, bool *isMultiShardQuery,
FindShardInterval(inputDistributionKeyValue->constvalue, cache); FindShardInterval(inputDistributionKeyValue->constvalue, cache);
if (cachedShardInterval == NULL) if (cachedShardInterval == NULL)
{ {
ereport(ERROR, (errmsg( ereport(ERROR, (errmsg("could not find shardinterval to which to send "
"could not find shardinterval to which to send the query"))); "the query")));
} }
if (outputPartitionValueConst != NULL) if (outputPartitionValueConst != NULL)

View File

@ -107,8 +107,12 @@ static AttrNumber FindResnoForVarInTargetList(List *targetList, int varno, int v
static bool RelationInfoContainsOnlyRecurringTuples(PlannerInfo *plannerInfo, static bool RelationInfoContainsOnlyRecurringTuples(PlannerInfo *plannerInfo,
Relids relids); Relids relids);
static char * RecurringTypeDescription(RecurringTuplesType recurType); static char * RecurringTypeDescription(RecurringTuplesType recurType);
static DeferredErrorMessage * DeferredErrorIfUnsupportedLateralSubquery( static DeferredErrorMessage * DeferredErrorIfUnsupportedLateralSubquery(PlannerInfo *
PlannerInfo *plannerInfo, Relids recurringRelIds, Relids nonRecurringRelIds); plannerInfo,
Relids
recurringRelIds,
Relids
nonRecurringRelIds);
static bool ContainsLateralSubquery(PlannerInfo *plannerInfo); static bool ContainsLateralSubquery(PlannerInfo *plannerInfo);
static Var * PartitionColumnForPushedDownSubquery(Query *query); static Var * PartitionColumnForPushedDownSubquery(Query *query);
static bool ContainsReferencesToRelids(Query *query, Relids relids, int *foundRelid); static bool ContainsReferencesToRelids(Query *query, Relids relids, int *foundRelid);
@ -790,8 +794,8 @@ FromClauseRecurringTupleType(Query *queryTree)
* such queries have lateral subqueries. * such queries have lateral subqueries.
*/ */
static DeferredErrorMessage * static DeferredErrorMessage *
DeferredErrorIfUnsupportedRecurringTuplesJoin( DeferredErrorIfUnsupportedRecurringTuplesJoin(PlannerRestrictionContext *
PlannerRestrictionContext *plannerRestrictionContext, plannerRestrictionContext,
bool plannerPhase) bool plannerPhase)
{ {
List *joinRestrictionList = List *joinRestrictionList =

View File

@ -161,7 +161,8 @@ static void RecursivelyPlanNonColocatedSubqueriesInWhere(Query *query,
RecursivePlanningContext * RecursivePlanningContext *
recursivePlanningContext); recursivePlanningContext);
static bool RecursivelyPlanRecurringTupleOuterJoinWalker(Node *node, Query *query, static bool RecursivelyPlanRecurringTupleOuterJoinWalker(Node *node, Query *query,
RecursivePlanningContext *context, RecursivePlanningContext *
context,
bool chainedJoin); bool chainedJoin);
static void RecursivelyPlanDistributedJoinNode(Node *node, Query *query, static void RecursivelyPlanDistributedJoinNode(Node *node, Query *query,
RecursivePlanningContext *context); RecursivePlanningContext *context);
@ -207,8 +208,8 @@ static bool CanPushdownRecurringOuterJoinOnOuterRTE(RangeTblEntry *rte);
static bool CanPushdownRecurringOuterJoinOnInnerVar(Var *innervar, RangeTblEntry *rte); static bool CanPushdownRecurringOuterJoinOnInnerVar(Var *innervar, RangeTblEntry *rte);
static bool CanPushdownRecurringOuterJoin(JoinExpr *joinExpr, Query *query); static bool CanPushdownRecurringOuterJoin(JoinExpr *joinExpr, Query *query);
#if PG_VERSION_NUM < PG_VERSION_17 #if PG_VERSION_NUM < PG_VERSION_17
static bool hasPseudoconstantQuals( static bool hasPseudoconstantQuals(RelationRestrictionContext *
RelationRestrictionContext *relationRestrictionContext); relationRestrictionContext);
#endif #endif
/* /*
@ -2192,6 +2193,7 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
subquery->targetList = lappend(subquery->targetList, targetEntry); subquery->targetList = lappend(subquery->targetList, targetEntry);
} }
} }
/* /*
* If tupleDesc is NULL we have 2 different cases: * If tupleDesc is NULL we have 2 different cases:
* *
@ -2241,6 +2243,7 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
columnType = list_nth_oid(rangeTblFunction->funccoltypes, columnType = list_nth_oid(rangeTblFunction->funccoltypes,
targetColumnIndex); targetColumnIndex);
} }
/* use the types in the function definition otherwise */ /* use the types in the function definition otherwise */
else else
{ {
@ -2780,8 +2783,8 @@ CanPushdownRecurringOuterJoinOnInnerVar(Var *innerVar, RangeTblEntry *rte)
} }
/* Check if the inner variable is part of the distribution column */ /* Check if the inner variable is part of the distribution column */
if (cacheEntry->partitionColumn && innerVar->varattno == if (cacheEntry->partitionColumn &&
cacheEntry->partitionColumn->varattno) innerVar->varattno == cacheEntry->partitionColumn->varattno)
{ {
return true; return true;
} }
@ -2921,8 +2924,8 @@ CanPushdownRecurringOuterJoinExtended(JoinExpr *joinExpr, Query *query,
if (JoinTreeContainsLateral(joinExpr->rarg, query->rtable) || JoinTreeContainsLateral( if (JoinTreeContainsLateral(joinExpr->rarg, query->rtable) || JoinTreeContainsLateral(
joinExpr->larg, query->rtable)) joinExpr->larg, query->rtable))
{ {
ereport(DEBUG5, (errmsg( ereport(DEBUG5, (errmsg("Lateral join is not supported for pushdown "
"Lateral join is not supported for pushdown in this path."))); "in this path.")));
return false; return false;
} }
@ -2983,6 +2986,7 @@ CanPushdownRecurringOuterJoinExtended(JoinExpr *joinExpr, Query *query,
return true; return true;
} }
} }
/* the inner table is a subquery, extract the base relation referred in the qual */ /* the inner table is a subquery, extract the base relation referred in the qual */
else if (rte && rte->rtekind == RTE_SUBQUERY) else if (rte && rte->rtekind == RTE_SUBQUERY)
{ {

View File

@ -156,8 +156,9 @@ static bool AllDistributedRelationsInRestrictionContextColocated(
restrictionContext); restrictionContext);
static bool IsNotSafeRestrictionToRecursivelyPlan(Node *node); static bool IsNotSafeRestrictionToRecursivelyPlan(Node *node);
static bool HasPlaceHolderVar(Node *node); static bool HasPlaceHolderVar(Node *node);
static JoinRestrictionContext * FilterJoinRestrictionContext( static JoinRestrictionContext * FilterJoinRestrictionContext(JoinRestrictionContext *
JoinRestrictionContext *joinRestrictionContext, Relids joinRestrictionContext,
Relids
queryRteIdentities); queryRteIdentities);
static bool RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int static bool RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int
rangeTableArrayLength, Relids rangeTableArrayLength, Relids
@ -613,7 +614,8 @@ RestrictionEquivalenceForPartitionKeys(PlannerRestrictionContext *restrictionCon
List *attributeEquivalenceList = GenerateAllAttributeEquivalences(restrictionContext); List *attributeEquivalenceList = GenerateAllAttributeEquivalences(restrictionContext);
return RestrictionEquivalenceForPartitionKeysViaEquivalences(restrictionContext, return RestrictionEquivalenceForPartitionKeysViaEquivalences(
restrictionContext,
attributeEquivalenceList); attributeEquivalenceList);
} }
@ -1160,8 +1162,8 @@ GenerateCommonEquivalence(List *attributeEquivalenceList,
* with a single AttributeEquivalenceClassMember. * with a single AttributeEquivalenceClassMember.
*/ */
static AttributeEquivalenceClass * static AttributeEquivalenceClass *
GenerateEquivalenceClassForRelationRestriction( GenerateEquivalenceClassForRelationRestriction(RelationRestrictionContext *
RelationRestrictionContext *relationRestrictionContext) relationRestrictionContext)
{ {
ListCell *relationRestrictionCell = NULL; ListCell *relationRestrictionCell = NULL;
AttributeEquivalenceClassMember *eqMember = NULL; AttributeEquivalenceClassMember *eqMember = NULL;
@ -2071,8 +2073,8 @@ FindQueryContainingRTEIdentityInternal(Node *node,
* distributed relations in the given relation restrictions list are co-located. * distributed relations in the given relation restrictions list are co-located.
*/ */
static bool static bool
AllDistributedRelationsInRestrictionContextColocated( AllDistributedRelationsInRestrictionContextColocated(RelationRestrictionContext *
RelationRestrictionContext *restrictionContext) restrictionContext)
{ {
RelationRestriction *relationRestriction = NULL; RelationRestriction *relationRestriction = NULL;
List *relationIdList = NIL; List *relationIdList = NIL;

View File

@ -1215,9 +1215,11 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
} }
default: default:
{
Assert(false); Assert(false);
} }
} }
}
prune->hasValidConstraint = true; prune->hasValidConstraint = true;
} }

View File

@ -131,8 +131,8 @@ static void ExecuteRemainingPostLoadTableCommands(List *logicalRepTargetList);
static char * escape_param_str(const char *str); static char * escape_param_str(const char *str);
static XLogRecPtr GetRemoteLSN(MultiConnection *connection, char *command); static XLogRecPtr GetRemoteLSN(MultiConnection *connection, char *command);
static void WaitForMiliseconds(long timeout); static void WaitForMiliseconds(long timeout);
static XLogRecPtr GetSubscriptionPosition( static XLogRecPtr GetSubscriptionPosition(GroupedLogicalRepTargets *
GroupedLogicalRepTargets *groupedLogicalRepTargets); groupedLogicalRepTargets);
static HTAB * CreateShardMovePublicationInfoHash(WorkerNode *targetNode, static HTAB * CreateShardMovePublicationInfoHash(WorkerNode *targetNode,
List *shardIntervals); List *shardIntervals);

View File

@ -210,10 +210,12 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
/* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */ /* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */
default: default:
{
ereport(ERROR, errmsg( ereport(ERROR, errmsg(
"Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE", "Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE",
change->action)); change->action));
} }
}
#else #else
switch (change->action) switch (change->action)
{ {
@ -245,10 +247,12 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
/* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */ /* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */
default: default:
{
ereport(ERROR, errmsg( ereport(ERROR, errmsg(
"Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE", "Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE",
change->action)); change->action));
} }
}
#endif #endif
/* Current replication slot is not responsible for handling the change */ /* Current replication slot is not responsible for handling the change */
@ -318,10 +322,12 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
/* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */ /* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */
default: default:
{
ereport(ERROR, errmsg( ereport(ERROR, errmsg(
"Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE", "Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE",
change->action)); change->action));
} }
}
#else #else
switch (change->action) switch (change->action)
{ {
@ -373,10 +379,12 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
/* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */ /* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */
default: default:
{
ereport(ERROR, errmsg( ereport(ERROR, errmsg(
"Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE", "Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE",
change->action)); change->action));
} }
}
#endif #endif
} }
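
Editorial note: the hunks above (and the Assert-only default arm in the pruning hunk earlier) are formatting-only; each default: arm gains its own brace block with no behaviour change. A tiny standalone example of the resulting switch shape, using made-up action values rather than the real change->action constants:

/* Illustration only; the real callback switches on change->action. */
static const char *
DescribeActionSketch(int action)
{
    switch (action)
    {
        case 1:
        {
            return "INSERT";
        }

        case 2:
        {
            return "UPDATE";
        }

        default:
        {
            /* every arm, including default, gets its own block */
            return "unexpected action";
        }
    }
}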

View File

@ -539,7 +539,8 @@ StatCountersShmemInit(void)
bool sharedBackendStatsSlotArrayAlreadyInit = false; bool sharedBackendStatsSlotArrayAlreadyInit = false;
SharedBackendStatsSlotArray = (BackendStatsSlot *) SharedBackendStatsSlotArray = (BackendStatsSlot *)
ShmemInitStruct("Citus Shared Backend Stats Slot Array", ShmemInitStruct(
"Citus Shared Backend Stats Slot Array",
SharedBackendStatsSlotArrayShmemSize(), SharedBackendStatsSlotArrayShmemSize(),
&sharedBackendStatsSlotArrayAlreadyInit); &sharedBackendStatsSlotArrayAlreadyInit);

View File

@ -34,8 +34,8 @@
#include "distributed/shard_rebalancer.h" #include "distributed/shard_rebalancer.h"
/* static declarations for json conversion */ /* static declarations for json conversion */
static List * JsonArrayToShardPlacementTestInfoList( static List * JsonArrayToShardPlacementTestInfoList(ArrayType *
ArrayType *shardPlacementJsonArrayObject); shardPlacementJsonArrayObject);
static List * JsonArrayToWorkerTestInfoList(ArrayType *workerNodeJsonArrayObject); static List * JsonArrayToWorkerTestInfoList(ArrayType *workerNodeJsonArrayObject);
static bool JsonFieldValueBoolDefault(Datum jsonDocument, const char *key, static bool JsonFieldValueBoolDefault(Datum jsonDocument, const char *key,
bool defaultValue); bool defaultValue);

View File

@ -395,8 +395,8 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode)
DistributedTransactionId *currentTransactionId = DistributedTransactionId *currentTransactionId =
&currentBackendData.transactionId; &currentBackendData.transactionId;
if (currentTransactionId->transactionNumber != if (currentTransactionId->transactionNumber != transactionNode->transactionId.
transactionNode->transactionId.transactionNumber) transactionNumber)
{ {
continue; continue;
} }

View File

@ -634,8 +634,9 @@ SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *workerNodeList
* false. * false.
*/ */
bool bool
SendOptionalCommandListToWorkerOutsideTransactionWithConnection( SendOptionalCommandListToWorkerOutsideTransactionWithConnection(MultiConnection *
MultiConnection *workerConnection, List *commandList) workerConnection, List *
commandList)
{ {
if (PQstatus(workerConnection->pgConn) != CONNECTION_OK) if (PQstatus(workerConnection->pgConn) != CONNECTION_OK)
{ {

View File

@ -376,12 +376,12 @@ ExtractAggregationValues(FunctionCallInfo fcinfo, int argumentIndex,
HeapTupleHeader tupleHeader = HeapTupleHeader tupleHeader =
DatumGetHeapTupleHeader(fcGetArgValue(fcinfo, argumentIndex)); DatumGetHeapTupleHeader(fcGetArgValue(fcinfo, argumentIndex));
if (HeapTupleHeaderGetNatts(tupleHeader) != if (HeapTupleHeaderGetNatts(
aggregationArgumentContext->argumentCount || tupleHeader) != aggregationArgumentContext->argumentCount ||
HeapTupleHeaderGetTypeId(tupleHeader) != HeapTupleHeaderGetTypeId(
aggregationArgumentContext->tupleDesc->tdtypeid || tupleHeader) != aggregationArgumentContext->tupleDesc->tdtypeid ||
HeapTupleHeaderGetTypMod(tupleHeader) != HeapTupleHeaderGetTypMod(
aggregationArgumentContext->tupleDesc->tdtypmod) tupleHeader) != aggregationArgumentContext->tupleDesc->tdtypmod)
{ {
ereport(ERROR, (errmsg("worker_partial_agg_sfunc received " ereport(ERROR, (errmsg("worker_partial_agg_sfunc received "
"incompatible record"))); "incompatible record")));
@ -817,8 +817,8 @@ coord_combine_agg_ffunc(PG_FUNCTION_ARGS)
if (!TypecheckCoordCombineAggReturnType(fcinfo, ffunc, box)) if (!TypecheckCoordCombineAggReturnType(fcinfo, ffunc, box))
{ {
ereport(ERROR, (errmsg( ereport(ERROR, (errmsg("coord_combine_agg_ffunc could not "
"coord_combine_agg_ffunc could not confirm type correctness"))); "confirm type correctness")));
} }
if (ffunc == InvalidOid) if (ffunc == InvalidOid)

View File

@ -88,26 +88,26 @@ static shm_mq_result ConsumeTaskWorkerOutput(shm_mq_handle *responseq, StringInf
bool *hadError); bool *hadError);
static void UpdateDependingTasks(BackgroundTask *task); static void UpdateDependingTasks(BackgroundTask *task);
static int64 CalculateBackoffDelay(int retryCount); static int64 CalculateBackoffDelay(int retryCount);
static bool NewExecutorExceedsCitusLimit( static bool NewExecutorExceedsCitusLimit(QueueMonitorExecutionContext *
QueueMonitorExecutionContext *queueMonitorExecutionContext); queueMonitorExecutionContext);
static bool NewExecutorExceedsPgMaxWorkers(BackgroundWorkerHandle *handle, static bool NewExecutorExceedsPgMaxWorkers(BackgroundWorkerHandle *handle,
QueueMonitorExecutionContext * QueueMonitorExecutionContext *
queueMonitorExecutionContext); queueMonitorExecutionContext);
static bool AssignRunnableTaskToNewExecutor(BackgroundTask *runnableTask, static bool AssignRunnableTaskToNewExecutor(BackgroundTask *runnableTask,
QueueMonitorExecutionContext * QueueMonitorExecutionContext *
queueMonitorExecutionContext); queueMonitorExecutionContext);
static void AssignRunnableTasks( static void AssignRunnableTasks(QueueMonitorExecutionContext *
QueueMonitorExecutionContext *queueMonitorExecutionContext); queueMonitorExecutionContext);
static List * GetRunningTaskEntries(HTAB *currentExecutors); static List * GetRunningTaskEntries(HTAB *currentExecutors);
static shm_mq_result ReadFromExecutorQueue( static shm_mq_result ReadFromExecutorQueue(BackgroundExecutorHashEntry *
BackgroundExecutorHashEntry *backgroundExecutorHashEntry, backgroundExecutorHashEntry,
bool *hadError); bool *hadError);
static void CheckAndResetLastWorkerAllocationFailure( static void CheckAndResetLastWorkerAllocationFailure(QueueMonitorExecutionContext *
QueueMonitorExecutionContext *queueMonitorExecutionContext); queueMonitorExecutionContext);
static TaskExecutionStatus TaskConcurrentCancelCheck( static TaskExecutionStatus TaskConcurrentCancelCheck(TaskExecutionContext *
TaskExecutionContext *taskExecutionContext); taskExecutionContext);
static TaskExecutionStatus ConsumeExecutorQueue( static TaskExecutionStatus ConsumeExecutorQueue(TaskExecutionContext *
TaskExecutionContext *taskExecutionContext); taskExecutionContext);
static void TaskHadError(TaskExecutionContext *taskExecutionContext); static void TaskHadError(TaskExecutionContext *taskExecutionContext);
static void TaskEnded(TaskExecutionContext *taskExecutionContext); static void TaskEnded(TaskExecutionContext *taskExecutionContext);
static void TerminateAllTaskExecutors(HTAB *currentExecutors); static void TerminateAllTaskExecutors(HTAB *currentExecutors);
@ -537,7 +537,8 @@ NewExecutorExceedsPgMaxWorkers(BackgroundWorkerHandle *handle,
*/ */
static bool static bool
AssignRunnableTaskToNewExecutor(BackgroundTask *runnableTask, AssignRunnableTaskToNewExecutor(BackgroundTask *runnableTask,
QueueMonitorExecutionContext *queueMonitorExecutionContext) QueueMonitorExecutionContext *
queueMonitorExecutionContext)
{ {
Assert(runnableTask && runnableTask->status == BACKGROUND_TASK_STATUS_RUNNABLE); Assert(runnableTask && runnableTask->status == BACKGROUND_TASK_STATUS_RUNNABLE);
@ -649,8 +650,8 @@ GetRunningTaskEntries(HTAB *currentExecutors)
* It also resets the failure timestamp. * It also resets the failure timestamp.
*/ */
static void static void
CheckAndResetLastWorkerAllocationFailure( CheckAndResetLastWorkerAllocationFailure(QueueMonitorExecutionContext *
QueueMonitorExecutionContext *queueMonitorExecutionContext) queueMonitorExecutionContext)
{ {
if (queueMonitorExecutionContext->backgroundWorkerFailedStartTime > 0) if (queueMonitorExecutionContext->backgroundWorkerFailedStartTime > 0)
{ {

View File

@ -531,8 +531,8 @@ ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType,
continue; continue;
} }
if (colocationId == INVALID_COLOCATION_ID || colocationId > if (colocationId == INVALID_COLOCATION_ID ||
colocationForm->colocationid) colocationId > colocationForm->colocationid)
{ {
/* /*
* We assign the smallest colocation id among all the matches so that we * We assign the smallest colocation id among all the matches so that we
@ -1051,8 +1051,8 @@ ColocatedShardIntervalList(ShardInterval *shardInterval)
* Since we iterate over co-located tables, shard count of each table should be * Since we iterate over co-located tables, shard count of each table should be
* same and greater than shardIntervalIndex. * same and greater than shardIntervalIndex.
*/ */
Assert(cacheEntry->shardIntervalArrayLength == Assert(cacheEntry->shardIntervalArrayLength == colocatedTableCacheEntry->
colocatedTableCacheEntry->shardIntervalArrayLength); shardIntervalArrayLength);
ShardInterval *colocatedShardInterval = ShardInterval *colocatedShardInterval =
colocatedTableCacheEntry->sortedShardIntervalArray[shardIntervalIndex]; colocatedTableCacheEntry->sortedShardIntervalArray[shardIntervalIndex];
@ -1122,8 +1122,8 @@ ColocatedNonPartitionShardIntervalList(ShardInterval *shardInterval)
* Since we iterate over co-located tables, shard count of each table should be * Since we iterate over co-located tables, shard count of each table should be
* same and greater than shardIntervalIndex. * same and greater than shardIntervalIndex.
*/ */
Assert(cacheEntry->shardIntervalArrayLength == Assert(cacheEntry->shardIntervalArrayLength == colocatedTableCacheEntry->
colocatedTableCacheEntry->shardIntervalArrayLength); shardIntervalArrayLength);
ShardInterval *colocatedShardInterval = ShardInterval *colocatedShardInterval =
colocatedTableCacheEntry->sortedShardIntervalArray[shardIntervalIndex]; colocatedTableCacheEntry->sortedShardIntervalArray[shardIntervalIndex];

View File

@ -81,7 +81,8 @@ static List * GetRelationshipNodesForFKeyConnectedRelations(
static List * GetAllNeighboursList(ForeignConstraintRelationshipNode *relationshipNode); static List * GetAllNeighboursList(ForeignConstraintRelationshipNode *relationshipNode);
static ForeignConstraintRelationshipNode * GetRelationshipNodeForRelationId(Oid static ForeignConstraintRelationshipNode * GetRelationshipNodeForRelationId(Oid
relationId, relationId,
bool *isFound); bool *
isFound);
static void CreateForeignConstraintRelationshipGraph(void); static void CreateForeignConstraintRelationshipGraph(void);
static bool IsForeignConstraintRelationshipGraphValid(void); static bool IsForeignConstraintRelationshipGraphValid(void);
static List * GetNeighbourList(ForeignConstraintRelationshipNode *relationshipNode, static List * GetNeighbourList(ForeignConstraintRelationshipNode *relationshipNode,
@ -177,8 +178,8 @@ ShouldUndistributeCitusLocalTable(Oid relationId)
* to given relation node via a foreign key relationhip graph. * to given relation node via a foreign key relationhip graph.
*/ */
static List * static List *
GetRelationshipNodesForFKeyConnectedRelations( GetRelationshipNodesForFKeyConnectedRelations(ForeignConstraintRelationshipNode *
ForeignConstraintRelationshipNode *relationshipNode) relationshipNode)
{ {
HTAB *oidVisitedMap = CreateSimpleHashSetWithName(Oid, "oid visited hash set"); HTAB *oidVisitedMap = CreateSimpleHashSetWithName(Oid, "oid visited hash set");
@ -566,8 +567,8 @@ PopulateAdjacencyLists(void)
/* we just saw this edge, no need to add it twice */ /* we just saw this edge, no need to add it twice */
if (currentFConstraintRelationshipEdge->referencingRelationOID == if (currentFConstraintRelationshipEdge->referencingRelationOID ==
prevReferencingOid && prevReferencingOid &&
currentFConstraintRelationshipEdge->referencedRelationOID == currentFConstraintRelationshipEdge->referencedRelationOID == prevReferencedOid
prevReferencedOid) )
{ {
continue; continue;
} }

View File

@ -61,8 +61,12 @@ static void CreateFixPartitionShardIndexNames(Oid parentRelationId,
static List * WorkerFixPartitionShardIndexNamesCommandList(uint64 parentShardId, static List * WorkerFixPartitionShardIndexNamesCommandList(uint64 parentShardId,
List *indexIdList, List *indexIdList,
Oid partitionRelationId); Oid partitionRelationId);
static List * WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex( static List * WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex(char *
char *qualifiedParentShardIndexName, Oid parentIndexId, Oid partitionRelationId); qualifiedParentShardIndexName,
Oid
parentIndexId,
Oid
partitionRelationId);
static List * WorkerFixPartitionShardIndexNamesCommandListForPartitionIndex(Oid static List * WorkerFixPartitionShardIndexNamesCommandListForPartitionIndex(Oid
partitionIndexId, partitionIndexId,
char * char *
@ -652,8 +656,10 @@ WorkerFixPartitionShardIndexNamesCommandList(uint64 parentShardId,
* given partition. Otherwise, all the partitions are included. * given partition. Otherwise, all the partitions are included.
*/ */
static List * static List *
WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex( WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex(char *
char *qualifiedParentShardIndexName, Oid parentIndexId, Oid partitionRelationId) qualifiedParentShardIndexName,
Oid parentIndexId, Oid
partitionRelationId)
{ {
List *commandList = NIL; List *commandList = NIL;

View File

@ -29,7 +29,8 @@ extern char * pg_get_serverdef_string(Oid tableRelationId);
extern char * pg_get_sequencedef_string(Oid sequenceRelid); extern char * pg_get_sequencedef_string(Oid sequenceRelid);
extern Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId); extern Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId);
extern char * pg_get_tableschemadef_string(Oid tableRelationId, extern char * pg_get_tableschemadef_string(Oid tableRelationId,
IncludeSequenceDefaults includeSequenceDefaults, IncludeSequenceDefaults
includeSequenceDefaults,
IncludeIdentities includeIdentityDefaults, IncludeIdentities includeIdentityDefaults,
char *accessMethod); char *accessMethod);
extern void EnsureRelationKindSupported(Oid relationId); extern void EnsureRelationKindSupported(Oid relationId);

View File

@ -48,7 +48,8 @@ extern void SwitchToSequentialAndLocalExecutionIfRelationNameTooLong(Oid relatio
extern void SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(Oid extern void SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(Oid
parentRelationId, parentRelationId,
Oid Oid
partitionRelationId); partitionRelationId)
;
/* DistOpsOperationType to be used in DistributeObjectOps */ /* DistOpsOperationType to be used in DistributeObjectOps */
typedef enum DistOpsOperationType typedef enum DistOpsOperationType
@ -560,13 +561,15 @@ extern List * PreprocessAlterSequenceSchemaStmt(Node *node, const char *queryStr
processUtilityContext); processUtilityContext);
extern List * PostprocessAlterSequenceSchemaStmt(Node *node, const char *queryString); extern List * PostprocessAlterSequenceSchemaStmt(Node *node, const char *queryString);
extern List * PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString, extern List * PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext); ProcessUtilityContext
processUtilityContext);
extern List * PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString); extern List * PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString);
extern List * PreprocessAlterSequencePersistenceStmt(Node *node, const char *queryString, extern List * PreprocessAlterSequencePersistenceStmt(Node *node, const char *queryString,
ProcessUtilityContext ProcessUtilityContext
processUtilityContext); processUtilityContext);
extern List * PreprocessSequenceAlterTableStmt(Node *node, const char *queryString, extern List * PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext); ProcessUtilityContext
processUtilityContext);
extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString, extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext); ProcessUtilityContext processUtilityContext);
extern List * SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool extern List * SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool
@ -639,7 +642,8 @@ extern void PrepareAlterTableStmtForConstraint(AlterTableStmt *alterTableStateme
extern List * PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, extern List * PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
ProcessUtilityContext processUtilityContext); ProcessUtilityContext processUtilityContext);
extern List * PreprocessAlterTableMoveAllStmt(Node *node, const char *queryString, extern List * PreprocessAlterTableMoveAllStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext); ProcessUtilityContext
processUtilityContext);
extern List * PreprocessAlterTableSchemaStmt(Node *node, const char *queryString, extern List * PreprocessAlterTableSchemaStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext); ProcessUtilityContext processUtilityContext);
extern void SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStmt, extern void SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStmt,
@ -789,8 +793,8 @@ extern List * PostprocessAlterTriggerDependsStmt(Node *node, const char *querySt
extern List * PreprocessAlterTriggerDependsStmt(Node *node, const char *queryString, extern List * PreprocessAlterTriggerDependsStmt(Node *node, const char *queryString,
ProcessUtilityContext ProcessUtilityContext
processUtilityContext); processUtilityContext);
extern void AlterTriggerDependsEventExtendNames( extern void AlterTriggerDependsEventExtendNames(AlterObjectDependsStmt *
AlterObjectDependsStmt *alterTriggerDependsStmt, alterTriggerDependsStmt,
char *schemaName, uint64 shardId); char *schemaName, uint64 shardId);
extern void ErrorOutForTriggerIfNotSupported(Oid relationId); extern void ErrorOutForTriggerIfNotSupported(Oid relationId);
extern void ErrorIfRelationHasUnsupportedTrigger(Oid relationId); extern void ErrorIfRelationHasUnsupportedTrigger(Oid relationId);
@ -834,8 +838,8 @@ extern bool RelationIdListHasReferenceTable(List *relationIdList);
extern List * GetFKeyCreationCommandsForRelationIdList(List *relationIdList); extern List * GetFKeyCreationCommandsForRelationIdList(List *relationIdList);
extern void DropRelationForeignKeys(Oid relationId, int flags); extern void DropRelationForeignKeys(Oid relationId, int flags);
extern void SetLocalEnableLocalReferenceForeignKeys(bool state); extern void SetLocalEnableLocalReferenceForeignKeys(bool state);
extern void ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI( extern void ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *
List *utilityCommandList); utilityCmdList);
extern void ExecuteAndLogUtilityCommandList(List *ddlCommandList); extern void ExecuteAndLogUtilityCommandList(List *ddlCommandList);
extern void ExecuteAndLogUtilityCommand(const char *commandString); extern void ExecuteAndLogUtilityCommand(const char *commandString);
extern void ExecuteForeignKeyCreateCommandList(List *ddlCommandList, extern void ExecuteForeignKeyCreateCommandList(List *ddlCommandList,
@ -112,5 +112,6 @@ extern void UndistributeDisconnectedCitusLocalTables(void);
extern void NotifyUtilityHookConstraintDropped(void); extern void NotifyUtilityHookConstraintDropped(void);
extern void ResetConstraintDropped(void); extern void ResetConstraintDropped(void);
extern void ExecuteDistributedDDLJob(DDLJob *ddlJob); extern void ExecuteDistributedDDLJob(DDLJob *ddlJob);
extern bool IsDroppedOrGenerated(Form_pg_attribute attr);
#endif /* MULTI_UTILITY_H */ #endif /* MULTI_UTILITY_H */
@ -63,7 +63,8 @@
#define WORKER_APPLY_SHARD_DDL_COMMAND_WITHOUT_SCHEMA \ #define WORKER_APPLY_SHARD_DDL_COMMAND_WITHOUT_SCHEMA \
"SELECT worker_apply_shard_ddl_command (" UINT64_FORMAT ", %s)" "SELECT worker_apply_shard_ddl_command (" UINT64_FORMAT ", %s)"
#define WORKER_APPLY_INTER_SHARD_DDL_COMMAND \ #define WORKER_APPLY_INTER_SHARD_DDL_COMMAND \
"SELECT worker_apply_inter_shard_ddl_command (" UINT64_FORMAT ", %s, " UINT64_FORMAT \ "SELECT worker_apply_inter_shard_ddl_command (" UINT64_FORMAT \
", %s, " UINT64_FORMAT \
", %s, %s)" ", %s, %s)"
#define SHARD_RANGE_QUERY "SELECT min(%s), max(%s) FROM %s" #define SHARD_RANGE_QUERY "SELECT min(%s), max(%s) FROM %s"
#define SHARD_TABLE_SIZE_QUERY "SELECT pg_table_size(%s)" #define SHARD_TABLE_SIZE_QUERY "SELECT pg_table_size(%s)"
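The WORKER_APPLY_INTER_SHARD_DDL_COMMAND template above interleaves two UINT64_FORMAT placeholders (the shard IDs) with three %s placeholders for quoted text arguments. A minimal sketch of how such a template could be expanded with PostgreSQL's StringInfo machinery follows; the helper and variable names are illustrative only and the exact meaning of the text arguments is an assumption, not something this diff states.

```c
#include "postgres.h"
#include "lib/stringinfo.h"
#include "utils/builtins.h"     /* quote_literal_cstr() */

/*
 * Illustrative sketch only: build the command text for the
 * WORKER_APPLY_INTER_SHARD_DDL_COMMAND template. The function and the
 * parameter names are hypothetical; quote_literal_cstr() is used on the
 * assumption that the %s arguments are passed as SQL literals.
 */
static char *
BuildInterShardDdlCommand(uint64 leftShardId, const char *leftSchemaName,
						  uint64 rightShardId, const char *rightSchemaName,
						  const char *ddlCommand)
{
	StringInfo applyCommand = makeStringInfo();

	appendStringInfo(applyCommand, WORKER_APPLY_INTER_SHARD_DDL_COMMAND,
					 leftShardId, quote_literal_cstr(leftSchemaName),
					 rightShardId, quote_literal_cstr(rightSchemaName),
					 quote_literal_cstr(ddlCommand));

	return applyCommand->data;
}
```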
@ -225,7 +226,8 @@ extern uint64 GetNextShardId(void);
extern uint64 GetNextPlacementId(void); extern uint64 GetNextPlacementId(void);
extern Oid ResolveRelationId(text *relationName, bool missingOk); extern Oid ResolveRelationId(text *relationName, bool missingOk);
extern List * GetFullTableCreationCommands(Oid relationId, extern List * GetFullTableCreationCommands(Oid relationId,
IncludeSequenceDefaults includeSequenceDefaults, IncludeSequenceDefaults
includeSequenceDefaults,
IncludeIdentities includeIdentityDefaults, IncludeIdentities includeIdentityDefaults,
bool creatingShellTableOnRemoteNode); bool creatingShellTableOnRemoteNode);
extern List * GetPostLoadTableCreationCommands(Oid relationId, bool includeIndexes, extern List * GetPostLoadTableCreationCommands(Oid relationId, bool includeIndexes,
@ -119,6 +119,7 @@ typedef struct FastPathRestrictionContext
bool delayFastPathPlanning; bool delayFastPathPlanning;
} FastPathRestrictionContext; } FastPathRestrictionContext;
struct DistributedPlanningContext;
typedef struct PlannerRestrictionContext typedef struct PlannerRestrictionContext
{ {
RelationRestrictionContext *relationRestrictionContext; RelationRestrictionContext *relationRestrictionContext;
@ -132,6 +133,18 @@ typedef struct PlannerRestrictionContext
*/ */
FastPathRestrictionContext *fastPathRestrictionContext; FastPathRestrictionContext *fastPathRestrictionContext;
MemoryContext memoryContext; MemoryContext memoryContext;
#if PG_VERSION_NUM >= PG_VERSION_18
/*
* Enable access to the distributed planning context from
* planner hooks called by Postgres. Enables Citus to track
* changes made by Postgres to the query tree (such as
* expansion of virtual columns) and ensure they are reflected
* back to subsequent distributed planning.
*/
struct DistributedPlanningContext *planContext;
#endif
} PlannerRestrictionContext; } PlannerRestrictionContext;
typedef struct RelationShard typedef struct RelationShard

} ExtendedOpNodeProperties; } ExtendedOpNodeProperties;
extern ExtendedOpNodeProperties BuildExtendedOpNodeProperties( extern ExtendedOpNodeProperties BuildExtendedOpNodeProperties(MultiExtendedOp *
MultiExtendedOp *extendedOpNode, bool hasNonDistributableAggregates); extendedOpNode, bool
hasNonDistributableAggregates);
#endif /* EXTENDED_OP_NODE_UTILS_H_ */ #endif /* EXTENDED_OP_NODE_UTILS_H_ */
@ -112,8 +112,8 @@ extern List * PartitionTasklistResults(const char *resultIdPrefix, List *selectT
int partitionColumnIndex, int partitionColumnIndex,
CitusTableCacheEntry *distributionScheme, CitusTableCacheEntry *distributionScheme,
bool binaryFormat); bool binaryFormat);
extern char * QueryStringForFragmentsTransfer( extern char * QueryStringForFragmentsTransfer(NodeToNodeFragmentsTransfer *
NodeToNodeFragmentsTransfer *fragmentsTransfer); fragmentsTransfer);
extern void ShardMinMaxValueArrays(ShardInterval **shardIntervalArray, int shardCount, extern void ShardMinMaxValueArrays(ShardInterval **shardIntervalArray, int shardCount,
Oid intervalTypeId, ArrayType **minValueArray, Oid intervalTypeId, ArrayType **minValueArray,
ArrayType **maxValueArray); ArrayType **maxValueArray);
@ -160,7 +160,7 @@ typedef struct ListCellAndListWrapper
for (int var ## PositionDoNotUse = 0; \ for (int var ## PositionDoNotUse = 0; \
(var ## PositionDoNotUse) < list_length(l) && \ (var ## PositionDoNotUse) < list_length(l) && \
(((var) = list_nth(l, var ## PositionDoNotUse)) || true); \ (((var) = list_nth(l, var ## PositionDoNotUse)) || true); \
var ## PositionDoNotUse ++) var ## PositionDoNotUse++)
/* utility functions declaration shared within this module */ /* utility functions declaration shared within this module */
extern List * SortList(List *pointerList, extern List * SortList(List *pointerList,
@ -210,7 +210,8 @@ extern ShardPlacement * ShardPlacementForFunctionColocatedWithDistTable(
DistObjectCacheEntry *procedure, List *argumentList, Var *partitionColumn, DistObjectCacheEntry *procedure, List *argumentList, Var *partitionColumn,
CitusTableCacheEntry CitusTableCacheEntry
*cacheEntry, *cacheEntry,
PlannedStmt *plan); PlannedStmt *
plan);
extern bool CitusHasBeenLoaded(void); extern bool CitusHasBeenLoaded(void);
extern bool CheckCitusVersion(int elevel); extern bool CheckCitusVersion(int elevel);
extern bool CheckAvailableVersion(int elevel); extern bool CheckAvailableVersion(int elevel);
@ -21,7 +21,8 @@ extern MultiConnection * StartPlacementConnection(uint32 flags,
struct ShardPlacement *placement, struct ShardPlacement *placement,
const char *userName); const char *userName);
extern MultiConnection * GetConnectionIfPlacementAccessedInXact(int flags, extern MultiConnection * GetConnectionIfPlacementAccessedInXact(int flags,
List *placementAccessList, List *
placementAccessList,
const char *userName); const char *userName);
extern MultiConnection * StartPlacementListConnection(uint32 flags, extern MultiConnection * StartPlacementListConnection(uint32 flags,
List *placementAccessList, List *placementAccessList,
@ -26,7 +26,8 @@ extern int ValuesMaterializationThreshold;
extern bool CanPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLimit); extern bool CanPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLimit);
extern bool ShouldUseSubqueryPushDown(Query *originalQuery, Query *rewrittenQuery, extern bool ShouldUseSubqueryPushDown(Query *originalQuery, Query *rewrittenQuery,
PlannerRestrictionContext *plannerRestrictionContext); PlannerRestrictionContext *
plannerRestrictionContext);
extern bool JoinTreeContainsSubquery(Query *query); extern bool JoinTreeContainsSubquery(Query *query);
extern bool IsNodeSubquery(Node *node); extern bool IsNodeSubquery(Node *node);
extern bool HasEmptyJoinTree(Query *query); extern bool HasEmptyJoinTree(Query *query);
@ -37,8 +38,8 @@ extern MultiNode * SubqueryMultiNodeTree(Query *originalQuery,
Query *queryTree, Query *queryTree,
PlannerRestrictionContext * PlannerRestrictionContext *
plannerRestrictionContext); plannerRestrictionContext);
extern DeferredErrorMessage * DeferErrorIfUnsupportedSubqueryPushdown( extern DeferredErrorMessage * DeferErrorIfUnsupportedSubqueryPushdown(Query *
Query *originalQuery, originalQuery,
PlannerRestrictionContext PlannerRestrictionContext
* *
plannerRestrictionContext, plannerRestrictionContext,
@ -31,8 +31,8 @@ typedef struct RangeTblEntryIndex
Index rteIndex; Index rteIndex;
}RangeTblEntryIndex; }RangeTblEntryIndex;
extern PlannerRestrictionContext * GetPlannerRestrictionContext( extern PlannerRestrictionContext * GetPlannerRestrictionContext(RecursivePlanningContext *
RecursivePlanningContext *recursivePlanningContext); recursivePlanningContext);
extern List * GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery, extern List * GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery,
PlannerRestrictionContext * PlannerRestrictionContext *
plannerRestrictionContext, plannerRestrictionContext,
@ -41,8 +41,8 @@ extern PlannerRestrictionContext * FilterPlannerRestrictionForQuery(
extern List * GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry, extern List * GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry,
PlannerRestrictionContext * PlannerRestrictionContext *
plannerRestrictionContext); plannerRestrictionContext);
extern RelationRestriction * RelationRestrictionForRelation( extern RelationRestriction * RelationRestrictionForRelation(RangeTblEntry *
RangeTblEntry *rangeTableEntry, rangeTableEntry,
PlannerRestrictionContext * PlannerRestrictionContext *
plannerRestrictionContext); plannerRestrictionContext);
extern JoinRestrictionContext * RemoveDuplicateJoinRestrictions(JoinRestrictionContext * extern JoinRestrictionContext * RemoveDuplicateJoinRestrictions(JoinRestrictionContext *
@ -20,8 +20,8 @@ extern List * GenerateTaskListWithColocatedIntermediateResults(Oid targetRelatio
Query * Query *
modifyQueryViaCoordinatorOrRepartition, modifyQueryViaCoordinatorOrRepartition,
char *resultIdPrefix); char *resultIdPrefix);
extern List * GenerateTaskListWithRedistributedResults( extern List * GenerateTaskListWithRedistributedResults(Query *
Query *modifyQueryViaCoordinatorOrRepartition, modifyQueryViaCoordinatorOrRepartition,
CitusTableCacheEntry * CitusTableCacheEntry *
targetRelation, targetRelation,
List **redistributedResults, List **redistributedResults,
@ -171,7 +171,8 @@ IsNodeWideObjectClass(ObjectClass objectClass)
* this assertion check based on latest supported major Postgres version. * this assertion check based on latest supported major Postgres version.
*/ */
StaticAssertStmt(PG_MAJORVERSION_NUM <= 18, StaticAssertStmt(PG_MAJORVERSION_NUM <= 18,
"better to check if any of newly added ObjectClass'es are node-wide"); "better to check if any of newly added ObjectClass'es are node-wide")
;
switch (objectClass) switch (objectClass)
{ {
@ -187,8 +188,10 @@ IsNodeWideObjectClass(ObjectClass objectClass)
} }
default: default:
{
return false; return false;
} }
}
} }
@ -38,7 +38,8 @@ typedef struct SortShardIntervalContext
extern ShardInterval ** SortShardIntervalArray(ShardInterval **shardIntervalArray, int extern ShardInterval ** SortShardIntervalArray(ShardInterval **shardIntervalArray, int
shardCount, Oid collation, shardCount, Oid collation,
FmgrInfo *shardIntervalSortCompareFunction); FmgrInfo *
shardIntervalSortCompareFunction);
extern int CompareShardIntervals(const void *leftElement, const void *rightElement, extern int CompareShardIntervals(const void *leftElement, const void *rightElement,
SortShardIntervalContext *sortContext); SortShardIntervalContext *sortContext);
extern int CompareShardIntervalsById(const void *leftElement, const void *rightElement); extern int CompareShardIntervalsById(const void *leftElement, const void *rightElement);
@ -34,9 +34,10 @@ extern List * PopulateShardSplitSubscriptionsMetadataList(HTAB *shardSplitInfoHa
List * List *
shardGroupSplitIntervalListList, shardGroupSplitIntervalListList,
List *workersForPlacementList); List *workersForPlacementList);
extern HTAB * CreateShardSplitInfoMapForPublication( extern HTAB * CreateShardSplitInfoMapForPublication(List *
List *sourceColocatedShardIntervalList, sourceColocatedShardIntervalList,
List *shardGroupSplitIntervalListList, List *
shardGroupSplitIntervalListList,
List *destinationWorkerNodesList); List *destinationWorkerNodesList);
/* Functions to drop publisher-subscriber resources */ /* Functions to drop publisher-subscriber resources */
@ -74,7 +74,8 @@ void StoreShardSplitSharedMemoryHandle(dsm_handle dsmHandle);
/* Functions for creating and accessing shared memory segments consisting shard split information */ /* Functions for creating and accessing shared memory segments consisting shard split information */
extern ShardSplitInfoSMHeader * CreateSharedMemoryForShardSplitInfo(int extern ShardSplitInfoSMHeader * CreateSharedMemoryForShardSplitInfo(int
shardSplitInfoCount, shardSplitInfoCount,
dsm_handle *dsmHandle); dsm_handle *
dsmHandle);
extern void ReleaseSharedMemoryOfShardSplitInfo(void); extern void ReleaseSharedMemoryOfShardSplitInfo(void);
extern ShardSplitInfoSMHeader * GetShardSplitInfoSMHeader(void); extern ShardSplitInfoSMHeader * GetShardSplitInfoSMHeader(void);
@ -16,7 +16,8 @@
extern bool EnableBinaryProtocol; extern bool EnableBinaryProtocol;
extern DestReceiver * CreateShardCopyDestReceiver(EState *executorState, extern DestReceiver * CreateShardCopyDestReceiver(EState *executorState,
List *destinationShardFullyQualifiedName, List *
destinationShardFullyQualifiedName,
uint32_t destinationNodeId); uint32_t destinationNodeId);
extern const char * CopyableColumnNamesFromRelationName(const char *schemaName, const extern const char * CopyableColumnNamesFromRelationName(const char *schemaName, const
@ -81,8 +81,10 @@ extern bool SendOptionalCommandListToWorkerOutsideTransactionWithConnection(
List * List *
commandList); commandList);
extern bool SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(const extern bool SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(const
char *nodeName, char *
int32 nodePort, nodeName,
int32
nodePort,
const char * const char *
nodeUser, nodeUser,
List * List *
@ -100,16 +102,17 @@ extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName,
int32 nodePort, int32 nodePort,
const char *nodeUser, const char *nodeUser,
List *commandList); List *commandList);
extern void SendCommandListToWorkerOutsideTransactionWithConnection( extern void SendCommandListToWorkerOutsideTransactionWithConnection(MultiConnection *
MultiConnection *workerConnection, workerConnection,
List *commandList); List *commandList);
extern void SendCommandListToWorkerListWithBareConnections(List *workerConnections, extern void SendCommandListToWorkerListWithBareConnections(List *workerConnections,
List *commandList); List *commandList);
extern void SendMetadataCommandListToWorkerListInCoordinatedTransaction( extern void SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *
List *workerNodeList, workerNodeList,
const char * const char *
nodeUser, nodeUser,
List *commandList); List *
commandList);
extern void RemoveWorkerTransaction(const char *nodeName, int32 nodePort); extern void RemoveWorkerTransaction(const char *nodeName, int32 nodePort);
/* helper functions for worker transactions */ /* helper functions for worker transactions */
@ -364,8 +364,10 @@ getObjectClass(const ObjectAddress *object)
} }
case TransformRelationId: case TransformRelationId:
{
return OCLASS_TRANSFORM; return OCLASS_TRANSFORM;
} }
}
/* shouldn't get here */ /* shouldn't get here */
elog(ERROR, "unrecognized object class: %u", object->classId); elog(ERROR, "unrecognized object class: %u", object->classId);
@ -364,9 +364,6 @@ s/(Actual[[:space:]]+Rows:[[:space:]]*)N\.N/\1N/gI
# ignore any “Disabled:” lines in test output # ignore any “Disabled:” lines in test output
/^\s*Disabled:/d /^\s*Disabled:/d
# ignore any JSON-style Disabled field
/^\s*"Disabled":/d
# ignore XML <Disabled>true</Disabled> or <Disabled>false</Disabled> # ignore XML <Disabled>true</Disabled> or <Disabled>false</Disabled>
/^\s*<Disabled>.*<\/Disabled>/d /^\s*<Disabled>.*<\/Disabled>/d
# pg18 “Disabled” change end # pg18 “Disabled” change end
@ -1,13 +1,3 @@
--
-- PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
\endif
-- create/drop database for pg >= 15 -- create/drop database for pg >= 15
set citus.enable_create_database_propagation=on; set citus.enable_create_database_propagation=on;
CREATE DATABASE mydatabase CREATE DATABASE mydatabase
@ -1,9 +0,0 @@
--
-- PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
@ -2,7 +2,6 @@ SHOW server_version \gset
SELECT CASE SELECT CASE
WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+'
WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16'
WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14'
ELSE 'Unsupported version' ELSE 'Unsupported version'
END AS version_category; END AS version_category;
version_category version_category
@ -10,12 +9,6 @@ SELECT CASE
17+ 17+
(1 row) (1 row)
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
\endif
-- --
-- MERGE test from PG community (adapted to Citus by converting all tables to Citus local) -- MERGE test from PG community (adapted to Citus by converting all tables to Citus local)
-- --
@ -2,7 +2,6 @@ SHOW server_version \gset
SELECT CASE SELECT CASE
WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+'
WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16'
WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14'
ELSE 'Unsupported version' ELSE 'Unsupported version'
END AS version_category; END AS version_category;
version_category version_category
@ -10,12 +9,6 @@ SELECT CASE
15_16 15_16
(1 row) (1 row)
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
\endif
-- --
-- MERGE test from PG community (adapted to Citus by converting all tables to Citus local) -- MERGE test from PG community (adapted to Citus by converting all tables to Citus local)
-- --
@ -1,17 +0,0 @@
SHOW server_version \gset
SELECT CASE
WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+'
WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16'
WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14'
ELSE 'Unsupported version'
END AS version_category;
version_category
---------------------------------------------------------------------
14
(1 row)
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
@ -6,6 +6,10 @@
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 -- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15 -- The alternative output can be deleted when we drop support for PG15
-- --
-- This test file has an alternative output because of the following in PG18:
-- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a
-- The alternative output can be deleted when we drop support for PG17
--
SHOW server_version \gset SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16 server_version_ge_16
@ -13,6 +17,12 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
t t
(1 row) (1 row)
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18;
server_version_ge_18
---------------------------------------------------------------------
t
(1 row)
SET citus.next_shard_id TO 570000; SET citus.next_shard_id TO 570000;
\a\t \a\t
SET citus.explain_distributed_queries TO on; SET citus.explain_distributed_queries TO on;
@ -111,6 +121,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Node Type": "Sort", "Node Type": "Sort",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"], "Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
"Plans": [ "Plans": [
{ {
@ -120,6 +131,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parent Relationship": "Outer", "Parent Relationship": "Outer",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Group Key": ["remote_scan.l_quantity"], "Group Key": ["remote_scan.l_quantity"],
"Plans": [ "Plans": [
{ {
@ -128,6 +140,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Custom Plan Provider": "Citus Adaptive", "Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Distributed Query": { "Distributed Query": {
"Job": { "Job": {
"Task Count": 2, "Task Count": 2,
@ -144,6 +157,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple", "Partial Mode": "Simple",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Group Key": ["l_quantity"], "Group Key": ["l_quantity"],
"Plans": [ "Plans": [
{ {
@ -152,7 +166,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Relation Name": "lineitem_360000", "Relation Name": "lineitem_360000",
"Alias": "lineitem" "Alias": "lineitem",
"Disabled": false
} }
] ]
} }
@ -1184,6 +1199,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple", "Partial Mode": "Simple",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Custom Scan", "Node Type": "Custom Scan",
@ -1191,6 +1207,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Custom Plan Provider": "Citus Adaptive", "Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Distributed Query": { "Distributed Query": {
"Job": { "Job": {
"Task Count": 6, "Task Count": 6,
@ -1745,7 +1762,8 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
"Parallel Aware": false,+ "Parallel Aware": false,+
"Async Capable": false, + "Async Capable": false, +
"Actual Rows": 1, + "Actual Rows": 1, +
"Actual Loops": 1 + "Actual Loops": 1, +
"Disabled": false +
}, + }, +
"Triggers": [ + "Triggers": [ +
] + ] +
@ -2170,108 +2188,69 @@ SELECT * FROM explain_pk ORDER BY 1;
ROLLBACK; ROLLBACK;
-- test EXPLAIN ANALYZE with non-text output formats -- test EXPLAIN ANALYZE with non-text output formats
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
[ - Plan:
{ Node Type: "Custom Scan"
"Plan": { Custom Plan Provider: "Citus Adaptive"
"Node Type": "Custom Scan", Parallel Aware: false
"Custom Plan Provider": "Citus Adaptive", Async Capable: false
"Parallel Aware": false, Actual Rows: 0
"Async Capable": false, Actual Loops: 1
"Actual Rows": 0, Distributed Query:
"Actual Loops": 1, Job:
"Distributed Query": { Task Count: 2
"Job": { Tasks Shown: "One of 2"
"Task Count": 2, Tasks:
"Tasks Shown": "One of 2", - Node: "host=localhost port=xxxxx dbname=regression"
"Tasks": [ Remote Plan:
{ - Plan:
"Node": "host=localhost port=xxxxx dbname=regression", Node Type: "ModifyTable"
"Remote Plan": [ Operation: "Insert"
[ Parallel Aware: false
{ Async Capable: false
"Plan": { Relation Name: "explain_pk_570013"
"Node Type": "ModifyTable", Alias: "citus_table_alias"
"Operation": "Insert", Actual Rows: 0
"Parallel Aware": false, Actual Loops: 1
"Async Capable": false, Plans:
"Relation Name": "explain_pk_570013", - Node Type: "Result"
"Alias": "citus_table_alias", Parent Relationship: "Outer"
"Actual Rows": 0, Parallel Aware: false
"Actual Loops": 1, Async Capable: false
"Plans": [ Actual Rows: 1
{ Actual Loops: 1
"Node Type": "Result", Triggers:
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Actual Rows": 1,
"Actual Loops": 1
}
]
},
"Triggers": [
]
}
]
] Triggers:
}
]
}
}
},
"Triggers": [
]
}
]
ROLLBACK; ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) SELECT * FROM explain_pk;
[ - Plan:
{ Node Type: "Custom Scan"
"Plan": { Custom Plan Provider: "Citus Adaptive"
"Node Type": "Custom Scan", Parallel Aware: false
"Custom Plan Provider": "Citus Adaptive", Async Capable: false
"Parallel Aware": false, Actual Rows: 0
"Async Capable": false, Actual Loops: 1
"Actual Rows": 0, Distributed Query:
"Actual Loops": 1, Job:
"Distributed Query": { Task Count: 4
"Job": { Tuple data received from nodes: "0 bytes"
"Task Count": 4, Tasks Shown: "One of 4"
"Tuple data received from nodes": "0 bytes", Tasks:
"Tasks Shown": "One of 4", - Tuple data received from node: "0 bytes"
"Tasks": [ Node: "host=localhost port=xxxxx dbname=regression"
{ Remote Plan:
"Tuple data received from node": "0 bytes", - Plan:
"Node": "host=localhost port=xxxxx dbname=regression", Node Type: "Seq Scan"
"Remote Plan": [ Parallel Aware: false
[ Async Capable: false
{ Relation Name: "explain_pk_570013"
"Plan": { Alias: "explain_pk"
"Node Type": "Seq Scan", Actual Rows: 0
"Parallel Aware": false, Actual Loops: 1
"Async Capable": false, Triggers:
"Relation Name": "explain_pk_570013",
"Alias": "explain_pk",
"Actual Rows": 0,
"Actual Loops": 1
},
"Triggers": [
]
}
]
] Triggers:
}
]
}
}
},
"Triggers": [
]
}
]
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
<explain xmlns="http://www.postgresql.org/2009/explain"> <explain xmlns="http://www.postgresql.org/2009/explain">
@ -3286,6 +3265,7 @@ SELECT * FROM a;
"Async Capable": false, "Async Capable": false,
"Actual Rows": 1, "Actual Rows": 1,
"Actual Loops": 1, "Actual Loops": 1,
"Disabled": false,
"Distributed Query": { "Distributed Query": {
"Subplans": [ "Subplans": [
{ {
@ -3300,6 +3280,7 @@ SELECT * FROM a;
"Async Capable": false, "Async Capable": false,
"Actual Rows": 1, "Actual Rows": 1,
"Actual Loops": 1, "Actual Loops": 1,
"Disabled": false,
"Distributed Query": { "Distributed Query": {
"Job": { "Job": {
"Task Count": 1, "Task Count": 1,
@ -3321,6 +3302,7 @@ SELECT * FROM a;
"Alias": "test_subplans_570038", "Alias": "test_subplans_570038",
"Actual Rows": 1, "Actual Rows": 1,
"Actual Loops": 1, "Actual Loops": 1,
"Disabled": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Result", "Node Type": "Result",
@ -3328,7 +3310,8 @@ SELECT * FROM a;
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Actual Rows": 1, "Actual Rows": 1,
"Actual Loops": 1 "Actual Loops": 1,
"Disabled": false
} }
] ]
}, },
@ -3367,7 +3350,8 @@ SELECT * FROM a;
"Function Name": "read_intermediate_result", "Function Name": "read_intermediate_result",
"Alias": "intermediate_result", "Alias": "intermediate_result",
"Actual Rows": 1, "Actual Rows": 1,
"Actual Loops": 1 "Actual Loops": 1,
"Disabled": false
}, },
"Triggers": [ "Triggers": [
] ]
@ -6,9 +6,19 @@
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 -- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15 -- The alternative output can be deleted when we drop support for PG15
-- --
-- This test file has an alternative output because of the following in PG18:
-- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a
-- The alternative output can be deleted when we drop support for PG17
--
SHOW server_version \gset SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16 server_version_ge_16
---------------------------------------------------------------------
t
(1 row)
SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18;
server_version_ge_18
--------------------------------------------------------------------- ---------------------------------------------------------------------
f f
(1 row) (1 row)
@ -665,7 +675,7 @@ Aggregate
-> GroupAggregate -> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort -> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), events.event_time
-> Hash Join -> Hash Join
Hash Cond: (users.composite_id = events.composite_id) Hash Cond: (users.composite_id = events.composite_id)
-> Seq Scan on users_1400289 users -> Seq Scan on users_1400289 users
@ -757,7 +767,7 @@ HashAggregate
-> GroupAggregate -> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
-> Sort -> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone, events.event_time
-> Hash Left Join -> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id) Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate -> HashAggregate
@ -870,7 +880,7 @@ Sort
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0) Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0)
-> Sort -> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay, events.event_time
-> Hash Left Join -> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id) Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate -> HashAggregate
@ -975,7 +985,7 @@ Limit
-> GroupAggregate -> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort -> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), events.event_time
-> Nested Loop Left Join -> Nested Loop Left Join
-> Limit -> Limit
-> Sort -> Sort
@ -2170,108 +2180,69 @@ SELECT * FROM explain_pk ORDER BY 1;
ROLLBACK; ROLLBACK;
-- test EXPLAIN ANALYZE with non-text output formats -- test EXPLAIN ANALYZE with non-text output formats
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
[ - Plan:
{ Node Type: "Custom Scan"
"Plan": { Custom Plan Provider: "Citus Adaptive"
"Node Type": "Custom Scan", Parallel Aware: false
"Custom Plan Provider": "Citus Adaptive", Async Capable: false
"Parallel Aware": false, Actual Rows: 0
"Async Capable": false, Actual Loops: 1
"Actual Rows": 0, Distributed Query:
"Actual Loops": 1, Job:
"Distributed Query": { Task Count: 2
"Job": { Tasks Shown: "One of 2"
"Task Count": 2, Tasks:
"Tasks Shown": "One of 2", - Node: "host=localhost port=xxxxx dbname=regression"
"Tasks": [ Remote Plan:
{ - Plan:
"Node": "host=localhost port=xxxxx dbname=regression", Node Type: "ModifyTable"
"Remote Plan": [ Operation: "Insert"
[ Parallel Aware: false
{ Async Capable: false
"Plan": { Relation Name: "explain_pk_570013"
"Node Type": "ModifyTable", Alias: "citus_table_alias"
"Operation": "Insert", Actual Rows: 0
"Parallel Aware": false, Actual Loops: 1
"Async Capable": false, Plans:
"Relation Name": "explain_pk_570013", - Node Type: "Result"
"Alias": "citus_table_alias", Parent Relationship: "Outer"
"Actual Rows": 0, Parallel Aware: false
"Actual Loops": 1, Async Capable: false
"Plans": [ Actual Rows: 1
{ Actual Loops: 1
"Node Type": "Result", Triggers:
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Actual Rows": 1,
"Actual Loops": 1
}
]
},
"Triggers": [
]
}
]
] Triggers:
}
]
}
}
},
"Triggers": [
]
}
]
ROLLBACK; ROLLBACK;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) SELECT * FROM explain_pk;
[ - Plan:
{ Node Type: "Custom Scan"
"Plan": { Custom Plan Provider: "Citus Adaptive"
"Node Type": "Custom Scan", Parallel Aware: false
"Custom Plan Provider": "Citus Adaptive", Async Capable: false
"Parallel Aware": false, Actual Rows: 0
"Async Capable": false, Actual Loops: 1
"Actual Rows": 0, Distributed Query:
"Actual Loops": 1, Job:
"Distributed Query": { Task Count: 4
"Job": { Tuple data received from nodes: "0 bytes"
"Task Count": 4, Tasks Shown: "One of 4"
"Tuple data received from nodes": "0 bytes", Tasks:
"Tasks Shown": "One of 4", - Tuple data received from node: "0 bytes"
"Tasks": [ Node: "host=localhost port=xxxxx dbname=regression"
{ Remote Plan:
"Tuple data received from node": "0 bytes", - Plan:
"Node": "host=localhost port=xxxxx dbname=regression", Node Type: "Seq Scan"
"Remote Plan": [ Parallel Aware: false
[ Async Capable: false
{ Relation Name: "explain_pk_570013"
"Plan": { Alias: "explain_pk"
"Node Type": "Seq Scan", Actual Rows: 0
"Parallel Aware": false, Actual Loops: 1
"Async Capable": false, Triggers:
"Relation Name": "explain_pk_570013",
"Alias": "explain_pk",
"Actual Rows": 0,
"Actual Loops": 1
},
"Triggers": [
]
}
]
] Triggers:
}
]
}
}
},
"Triggers": [
]
}
]
BEGIN; BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
<explain xmlns="http://www.postgresql.org/2009/explain"> <explain xmlns="http://www.postgresql.org/2009/explain">
@ -2407,11 +2378,16 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Tuple data received from node: 8 bytes Tuple data received from node: 8 bytes
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1) -> Aggregate (actual rows=1 loops=1)
-> Hash Join (actual rows=10 loops=1) -> Merge Join (actual rows=10 loops=1)
Hash Cond: (ref_table.a = intermediate_result.a) Merge Cond: (intermediate_result.a = ref_table.a)
-> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1) -> Sort (actual rows=10 loops=1)
-> Hash (actual rows=10 loops=1) Sort Key: intermediate_result.a
Sort Method: quicksort Memory: 25kB
-> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1) -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: ref_table.a
Sort Method: quicksort Memory: 25kB
-> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
EXPLAIN :default_analyze_flags EXPLAIN :default_analyze_flags
SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table; SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table;
Aggregate (actual rows=1 loops=1) Aggregate (actual rows=1 loops=1)
@ -2470,6 +2446,9 @@ Aggregate (actual rows=1 loops=1)
-> Aggregate (actual rows=1 loops=1) -> Aggregate (actual rows=1 loops=1)
InitPlan 1 InitPlan 1
-> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1) -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
-> Sort (actual rows=4 loops=1)
Sort Key: dist_table.a
Sort Method: quicksort Memory: 25kB
-> Result (actual rows=4 loops=1) -> Result (actual rows=4 loops=1)
One-Time Filter: (InitPlan 1).col1 One-Time Filter: (InitPlan 1).col1
-> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1) -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
@ -2514,6 +2493,9 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Tuple data received from node: 8 bytes Tuple data received from node: 8 bytes
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1) -> Aggregate (actual rows=1 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: intermediate_result.a2
Sort Method: quicksort Memory: 25kB
-> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1) -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
ROLLBACK; ROLLBACK;
-- https://github.com/citusdata/citus/issues/4074 -- https://github.com/citusdata/citus/issues/4074
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -86,6 +86,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Node Type": "Sort", "Node Type": "Sort",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"], "Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
"Plans": [ "Plans": [
{ {
@ -95,6 +96,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parent Relationship": "Outer", "Parent Relationship": "Outer",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Group Key": ["remote_scan.l_quantity"], "Group Key": ["remote_scan.l_quantity"],
"Plans": [ "Plans": [
{ {
@ -103,6 +105,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Custom Plan Provider": "Citus Adaptive", "Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Distributed Query": { "Distributed Query": {
"Job": { "Job": {
"Task Count": 16, "Task Count": 16,
@ -119,6 +122,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple", "Partial Mode": "Simple",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Group Key": ["l_quantity"], "Group Key": ["l_quantity"],
"Plans": [ "Plans": [
{ {
@ -127,7 +131,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Relation Name": "lineitem_mx_1220052", "Relation Name": "lineitem_mx_1220052",
"Alias": "lineitem_mx" "Alias": "lineitem_mx",
"Disabled": false
} }
] ]
} }
@ -553,6 +558,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple", "Partial Mode": "Simple",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Custom Scan", "Node Type": "Custom Scan",
@ -560,6 +566,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Custom Plan Provider": "Citus Adaptive", "Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Distributed Query": { "Distributed Query": {
"Job": { "Job": {
"Task Count": 16, "Task Count": 16,
@ -576,6 +583,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple", "Partial Mode": "Simple",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Hash Join", "Node Type": "Hash Join",
@ -583,6 +591,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Join Type": "Inner", "Join Type": "Inner",
"Disabled": false,
"Inner Unique": false, "Inner Unique": false,
"Hash Cond": "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)", "Hash Cond": "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)",
"Plans": [ "Plans": [
@ -592,6 +601,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Join Type": "Inner", "Join Type": "Inner",
"Disabled": false,
"Inner Unique": false, "Inner Unique": false,
"Hash Cond": "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)", "Hash Cond": "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)",
"Plans": [ "Plans": [
@ -601,13 +611,15 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Relation Name": "supplier_mx_1220087", "Relation Name": "supplier_mx_1220087",
"Alias": "supplier_mx" "Alias": "supplier_mx",
"Disabled": false
}, },
{ {
"Node Type": "Hash", "Node Type": "Hash",
"Parent Relationship": "Inner", "Parent Relationship": "Inner",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Seq Scan", "Node Type": "Seq Scan",
@ -615,7 +627,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Relation Name": "lineitem_mx_1220052", "Relation Name": "lineitem_mx_1220052",
"Alias": "lineitem_mx" "Alias": "lineitem_mx",
"Disabled": false
} }
] ]
} }
@ -626,6 +639,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parent Relationship": "Inner", "Parent Relationship": "Inner",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Hash Join", "Node Type": "Hash Join",
@ -633,6 +647,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Join Type": "Inner", "Join Type": "Inner",
"Disabled": false,
"Inner Unique": false, "Inner Unique": false,
"Hash Cond": "(customer_mx.c_custkey = orders_mx.o_custkey)", "Hash Cond": "(customer_mx.c_custkey = orders_mx.o_custkey)",
"Plans": [ "Plans": [
@ -642,13 +657,15 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Relation Name": "customer_mx_1220084", "Relation Name": "customer_mx_1220084",
"Alias": "customer_mx" "Alias": "customer_mx",
"Disabled": false
}, },
{ {
"Node Type": "Hash", "Node Type": "Hash",
"Parent Relationship": "Inner", "Parent Relationship": "Inner",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Disabled": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Seq Scan", "Node Type": "Seq Scan",
@ -656,7 +673,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false, "Async Capable": false,
"Relation Name": "orders_mx_1220068", "Relation Name": "orders_mx_1220068",
"Alias": "orders_mx" "Alias": "orders_mx",
"Disabled": false
} }
] ]
} }
Some files were not shown because too many files have changed in this diff