diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 6579c52d9..73a5ba0aa 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -113,10 +113,10 @@ FROM base AS uncrustify-builder
RUN sudo apt update && sudo apt install -y cmake tree
WORKDIR /uncrustify
-RUN curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.68.1.tar.gz | tar xz
-WORKDIR /uncrustify/uncrustify-uncrustify-0.68.1/
+RUN curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.82.0.tar.gz | tar xz
+WORKDIR /uncrustify/uncrustify-uncrustify-0.82.0/
RUN mkdir build
-WORKDIR /uncrustify/uncrustify-uncrustify-0.68.1/build/
+WORKDIR /uncrustify/uncrustify-uncrustify-0.82.0/build/
RUN cmake ..
RUN MAKEFLAGS="-j $(nproc)" make -s
diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index bd644395f..30329e1b3 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -30,9 +30,9 @@ jobs:
fail_test_image_name: "ghcr.io/citusdata/failtester"
pgupgrade_image_name: "ghcr.io/citusdata/pgupgradetester"
style_checker_image_name: "ghcr.io/citusdata/stylechecker"
- style_checker_tools_version: "0.8.18"
+ style_checker_tools_version: "0.8.33"
sql_snapshot_pg_version: "17.6"
- image_suffix: "-va20872f"
+ image_suffix: "-ve4d3aa0"
pg15_version: '{ "major": "15", "full": "15.14" }'
pg16_version: '{ "major": "16", "full": "16.10" }'
pg17_version: '{ "major": "17", "full": "17.6" }'
diff --git a/STYLEGUIDE.md b/STYLEGUIDE.md
index b4eec23b0..e4f511af3 100644
--- a/STYLEGUIDE.md
+++ b/STYLEGUIDE.md
@@ -11,9 +11,9 @@ tool. This tool uses `uncrustify` under the hood.
```bash
# Uncrustify changes the way it formats code every release a bit. To make sure
-# everyone formats consistently we use version 0.68.1:
-curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.68.1.tar.gz | tar xz
-cd uncrustify-uncrustify-0.68.1/
+# everyone formats consistently we use version 0.82.0:
+curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.82.0.tar.gz | tar xz
+cd uncrustify-uncrustify-0.82.0/
mkdir build
cd build
cmake ..
diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c
index cd62c8b0c..ee0c9386a 100644
--- a/src/backend/columnar/columnar_metadata.c
+++ b/src/backend/columnar/columnar_metadata.c
@@ -72,9 +72,9 @@
#define COLUMNAR_RELOPTION_NAMESPACE "columnar"
#define SLOW_METADATA_ACCESS_WARNING \
- "Metadata index %s is not available, this might mean slower read/writes " \
- "on columnar tables. This is expected during Postgres upgrades and not " \
- "expected otherwise."
+ "Metadata index %s is not available, this might mean slower read/writes " \
+ "on columnar tables. This is expected during Postgres upgrades and not " \
+ "expected otherwise."
typedef struct
{
@@ -1330,10 +1330,10 @@ GetHighestUsedAddress(Relation rel)
Oid
ColumnarRelationId(Oid relid, RelFileLocator relfilelocator)
{
- return OidIsValid(relid) ? relid : RelidByRelfilenumber(RelationTablespace_compat(
- relfilelocator),
- RelationPhysicalIdentifierNumber_compat(
- relfilelocator));
+ return OidIsValid(relid) ? relid : RelidByRelfilenumber(RelationTablespace_compat
+ (relfilelocator),
+ RelationPhysicalIdentifierNumber_compat
+ (relfilelocator));
}
diff --git a/src/backend/columnar/columnar_reader.c b/src/backend/columnar/columnar_reader.c
index 17c4061f1..9ab74b83f 100644
--- a/src/backend/columnar/columnar_reader.c
+++ b/src/backend/columnar/columnar_reader.c
@@ -41,8 +41,8 @@
#include "distributed/listutils.h"
#define UNEXPECTED_STRIPE_READ_ERR_MSG \
- "attempted to read an unexpected stripe while reading columnar " \
- "table %s, stripe with id=" UINT64_FORMAT " is not flushed"
+ "attempted to read an unexpected stripe while reading columnar " \
+ "table %s, stripe with id=" UINT64_FORMAT " is not flushed"
typedef struct ChunkGroupReadState
{
@@ -758,7 +758,9 @@ SnapshotMightSeeUnflushedStripes(Snapshot snapshot)
}
default:
+ {
return false;
+ }
}
}
diff --git a/src/backend/columnar/columnar_storage.c b/src/backend/columnar/columnar_storage.c
index 0ae6ccca3..e0bb66a40 100644
--- a/src/backend/columnar/columnar_storage.c
+++ b/src/backend/columnar/columnar_storage.c
@@ -547,7 +547,8 @@ ColumnarStorageTruncate(Relation rel, uint64 newDataReservation)
if (!ColumnarLogicalOffsetIsValid(newDataReservation))
{
elog(ERROR,
- "attempted to truncate relation %d to invalid logical offset: " UINT64_FORMAT,
+ "attempted to truncate relation %d to "
+ "invalid logical offset: " UINT64_FORMAT,
rel->rd_id, newDataReservation);
}
diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c
index 8271d28b2..b44eef08d 100644
--- a/src/backend/columnar/columnar_tableam.c
+++ b/src/backend/columnar/columnar_tableam.c
@@ -2410,9 +2410,10 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
}
default:
-
+ {
/* FALL THROUGH */
break;
+ }
}
if (columnarOptions != NIL && columnarRangeVar == NULL)
diff --git a/src/backend/distributed/clock/causal_clock.c b/src/backend/distributed/clock/causal_clock.c
index ff05d03db..033cb5708 100644
--- a/src/backend/distributed/clock/causal_clock.c
+++ b/src/backend/distributed/clock/causal_clock.c
@@ -44,17 +44,17 @@
#include "distributed/remote_commands.h"
#define SAVE_AND_PERSIST(c) \
- do { \
- Oid savedUserId = InvalidOid; \
- int savedSecurityContext = 0; \
- LogicalClockShmem->clusterClockValue = *(c); \
- GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); \
- SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); \
- DirectFunctionCall2(setval_oid, \
- ObjectIdGetDatum(DistClockLogicalSequenceId()), \
- Int64GetDatum((c)->logical)); \
- SetUserIdAndSecContext(savedUserId, savedSecurityContext); \
- } while (0)
+ do { \
+ Oid savedUserId = InvalidOid; \
+ int savedSecurityContext = 0; \
+ LogicalClockShmem->clusterClockValue = *(c); \
+ GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); \
+ SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); \
+ DirectFunctionCall2(setval_oid, \
+ ObjectIdGetDatum(DistClockLogicalSequenceId()), \
+ Int64GetDatum((c)->logical)); \
+ SetUserIdAndSecContext(savedUserId, savedSecurityContext); \
+ } while (0)
PG_FUNCTION_INFO_V1(citus_get_node_clock);
PG_FUNCTION_INFO_V1(citus_internal_adjust_local_clock_to_remote);
diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c
index d2f8348da..e814cb9ca 100644
--- a/src/backend/distributed/commands/alter_table.c
+++ b/src/backend/distributed/commands/alter_table.c
@@ -77,9 +77,9 @@
#define ALTER_TABLE_SET_ACCESS_METHOD 'm'
#define UNDISTRIBUTE_TABLE_CASCADE_HINT \
- "Use cascade option to undistribute all the relations involved in " \
- "a foreign key relationship with %s by executing SELECT " \
- "undistribute_table($$%s$$, cascade_via_foreign_keys=>true)"
+ "Use cascade option to undistribute all the relations involved in " \
+ "a foreign key relationship with %s by executing SELECT " \
+ "undistribute_table($$%s$$, cascade_via_foreign_keys=>true)"
typedef TableConversionReturn *(*TableConversionFunction)(struct
@@ -185,8 +185,8 @@ typedef struct TableConversionState
static TableConversionReturn * AlterDistributedTable(TableConversionParameters *params);
-static TableConversionReturn * AlterTableSetAccessMethod(
- TableConversionParameters *params);
+static TableConversionReturn * AlterTableSetAccessMethod(TableConversionParameters *
+ params);
static TableConversionReturn * ConvertTable(TableConversionState *con);
static TableConversionReturn * ConvertTableInternal(TableConversionState *con);
static bool SwitchToSequentialAndLocalExecutionIfShardNameTooLong(char *relationName,
@@ -215,8 +215,8 @@ static char * CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequece
static void ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid);
static char * CreateMaterializedViewDDLCommand(Oid matViewOid);
static char * GetAccessMethodForMatViewIfExists(Oid viewOid);
-static bool WillRecreateForeignKeyToReferenceTable(Oid relationId,
- CascadeToColocatedOption cascadeOption);
+static bool WillRecreateFKeyToReferenceTable(Oid relationId,
+ CascadeToColocatedOption cascadeOption);
static void WarningsForDroppingForeignKeysWithDistributedTables(Oid relationId);
static void ErrorIfUnsupportedCascadeObjects(Oid relationId);
static List * WrapTableDDLCommands(List *commandStrings);
@@ -505,8 +505,9 @@ UndistributeTable(TableConversionParameters *params)
if (!params->bypassTenantCheck && IsTenantSchema(schemaId) &&
IsCitusTableType(params->relationId, SINGLE_SHARD_DISTRIBUTED))
{
- EnsureUndistributeTenantTableSafe(params->relationId,
- TenantOperationNames[TENANT_UNDISTRIBUTE_TABLE]);
+ EnsureUndistributeTenantTableSafe(
+ params->relationId,
+ TenantOperationNames[TENANT_UNDISTRIBUTE_TABLE]);
}
if (!params->cascadeViaForeignKeys)
@@ -577,7 +578,7 @@ AlterDistributedTable(TableConversionParameters *params)
TableConversionState *con = CreateTableConversion(params);
CheckAlterDistributedTableConversionParameters(con);
- if (WillRecreateForeignKeyToReferenceTable(con->relationId, con->cascadeToColocated))
+ if (WillRecreateFKeyToReferenceTable(con->relationId, con->cascadeToColocated))
{
ereport(DEBUG1, (errmsg("setting multi shard modify mode to sequential")));
SetLocalMultiShardModifyModeToSequential();
@@ -1927,14 +1928,10 @@ GetNonGeneratedStoredColumnNameList(Oid relationId)
for (int columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++)
{
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
- if (currentColumn->attisdropped)
- {
- /* skip dropped columns */
- continue;
- }
- if (currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED)
+ if (IsDroppedOrGenerated(currentColumn))
{
+ /* skip dropped or generated columns */
continue;
}
@@ -2197,13 +2194,13 @@ GetAccessMethodForMatViewIfExists(Oid viewOid)
/*
- * WillRecreateForeignKeyToReferenceTable checks if the table of relationId has any foreign
+ * WillRecreateFKeyToReferenceTable checks if the table of relationId has any foreign
* key to a reference table, if conversion will be cascaded to colocated table this function
* also checks if any of the colocated tables have a foreign key to a reference table too
*/
bool
-WillRecreateForeignKeyToReferenceTable(Oid relationId,
- CascadeToColocatedOption cascadeOption)
+WillRecreateFKeyToReferenceTable(Oid relationId,
+ CascadeToColocatedOption cascadeOption)
{
if (cascadeOption == CASCADE_TO_COLOCATED_NO ||
cascadeOption == CASCADE_TO_COLOCATED_UNSPECIFIED)
diff --git a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c
index 02b175960..97c920d63 100644
--- a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c
+++ b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c
@@ -522,7 +522,7 @@ ExecuteCascadeOperationForRelationIdList(List *relationIdList,
* with the flag InTableTypeConversionFunctionCall set to true.
*/
void
-ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandList)
+ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCmdList)
{
bool oldValue = InTableTypeConversionFunctionCall;
InTableTypeConversionFunctionCall = true;
@@ -531,7 +531,7 @@ ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandL
PG_TRY();
{
char *utilityCommand = NULL;
- foreach_declared_ptr(utilityCommand, utilityCommandList)
+ foreach_declared_ptr(utilityCommand, utilityCmdList)
{
/*
* CREATE MATERIALIZED VIEW commands need to be parsed/transformed,
@@ -566,10 +566,10 @@ ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandL
* ExecuteAndLogUtilityCommand function for each of them.
*/
void
-ExecuteAndLogUtilityCommandList(List *utilityCommandList)
+ExecuteAndLogUtilityCommandList(List *utilityCmdList)
{
char *utilityCommand = NULL;
- foreach_declared_ptr(utilityCommand, utilityCommandList)
+ foreach_declared_ptr(utilityCommand, utilityCmdList)
{
ExecuteAndLogUtilityCommand(utilityCommand);
}
diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c
index 10cef8308..a9e43b23c 100644
--- a/src/backend/distributed/commands/common.c
+++ b/src/backend/distributed/commands/common.c
@@ -64,8 +64,8 @@ PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryS
return NIL;
}
- if (ops->qualify && DistOpsValidityState(stmt, ops) ==
- ShouldQualifyAfterLocalCreation)
+ if (ops->qualify &&
+ DistOpsValidityState(stmt, ops) == ShouldQualifyAfterLocalCreation)
{
/* qualify the statement after local creation */
ops->qualify(stmt);
diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c
index 38fadb0f3..de691e35c 100644
--- a/src/backend/distributed/commands/create_distributed_table.c
+++ b/src/backend/distributed/commands/create_distributed_table.c
@@ -175,8 +175,9 @@ static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationD
static int numeric_typmod_scale(int32 typmod);
static bool is_valid_numeric_typmod(int32 typmod);
-static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
- Var *distributionColumn);
+static void DistributionColumnIsGeneratedCheck(TupleDesc relationDesc,
+ Var *distributionColumn,
+ const char *relationName);
static bool CanUseExclusiveConnections(Oid relationId, bool localTableEmpty);
static uint64 DoCopyFromLocalTableIntoShards(Relation distributedRelation,
DestReceiver *copyDest,
@@ -701,8 +702,9 @@ EnsureColocateWithTableIsValid(Oid relationId, char distributionMethod,
* given table. We should make those checks after local table conversion by acquiring locks to
* the relation because the distribution column can be modified in that period.
*/
- Oid distributionColumnType = ColumnTypeIdForRelationColumnName(relationId,
- distributionColumnName);
+ Oid distributionColumnType = ColumnTypeIdForRelationColumnName(
+ relationId,
+ distributionColumnName);
text *colocateWithTableNameText = cstring_to_text(colocateWithTableName);
Oid colocateWithTableId = ResolveRelationId(colocateWithTableNameText, false);
@@ -1107,8 +1109,8 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
DistributedTableParams *distributedTableParams)
{
if ((tableType == HASH_DISTRIBUTED || tableType == APPEND_DISTRIBUTED ||
- tableType == RANGE_DISTRIBUTED || tableType == SINGLE_SHARD_DISTRIBUTED) !=
- (distributedTableParams != NULL))
+ tableType == SINGLE_SHARD_DISTRIBUTED ||
+ tableType == RANGE_DISTRIBUTED) != (distributedTableParams != NULL))
{
ereport(ERROR, (errmsg("distributed table params must be provided "
"when creating a distributed table and must "
@@ -2103,13 +2105,10 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
/* verify target relation is not distributed by a generated stored column
*/
- if (distributionMethod != DISTRIBUTE_BY_NONE &&
- DistributionColumnUsesGeneratedStoredColumn(relationDesc, distributionColumn))
+ if (distributionMethod != DISTRIBUTE_BY_NONE)
{
- ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot distribute relation: %s", relationName),
- errdetail("Distribution column must not use GENERATED ALWAYS "
- "AS (...) STORED.")));
+ DistributionColumnIsGeneratedCheck(relationDesc, distributionColumn,
+ relationName);
}
/* verify target relation is not distributed by a column of type numeric with negative scale */
@@ -2829,9 +2828,7 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
char *columnName = NameStr(currentColumn->attname);
- if (currentColumn->attisdropped ||
- currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
- )
+ if (IsDroppedOrGenerated(currentColumn))
{
continue;
}
@@ -2893,22 +2890,43 @@ DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
/*
- * DistributionColumnUsesGeneratedStoredColumn returns whether a given relation uses
- * GENERATED ALWAYS AS (...) STORED on distribution column
+ * DistributionColumnIsGeneratedCheck throws an error if a given relation uses
+ * GENERATED ALWAYS AS (...) STORED | VIRTUAL on distribution column
*/
-static bool
-DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
- Var *distributionColumn)
+static void
+DistributionColumnIsGeneratedCheck(TupleDesc relationDesc,
+ Var *distributionColumn,
+ const char *relationName)
{
Form_pg_attribute attributeForm = TupleDescAttr(relationDesc,
distributionColumn->varattno - 1);
-
- if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
+ switch (attributeForm->attgenerated)
{
- return true;
- }
+ case ATTRIBUTE_GENERATED_STORED:
+ {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot distribute relation: %s", relationName),
+ errdetail("Distribution column must not use GENERATED ALWAYS "
+ "AS (...) STORED.")));
+ break;
+ }
- return false;
+#if PG_VERSION_NUM >= PG_VERSION_18
+ case ATTRIBUTE_GENERATED_VIRTUAL:
+ {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot distribute relation: %s", relationName),
+ errdetail("Distribution column must not use GENERATED ALWAYS "
+ "AS (...) VIRTUAL.")));
+ break;
+ }
+
+#endif
+ default:
+ {
+ break;
+ }
+ }
}
diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c
index 8d1c6bc23..5acecf099 100644
--- a/src/backend/distributed/commands/distribute_object_ops.c
+++ b/src/backend/distributed/commands/distribute_object_ops.c
@@ -2165,7 +2165,9 @@ GetDistributeObjectOps(Node *node)
}
default:
+ {
return &Any_SecLabel;
+ }
}
}
diff --git a/src/backend/distributed/commands/foreign_constraint.c b/src/backend/distributed/commands/foreign_constraint.c
index bc12ccb4d..536db0fb6 100644
--- a/src/backend/distributed/commands/foreign_constraint.c
+++ b/src/backend/distributed/commands/foreign_constraint.c
@@ -47,13 +47,13 @@
#define BehaviorIsRestrictOrNoAction(x) \
- ((x) == FKCONSTR_ACTION_NOACTION || (x) == FKCONSTR_ACTION_RESTRICT)
+ ((x) == FKCONSTR_ACTION_NOACTION || (x) == FKCONSTR_ACTION_RESTRICT)
#define USE_CREATE_REFERENCE_TABLE_HINT \
- "You could use SELECT create_reference_table('%s') " \
- "to replicate the referenced table to all nodes or " \
- "consider dropping the foreign key"
+ "You could use SELECT create_reference_table('%s') " \
+ "to replicate the referenced table to all nodes or " \
+ "consider dropping the foreign key"
typedef bool (*CheckRelationFunc)(Oid);
diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c
index d1631f879..bf7a3f247 100644
--- a/src/backend/distributed/commands/function.c
+++ b/src/backend/distributed/commands/function.c
@@ -71,7 +71,7 @@
#define DISABLE_LOCAL_CHECK_FUNCTION_BODIES "SET LOCAL check_function_bodies TO off;"
#define RESET_CHECK_FUNCTION_BODIES "RESET check_function_bodies;"
#define argumentStartsWith(arg, prefix) \
- (strncmp(arg, prefix, strlen(prefix)) == 0)
+ (strncmp(arg, prefix, strlen(prefix)) == 0)
/* forward declaration for helper functions*/
static bool RecreateSameNonColocatedFunction(ObjectAddress functionAddress,
@@ -107,9 +107,9 @@ static void DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid
char *colocateWithTableName,
const ObjectAddress *
functionAddress);
-static void DistributeFunctionColocatedWithSingleShardTable(const
- ObjectAddress *functionAddress,
- text *colocateWithText);
+static void DistributeFunctionColocatedWithSingleShardTable(const ObjectAddress *
+ functionAddress, text *
+ colocateWithText);
static void DistributeFunctionColocatedWithReferenceTable(const
ObjectAddress *functionAddress);
static List * FilterDistributedFunctions(GrantStmt *grantStmt);
@@ -1896,7 +1896,9 @@ ShouldAddFunctionSignature(FunctionParameterMode mode)
}
default:
+ {
return true;
+ }
}
}
diff --git a/src/backend/distributed/commands/grant.c b/src/backend/distributed/commands/grant.c
index c60afa197..fc09f050f 100644
--- a/src/backend/distributed/commands/grant.c
+++ b/src/backend/distributed/commands/grant.c
@@ -96,6 +96,7 @@ PreprocessGrantStmt(Node *node, const char *queryString,
{
appendStringInfo(&privsString, "%s", priv->priv_name);
}
+
/*
* ALL can only be set alone.
* And ALL is not added as a keyword in priv_name by parser, but
@@ -108,6 +109,7 @@ PreprocessGrantStmt(Node *node, const char *queryString,
/* this is used for column level only */
appendStringInfo(&privsString, "ALL");
}
+
/*
* Instead of relying only on the syntax check done by Postgres and
* adding an assert here, add a default ERROR if ALL is not first
@@ -227,8 +229,8 @@ CollectGrantTableIdList(GrantStmt *grantStmt)
bool grantOnTableCommand = (grantStmt->targtype == ACL_TARGET_OBJECT &&
grantStmt->objtype == OBJECT_TABLE);
- bool grantAllTablesOnSchemaCommand = (grantStmt->targtype ==
- ACL_TARGET_ALL_IN_SCHEMA &&
+ bool grantAllTablesOnSchemaCommand = (grantStmt->targtype == ACL_TARGET_ALL_IN_SCHEMA
+ &&
grantStmt->objtype == OBJECT_TABLE);
/* we are only interested in table level grants */
diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c
index d95c53fb5..5c03bac01 100644
--- a/src/backend/distributed/commands/index.c
+++ b/src/backend/distributed/commands/index.c
@@ -64,8 +64,8 @@ static int GetNumberOfIndexParameters(IndexStmt *createIndexStatement);
static bool IndexAlreadyExists(IndexStmt *createIndexStatement);
static Oid CreateIndexStmtGetIndexId(IndexStmt *createIndexStatement);
static Oid CreateIndexStmtGetSchemaId(IndexStmt *createIndexStatement);
-static void SwitchToSequentialAndLocalExecutionIfIndexNameTooLong(
- IndexStmt *createIndexStatement);
+static void SwitchToSequentialAndLocalExecutionIfIndexNameTooLong(IndexStmt *
+ createIndexStatement);
static char * GenerateLongestShardPartitionIndexName(IndexStmt *createIndexStatement);
static char * GenerateDefaultIndexName(IndexStmt *createIndexStatement);
static List * GenerateIndexParameters(IndexStmt *createIndexStatement);
diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c
index 79dc4719a..8ab337d8b 100644
--- a/src/backend/distributed/commands/multi_copy.c
+++ b/src/backend/distributed/commands/multi_copy.c
@@ -350,7 +350,6 @@ static void LogLocalCopyToRelationExecution(uint64 shardId);
static void LogLocalCopyToFileExecution(uint64 shardId);
static void ErrorIfMergeInCopy(CopyStmt *copyStatement);
-
/* exports for SQL callable functions */
PG_FUNCTION_INFO_V1(citus_text_send_as_jsonb);
@@ -484,9 +483,7 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag)
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
char *columnName = NameStr(currentColumn->attname);
- if (currentColumn->attisdropped ||
- currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
- )
+ if (IsDroppedOrGenerated(currentColumn))
{
continue;
}
@@ -804,9 +801,7 @@ CanUseBinaryCopyFormat(TupleDesc tupleDescription)
{
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescription, columnIndex);
- if (currentColumn->attisdropped ||
- currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
- )
+ if (IsDroppedOrGenerated(currentColumn))
{
continue;
}
@@ -1277,7 +1272,9 @@ ConversionPathForTypes(Oid inputType, Oid destType, CopyCoercionData *result)
}
default:
+ {
Assert(false); /* there are no other options for this enum */
+ }
}
}
@@ -1316,9 +1313,7 @@ TypeArrayFromTupleDescriptor(TupleDesc tupleDescriptor)
for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
{
Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, columnIndex);
- if (attr->attisdropped ||
- attr->attgenerated == ATTRIBUTE_GENERATED_STORED
- )
+ if (IsDroppedOrGenerated(attr))
{
typeArray[columnIndex] = InvalidOid;
}
@@ -1486,9 +1481,7 @@ AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor,
value = CoerceColumnValue(value, &columnCoercionPaths[columnIndex]);
}
- if (currentColumn->attisdropped ||
- currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
- )
+ if (IsDroppedOrGenerated(currentColumn))
{
continue;
}
@@ -1607,9 +1600,7 @@ AvailableColumnCount(TupleDesc tupleDescriptor)
{
Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
- if (!currentColumn->attisdropped &&
- currentColumn->attgenerated != ATTRIBUTE_GENERATED_STORED
- )
+ if (!IsDroppedOrGenerated(currentColumn))
{
columnCount++;
}
@@ -2479,7 +2470,7 @@ ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement)
if (!IsCitusTableType(relationId, APPEND_DISTRIBUTED))
{
ereport(ERROR, (errmsg(APPEND_TO_SHARD_OPTION " is only valid for "
- "append-distributed tables")));
+ "append-distributed tables")));
}
/* throws an error if shard does not exist */
@@ -2869,8 +2860,8 @@ ErrorIfCopyHasOnErrorLogVerbosity(CopyStmt *copyStatement)
{
if (strcmp(option->defname, "on_error") == 0)
{
- ereport(ERROR, (errmsg(
- "Citus does not support COPY FROM with ON_ERROR option.")));
+ ereport(ERROR, (errmsg("Citus does not support "
+ "COPY FROM with ON_ERROR option.")));
}
else if (strcmp(option->defname, "log_verbosity") == 0)
{
@@ -2887,8 +2878,8 @@ ErrorIfCopyHasOnErrorLogVerbosity(CopyStmt *copyStatement)
*/
if (log_verbosity)
{
- ereport(ERROR, (errmsg(
- "Citus does not support COPY FROM with LOG_VERBOSITY option.")));
+ ereport(ERROR, (errmsg("Citus does not support "
+ "COPY FROM with LOG_VERBOSITY option.")));
}
#endif
}
@@ -3999,3 +3990,20 @@ UnclaimCopyConnections(List *connectionStateList)
UnclaimConnection(connectionState->connection);
}
}
+
+
+/*
+ * IsDroppedOrGenerated returns whether the given attribute is dropped or
+ * generated. COPY and Citus DDL use it to skip such columns.
+ */
+inline bool
+IsDroppedOrGenerated(Form_pg_attribute attr)
+{
+ /*
+ * attgenerated is the NUL character ('\0') for regular columns; for
+ * generated columns it is 's' (ATTRIBUTE_GENERATED_STORED) or, on
+ * PG18+, possibly 'v' (ATTRIBUTE_GENERATED_VIRTUAL), so any non-NUL
+ * value means the column is generated.
+ */
+ return attr->attisdropped || (attr->attgenerated != '\0');
+}
diff --git a/src/backend/distributed/commands/non_main_db_distribute_object_ops.c b/src/backend/distributed/commands/non_main_db_distribute_object_ops.c
index fdd29b1e1..f26ac3306 100644
--- a/src/backend/distributed/commands/non_main_db_distribute_object_ops.c
+++ b/src/backend/distributed/commands/non_main_db_distribute_object_ops.c
@@ -35,13 +35,13 @@
#define EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER \
- "SELECT citus_internal.execute_command_on_remote_nodes_as_user(%s, %s)"
+ "SELECT citus_internal.execute_command_on_remote_nodes_as_user(%s, %s)"
#define START_MANAGEMENT_TRANSACTION \
- "SELECT citus_internal.start_management_transaction('%lu')"
+ "SELECT citus_internal.start_management_transaction('%lu')"
#define MARK_OBJECT_DISTRIBUTED \
- "SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)"
+ "SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)"
#define UNMARK_OBJECT_DISTRIBUTED \
- "SELECT pg_catalog.citus_unmark_object_distributed(%d, %d, %d, %s)"
+ "SELECT pg_catalog.citus_unmark_object_distributed(%d, %d, %d, %s)"
/*
diff --git a/src/backend/distributed/commands/publication.c b/src/backend/distributed/commands/publication.c
index 3e03c5505..7c1db06d6 100644
--- a/src/backend/distributed/commands/publication.c
+++ b/src/backend/distributed/commands/publication.c
@@ -196,6 +196,28 @@ BuildCreatePublicationStmt(Oid publicationId)
-1);
createPubStmt->options = lappend(createPubStmt->options, pubViaRootOption);
+/* WITH (publish_generated_columns = ...) option (PG18+) */
+#if PG_VERSION_NUM >= PG_VERSION_18
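+ /* pubgencols: 's' publishes stored generated columns; 'n' (the default) publishes none */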
+ if (publicationForm->pubgencols == 's') /* stored */
+ {
+ DefElem *pubGenColsOption =
+ makeDefElem("publish_generated_columns",
+ (Node *) makeString("stored"),
+ -1);
+
+ createPubStmt->options =
+ lappend(createPubStmt->options, pubGenColsOption);
+ }
+ else if (publicationForm->pubgencols != 'n') /* 'n' = none (default) */
+ {
+ ereport(ERROR,
+ (errmsg("unexpected pubgencols value '%c' for publication %u",
+ publicationForm->pubgencols, publicationId)));
+ }
+#endif
+
+
/* WITH (publish = 'insert, update, delete, truncate') option */
List *publishList = NIL;
diff --git a/src/backend/distributed/commands/rename.c b/src/backend/distributed/commands/rename.c
index 362fc57bb..34cdea09b 100644
--- a/src/backend/distributed/commands/rename.c
+++ b/src/backend/distributed/commands/rename.c
@@ -149,13 +149,14 @@ PreprocessRenameStmt(Node *node, const char *renameCommand,
}
default:
-
+ {
/*
* Nodes that are not supported by Citus: we pass-through to the
* main PostgreSQL executor. Any Citus-supported RenameStmt
* renameType must appear above in the switch, explicitly.
*/
return NIL;
+ }
}
bool isCitusRelation = IsCitusTable(tableRelationId);
diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c
index 0dd544cc6..72cbd7720 100644
--- a/src/backend/distributed/commands/sequence.c
+++ b/src/backend/distributed/commands/sequence.c
@@ -177,8 +177,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,
{
Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
- if (attributeForm->attisdropped ||
- attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
+ if (IsDroppedOrGenerated(attributeForm))
{
/* skip dropped columns and columns with GENERATED AS ALWAYS expressions */
continue;
@@ -463,8 +462,8 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString,
if (IsAnyObjectDistributed(addresses) || SequenceUsedInDistributedTable(address,
DEPENDENCY_INTERNAL))
{
- ereport(ERROR, (errmsg(
- "Altering a distributed sequence is currently not supported.")));
+ ereport(ERROR, (errmsg("Altering a distributed sequence "
+ "is currently not supported.")));
}
/*
@@ -992,8 +991,8 @@ FilterDistributedSequences(GrantStmt *stmt)
{
bool grantOnSequenceCommand = (stmt->targtype == ACL_TARGET_OBJECT &&
stmt->objtype == OBJECT_SEQUENCE);
- bool grantOnAllSequencesInSchemaCommand = (stmt->targtype ==
- ACL_TARGET_ALL_IN_SCHEMA &&
+ bool grantOnAllSequencesInSchemaCommand = (stmt->targtype == ACL_TARGET_ALL_IN_SCHEMA
+ &&
stmt->objtype == OBJECT_SEQUENCE);
/* we are only interested in sequence level grants */
@@ -1034,11 +1033,10 @@ FilterDistributedSequences(GrantStmt *stmt)
*/
if (list_member_oid(namespaceOidList, namespaceOid))
{
- RangeVar *distributedSequence = makeRangeVar(get_namespace_name(
- namespaceOid),
- get_rel_name(
- sequenceAddress->objectId),
- -1);
+ RangeVar *distributedSequence = makeRangeVar(
+ get_namespace_name(namespaceOid),
+ get_rel_name(sequenceAddress->objectId),
+ -1);
grantSequenceList = lappend(grantSequenceList, distributedSequence);
}
}
diff --git a/src/backend/distributed/commands/serialize_distributed_ddls.c b/src/backend/distributed/commands/serialize_distributed_ddls.c
index 2cca64fb0..b20e534c5 100644
--- a/src/backend/distributed/commands/serialize_distributed_ddls.c
+++ b/src/backend/distributed/commands/serialize_distributed_ddls.c
@@ -237,7 +237,9 @@ AcquireCitusAdvisoryObjectClassLockGetOid(ObjectClass objectClass,
}
default:
+ {
elog(ERROR, "unsupported object class: %d", objectClass);
+ }
}
}
@@ -270,6 +272,8 @@ AcquireCitusAdvisoryObjectClassLockCheckPrivileges(ObjectClass objectClass, Oid
}
default:
+ {
elog(ERROR, "unsupported object class: %d", objectClass);
+ }
}
}
diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c
index 7a77b6b3d..e929d42c7 100644
--- a/src/backend/distributed/commands/statistics.c
+++ b/src/backend/distributed/commands/statistics.c
@@ -50,7 +50,7 @@
#define DEFAULT_STATISTICS_TARGET -1
#define ALTER_INDEX_COLUMN_SET_STATS_COMMAND \
- "ALTER INDEX %s ALTER COLUMN %d SET STATISTICS %d"
+ "ALTER INDEX %s ALTER COLUMN %d SET STATISTICS %d"
static char * GenerateAlterIndexColumnSetStatsCommand(char *indexNameWithSchema,
int16 attnum,
diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c
index eaa8b1031..c482c25fa 100644
--- a/src/backend/distributed/commands/table.c
+++ b/src/backend/distributed/commands/table.c
@@ -81,23 +81,23 @@ static void ErrorIfAttachCitusTableToPgLocalTable(Oid parentRelationId,
Oid partitionRelationId);
static bool DeparserSupportsAlterTableAddColumn(AlterTableStmt *alterTableStatement,
AlterTableCmd *addColumnSubCommand);
-static bool ATDefinesFKeyBetweenPostgresAndCitusLocalOrRef(
- AlterTableStmt *alterTableStatement);
+static bool ATDefinesFKeyBetweenPostgresAndCitusLocalOrRef(AlterTableStmt *
+ alterTableStatement);
static bool ShouldMarkConnectedRelationsNotAutoConverted(Oid leftRelationId,
Oid rightRelationId);
static bool RelationIdListContainsCitusTableType(List *relationIdList,
CitusTableType citusTableType);
static bool RelationIdListContainsPostgresTable(List *relationIdList);
-static void ConvertPostgresLocalTablesToCitusLocalTables(
- AlterTableStmt *alterTableStatement);
+static void ConvertPostgresLocalTablesToCitusLocalTables(AlterTableStmt *
+ alterTableStatement);
static bool RangeVarListHasLocalRelationConvertedByUser(List *relationRangeVarList,
AlterTableStmt *
alterTableStatement);
static int CompareRangeVarsByOid(const void *leftElement, const void *rightElement);
-static List * GetAlterTableAddFKeyRightRelationIdList(
- AlterTableStmt *alterTableStatement);
-static List * GetAlterTableAddFKeyRightRelationRangeVarList(
- AlterTableStmt *alterTableStatement);
+static List * GetAlterTableAddFKeyRightRelationIdList(AlterTableStmt *
+ alterTableStatement);
+static List * GetAlterTableAddFKeyRightRelationRangeVarList(AlterTableStmt *
+ alterTableStatement);
static List * GetAlterTableAddFKeyConstraintList(AlterTableStmt *alterTableStatement);
static List * GetAlterTableCommandFKeyConstraintList(AlterTableCmd *command);
static List * GetRangeVarListFromFKeyConstraintList(List *fKeyConstraintList);
@@ -1352,6 +1352,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
constraint);
}
}
+
/*
* When constraint->indexname is not NULL we are handling an
* ADD {PRIMARY KEY, UNIQUE} USING INDEX command. In this case
@@ -1532,6 +1533,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
}
}
}
+
/*
* We check for ALTER COLUMN .. SET/DROP DEFAULT
* we should not propagate anything to shards
@@ -2181,7 +2183,9 @@ AlterTableCommandTypeIsTrigger(AlterTableType alterTableType)
}
default:
+ {
return false;
+ }
}
}
@@ -2719,6 +2723,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
}
}
}
+
/*
* We check for ALTER COLUMN .. SET DEFAULT nextval('user_defined_seq')
* we should make sure that the type of the column that uses
@@ -2815,6 +2820,7 @@ FixAlterTableStmtIndexNames(AlterTableStmt *alterTableStatement)
FixPartitionShardIndexNames(relationId, parentIndexOid);
}
+
/*
* If this is an ALTER TABLE .. ATTACH PARTITION command
* we have wrong index names generated on indexes of shards of
@@ -3425,13 +3431,13 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
if (commandList->length > 1 ||
columnConstraints->length > 1)
{
- ereport(ERROR, (errcode(
- ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg(
- "cannot execute ADD COLUMN .. DEFAULT nextval('..')"
- " command with other subcommands/constraints"),
- errhint(
- "You can issue each subcommand separately")));
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg(
+ "cannot execute ADD COLUMN .. DEFAULT nextval('..')"
+ " command with other subcommands/constraints"),
+ errhint(
+ "You can issue each subcommand separately")));
}
/*
@@ -3440,14 +3446,14 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
*/
if (!TableEmpty(relationId))
{
- ereport(ERROR, (errcode(
- ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg(
- "cannot add a column involving DEFAULT nextval('..') "
- "because the table is not empty"),
- errhint(
- "You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint\n"
- "Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..')")));
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg(
+ "cannot add a column involving DEFAULT nextval('..') "
+ "because the table is not empty"),
+ errhint(
+ "You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint\n"
+ "Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..')")));
}
}
}
diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c
index 4d297297b..f09d7ced3 100644
--- a/src/backend/distributed/commands/utility_hook.c
+++ b/src/backend/distributed/commands/utility_hook.c
@@ -1297,7 +1297,8 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
"partial failure, potentially leading to an inconsistent "
"state.\nIf the problematic command is a CREATE operation, "
"consider using the 'IF EXISTS' syntax to drop the object,"
- "\nif applicable, and then re-attempt the original command.")));
+ "\nif applicable, and then re-attempt "
+ "the original command.")));
}
PG_RE_THROW();
diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c
index 3bdabe467..08064b4b0 100644
--- a/src/backend/distributed/commands/vacuum.c
+++ b/src/backend/distributed/commands/vacuum.c
@@ -48,21 +48,27 @@ typedef struct CitusVacuumParams
#endif
} CitusVacuumParams;
+/*
+ * Information we track per VACUUM/ANALYZE target relation.
+ */
+typedef struct CitusVacuumRelation
+{
+ VacuumRelation *vacuumRelation; /* parse-tree entry; carries the column list */
+ Oid relationId; /* resolved (and locked) relation Oid */
+} CitusVacuumRelation;
+
/* Local functions forward declarations for processing distributed table commands */
-static bool IsDistributedVacuumStmt(List *vacuumRelationIdList);
+static bool IsDistributedVacuumStmt(List *vacuumRelationList);
static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams,
List *vacuumColumnList);
static char * DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams);
static char * DeparseVacuumColumnNames(List *columnNameList);
-static List * VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex);
-static List * ExtractVacuumTargetRels(VacuumStmt *vacuumStmt);
-static void ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
+static void ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationList,
CitusVacuumParams vacuumParams);
static void ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt,
CitusVacuumParams vacuumParams);
static CitusVacuumParams VacuumStmtParams(VacuumStmt *vacstmt);
-static List * VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams
- vacuumParams);
+static List * VacuumRelationList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams);
/*
* PostprocessVacuumStmt processes vacuum statements that may need propagation to
@@ -97,7 +103,7 @@ PostprocessVacuumStmt(Node *node, const char *vacuumCommand)
* when no table is specified propagate the command as it is;
* otherwise, only propagate when there is at least 1 citus table
*/
- List *relationIdList = VacuumRelationIdList(vacuumStmt, vacuumParams);
+ List *vacuumRelationList = VacuumRelationList(vacuumStmt, vacuumParams);
if (list_length(vacuumStmt->rels) == 0)
{
@@ -105,11 +111,11 @@ PostprocessVacuumStmt(Node *node, const char *vacuumCommand)
ExecuteUnqualifiedVacuumTasks(vacuumStmt, vacuumParams);
}
- else if (IsDistributedVacuumStmt(relationIdList))
+ else if (IsDistributedVacuumStmt(vacuumRelationList))
{
/* there is at least 1 citus table specified */
- ExecuteVacuumOnDistributedTables(vacuumStmt, relationIdList,
+ ExecuteVacuumOnDistributedTables(vacuumStmt, vacuumRelationList,
vacuumParams);
}
@@ -120,39 +126,63 @@
/*
- * VacuumRelationIdList returns the oid of the relations in the given vacuum statement.
+ * VacuumRelationList returns the list of relations in the given vacuum statement,
+ * along with their resolved Oids (if they can be locked).
*/
static List *
-VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
+VacuumRelationList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
{
LOCKMODE lockMode = (vacuumParams.options & VACOPT_FULL) ? AccessExclusiveLock :
ShareUpdateExclusiveLock;
bool skipLocked = (vacuumParams.options & VACOPT_SKIP_LOCKED);
- List *vacuumRelationList = ExtractVacuumTargetRels(vacuumStmt);
+ List *relationList = NIL;
- List *relationIdList = NIL;
-
- RangeVar *vacuumRelation = NULL;
- foreach_declared_ptr(vacuumRelation, vacuumRelationList)
+ VacuumRelation *vacuumRelation = NULL;
+ foreach_declared_ptr(vacuumRelation, vacuumStmt->rels)
{
+ Oid relationId = InvalidOid;
+
/*
* If skip_locked option is enabled, we are skipping that relation
- * if the lock for it is currently not available; else, we get the lock.
+ * if the lock for it is currently not available; otherwise, we get the lock.
*/
- Oid relationId = RangeVarGetRelidExtended(vacuumRelation,
+ if (vacuumRelation->relation)
+ {
+ relationId = RangeVarGetRelidExtended(vacuumRelation->relation,
lockMode,
skipLocked ? RVR_SKIP_LOCKED : 0, NULL,
NULL);
+ }
+ else if (OidIsValid(vacuumRelation->oid))
+ {
+ /* fall back to the Oid directly when provided */
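+
+ /*
+ * Mirror RangeVarGetRelidExtended: with skip_locked, take the lock
+ * only if it is immediately available; otherwise wait for it.
+ */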
+ if (!skipLocked || ConditionalLockRelationOid(vacuumRelation->oid, lockMode))
+ {
+ if (!skipLocked)
+ {
+ LockRelationOid(vacuumRelation->oid, lockMode);
+ }
+ relationId = vacuumRelation->oid;
+ }
+ }
if (OidIsValid(relationId))
{
- relationIdList = lappend_oid(relationIdList, relationId);
+ CitusVacuumRelation *relation = palloc(sizeof(CitusVacuumRelation));
+ relation->vacuumRelation = vacuumRelation;
+ relation->relationId = relationId;
+ relationList = lappend(relationList, relation);
}
}
- return relationIdList;
+ return relationList;
}
@@ -161,12 +186,13 @@ VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
* otherwise, it returns false.
*/
static bool
-IsDistributedVacuumStmt(List *vacuumRelationIdList)
+IsDistributedVacuumStmt(List *vacuumRelationList)
{
- Oid relationId = InvalidOid;
- foreach_declared_oid(relationId, vacuumRelationIdList)
+ CitusVacuumRelation *vacuumRelation = NULL;
+ foreach_declared_ptr(vacuumRelation, vacuumRelationList)
{
- if (OidIsValid(relationId) && IsCitusTable(relationId))
+ if (OidIsValid(vacuumRelation->relationId) &&
+ IsCitusTable(vacuumRelation->relationId))
{
return true;
}
@@ -181,24 +207,31 @@ IsDistributedVacuumStmt(List *vacuumRelationIdList)
* if they are citus tables.
*/
static void
-ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
+ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationList,
CitusVacuumParams vacuumParams)
{
- int relationIndex = 0;
-
- Oid relationId = InvalidOid;
- foreach_declared_oid(relationId, relationIdList)
+ CitusVacuumRelation *vacuumRelationEntry = NULL;
+ foreach_declared_ptr(vacuumRelationEntry, relationList)
{
+ Oid relationId = vacuumRelationEntry->relationId;
+ VacuumRelation *vacuumRelation = vacuumRelationEntry->vacuumRelation;
+
+ RangeVar *relation = vacuumRelation->relation;
+ if (relation != NULL && !relation->inh)
+ {
+ /* ONLY specified, so don't recurse to shard placements */
+ continue;
+ }
+
if (IsCitusTable(relationId))
{
- List *vacuumColumnList = VacuumColumnList(vacuumStmt, relationIndex);
+ List *vacuumColumnList = vacuumRelation->va_cols;
List *taskList = VacuumTaskList(relationId, vacuumParams, vacuumColumnList);
/* local execution is not implemented for VACUUM commands */
bool localExecutionSupported = false;
ExecuteUtilityTaskList(taskList, localExecutionSupported);
}
- relationIndex++;
}
}
@@ -484,39 +517,6 @@ DeparseVacuumColumnNames(List *columnNameList)
}
-/*
- * VacuumColumnList returns list of columns from relation
- * in the vacuum statement at specified relationIndex.
- */
-static List *
-VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex)
-{
- VacuumRelation *vacuumRelation = (VacuumRelation *) list_nth(vacuumStmt->rels,
- relationIndex);
-
- return vacuumRelation->va_cols;
-}
-
-
-/*
- * ExtractVacuumTargetRels returns list of target
- * relations from vacuum statement.
- */
-static List *
-ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
-{
- List *vacuumList = NIL;
-
- VacuumRelation *vacuumRelation = NULL;
- foreach_declared_ptr(vacuumRelation, vacuumStmt->rels)
- {
- vacuumList = lappend(vacuumList, vacuumRelation->relation);
- }
-
- return vacuumList;
-}
-
-
/*
* VacuumStmtParams returns a CitusVacuumParams based on the supplied VacuumStmt.
*/
diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c
index 407de776b..0cb2b7c30 100644
--- a/src/backend/distributed/connection/connection_management.c
+++ b/src/backend/distributed/connection/connection_management.c
@@ -475,8 +475,8 @@ FindAvailableConnection(dlist_head *connections, uint32 flags)
if (flags & OUTSIDE_TRANSACTION)
{
/* don't return connections that are used in transactions */
- if (connection->remoteTransaction.transactionState !=
- REMOTE_TRANS_NOT_STARTED)
+ if (connection->
+ remoteTransaction.transactionState != REMOTE_TRANS_NOT_STARTED)
{
continue;
}
diff --git a/src/backend/distributed/connection/placement_connection.c b/src/backend/distributed/connection/placement_connection.c
index 841deba08..dd1082994 100644
--- a/src/backend/distributed/connection/placement_connection.c
+++ b/src/backend/distributed/connection/placement_connection.c
@@ -191,8 +191,8 @@ static HTAB *ConnectionShardHash;
static MultiConnection * FindPlacementListConnection(int flags, List *placementAccessList,
const char *userName);
-static ConnectionPlacementHashEntry * FindOrCreatePlacementEntry(
- ShardPlacement *placement);
+static ConnectionPlacementHashEntry * FindOrCreatePlacementEntry(ShardPlacement *
+ placement);
static bool CanUseExistingConnection(uint32 flags, const char *userName,
ConnectionReference *placementConnection);
static bool ConnectionAccessedDifferentPlacement(MultiConnection *connection,
diff --git a/src/backend/distributed/connection/shared_connection_stats.c b/src/backend/distributed/connection/shared_connection_stats.c
index 027bb46a2..89a7bd826 100644
--- a/src/backend/distributed/connection/shared_connection_stats.c
+++ b/src/backend/distributed/connection/shared_connection_stats.c
@@ -675,8 +675,9 @@ SharedConnectionStatsShmemInit(void)
ConnectionStatsSharedState->sharedConnectionHashTrancheId = LWLockNewTrancheId();
ConnectionStatsSharedState->sharedConnectionHashTrancheName =
"Shared Connection Tracking Hash Tranche";
- LWLockRegisterTranche(ConnectionStatsSharedState->sharedConnectionHashTrancheId,
- ConnectionStatsSharedState->sharedConnectionHashTrancheName);
+ LWLockRegisterTranche(
+ ConnectionStatsSharedState->sharedConnectionHashTrancheId,
+ ConnectionStatsSharedState->sharedConnectionHashTrancheName);
LWLockInitialize(&ConnectionStatsSharedState->sharedConnectionHashLock,
ConnectionStatsSharedState->sharedConnectionHashTrancheId);
diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c
index ebdb78cc9..24eb75d7c 100644
--- a/src/backend/distributed/deparser/citus_ruleutils.c
+++ b/src/backend/distributed/deparser/citus_ruleutils.c
@@ -471,6 +471,14 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
appendStringInfo(&buffer, " GENERATED ALWAYS AS (%s) STORED",
defaultString);
}
+#if PG_VERSION_NUM >= PG_VERSION_18
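+ /* PG18 adds virtual generated columns; emit the VIRTUAL keyword for them */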
+ else if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
+ {
+ appendStringInfo(&buffer, " GENERATED ALWAYS AS (%s) VIRTUAL",
+ defaultString);
+ }
+#endif
else
{
Oid seqOid = GetSequenceOid(tableRelationId, defaultValue->adnum);
@@ -547,6 +554,14 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
appendStringInfoString(&buffer, "(");
appendStringInfoString(&buffer, checkString);
appendStringInfoString(&buffer, ")");
+
+#if PG_VERSION_NUM >= PG_VERSION_18
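+ /* PG18 allows CHECK constraints to be NOT ENFORCED; keep that in the deparsed DDL */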
+ if (!checkConstraint->ccenforced)
+ {
+ appendStringInfoString(&buffer, " NOT ENFORCED");
+ }
+#endif
}
/* close create table's outer parentheses */
diff --git a/src/backend/distributed/deparser/deparse_extension_stmts.c b/src/backend/distributed/deparser/deparse_extension_stmts.c
index 256d22214..6af279a69 100644
--- a/src/backend/distributed/deparser/deparse_extension_stmts.c
+++ b/src/backend/distributed/deparser/deparse_extension_stmts.c
@@ -28,7 +28,8 @@ static void AppendCreateExtensionStmtOptions(StringInfo buf, List *options);
static void AppendDropExtensionStmt(StringInfo buf, DropStmt *stmt);
static void AppendExtensionNameList(StringInfo buf, List *objects);
static void AppendAlterExtensionSchemaStmt(StringInfo buf,
- AlterObjectSchemaStmt *alterExtensionSchemaStmt);
+ AlterObjectSchemaStmt *
+ alterExtensionSchemaStmt);
static void AppendAlterExtensionStmt(StringInfo buf,
AlterExtensionStmt *alterExtensionStmt);
diff --git a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c
index 6b278f757..2f87979bd 100644
--- a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c
+++ b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c
@@ -290,7 +290,9 @@ GetDefElemActionString(DefElemAction action)
}
default:
+ {
return "";
+ }
}
}
diff --git a/src/backend/distributed/deparser/deparse_function_stmts.c b/src/backend/distributed/deparser/deparse_function_stmts.c
index 1e3e4a651..205d76a60 100644
--- a/src/backend/distributed/deparser/deparse_function_stmts.c
+++ b/src/backend/distributed/deparser/deparse_function_stmts.c
@@ -118,8 +118,10 @@ ObjectTypeToKeyword(ObjectType objtype)
}
default:
+ {
elog(ERROR, "Unknown object type: %d", objtype);
return NULL;
+ }
}
}
diff --git a/src/backend/distributed/deparser/deparse_statistics_stmts.c b/src/backend/distributed/deparser/deparse_statistics_stmts.c
index 79be835b9..f352b8393 100644
--- a/src/backend/distributed/deparser/deparse_statistics_stmts.c
+++ b/src/backend/distributed/deparser/deparse_statistics_stmts.c
@@ -242,8 +242,8 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg(
- "only simple column references are allowed in CREATE STATISTICS")));
+ errmsg("only simple column references are allowed "
+ "in CREATE STATISTICS")));
}
const char *columnName = quote_identifier(column->name);
diff --git a/src/backend/distributed/deparser/deparse_table_stmts.c b/src/backend/distributed/deparser/deparse_table_stmts.c
index d58fbabcc..776a7fa32 100644
--- a/src/backend/distributed/deparser/deparse_table_stmts.c
+++ b/src/backend/distributed/deparser/deparse_table_stmts.c
@@ -536,8 +536,10 @@ GeneratedWhenStr(char generatedWhen)
}
default:
+ {
ereport(ERROR, (errmsg("unrecognized generated_when: %d",
generatedWhen)));
+ }
}
}
@@ -649,13 +651,19 @@
}
else if (constraint->contype == CONSTR_GENERATED)
{
- char attgenerated = 's';
- appendStringInfo(buf, " GENERATED %s AS (%s) STORED",
+ char attgenerated = ATTRIBUTE_GENERATED_STORED;
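+ /* on PG18+, the parse tree records STORED vs. VIRTUAL in generated_kind */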
+#if PG_VERSION_NUM >= PG_VERSION_18
+ attgenerated = constraint->generated_kind;
+#endif
+ appendStringInfo(buf, " GENERATED %s AS (%s) %s",
GeneratedWhenStr(constraint->generated_when),
DeparseRawExprForColumnDefault(relationId, typeOid, typmod,
columnDefinition->colname,
attgenerated,
- constraint->raw_expr));
+ constraint->raw_expr),
+ (attgenerated == ATTRIBUTE_GENERATED_STORED ? "STORED" :
+ "VIRTUAL"));
}
else if (constraint->contype == CONSTR_CHECK ||
constraint->contype == CONSTR_PRIMARY ||
diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c
index 677535591..63c8cdd85 100644
--- a/src/backend/distributed/executor/adaptive_executor.c
+++ b/src/backend/distributed/executor/adaptive_executor.c
@@ -642,11 +642,11 @@ static DistributedExecution * CreateDistributedExecution(RowModifyLevel modLevel
xactProperties,
List *jobIdList,
bool localExecutionSupported);
-static TransactionProperties DecideTransactionPropertiesForTaskList(RowModifyLevel
- modLevel,
- List *taskList,
- bool
- exludeFromTransaction);
+static TransactionProperties DecideTaskListTransactionProperties(RowModifyLevel
+ modLevel,
+ List *taskList,
+ bool
+ excludeFromTransaction);
static void StartDistributedExecution(DistributedExecution *execution);
static void RunLocalExecution(CitusScanState *scanState, DistributedExecution *execution);
static void RunDistributedExecution(DistributedExecution *execution);
@@ -711,8 +711,8 @@ static void PlacementExecutionReady(TaskPlacementExecution *placementExecution);
static TaskExecutionState TaskExecutionStateMachine(ShardCommandExecution *
shardCommandExecution);
static int GetEventSetSize(List *sessionList);
-static bool ProcessSessionsWithFailedWaitEventSetOperations(
- DistributedExecution *execution);
+static bool ProcessSessionsWithFailedWaitEventSetOperations(DistributedExecution *
+ execution);
static bool HasIncompleteConnectionEstablishment(DistributedExecution *execution);
static void RebuildWaitEventSet(DistributedExecution *execution);
static void RebuildWaitEventSetForSessions(DistributedExecution *execution);
@@ -842,7 +842,7 @@ AdaptiveExecutor(CitusScanState *scanState)
bool excludeFromXact = false;
- TransactionProperties xactProperties = DecideTransactionPropertiesForTaskList(
+ TransactionProperties xactProperties = DecideTaskListTransactionProperties(
distributedPlan->modLevel, taskList, excludeFromXact);
/*
@@ -941,7 +941,7 @@ ExecuteUtilityTaskList(List *utilityTaskList, bool localExecutionSupported)
modLevel, utilityTaskList, MaxAdaptiveExecutorPoolSize, localExecutionSupported
);
executionParams->xactProperties =
- DecideTransactionPropertiesForTaskList(modLevel, utilityTaskList, false);
+ DecideTaskListTransactionProperties(modLevel, utilityTaskList, false);
executionParams->isUtilityCommand = true;
return ExecuteTaskListExtended(executionParams);
@@ -963,8 +963,8 @@ ExecuteUtilityTaskListExtended(List *utilityTaskList, int poolSize,
bool excludeFromXact = false;
executionParams->xactProperties =
- DecideTransactionPropertiesForTaskList(modLevel, utilityTaskList,
- excludeFromXact);
+ DecideTaskListTransactionProperties(modLevel, utilityTaskList,
+ excludeFromXact);
executionParams->isUtilityCommand = true;
return ExecuteTaskListExtended(executionParams);
@@ -984,7 +984,7 @@ ExecuteTaskList(RowModifyLevel modLevel, List *taskList)
);
bool excludeFromXact = false;
- executionParams->xactProperties = DecideTransactionPropertiesForTaskList(
+ executionParams->xactProperties = DecideTaskListTransactionProperties(
modLevel, taskList, excludeFromXact);
return ExecuteTaskListExtended(executionParams);
@@ -1010,7 +1010,7 @@ ExecuteTaskListOutsideTransaction(RowModifyLevel modLevel, List *taskList,
modLevel, taskList, targetPoolSize, localExecutionSupported
);
- executionParams->xactProperties = DecideTransactionPropertiesForTaskList(
+ executionParams->xactProperties = DecideTaskListTransactionProperties(
modLevel, taskList, true);
return ExecuteTaskListExtended(executionParams);
}
@@ -1032,7 +1032,7 @@ CreateDefaultExecutionParams(RowModifyLevel modLevel, List *taskList,
modLevel, taskList, targetPoolSize, localExecutionSupported
);
- executionParams->xactProperties = DecideTransactionPropertiesForTaskList(
+ executionParams->xactProperties = DecideTaskListTransactionProperties(
modLevel, taskList, false);
executionParams->expectResults = expectResults;
executionParams->tupleDestination = tupleDest;
@@ -1252,7 +1252,7 @@ CreateDistributedExecution(RowModifyLevel modLevel, List *taskList,
/*
- * DecideTransactionPropertiesForTaskList decides whether to use remote transaction
+ * DecideTaskListTransactionProperties decides whether to use remote transaction
* blocks, whether to use 2PC for the given task list, and whether to error on any
* failure.
*
@@ -1260,8 +1260,8 @@ CreateDistributedExecution(RowModifyLevel modLevel, List *taskList,
* errorOnAnyFailure, but not the other way around) we keep them in the same place.
*/
static TransactionProperties
-DecideTransactionPropertiesForTaskList(RowModifyLevel modLevel, List *taskList, bool
- exludeFromTransaction)
+DecideTaskListTransactionProperties(RowModifyLevel modLevel, List *taskList, bool
+ excludeFromTransaction)
{
TransactionProperties xactProperties;
@@ -1277,7 +1277,7 @@ DecideTransactionPropertiesForTaskList(RowModifyLevel modLevel, List *taskList,
return xactProperties;
}
- if (exludeFromTransaction)
+ if (excludeFromTransaction)
{
xactProperties.useRemoteTransactionBlocks = TRANSACTION_BLOCKS_DISALLOWED;
return xactProperties;
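
Editor's note: the renamed function keeps transaction blocks, 2PC, and error handling in one decision because the inputs interact (2PC implies erroring on any failure, as the comment notes). A standalone C sketch of that decision shape, using simplified stand-in types rather than the actual Citus definitions:

```c
#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the Citus enum and struct, trimmed to the decision shape */
typedef enum { BLOCKS_ALLOWED, BLOCKS_REQUIRED, BLOCKS_DISALLOWED } BlockPolicy;

typedef struct
{
	BlockPolicy useRemoteTransactionBlocks;
	bool errorOnAnyFailure;
} XactProps;

static XactProps
DecideProps(bool isModification, bool excludeFromTransaction)
{
	XactProps props = { BLOCKS_ALLOWED, false };

	/* exclusion wins outright: such task lists never join a block */
	if (excludeFromTransaction)
	{
		props.useRemoteTransactionBlocks = BLOCKS_DISALLOWED;
		return props;
	}

	/* writes require coordinated blocks and must abort on any failure */
	if (isModification)
	{
		props.useRemoteTransactionBlocks = BLOCKS_REQUIRED;
		props.errorOnAnyFailure = true;
	}

	return props;
}

int
main(void)
{
	XactProps props = DecideProps(true, false);
	printf("blocks=%d errorOnAnyFailure=%d\n",
	       (int) props.useRemoteTransactionBlocks, props.errorOnAnyFailure);
	return 0;
}
```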
@@ -2634,10 +2634,8 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
connectionFlags |= adaptiveConnectionManagementFlag;
/* open a new connection to the worker */
- MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags,
- workerPool->nodeName,
- workerPool->nodePort,
- NULL, NULL);
+ MultiConnection *connection = StartNodeUserDatabaseConnection(
+ connectionFlags, workerPool->nodeName, workerPool->nodePort, NULL, NULL);
if (!connection)
{
/* connection can only be NULL for optional connections */
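
Editor's note: the NULL check above leans on the contract stated in the comment, namely that only optionally requested connections may come back NULL, while required ones either succeed or throw. A hedged sketch of that contract with illustrative names (not the Citus connection API):

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define OPTIONAL_CONNECTION 0x1          /* illustrative flag bit */

typedef struct Connection { int unused; } Connection;

static Connection requiredConn;

/* stand-in pool: optional requests may fail softly, required ones cannot */
static Connection *
EstablishConnection(int flags, bool poolExhausted)
{
	if (poolExhausted && (flags & OPTIONAL_CONNECTION))
	{
		return NULL;
	}
	return &requiredConn;
}

static Connection *
GetConnection(int flags, bool poolExhausted)
{
	Connection *connection = EstablishConnection(flags, poolExhausted);

	/* a NULL result is legal only when the caller asked optionally */
	assert(connection != NULL || (flags & OPTIONAL_CONNECTION));

	return connection;
}

int
main(void)
{
	assert(GetConnection(0, true) != NULL);
	assert(GetConnection(OPTIONAL_CONNECTION, true) == NULL);
	return 0;
}
```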
diff --git a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c
index 53b0ccb0f..2c301f8aa 100644
--- a/src/backend/distributed/executor/citus_custom_scan.c
+++ b/src/backend/distributed/executor/citus_custom_scan.c
@@ -67,8 +67,8 @@ static void CitusPreExecScan(CitusScanState *scanState);
static bool ModifyJobNeedsEvaluation(Job *workerJob);
static void RegenerateTaskForFasthPathQuery(Job *workerJob);
static void RegenerateTaskListForInsert(Job *workerJob);
-static DistributedPlan * CopyDistributedPlanWithoutCache(
- DistributedPlan *originalDistributedPlan);
+static DistributedPlan * CopyDistributedPlanWithoutCache(DistributedPlan *
+ originalDistributedPlan);
static void CitusEndScan(CustomScanState *node);
static void CitusReScan(CustomScanState *node);
static void EnsureForceDelegationDistributionKey(Job *job);
diff --git a/src/backend/distributed/executor/distributed_intermediate_results.c b/src/backend/distributed/executor/distributed_intermediate_results.c
index 24e8ca8d8..441158e86 100644
--- a/src/backend/distributed/executor/distributed_intermediate_results.c
+++ b/src/backend/distributed/executor/distributed_intermediate_results.c
@@ -69,8 +69,8 @@ static List * WrapTasksForPartitioning(const char *resultIdPrefix,
bool binaryFormat);
static List * ExecutePartitionTaskList(List *partitionTaskList,
CitusTableCacheEntry *targetRelation);
-static PartitioningTupleDest * CreatePartitioningTupleDest(
- CitusTableCacheEntry *targetRelation);
+static PartitioningTupleDest * CreatePartitioningTupleDest(CitusTableCacheEntry *
+ targetRelation);
static void PartitioningTupleDestPutTuple(TupleDestination *self, Task *task,
int placementIndex, int queryNumber,
HeapTuple heapTuple, uint64 tupleLibpqSize);
diff --git a/src/backend/distributed/executor/insert_select_executor.c b/src/backend/distributed/executor/insert_select_executor.c
index 58c172c66..f68848469 100644
--- a/src/backend/distributed/executor/insert_select_executor.c
+++ b/src/backend/distributed/executor/insert_select_executor.c
@@ -66,7 +66,8 @@ static HTAB * ExecutePlanIntoColocatedIntermediateResults(Oid targetRelationId,
List *insertTargetList,
PlannedStmt *selectPlan,
EState *executorState,
- char *intermediateResultIdPrefix);
+ char *
+ intermediateResultIdPrefix);
static int PartitionColumnIndexFromColumnList(Oid relationId, List *columnNameList);
static void WrapTaskListForProjection(List *taskList, List *projectedTargetEntries);
diff --git a/src/backend/distributed/executor/local_executor.c b/src/backend/distributed/executor/local_executor.c
index 0730e792a..90752ea69 100644
--- a/src/backend/distributed/executor/local_executor.c
+++ b/src/backend/distributed/executor/local_executor.c
@@ -824,7 +824,7 @@ RecordNonDistTableAccessesForTask(Task *task)
* if we're wrong.
*/
ereport(ERROR, (errmsg("shard " UINT64_FORMAT " does not have any shard "
- "placements",
+ "placements",
task->anchorShardId)));
}
diff --git a/src/backend/distributed/executor/merge_executor.c b/src/backend/distributed/executor/merge_executor.c
index 56bde62bc..6ce86c7a4 100644
--- a/src/backend/distributed/executor/merge_executor.c
+++ b/src/backend/distributed/executor/merge_executor.c
@@ -38,11 +38,13 @@ static HTAB * ExecuteMergeSourcePlanIntoColocatedIntermediateResults(Oid targetR
sourceTargetList,
PlannedStmt *
sourcePlan,
- EState *executorState,
+ EState *
+ executorState,
char *
intermediateResultIdPrefix,
int
- partitionColumnIndex);
+									  partitionColumnIndex);
/*
diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c
index ee334f1b0..2f0120cbd 100644
--- a/src/backend/distributed/metadata/dependency.c
+++ b/src/backend/distributed/metadata/dependency.c
@@ -180,9 +180,10 @@ static bool FollowExtAndInternalDependencies(ObjectAddressCollector *collector,
DependencyDefinition *definition);
static void ApplyAddToDependencyList(ObjectAddressCollector *collector,
DependencyDefinition *definition);
-static void ApplyAddCitusDependedObjectsToDependencyList(
- ObjectAddressCollector *collector,
- DependencyDefinition *definition);
+static void ApplyAddCitusDependedObjectsToDependencyList(ObjectAddressCollector *
+ collector,
+ DependencyDefinition *
+ definition);
static List * GetViewRuleReferenceDependencyList(Oid relationId);
static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector,
ObjectAddress target);
diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c
index 8687a9919..e2954498c 100644
--- a/src/backend/distributed/metadata/distobject.c
+++ b/src/backend/distributed/metadata/distobject.c
@@ -338,8 +338,8 @@ ShouldMarkRelationDistributed(Oid relationId)
bool ownedByExtension = IsTableOwnedByExtension(relationId);
bool alreadyDistributed = IsObjectDistributed(relationAddress);
bool hasUnsupportedDependency =
- DeferErrorIfAnyObjectHasUnsupportedDependency(list_make1(relationAddress)) !=
- NULL;
+ DeferErrorIfAnyObjectHasUnsupportedDependency(
+ list_make1(relationAddress)) != NULL;
bool hasCircularDependency =
DeferErrorIfCircularDependencyExists(relationAddress) != NULL;
diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c
index 1fb3d6fd0..de3671662 100644
--- a/src/backend/distributed/metadata/metadata_utility.c
+++ b/src/backend/distributed/metadata/metadata_utility.c
@@ -127,11 +127,11 @@ static bool SetFieldText(int attno, Datum values[], bool isnull[], bool replace[
static bool SetFieldNull(int attno, Datum values[], bool isnull[], bool replace[]);
#define InitFieldValue(attno, values, isnull, initValue) \
- (void) SetFieldValue((attno), (values), (isnull), NULL, (initValue))
+ (void) SetFieldValue((attno), (values), (isnull), NULL, (initValue))
#define InitFieldText(attno, values, isnull, initValue) \
- (void) SetFieldText((attno), (values), (isnull), NULL, (initValue))
+ (void) SetFieldText((attno), (values), (isnull), NULL, (initValue))
#define InitFieldNull(attno, values, isnull) \
- (void) SetFieldNull((attno), (values), (isnull), NULL)
+ (void) SetFieldNull((attno), (values), (isnull), NULL)
/* exports for SQL callable functions */
PG_FUNCTION_INFO_V1(citus_local_disk_space_stats);
@@ -823,8 +823,8 @@ GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
/* SELECT SUM(worker_partitioned_...) FROM VALUES (...) */
char *subqueryForPartitionedShards =
GenerateSizeQueryForRelationNameList(partitionedShardNames,
- GetWorkerPartitionedSizeUDFNameBySizeQueryType(
- sizeQueryType));
+ GetWorkerPartitionedSizeUDFNameBySizeQueryType
+ (sizeQueryType));
/* SELECT SUM(pg_..._size) FROM VALUES (...) */
char *subqueryForNonPartitionedShards =
@@ -4266,10 +4266,9 @@ CancelTasksForJob(int64 jobid)
BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(jobid));
const bool indexOK = true;
- SysScanDesc scanDescriptor = systable_beginscan(pgDistBackgroundTasks,
- DistBackgroundTaskJobIdTaskIdIndexId(),
- indexOK, NULL,
- lengthof(scanKey), scanKey);
+ SysScanDesc scanDescriptor = systable_beginscan(
+ pgDistBackgroundTasks, DistBackgroundTaskJobIdTaskIdIndexId(),
+ indexOK, NULL, lengthof(scanKey), scanKey);
List *runningTaskPids = NIL;
HeapTuple taskTuple = NULL;
diff --git a/src/backend/distributed/operations/delete_protocol.c b/src/backend/distributed/operations/delete_protocol.c
index d73b74720..70080f637 100644
--- a/src/backend/distributed/operations/delete_protocol.c
+++ b/src/backend/distributed/operations/delete_protocol.c
@@ -76,7 +76,8 @@ static List * DropTaskList(Oid relationId, char *schemaName, char *relationName,
List *deletableShardIntervalList);
static void ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement,
const char *shardRelationName,
- const char *dropShardPlacementCommand);
+ const char *
+ dropShardPlacementCommand);
static char * CreateDropShardPlacementCommand(const char *schemaName,
const char *shardRelationName,
char storageType);
diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c
index 680bda22f..0a7760bd1 100644
--- a/src/backend/distributed/operations/node_protocol.c
+++ b/src/backend/distributed/operations/node_protocol.c
@@ -78,7 +78,8 @@ static void GatherIndexAndConstraintDefinitionListExcludingReplicaIdentity(Form_
indexForm,
List **
indexDDLEventList,
- int indexFlags);
+ int
+ indexFlags);
static Datum WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor);
static char * CitusCreateAlterColumnarTableSet(char *qualifiedRelationName,
diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c
index f76476c15..00bcebf3c 100644
--- a/src/backend/distributed/operations/shard_cleaner.c
+++ b/src/backend/distributed/operations/shard_cleaner.c
@@ -939,8 +939,8 @@ TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, int nodePo
* because we don't want to open a transaction block on remote nodes as DROP
* DATABASE commands cannot be run inside a transaction block.
*/
- if (ExecuteOptionalRemoteCommand(connection, commandString, NULL) !=
- RESPONSE_OKAY)
+ if (ExecuteOptionalRemoteCommand(
+ connection, commandString, NULL) != RESPONSE_OKAY)
{
executeCommand = false;
break;
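
Editor's note: the break above is the core of a best-effort pattern: run the command on every node without opening a transaction block (DROP DATABASE refuses to run inside one), and bail out at the first refusal instead of retrying. Schematically, with illustrative names rather than the Citus API:

```c
#include <stdbool.h>

#define RESPONSE_OKAY 0

typedef struct Conn { const char *nodeName; } Conn;

/* stand-in: sends one command outside any transaction block and returns
 * the node's response code */
static int
RunOutsideTransaction(Conn *conn, const char *command)
{
	(void) conn;
	(void) command;
	return RESPONSE_OKAY;
}

/* returns true only if every node accepted the command */
static bool
RunOnAllNodes(Conn *conns, int nodeCount, const char *command)
{
	for (int i = 0; i < nodeCount; i++)
	{
		if (RunOutsideTransaction(&conns[i], command) != RESPONSE_OKAY)
		{
			/* one node refused; give up rather than wrap in a block */
			return false;
		}
	}
	return true;
}

int
main(void)
{
	Conn conns[] = { { "worker-1" }, { "worker-2" } };
	return RunOnAllNodes(conns, 2, "DROP DATABASE IF EXISTS db") ? 0 : 1;
}
```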
diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c
index d62f225ba..efd7537f2 100644
--- a/src/backend/distributed/operations/shard_split.c
+++ b/src/backend/distributed/operations/shard_split.c
@@ -131,17 +131,19 @@ static void UpdateDistributionColumnsForShardGroup(List *colocatedShardList,
uint32 colocationId);
static void InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList,
List *workersForPlacementList);
-static void CreatePartitioningHierarchyForBlockingSplit(
- List *shardGroupSplitIntervalListList,
- List *workersForPlacementList);
+static void CreatePartitioningHierarchyForBlockingSplit(List *
+ shardGroupSplitIntervalListList,
+ List *workersForPlacementList);
static void CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList,
List *workersForPlacementList);
static Task * CreateTaskForDDLCommandList(List *ddlCommandList, WorkerNode *workerNode);
-static StringInfo CreateSplitShardReplicationSetupUDF(
- List *sourceColocatedShardIntervalList, List *shardGroupSplitIntervalListList,
- List *destinationWorkerNodesList,
- DistributionColumnMap *
- distributionColumnOverrides);
+static StringInfo CreateSplitShardReplicationSetupUDF(List *
+ sourceColocatedShardIntervalList,
+ List *
+ shardGroupSplitIntervalListList,
+ List *destinationWorkerNodesList,
+ DistributionColumnMap *
+ distributionColumnOverrides);
static List * ParseReplicationSlotInfoFromResult(PGresult *result);
static List * ExecuteSplitShardReplicationSetupUDF(WorkerNode *sourceWorkerNode,
@@ -816,7 +818,7 @@ CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitIntervalListList,
ROW_MODIFY_NONE,
ddlTaskExecList,
MaxAdaptiveExecutorPoolSize,
- NULL /* jobIdList (ignored by API implementation) */);
+ NULL /* jobIdList (ignored by API impl.) */);
}
@@ -883,7 +885,7 @@ DoSplitCopy(WorkerNode *sourceShardNode, List *sourceColocatedShardIntervalList,
ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, splitCopyTaskList,
MaxAdaptiveExecutorPoolSize,
- NULL /* jobIdList (ignored by API implementation) */);
+ NULL /* jobIdList (ignored by API impl.) */);
}
@@ -1880,8 +1882,9 @@ ExecuteSplitShardReplicationSetupUDF(WorkerNode *sourceWorkerNode,
ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE),
errmsg(
- "Failed to run worker_split_shard_replication_setup UDF. It should successfully execute "
- " for splitting a shard in a non-blocking way. Please retry.")));
+ "Failed to run worker_split_shard_replication_setup UDF. "
+ "It should successfully execute for splitting a shard in "
+ "a non-blocking way. Please retry.")));
}
/* Get replication slot information */
diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c
index 7d3b0e655..920c6a724 100644
--- a/src/backend/distributed/operations/shard_transfer.c
+++ b/src/backend/distributed/operations/shard_transfer.c
@@ -2064,8 +2064,7 @@ CopyShardsToNode(WorkerNode *sourceNode, WorkerNode *targetNode, List *shardInte
ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, copyTaskList,
MaxAdaptiveExecutorPoolSize,
- NULL /* jobIdList (ignored by API implementation) */
- );
+ NULL /* jobIdList (ignored by API impl.) */);
}
diff --git a/src/backend/distributed/operations/worker_shard_copy.c b/src/backend/distributed/operations/worker_shard_copy.c
index f99c9b537..6c2d76741 100644
--- a/src/backend/distributed/operations/worker_shard_copy.c
+++ b/src/backend/distributed/operations/worker_shard_copy.c
@@ -471,8 +471,8 @@ WriteLocalTuple(TupleTableSlot *slot, ShardCopyDestReceiver *copyDest)
SetLocalExecutionStatus(LOCAL_EXECUTION_REQUIRED);
bool isBinaryCopy = localCopyOutState->binary;
- bool shouldAddBinaryHeaders = (isBinaryCopy && localCopyOutState->fe_msgbuf->len ==
- 0);
+ bool shouldAddBinaryHeaders = (isBinaryCopy &&
+ localCopyOutState->fe_msgbuf->len == 0);
if (shouldAddBinaryHeaders)
{
AppendCopyBinaryHeaders(localCopyOutState);
diff --git a/src/backend/distributed/operations/worker_split_copy_udf.c b/src/backend/distributed/operations/worker_split_copy_udf.c
index eb97dab1a..e1a7cb08b 100644
--- a/src/backend/distributed/operations/worker_split_copy_udf.c
+++ b/src/backend/distributed/operations/worker_split_copy_udf.c
@@ -71,8 +71,8 @@ worker_split_copy(PG_FUNCTION_ARGS)
if (arrayHasNull)
{
ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg(
- "pg_catalog.split_copy_info array cannot contain null values")));
+ errmsg("pg_catalog.split_copy_info array "
+ "cannot contain null values")));
}
const int slice_ndim = 0;
diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c
index be046bf9b..5157d8857 100644
--- a/src/backend/distributed/planner/distributed_planner.c
+++ b/src/backend/distributed/planner/distributed_planner.c
@@ -85,8 +85,8 @@ int PlannerLevel = 0;
static bool ListContainsDistributedTableRTE(List *rangeTableList,
bool *maybeHasForeignDistributedTable);
-static PlannedStmt * CreateDistributedPlannedStmt(
- DistributedPlanningContext *planContext);
+static PlannedStmt * CreateDistributedPlannedStmt(DistributedPlanningContext *
+ planContext);
static PlannedStmt * InlineCtesAndCreateDistributedPlannedStmt(uint64 planId,
DistributedPlanningContext
*planContext);
@@ -125,12 +125,14 @@ static void AdjustReadIntermediateResultsCostInternal(RelOptInfo *relOptInfo,
Const *resultFormatConst);
static List * OuterPlanParamsList(PlannerInfo *root);
static List * CopyPlanParamList(List *originalPlanParamList);
-static PlannerRestrictionContext * CreateAndPushPlannerRestrictionContext(
- FastPathRestrictionContext *fastPathContext);
+static void CreateAndPushPlannerRestrictionContext(DistributedPlanningContext *
+ planContext,
+ FastPathRestrictionContext *
+ fastPathContext);
static PlannerRestrictionContext * CurrentPlannerRestrictionContext(void);
static void PopPlannerRestrictionContext(void);
-static void ResetPlannerRestrictionContext(
- PlannerRestrictionContext *plannerRestrictionContext);
+static void ResetPlannerRestrictionContext(PlannerRestrictionContext *
+ plannerRestrictionContext);
static PlannedStmt * PlanFastPathDistributedStmt(DistributedPlanningContext *planContext);
static PlannedStmt * PlanDistributedStmt(DistributedPlanningContext *planContext,
int rteIdCounter);
@@ -245,9 +247,9 @@ distributed_planner(Query *parse,
*/
HideCitusDependentObjectsOnQueriesOfPgMetaTables((Node *) parse, NULL);
- /* create a restriction context and put it at the end of context list */
- planContext.plannerRestrictionContext = CreateAndPushPlannerRestrictionContext(
- &fastPathContext);
+	/* create a restriction context and push it onto the global planner restriction context list */
+ CreateAndPushPlannerRestrictionContext(&planContext,
+ &fastPathContext);
/*
* We keep track of how many times we've recursed into the planner, primarily
@@ -281,6 +283,9 @@ distributed_planner(Query *parse,
Assert(saveNestLevel > 0);
AtEOXact_GUC(true, saveNestLevel);
}
+
+	/* Clear the restriction context's back-reference to the plan context */
+ planContext.plannerRestrictionContext->planContext = NULL;
#endif
needsDistributedPlanning = CheckPostPlanDistribution(&planContext,
needsDistributedPlanning,
@@ -2033,6 +2038,32 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
lappend(relationRestrictionContext->relationRestrictionList, relationRestriction);
MemoryContextSwitchTo(oldMemoryContext);
+
+#if PG_VERSION_NUM >= PG_VERSION_18
+ if (root->query_level == 1 && plannerRestrictionContext->planContext != NULL)
+ {
+ /* We're at the top query with a distributed context; see if Postgres
+ * has changed the query tree we passed to it in distributed_planner().
+		 * This check was necessitated by PG commit 1e4351a: since that commit
+		 * the planner modifies a copy of the passed-in query tree, so changes
+		 * are not reflected back to the caller of standard_planner().
+ */
+ Query *query = plannerRestrictionContext->planContext->query;
+ if (root->parse != query)
+ {
+ /*
+			 * The Postgres planner has reconstructed the query tree, so the
+			 * query tree our distributed context passed to standard_planner()
+			 * is updated to track the new query tree.
+ */
+ ereport(DEBUG4, (errmsg(
+ "Detected query reconstruction by Postgres planner, updating "
+ "planContext to track it")));
+
+ plannerRestrictionContext->planContext->query = root->parse;
+ }
+ }
+#endif
}
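
Editor's note: the PG18 hunk above reduces to a pointer-retargeting pattern: compare the planner's current tree against the one saved at entry, and follow the fresh copy when they diverge. A minimal standalone sketch, with a stand-in Query type rather than the Postgres struct:

```c
#include <stddef.h>
#include <stdio.h>

typedef struct Query { int dummy; } Query;               /* stand-in */
typedef struct PlanContext { Query *query; } PlanContext;

/* retarget the saved pointer when the planner swapped in a new tree */
static void
TrackQueryTree(PlanContext *planContext, Query *plannerParse)
{
	if (planContext != NULL && planContext->query != plannerParse)
	{
		planContext->query = plannerParse;
	}
}

int
main(void)
{
	Query original = { 0 };
	Query rebuilt = { 0 };
	PlanContext context = { &original };

	/* simulate the planner rebuilding the tree mid-planning */
	TrackQueryTree(&context, &rebuilt);
	printf("tracking rebuilt copy: %d\n", context.query == &rebuilt);
	return 0;
}
```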
@@ -2410,11 +2441,13 @@ CopyPlanParamList(List *originalPlanParamList)
* context with an empty relation restriction context and an empty join and
* a copy of the given fast path restriction context (if present). Finally,
* the planner restriction context is inserted to the beginning of the
- * global plannerRestrictionContextList and it is returned.
+ * global plannerRestrictionContextList and, in PG18+, given a reference to
+ * its distributed plan context.
*/
-static PlannerRestrictionContext *
-CreateAndPushPlannerRestrictionContext(
- FastPathRestrictionContext *fastPathRestrictionContext)
+static void
+CreateAndPushPlannerRestrictionContext(DistributedPlanningContext *planContext,
+ FastPathRestrictionContext *
+ fastPathRestrictionContext)
{
PlannerRestrictionContext *plannerRestrictionContext =
palloc0(sizeof(PlannerRestrictionContext));
@@ -2451,7 +2484,11 @@ CreateAndPushPlannerRestrictionContext(
plannerRestrictionContextList = lcons(plannerRestrictionContext,
plannerRestrictionContextList);
- return plannerRestrictionContext;
+ planContext->plannerRestrictionContext = plannerRestrictionContext;
+
+#if PG_VERSION_NUM >= PG_VERSION_18
+ plannerRestrictionContext->planContext = planContext;
+#endif
}
@@ -2512,6 +2549,18 @@ CurrentPlannerRestrictionContext(void)
static void
PopPlannerRestrictionContext(void)
{
+#if PG_VERSION_NUM >= PG_VERSION_18
+
+ /*
+ * PG18+: Clear the restriction context's planContext pointer; this is done
+ * by distributed_planner() when popping the context, but in case of error
+ * during standard_planner() we want to clean up here also.
+ */
+ PlannerRestrictionContext *plannerRestrictionContext =
+ (PlannerRestrictionContext *) linitial(plannerRestrictionContextList);
+ plannerRestrictionContext->planContext = NULL;
+#endif
+
plannerRestrictionContextList = list_delete_first(plannerRestrictionContextList);
}
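
Editor's note: clearing the back-pointer inside the pop matters because an error raised during standard_planner() unwinds past distributed_planner()'s own cleanup, so the pop is the last safe place to sever the link. The general shape, sketched over a singly linked stack with illustrative names:

```c
#include <assert.h>
#include <stddef.h>

typedef struct Owner { int dummy; } Owner;    /* caller's stack-frame state */

typedef struct Ctx
{
	Owner *owner;           /* back-pointer into a caller's stack frame */
	struct Ctx *next;
} Ctx;

static Ctx *contextStack = NULL;

static void
PushContext(Ctx *context, Owner *owner)
{
	context->owner = owner;
	context->next = contextStack;
	contextStack = context;
}

static void
PopContext(void)
{
	Ctx *top = contextStack;
	assert(top != NULL);

	/* sever the back-reference first: even an error-path pop must not
	 * leave a pointer into a stack frame that is about to unwind */
	top->owner = NULL;

	contextStack = top->next;
}

int
main(void)
{
	Owner owner = { 0 };
	Ctx context = { NULL, NULL };

	PushContext(&context, &owner);
	PopContext();

	assert(context.owner == NULL && contextStack == NULL);
	return 0;
}
```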
diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c
index 4a79dc25a..7f6b107f3 100644
--- a/src/backend/distributed/planner/function_call_delegation.c
+++ b/src/backend/distributed/planner/function_call_delegation.c
@@ -828,12 +828,13 @@ IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId)
Assert(AllowedDistributionColumnValue.isActive);
Assert(ExecutorLevel > AllowedDistributionColumnValue.executorLevel);
- ereport(DEBUG4, errmsg("Comparing saved:%s with Shard key: %s colocationid:%d:%d",
- pretty_format_node_dump(
- nodeToString(
- AllowedDistributionColumnValue.distributionColumnValue)),
- pretty_format_node_dump(nodeToString(shardKey)),
- AllowedDistributionColumnValue.colocationId, colocationId));
+ ereport(DEBUG4, errmsg(
+ "Comparing saved:%s with Shard key: %s colocationid:%d:%d",
+ pretty_format_node_dump(
+ nodeToString(AllowedDistributionColumnValue.
+ distributionColumnValue)),
+ pretty_format_node_dump(nodeToString(shardKey)),
+ AllowedDistributionColumnValue.colocationId, colocationId));
return (equal(AllowedDistributionColumnValue.distributionColumnValue, shardKey) &&
(AllowedDistributionColumnValue.colocationId == colocationId));
diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c
index 554ac631e..afcd36cd8 100644
--- a/src/backend/distributed/planner/insert_select_planner.c
+++ b/src/backend/distributed/planner/insert_select_planner.c
@@ -66,7 +66,8 @@ static bool InsertSelectHasRouterSelect(Query *originalQuery,
PlannerRestrictionContext *
plannerRestrictionContext);
static Task * RouterModifyTaskForShardInterval(Query *originalQuery,
- CitusTableCacheEntry *targetTableCacheEntry,
+ CitusTableCacheEntry *
+ targetTableCacheEntry,
ShardInterval *shardInterval,
PlannerRestrictionContext *
plannerRestrictionContext,
@@ -1152,10 +1153,11 @@ ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte,
exprTypmod((Node *) newSubqueryTargetEntry->expr),
exprCollation((Node *) newSubqueryTargetEntry->expr),
0);
- TargetEntry *newInsertTargetEntry = makeTargetEntry((Expr *) newInsertVar,
- originalAttrNo,
- oldInsertTargetEntry->resname,
- oldInsertTargetEntry->resjunk);
+ TargetEntry *newInsertTargetEntry = makeTargetEntry(
+ (Expr *) newInsertVar,
+ originalAttrNo,
+ oldInsertTargetEntry->resname,
+ oldInsertTargetEntry->resjunk);
newInsertTargetlist = lappend(newInsertTargetlist, newInsertTargetEntry);
resno++;
diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c
index c456fa341..bf49d5936 100644
--- a/src/backend/distributed/planner/merge_planner.c
+++ b/src/backend/distributed/planner/merge_planner.c
@@ -67,7 +67,8 @@ static DeferredErrorMessage * MergeQualAndTargetListFunctionsSupported(Oid
Query *query,
Node *quals,
List *targetList,
- CmdType commandType);
+ CmdType
+ commandType);
static DistributedPlan * CreateRouterMergePlan(Oid targetRelationId, Query *originalQuery,
Query *query,
@@ -426,10 +427,10 @@ ErrorIfMergeHasUnsupportedTables(Oid targetRelationId, List *rangeTableList)
#if PG_VERSION_NUM >= PG_VERSION_18
case RTE_GROUP:
#endif
- {
- /* Skip them as base table(s) will be checked */
- continue;
- }
+ {
+ /* Skip them as base table(s) will be checked */
+ continue;
+ }
/*
* RTE_NAMEDTUPLESTORE is typically used in ephemeral named relations,
@@ -574,8 +575,8 @@ IsDistributionColumnInMergeSource(Expr *columnExpression, Query *query, bool
Var *distributionColumn = DistPartitionKey(relationId);
/* not all distributed tables have partition column */
- if (distributionColumn != NULL && column->varattno ==
- distributionColumn->varattno)
+ if (distributionColumn != NULL &&
+ column->varattno == distributionColumn->varattno)
{
isDistributionColumn = true;
}
@@ -1045,8 +1046,9 @@ DeferErrorIfTargetHasFalseClause(Oid targetRelationId,
PlannerRestrictionContext *plannerRestrictionContext)
{
ListCell *restrictionCell = NULL;
- foreach(restrictionCell,
- plannerRestrictionContext->relationRestrictionContext->relationRestrictionList)
+ foreach(
+ restrictionCell,
+ plannerRestrictionContext->relationRestrictionContext->relationRestrictionList)
{
RelationRestriction *relationRestriction =
(RelationRestriction *) lfirst(restrictionCell);
@@ -1078,7 +1080,8 @@ DeferErrorIfTargetHasFalseClause(Oid targetRelationId,
*/
static DeferredErrorMessage *
DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList,
- PlannerRestrictionContext *plannerRestrictionContext,
+ PlannerRestrictionContext *
+ plannerRestrictionContext,
Oid targetRelationId)
{
List *distTablesList = NIL;
@@ -1115,8 +1118,8 @@ DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList,
if (list_length(distTablesList) > 0 && list_length(localTablesList) > 0)
{
- ereport(DEBUG1, (errmsg(
- "A mix of distributed and local table, try repartitioning")));
+ ereport(DEBUG1, (errmsg("A mix of distributed and local table, "
+ "try repartitioning")));
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"A mix of distributed and citus-local table, "
"routable query is not possible", NULL, NULL);
diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c
index c0679c14e..3c5b44a61 100644
--- a/src/backend/distributed/planner/multi_logical_optimizer.c
+++ b/src/backend/distributed/planner/multi_logical_optimizer.c
@@ -271,7 +271,8 @@ static void AppendTargetEntryToGroupClause(TargetEntry *targetEntry,
static bool WorkerAggregateWalker(Node *node,
WorkerAggregateWalkerContext *walkerContext);
static List * WorkerAggregateExpressionList(Aggref *originalAggregate,
- WorkerAggregateWalkerContext *walkerContextry);
+ WorkerAggregateWalkerContext *
+ walkerContextry);
static AggregateType GetAggregateType(Aggref *aggregatExpression);
static Oid AggregateArgumentType(Aggref *aggregate);
static Expr * FirstAggregateArgument(Aggref *aggregate);
@@ -293,18 +294,19 @@ static Const * MakeIntegerConst(int32 integerValue);
/* Local functions forward declarations for aggregate expression checks */
static bool HasNonDistributableAggregates(MultiNode *logicalPlanNode);
static bool CanPushDownExpression(Node *expression,
- const ExtendedOpNodeProperties *extendedOpNodeProperties);
-static DeferredErrorMessage * DeferErrorIfHasNonDistributableAggregates(
- MultiNode *logicalPlanNode);
-static DeferredErrorMessage * DeferErrorIfUnsupportedArrayAggregate(
- Aggref *arrayAggregateExpression);
+ const ExtendedOpNodeProperties *
+ extendedOpNodeProperties);
+static DeferredErrorMessage * DeferErrorIfHasNonDistributableAggregates(MultiNode *
+ logicalPlanNode);
+static DeferredErrorMessage * DeferErrorIfUnsupportedArrayAggregate(Aggref *
+ arrayAggregateExpression);
static DeferredErrorMessage * DeferErrorIfUnsupportedJsonAggregate(AggregateType type,
Aggref *
aggregateExpression);
-static DeferredErrorMessage * DeferErrorIfUnsupportedAggregateDistinct(
- Aggref *aggregateExpression,
- MultiNode *
- logicalPlanNode);
+static DeferredErrorMessage * DeferErrorIfUnsupportedAggregateDistinct(Aggref *
+ aggregateExpression,
+ MultiNode *
+ logicalPlanNode);
static Var * AggregateDistinctColumn(Aggref *aggregateExpression);
static bool TablePartitioningSupportsDistinct(List *tableNodeList,
MultiExtendedOp *opNode,
@@ -322,10 +324,10 @@ static bool HasOrderByAggregate(List *sortClauseList, List *targetList);
static bool HasOrderByNonCommutativeAggregate(List *sortClauseList, List *targetList);
static bool HasOrderByComplexExpression(List *sortClauseList, List *targetList);
static bool HasOrderByHllType(List *sortClauseList, List *targetList);
-static bool ShouldProcessDistinctOrderAndLimitForWorker(
- ExtendedOpNodeProperties *extendedOpNodeProperties,
- bool pushingDownOriginalGrouping,
- Node *havingQual);
+static bool ShouldProcessDistinctOrderAndLimitForWorker(ExtendedOpNodeProperties *
+ extendedOpNodeProperties,
+ bool pushingDownOriginalGrouping,
+ Node *havingQual);
static bool IsIndexInRange(const List *list, int index);
/*
@@ -5061,10 +5063,10 @@ HasOrderByHllType(List *sortClauseList, List *targetList)
* neither should ProcessLimitOrderByForWorkerQuery.
*/
static bool
-ShouldProcessDistinctOrderAndLimitForWorker(
- ExtendedOpNodeProperties *extendedOpNodeProperties,
- bool pushingDownOriginalGrouping,
- Node *havingQual)
+ShouldProcessDistinctOrderAndLimitForWorker(ExtendedOpNodeProperties *
+ extendedOpNodeProperties,
+ bool pushingDownOriginalGrouping,
+ Node *havingQual)
{
if (extendedOpNodeProperties->pullUpIntermediateRows)
{
diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c
index 14ce199c8..6a67bf684 100644
--- a/src/backend/distributed/planner/multi_router_planner.c
+++ b/src/backend/distributed/planner/multi_router_planner.c
@@ -153,8 +153,8 @@ static String * MakeDummyColumnString(int dummyColumnId);
static List * BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError);
static List * GroupInsertValuesByShardId(List *insertValuesList);
static List * ExtractInsertValuesList(Query *query, Var *partitionColumn);
-static DeferredErrorMessage * DeferErrorIfUnsupportedRouterPlannableSelectQuery(
- Query *query);
+static DeferredErrorMessage * DeferErrorIfUnsupportedRouterPlannableSelectQuery(Query *
+ query);
static DeferredErrorMessage * ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree);
static DeferredErrorMessage * ErrorIfQueryHasCTEWithSearchClause(Query *queryTree);
static bool ContainsSearchClauseWalker(Node *node, void *context);
@@ -855,7 +855,8 @@ DeferErrorIfUnsupportedLocalTableJoin(List *rangeTableList)
"Modifying local tables with remote local tables is "
"not supported.",
NULL,
- "Consider wrapping remote local table to a CTE, or subquery");
+ "Consider wrapping remote local table to a CTE, "
+ "or subquery");
}
return NULL;
}
@@ -3151,8 +3152,8 @@ TargetShardIntervalForFastPathQuery(Query *query, bool *isMultiShardQuery,
FindShardInterval(inputDistributionKeyValue->constvalue, cache);
if (cachedShardInterval == NULL)
{
- ereport(ERROR, (errmsg(
- "could not find shardinterval to which to send the query")));
+ ereport(ERROR, (errmsg("could not find shardinterval to which to send "
+ "the query")));
}
if (outputPartitionValueConst != NULL)
diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c
index b94412f2b..10ab19ac4 100644
--- a/src/backend/distributed/planner/query_pushdown_planning.c
+++ b/src/backend/distributed/planner/query_pushdown_planning.c
@@ -107,8 +107,12 @@ static AttrNumber FindResnoForVarInTargetList(List *targetList, int varno, int v
static bool RelationInfoContainsOnlyRecurringTuples(PlannerInfo *plannerInfo,
Relids relids);
static char * RecurringTypeDescription(RecurringTuplesType recurType);
-static DeferredErrorMessage * DeferredErrorIfUnsupportedLateralSubquery(
- PlannerInfo *plannerInfo, Relids recurringRelIds, Relids nonRecurringRelIds);
+static DeferredErrorMessage * DeferredErrorIfUnsupportedLateralSubquery(PlannerInfo *
+ plannerInfo,
+ Relids
+ recurringRelIds,
+ Relids
+ nonRecurringRelIds);
static bool ContainsLateralSubquery(PlannerInfo *plannerInfo);
static Var * PartitionColumnForPushedDownSubquery(Query *query);
static bool ContainsReferencesToRelids(Query *query, Relids relids, int *foundRelid);
@@ -790,9 +794,9 @@ FromClauseRecurringTupleType(Query *queryTree)
* such queries have lateral subqueries.
*/
static DeferredErrorMessage *
-DeferredErrorIfUnsupportedRecurringTuplesJoin(
- PlannerRestrictionContext *plannerRestrictionContext,
- bool plannerPhase)
+DeferredErrorIfUnsupportedRecurringTuplesJoin(PlannerRestrictionContext *
+ plannerRestrictionContext,
+ bool plannerPhase)
{
List *joinRestrictionList =
plannerRestrictionContext->joinRestrictionContext->joinRestrictionList;
diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c
index 139b30231..b2151f7f5 100644
--- a/src/backend/distributed/planner/recursive_planning.c
+++ b/src/backend/distributed/planner/recursive_planning.c
@@ -161,7 +161,8 @@ static void RecursivelyPlanNonColocatedSubqueriesInWhere(Query *query,
RecursivePlanningContext *
recursivePlanningContext);
static bool RecursivelyPlanRecurringTupleOuterJoinWalker(Node *node, Query *query,
- RecursivePlanningContext *context,
+ RecursivePlanningContext *
+ context,
bool chainedJoin);
static void RecursivelyPlanDistributedJoinNode(Node *node, Query *query,
RecursivePlanningContext *context);
@@ -207,8 +208,8 @@ static bool CanPushdownRecurringOuterJoinOnOuterRTE(RangeTblEntry *rte);
static bool CanPushdownRecurringOuterJoinOnInnerVar(Var *innervar, RangeTblEntry *rte);
static bool CanPushdownRecurringOuterJoin(JoinExpr *joinExpr, Query *query);
#if PG_VERSION_NUM < PG_VERSION_17
-static bool hasPseudoconstantQuals(
- RelationRestrictionContext *relationRestrictionContext);
+static bool hasPseudoconstantQuals(RelationRestrictionContext *
+ relationRestrictionContext);
#endif
/*
@@ -2192,6 +2193,7 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
subquery->targetList = lappend(subquery->targetList, targetEntry);
}
}
+
/*
* If tupleDesc is NULL we have 2 different cases:
*
@@ -2241,6 +2243,7 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
columnType = list_nth_oid(rangeTblFunction->funccoltypes,
targetColumnIndex);
}
+
/* use the types in the function definition otherwise */
else
{
@@ -2780,8 +2783,8 @@ CanPushdownRecurringOuterJoinOnInnerVar(Var *innerVar, RangeTblEntry *rte)
}
/* Check if the inner variable is part of the distribution column */
- if (cacheEntry->partitionColumn && innerVar->varattno ==
- cacheEntry->partitionColumn->varattno)
+ if (cacheEntry->partitionColumn &&
+ innerVar->varattno == cacheEntry->partitionColumn->varattno)
{
return true;
}
@@ -2921,8 +2924,8 @@ CanPushdownRecurringOuterJoinExtended(JoinExpr *joinExpr, Query *query,
if (JoinTreeContainsLateral(joinExpr->rarg, query->rtable) || JoinTreeContainsLateral(
joinExpr->larg, query->rtable))
{
- ereport(DEBUG5, (errmsg(
- "Lateral join is not supported for pushdown in this path.")));
+ ereport(DEBUG5, (errmsg("Lateral join is not supported for pushdown "
+ "in this path.")));
return false;
}
@@ -2983,6 +2986,7 @@ CanPushdownRecurringOuterJoinExtended(JoinExpr *joinExpr, Query *query,
return true;
}
}
+
/* the inner table is a subquery, extract the base relation referred in the qual */
else if (rte && rte->rtekind == RTE_SUBQUERY)
{
diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c
index 5a63503f0..26b53cf7c 100644
--- a/src/backend/distributed/planner/relation_restriction_equivalence.c
+++ b/src/backend/distributed/planner/relation_restriction_equivalence.c
@@ -156,9 +156,10 @@ static bool AllDistributedRelationsInRestrictionContextColocated(
restrictionContext);
static bool IsNotSafeRestrictionToRecursivelyPlan(Node *node);
static bool HasPlaceHolderVar(Node *node);
-static JoinRestrictionContext * FilterJoinRestrictionContext(
- JoinRestrictionContext *joinRestrictionContext, Relids
- queryRteIdentities);
+static JoinRestrictionContext * FilterJoinRestrictionContext(JoinRestrictionContext *
+ joinRestrictionContext,
+ Relids
+ queryRteIdentities);
static bool RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int
rangeTableArrayLength, Relids
queryRteIdentities);
@@ -613,8 +614,9 @@ RestrictionEquivalenceForPartitionKeys(PlannerRestrictionContext *restrictionCon
List *attributeEquivalenceList = GenerateAllAttributeEquivalences(restrictionContext);
- return RestrictionEquivalenceForPartitionKeysViaEquivalences(restrictionContext,
- attributeEquivalenceList);
+ return RestrictionEquivalenceForPartitionKeysViaEquivalences(
+ restrictionContext,
+ attributeEquivalenceList);
}
@@ -1160,8 +1162,8 @@ GenerateCommonEquivalence(List *attributeEquivalenceList,
* with a single AttributeEquivalenceClassMember.
*/
static AttributeEquivalenceClass *
-GenerateEquivalenceClassForRelationRestriction(
- RelationRestrictionContext *relationRestrictionContext)
+GenerateEquivalenceClassForRelationRestriction(RelationRestrictionContext *
+ relationRestrictionContext)
{
ListCell *relationRestrictionCell = NULL;
AttributeEquivalenceClassMember *eqMember = NULL;
@@ -2071,8 +2073,8 @@ FindQueryContainingRTEIdentityInternal(Node *node,
* distributed relations in the given relation restrictions list are co-located.
*/
static bool
-AllDistributedRelationsInRestrictionContextColocated(
- RelationRestrictionContext *restrictionContext)
+AllDistributedRelationsInRestrictionContextColocated(RelationRestrictionContext *
+ restrictionContext)
{
RelationRestriction *relationRestriction = NULL;
List *relationIdList = NIL;
diff --git a/src/backend/distributed/planner/shard_pruning.c b/src/backend/distributed/planner/shard_pruning.c
index 6408eab5d..6636ac388 100644
--- a/src/backend/distributed/planner/shard_pruning.c
+++ b/src/backend/distributed/planner/shard_pruning.c
@@ -1215,7 +1215,9 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
}
default:
+ {
Assert(false);
+ }
}
}
diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c
index 4c43d3513..4ad2eca01 100644
--- a/src/backend/distributed/replication/multi_logical_replication.c
+++ b/src/backend/distributed/replication/multi_logical_replication.c
@@ -131,8 +131,8 @@ static void ExecuteRemainingPostLoadTableCommands(List *logicalRepTargetList);
static char * escape_param_str(const char *str);
static XLogRecPtr GetRemoteLSN(MultiConnection *connection, char *command);
static void WaitForMiliseconds(long timeout);
-static XLogRecPtr GetSubscriptionPosition(
- GroupedLogicalRepTargets *groupedLogicalRepTargets);
+static XLogRecPtr GetSubscriptionPosition(GroupedLogicalRepTargets *
+ groupedLogicalRepTargets);
static HTAB * CreateShardMovePublicationInfoHash(WorkerNode *targetNode,
List *shardIntervals);
diff --git a/src/backend/distributed/shardsplit/shardsplit_decoder.c b/src/backend/distributed/shardsplit/shardsplit_decoder.c
index 837009530..4edae3f96 100644
--- a/src/backend/distributed/shardsplit/shardsplit_decoder.c
+++ b/src/backend/distributed/shardsplit/shardsplit_decoder.c
@@ -210,9 +210,11 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
/* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */
default:
+ {
ereport(ERROR, errmsg(
"Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE",
change->action));
+ }
}
#else
switch (change->action)
@@ -245,9 +247,11 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
/* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */
default:
+ {
ereport(ERROR, errmsg(
"Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE",
change->action));
+ }
}
#endif
@@ -318,9 +322,11 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
/* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */
default:
+ {
ereport(ERROR, errmsg(
"Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE",
change->action));
+ }
}
#else
switch (change->action)
@@ -373,9 +379,11 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
/* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */
default:
+ {
ereport(ERROR, errmsg(
"Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE",
change->action));
+ }
}
#endif
}
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index ac8b0a7d6..ea33a8346 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -132,15 +132,15 @@ ReadColumnarOptions_type extern_ReadColumnarOptions = NULL;
* module.
*/
#define DEFINE_COLUMNAR_PASSTHROUGH_FUNC(funcname) \
- static PGFunction CppConcat(extern_, funcname); \
- PG_FUNCTION_INFO_V1(funcname); \
- Datum funcname(PG_FUNCTION_ARGS) \
- { \
- return CppConcat(extern_, funcname)(fcinfo); \
- }
+ static PGFunction CppConcat(extern_, funcname); \
+ PG_FUNCTION_INFO_V1(funcname); \
+ Datum funcname(PG_FUNCTION_ARGS) \
+ { \
+ return CppConcat(extern_, funcname)(fcinfo); \
+ }
#define INIT_COLUMNAR_SYMBOL(typename, funcname) \
- CppConcat(extern_, funcname) = \
- (typename) (void *) lookup_external_function(handle, # funcname)
+ CppConcat(extern_, funcname) = \
+ (typename) (void *) lookup_external_function(handle, # funcname)
#define CDC_DECODER_DYNAMIC_LIB_PATH "$libdir/citus_decoders:$libdir"
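
Editor's note: concretely, the two macros above generate the pass-through plumbing and its late binding. Expanded by hand for a hypothetical funcname of columnar_handler (a sketch of the preprocessor output, not new code in the tree):

```c
/* DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_handler) expands to roughly: */
static PGFunction extern_columnar_handler;
PG_FUNCTION_INFO_V1(columnar_handler);
Datum
columnar_handler(PG_FUNCTION_ARGS)
{
	return extern_columnar_handler(fcinfo);
}

/*
 * ...and INIT_COLUMNAR_SYMBOL(PGFunction, columnar_handler), run once the
 * columnar library handle is loaded, binds the pointer; # funcname
 * stringizes, so lookup_external_function() receives "columnar_handler":
 *
 *   extern_columnar_handler =
 *       (PGFunction) (void *) lookup_external_function(handle,
 *                                                      "columnar_handler");
 */
```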
diff --git a/src/backend/distributed/stats/stat_counters.c b/src/backend/distributed/stats/stat_counters.c
index 03151befd..fc0229c30 100644
--- a/src/backend/distributed/stats/stat_counters.c
+++ b/src/backend/distributed/stats/stat_counters.c
@@ -539,9 +539,10 @@ StatCountersShmemInit(void)
bool sharedBackendStatsSlotArrayAlreadyInit = false;
SharedBackendStatsSlotArray = (BackendStatsSlot *)
- ShmemInitStruct("Citus Shared Backend Stats Slot Array",
- SharedBackendStatsSlotArrayShmemSize(),
- &sharedBackendStatsSlotArrayAlreadyInit);
+ ShmemInitStruct(
+ "Citus Shared Backend Stats Slot Array",
+ SharedBackendStatsSlotArrayShmemSize(),
+ &sharedBackendStatsSlotArrayAlreadyInit);
bool sharedSavedBackendStatsHashLockAlreadyInit = false;
SharedSavedBackendStatsHashLock = ShmemInitStruct(
diff --git a/src/backend/distributed/test/run_from_same_connection.c b/src/backend/distributed/test/run_from_same_connection.c
index d22ee4428..e7fe4bf30 100644
--- a/src/backend/distributed/test/run_from_same_connection.c
+++ b/src/backend/distributed/test/run_from_same_connection.c
@@ -35,9 +35,9 @@
#define ALTER_CURRENT_PROCESS_ID \
- "ALTER SYSTEM SET citus.isolation_test_session_process_id TO %d"
+ "ALTER SYSTEM SET citus.isolation_test_session_process_id TO %d"
#define ALTER_CURRENT_WORKER_PROCESS_ID \
- "ALTER SYSTEM SET citus.isolation_test_session_remote_process_id TO %ld"
+ "ALTER SYSTEM SET citus.isolation_test_session_remote_process_id TO %ld"
#define GET_PROCESS_ID "SELECT process_id FROM get_current_transaction_id()"
diff --git a/src/backend/distributed/test/shard_rebalancer.c b/src/backend/distributed/test/shard_rebalancer.c
index 1b79fc27a..f27535919 100644
--- a/src/backend/distributed/test/shard_rebalancer.c
+++ b/src/backend/distributed/test/shard_rebalancer.c
@@ -34,8 +34,8 @@
#include "distributed/shard_rebalancer.h"
/* static declarations for json conversion */
-static List * JsonArrayToShardPlacementTestInfoList(
- ArrayType *shardPlacementJsonArrayObject);
+static List * JsonArrayToShardPlacementTestInfoList(ArrayType *
+ shardPlacementJsonArrayObject);
static List * JsonArrayToWorkerTestInfoList(ArrayType *workerNodeJsonArrayObject);
static bool JsonFieldValueBoolDefault(Datum jsonDocument, const char *key,
bool defaultValue);
diff --git a/src/backend/distributed/transaction/distributed_deadlock_detection.c b/src/backend/distributed/transaction/distributed_deadlock_detection.c
index 30b423028..02e8eddf9 100644
--- a/src/backend/distributed/transaction/distributed_deadlock_detection.c
+++ b/src/backend/distributed/transaction/distributed_deadlock_detection.c
@@ -395,8 +395,8 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode)
DistributedTransactionId *currentTransactionId =
&currentBackendData.transactionId;
- if (currentTransactionId->transactionNumber !=
- transactionNode->transactionId.transactionNumber)
+ if (currentTransactionId->transactionNumber != transactionNode->transactionId.
+ transactionNumber)
{
continue;
}
diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c
index 16c5c56fa..236eaac28 100644
--- a/src/backend/distributed/transaction/transaction_management.c
+++ b/src/backend/distributed/transaction/transaction_management.c
@@ -58,7 +58,7 @@
#include "distributed/worker_log_messages.h"
#define COMMIT_MANAGEMENT_COMMAND_2PC \
- "SELECT citus_internal.commit_management_command_2pc()"
+ "SELECT citus_internal.commit_management_command_2pc()"
CoordinatedTransactionState CurrentCoordinatedTransactionState = COORD_TRANS_NONE;
diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c
index 087811053..7da7210b4 100644
--- a/src/backend/distributed/transaction/worker_transaction.c
+++ b/src/backend/distributed/transaction/worker_transaction.c
@@ -634,8 +634,9 @@ SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *workerNodeList
* false.
*/
bool
-SendOptionalCommandListToWorkerOutsideTransactionWithConnection(
- MultiConnection *workerConnection, List *commandList)
+SendOptionalCommandListToWorkerOutsideTransactionWithConnection(MultiConnection *
+ workerConnection, List *
+ commandList)
{
if (PQstatus(workerConnection->pgConn) != CONNECTION_OK)
{
diff --git a/src/backend/distributed/utils/aggregate_utils.c b/src/backend/distributed/utils/aggregate_utils.c
index 51f5b9b4f..70b11113a 100644
--- a/src/backend/distributed/utils/aggregate_utils.c
+++ b/src/backend/distributed/utils/aggregate_utils.c
@@ -376,12 +376,12 @@ ExtractAggregationValues(FunctionCallInfo fcinfo, int argumentIndex,
HeapTupleHeader tupleHeader =
DatumGetHeapTupleHeader(fcGetArgValue(fcinfo, argumentIndex));
- if (HeapTupleHeaderGetNatts(tupleHeader) !=
- aggregationArgumentContext->argumentCount ||
- HeapTupleHeaderGetTypeId(tupleHeader) !=
- aggregationArgumentContext->tupleDesc->tdtypeid ||
- HeapTupleHeaderGetTypMod(tupleHeader) !=
- aggregationArgumentContext->tupleDesc->tdtypmod)
+ if (HeapTupleHeaderGetNatts(
+ tupleHeader) != aggregationArgumentContext->argumentCount ||
+ HeapTupleHeaderGetTypeId(
+ tupleHeader) != aggregationArgumentContext->tupleDesc->tdtypeid ||
+ HeapTupleHeaderGetTypMod(
+ tupleHeader) != aggregationArgumentContext->tupleDesc->tdtypmod)
{
ereport(ERROR, (errmsg("worker_partial_agg_sfunc received "
"incompatible record")));
@@ -817,8 +817,8 @@ coord_combine_agg_ffunc(PG_FUNCTION_ARGS)
if (!TypecheckCoordCombineAggReturnType(fcinfo, ffunc, box))
{
- ereport(ERROR, (errmsg(
- "coord_combine_agg_ffunc could not confirm type correctness")));
+ ereport(ERROR, (errmsg("coord_combine_agg_ffunc could not "
+ "confirm type correctness")));
}
if (ffunc == InvalidOid)
diff --git a/src/backend/distributed/utils/background_jobs.c b/src/backend/distributed/utils/background_jobs.c
index 272f1d6d8..218328b48 100644
--- a/src/backend/distributed/utils/background_jobs.c
+++ b/src/backend/distributed/utils/background_jobs.c
@@ -88,26 +88,26 @@ static shm_mq_result ConsumeTaskWorkerOutput(shm_mq_handle *responseq, StringInf
bool *hadError);
static void UpdateDependingTasks(BackgroundTask *task);
static int64 CalculateBackoffDelay(int retryCount);
-static bool NewExecutorExceedsCitusLimit(
- QueueMonitorExecutionContext *queueMonitorExecutionContext);
+static bool NewExecutorExceedsCitusLimit(QueueMonitorExecutionContext *
+ queueMonitorExecutionContext);
static bool NewExecutorExceedsPgMaxWorkers(BackgroundWorkerHandle *handle,
QueueMonitorExecutionContext *
queueMonitorExecutionContext);
static bool AssignRunnableTaskToNewExecutor(BackgroundTask *runnableTask,
QueueMonitorExecutionContext *
queueMonitorExecutionContext);
-static void AssignRunnableTasks(
- QueueMonitorExecutionContext *queueMonitorExecutionContext);
+static void AssignRunnableTasks(QueueMonitorExecutionContext *
+ queueMonitorExecutionContext);
static List * GetRunningTaskEntries(HTAB *currentExecutors);
-static shm_mq_result ReadFromExecutorQueue(
- BackgroundExecutorHashEntry *backgroundExecutorHashEntry,
- bool *hadError);
-static void CheckAndResetLastWorkerAllocationFailure(
- QueueMonitorExecutionContext *queueMonitorExecutionContext);
-static TaskExecutionStatus TaskConcurrentCancelCheck(
- TaskExecutionContext *taskExecutionContext);
-static TaskExecutionStatus ConsumeExecutorQueue(
- TaskExecutionContext *taskExecutionContext);
+static shm_mq_result ReadFromExecutorQueue(BackgroundExecutorHashEntry *
+ backgroundExecutorHashEntry,
+ bool *hadError);
+static void CheckAndResetLastWorkerAllocationFailure(QueueMonitorExecutionContext *
+ queueMonitorExecutionContext);
+static TaskExecutionStatus TaskConcurrentCancelCheck(TaskExecutionContext *
+ taskExecutionContext);
+static TaskExecutionStatus ConsumeExecutorQueue(TaskExecutionContext *
+ taskExecutionContext);
static void TaskHadError(TaskExecutionContext *taskExecutionContext);
static void TaskEnded(TaskExecutionContext *taskExecutionContext);
static void TerminateAllTaskExecutors(HTAB *currentExecutors);
@@ -537,7 +537,8 @@ NewExecutorExceedsPgMaxWorkers(BackgroundWorkerHandle *handle,
*/
static bool
AssignRunnableTaskToNewExecutor(BackgroundTask *runnableTask,
- QueueMonitorExecutionContext *queueMonitorExecutionContext)
+ QueueMonitorExecutionContext *
+ queueMonitorExecutionContext)
{
Assert(runnableTask && runnableTask->status == BACKGROUND_TASK_STATUS_RUNNABLE);
@@ -649,8 +650,8 @@ GetRunningTaskEntries(HTAB *currentExecutors)
* It also resets the failure timestamp.
*/
static void
-CheckAndResetLastWorkerAllocationFailure(
- QueueMonitorExecutionContext *queueMonitorExecutionContext)
+CheckAndResetLastWorkerAllocationFailure(QueueMonitorExecutionContext *
+ queueMonitorExecutionContext)
{
if (queueMonitorExecutionContext->backgroundWorkerFailedStartTime > 0)
{
diff --git a/src/backend/distributed/utils/citus_copyfuncs.c b/src/backend/distributed/utils/citus_copyfuncs.c
index 6eeaf964f..75dbae3fc 100644
--- a/src/backend/distributed/utils/citus_copyfuncs.c
+++ b/src/backend/distributed/utils/citus_copyfuncs.c
@@ -34,57 +34,57 @@ CitusSetTag(Node *node, int tag)
#define DECLARE_FROM_AND_NEW_NODE(nodeTypeName) \
- nodeTypeName *newnode = \
- (nodeTypeName *) CitusSetTag((Node *) target_node, T_ ## nodeTypeName); \
- nodeTypeName *from = (nodeTypeName *) source_node
+ nodeTypeName *newnode = \
+ (nodeTypeName *) CitusSetTag((Node *) target_node, T_ ## nodeTypeName); \
+ nodeTypeName *from = (nodeTypeName *) source_node
/* Copy a simple scalar field (int, float, bool, enum, etc) */
#define COPY_SCALAR_FIELD(fldname) \
- (newnode->fldname = from->fldname)
+ (newnode->fldname = from->fldname)
/* Copy a field that is a pointer to some kind of Node or Node tree */
#define COPY_NODE_FIELD(fldname) \
- (newnode->fldname = copyObject(from->fldname))
+ (newnode->fldname = copyObject(from->fldname))
/* Copy a field that is a pointer to a C string, or perhaps NULL */
#define COPY_STRING_FIELD(fldname) \
- (newnode->fldname = from->fldname ? pstrdup(from->fldname) : (char *) NULL)
+ (newnode->fldname = from->fldname ? pstrdup(from->fldname) : (char *) NULL)
/* Copy a node array. Target array is also allocated. */
#define COPY_NODE_ARRAY(fldname, type, count) \
- do { \
- int i = 0; \
- newnode->fldname = (type **) palloc(count * sizeof(type *)); \
- for (i = 0; i < count; ++i) \
- { \
- newnode->fldname[i] = copyObject(from->fldname[i]); \
+ do { \
+ int i = 0; \
+ newnode->fldname = (type **) palloc(count * sizeof(type *)); \
+ for (i = 0; i < count; ++i) \
+ { \
+ newnode->fldname[i] = copyObject(from->fldname[i]); \
+ } \
} \
- } \
- while (0)
+ while (0)
/* Copy a scalar array. Target array is also allocated. */
#define COPY_SCALAR_ARRAY(fldname, type, count) \
- do { \
- int i = 0; \
- newnode->fldname = (type *) palloc(count * sizeof(type)); \
- for (i = 0; i < count; ++i) \
- { \
- newnode->fldname[i] = from->fldname[i]; \
+ do { \
+ int i = 0; \
+ newnode->fldname = (type *) palloc(count * sizeof(type)); \
+ for (i = 0; i < count; ++i) \
+ { \
+ newnode->fldname[i] = from->fldname[i]; \
+ } \
} \
- } \
- while (0)
+ while (0)
#define COPY_STRING_LIST(fldname) \
- do { \
- char *curString = NULL; \
- List *newList = NIL; \
- foreach_declared_ptr(curString, from->fldname) { \
- char *newString = curString ? pstrdup(curString) : (char *) NULL; \
- newList = lappend(newList, newString); \
+ do { \
+ char *curString = NULL; \
+ List *newList = NIL; \
+ foreach_declared_ptr(curString, from->fldname) { \
+ char *newString = curString ? pstrdup(curString) : (char *) NULL; \
+ newList = lappend(newList, newString); \
+ } \
+ newnode->fldname = newList; \
} \
- newnode->fldname = newList; \
- } \
- while (0)
+ while (0)
static void CopyTaskQuery(Task *newnode, Task *from);
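
Editor's note: for orientation, this is how the copy macros above compose inside a node copy function. MyNode, its fields, and the tag T_MyNode are hypothetical, and the sketch assumes the surrounding file's Postgres includes; the target_node/source_node parameter names are fixed, since DECLARE_FROM_AND_NEW_NODE refers to them:

```c
/* hypothetical node type, only to show the macro call shape */
typedef struct MyNode
{
	NodeTag type;            /* tag header, set by CitusSetTag via the macro */
	int taskId;              /* scalar field */
	Node *whereClause;       /* node-tree field */
	char *queryString;       /* C string field, possibly NULL */
} MyNode;

static void
CopyNodeMyNode(ExtensibleNode *target_node, const ExtensibleNode *source_node)
{
	DECLARE_FROM_AND_NEW_NODE(MyNode);   /* binds `newnode` and `from` */

	COPY_SCALAR_FIELD(taskId);
	COPY_NODE_FIELD(whereClause);
	COPY_STRING_FIELD(queryString);
}
```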
diff --git a/src/backend/distributed/utils/colocation_utils.c b/src/backend/distributed/utils/colocation_utils.c
index 816e3ce2a..a3713ba5a 100644
--- a/src/backend/distributed/utils/colocation_utils.c
+++ b/src/backend/distributed/utils/colocation_utils.c
@@ -531,8 +531,8 @@ ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType,
continue;
}
- if (colocationId == INVALID_COLOCATION_ID || colocationId >
- colocationForm->colocationid)
+ if (colocationId == INVALID_COLOCATION_ID ||
+ colocationId > colocationForm->colocationid)
{
/*
* We assign the smallest colocation id among all the matches so that we
@@ -1051,8 +1051,8 @@ ColocatedShardIntervalList(ShardInterval *shardInterval)
* Since we iterate over co-located tables, shard count of each table should be
* same and greater than shardIntervalIndex.
*/
- Assert(cacheEntry->shardIntervalArrayLength ==
- colocatedTableCacheEntry->shardIntervalArrayLength);
+ Assert(cacheEntry->shardIntervalArrayLength == colocatedTableCacheEntry->
+ shardIntervalArrayLength);
ShardInterval *colocatedShardInterval =
colocatedTableCacheEntry->sortedShardIntervalArray[shardIntervalIndex];
@@ -1122,8 +1122,8 @@ ColocatedNonPartitionShardIntervalList(ShardInterval *shardInterval)
* Since we iterate over co-located tables, shard count of each table should be
* same and greater than shardIntervalIndex.
*/
- Assert(cacheEntry->shardIntervalArrayLength ==
- colocatedTableCacheEntry->shardIntervalArrayLength);
+ Assert(cacheEntry->shardIntervalArrayLength == colocatedTableCacheEntry->
+ shardIntervalArrayLength);
ShardInterval *colocatedShardInterval =
colocatedTableCacheEntry->sortedShardIntervalArray[shardIntervalIndex];
diff --git a/src/backend/distributed/utils/enable_ssl.c b/src/backend/distributed/utils/enable_ssl.c
index 6830d7058..0e7296dfc 100644
--- a/src/backend/distributed/utils/enable_ssl.c
+++ b/src/backend/distributed/utils/enable_ssl.c
@@ -43,7 +43,7 @@
#define ENABLE_SSL_QUERY "ALTER SYSTEM SET ssl TO on;"
#define RESET_CITUS_NODE_CONNINFO \
- "ALTER SYSTEM SET citus.node_conninfo TO 'sslmode=prefer';"
+ "ALTER SYSTEM SET citus.node_conninfo TO 'sslmode=prefer';"
#define CITUS_AUTO_SSL_COMMON_NAME "citus-auto-ssl"
#define X509_SUBJECT_COMMON_NAME "CN"
@@ -65,7 +65,7 @@
"ECDHE-RSA-AES128-SHA256:" \
"ECDHE-RSA-AES256-SHA384"
#define SET_CITUS_SSL_CIPHERS_QUERY \
- "ALTER SYSTEM SET ssl_ciphers TO '" CITUS_DEFAULT_SSL_CIPHERS "';"
+ "ALTER SYSTEM SET ssl_ciphers TO '" CITUS_DEFAULT_SSL_CIPHERS "';"
/* forward declaration of helper functions */
diff --git a/src/backend/distributed/utils/foreign_key_relationship.c b/src/backend/distributed/utils/foreign_key_relationship.c
index 0025becb4..63732a6cc 100644
--- a/src/backend/distributed/utils/foreign_key_relationship.c
+++ b/src/backend/distributed/utils/foreign_key_relationship.c
@@ -81,7 +81,8 @@ static List * GetRelationshipNodesForFKeyConnectedRelations(
static List * GetAllNeighboursList(ForeignConstraintRelationshipNode *relationshipNode);
static ForeignConstraintRelationshipNode * GetRelationshipNodeForRelationId(Oid
relationId,
- bool *isFound);
+ bool *
+ isFound);
static void CreateForeignConstraintRelationshipGraph(void);
static bool IsForeignConstraintRelationshipGraphValid(void);
static List * GetNeighbourList(ForeignConstraintRelationshipNode *relationshipNode,
@@ -177,8 +178,8 @@ ShouldUndistributeCitusLocalTable(Oid relationId)
* to given relation node via a foreign key relationship graph.
*/
static List *
-GetRelationshipNodesForFKeyConnectedRelations(
- ForeignConstraintRelationshipNode *relationshipNode)
+GetRelationshipNodesForFKeyConnectedRelations(ForeignConstraintRelationshipNode *
+ relationshipNode)
{
HTAB *oidVisitedMap = CreateSimpleHashSetWithName(Oid, "oid visited hash set");
@@ -566,8 +567,8 @@ PopulateAdjacencyLists(void)
/* we just saw this edge, no need to add it twice */
if (currentFConstraintRelationshipEdge->referencingRelationOID ==
prevReferencingOid &&
- currentFConstraintRelationshipEdge->referencedRelationOID ==
- prevReferencedOid)
+ currentFConstraintRelationshipEdge->referencedRelationOID == prevReferencedOid
+ )
{
continue;
}
diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c
index 063465beb..56794825e 100644
--- a/src/backend/distributed/utils/multi_partitioning_utils.c
+++ b/src/backend/distributed/utils/multi_partitioning_utils.c
@@ -61,8 +61,12 @@ static void CreateFixPartitionShardIndexNames(Oid parentRelationId,
static List * WorkerFixPartitionShardIndexNamesCommandList(uint64 parentShardId,
List *indexIdList,
Oid partitionRelationId);
-static List * WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex(
- char *qualifiedParentShardIndexName, Oid parentIndexId, Oid partitionRelationId);
+static List * WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex(char *
+ qualifiedParentShardIndexName,
+ Oid
+ parentIndexId,
+ Oid
+ partitionRelationId);
static List * WorkerFixPartitionShardIndexNamesCommandListForPartitionIndex(Oid
partitionIndexId,
char *
@@ -652,8 +656,10 @@ WorkerFixPartitionShardIndexNamesCommandList(uint64 parentShardId,
* given partition. Otherwise, all the partitions are included.
*/
static List *
-WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex(
- char *qualifiedParentShardIndexName, Oid parentIndexId, Oid partitionRelationId)
+WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex(char *
+ qualifiedParentShardIndexName,
+ Oid parentIndexId, Oid
+ partitionRelationId)
{
List *commandList = NIL;
diff --git a/src/backend/distributed/utils/reference_table_utils.c b/src/backend/distributed/utils/reference_table_utils.c
index 3e7ccef6c..5bdb4fd75 100644
--- a/src/backend/distributed/utils/reference_table_utils.c
+++ b/src/backend/distributed/utils/reference_table_utils.c
@@ -813,7 +813,7 @@ CopyShardPlacementToWorkerNodeQuery(ShardPlacement *sourceShardPlacement,
appendStringInfo(queryString,
"SELECT pg_catalog.citus_copy_shard_placement("
UINT64_FORMAT ", %d, %d, "
- "transfer_mode := %s)",
+ "transfer_mode := %s)",
sourceShardPlacement->shardId,
sourceShardPlacement->nodeId,
workerNode->nodeId,
diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c
index 012dc1079..1dbc84c42 100644
--- a/src/backend/distributed/utils/resource_lock.c
+++ b/src/backend/distributed/utils/resource_lock.c
@@ -50,7 +50,7 @@
#include "distributed/worker_transaction.h"
#define LOCK_RELATION_IF_EXISTS \
- "SELECT pg_catalog.lock_relation_if_exists(%s, %s);"
+ "SELECT pg_catalog.lock_relation_if_exists(%s, %s);"
/* static definition and declarations */
struct LockModeToStringType
diff --git a/src/include/columnar/columnar_tableam.h b/src/include/columnar/columnar_tableam.h
index 18331bd70..098d8b1ac 100644
--- a/src/include/columnar/columnar_tableam.h
+++ b/src/include/columnar/columnar_tableam.h
@@ -29,7 +29,7 @@
* to the following interval: [FirstOffsetNumber, MaxHeapTuplesPerPage].
*/
#define VALID_ITEMPOINTER_OFFSETS \
- ((uint64) (MaxHeapTuplesPerPage - FirstOffsetNumber + 1))
+ ((uint64) (MaxHeapTuplesPerPage - FirstOffsetNumber + 1))
/*
* Number of valid ItemPointer BlockNumber's for "row number" <> "ItemPointer"
diff --git a/src/include/columnar/columnar_version_compat.h b/src/include/columnar/columnar_version_compat.h
index be5c3e379..702586d2a 100644
--- a/src/include/columnar/columnar_version_compat.h
+++ b/src/include/columnar/columnar_version_compat.h
@@ -23,6 +23,6 @@
#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE
#define ExplainPropertyLong(qlabel, value, es) \
- ExplainPropertyInteger(qlabel, NULL, value, es)
+ ExplainPropertyInteger(qlabel, NULL, value, es)
#endif /* COLUMNAR_COMPAT_H */
diff --git a/src/include/distributed/argutils.h b/src/include/distributed/argutils.h
index 38efbdc77..601db9750 100644
--- a/src/include/distributed/argutils.h
+++ b/src/include/distributed/argutils.h
@@ -15,11 +15,11 @@
* arguments are allowed to be NULL.
*/
#define PG_ENSURE_ARGNOTNULL(argIndex, argName) \
- if (PG_ARGISNULL(argIndex)) \
- { \
- ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), \
- errmsg("%s cannot be NULL", argName))); \
- }
+ if (PG_ARGISNULL(argIndex)) \
+ { \
+ ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), \
+ errmsg("%s cannot be NULL", argName))); \
+ }
/*
* PG_GETARG_TEXT_TO_CSTRING is the same as PG_GETARG_TEXT_P, but instead of
@@ -27,7 +27,7 @@
* the argument is not NULL.
*/
#define PG_GETARG_TEXT_TO_CSTRING(argIndex) \
- text_to_cstring(PG_GETARG_TEXT_P(argIndex))
+ text_to_cstring(PG_GETARG_TEXT_P(argIndex))
/*
* PG_GETARG_TEXT_TO_CSTRING_OR_NULL is the same as PG_GETARG_TEXT_TO_CSTRING,
@@ -35,14 +35,14 @@
* return a NULL pointer.
*/
#define PG_GETARG_TEXT_TO_CSTRING_OR_NULL(argIndex) \
- PG_ARGISNULL(argIndex) ? NULL : PG_GETARG_TEXT_TO_CSTRING(argIndex)
+ PG_ARGISNULL(argIndex) ? NULL : PG_GETARG_TEXT_TO_CSTRING(argIndex)
/*
* PG_GETARG_NAME_OR_NULL is the same as PG_GETARG_NAME, but it supports the
* case where the argument is NULL. In this case it will return a NULL pointer.
*/
#define PG_GETARG_NAME_OR_NULL(argIndex) \
- PG_ARGISNULL(argIndex) ? NULL : PG_GETARG_NAME(argIndex)
+ PG_ARGISNULL(argIndex) ? NULL : PG_GETARG_NAME(argIndex)
/*
* PG_GETARG_FLOAT4_OR is the same as PG_GETARG_FLOAT4, but it supports the
@@ -50,4 +50,4 @@
* fallback.
*/
#define PG_GETARG_FLOAT4_OR_DEFAULT(argIndex, fallback) \
- PG_ARGISNULL(argIndex) ? (fallback) : PG_GETARG_FLOAT4(argIndex)
+ PG_ARGISNULL(argIndex) ? (fallback) : PG_GETARG_FLOAT4(argIndex)
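Taken together, these argument helpers keep NULL handling in fmgr V1 functions terse. A minimal sketch of how a UDF might combine them; the function name and argument layout are hypothetical, and the PG_FUNCTION_INFO_V1 registration is elided:

/* hypothetical UDF: args are (text relation_name, text comment, float4 weight) */
Datum
citus_example_udf(PG_FUNCTION_ARGS)
{
	/* errors with ERRCODE_NULL_VALUE_NOT_ALLOWED if arg 0 is NULL */
	PG_ENSURE_ARGNOTNULL(0, "relation name");
	char *relationName = PG_GETARG_TEXT_TO_CSTRING(0);

	/* the _OR_NULL variant returns a NULL pointer instead of erroring */
	char *comment = PG_GETARG_TEXT_TO_CSTRING_OR_NULL(1);

	/* falls back to 1.0 when arg 2 is NULL */
	float4 weight = PG_GETARG_FLOAT4_OR_DEFAULT(2, 1.0);

	PG_RETURN_BOOL(relationName != NULL && comment != NULL && weight > 0);
}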
diff --git a/src/include/distributed/citus_nodefuncs.h b/src/include/distributed/citus_nodefuncs.h
index f7c0061b9..52b2e8a20 100644
--- a/src/include/distributed/citus_nodefuncs.h
+++ b/src/include/distributed/citus_nodefuncs.h
@@ -35,7 +35,7 @@ extern void RegisterNodes(void);
#define READFUNC_ARGS struct ExtensibleNode *node
#define OUTFUNC_ARGS StringInfo str, const struct ExtensibleNode *raw_node
#define COPYFUNC_ARGS struct ExtensibleNode *target_node, const struct \
- ExtensibleNode *source_node
+ ExtensibleNode *source_node
extern void ReadUnsupportedCitusNode(READFUNC_ARGS);
diff --git a/src/include/distributed/citus_ruleutils.h b/src/include/distributed/citus_ruleutils.h
index 28a6198c9..a624767a7 100644
--- a/src/include/distributed/citus_ruleutils.h
+++ b/src/include/distributed/citus_ruleutils.h
@@ -29,7 +29,8 @@ extern char * pg_get_serverdef_string(Oid tableRelationId);
extern char * pg_get_sequencedef_string(Oid sequenceRelid);
extern Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId);
extern char * pg_get_tableschemadef_string(Oid tableRelationId,
- IncludeSequenceDefaults includeSequenceDefaults,
+ IncludeSequenceDefaults
+ includeSequenceDefaults,
IncludeIdentities includeIdentityDefaults,
char *accessMethod);
extern void EnsureRelationKindSupported(Oid relationId);
diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h
index 19919e32c..2d8ed3b2f 100644
--- a/src/include/distributed/commands.h
+++ b/src/include/distributed/commands.h
@@ -48,7 +48,8 @@ extern void SwitchToSequentialAndLocalExecutionIfRelationNameTooLong(Oid relatio
extern void SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(Oid
parentRelationId,
Oid
- partitionRelationId);
+ partitionRelationId)
+;
/* DistOpsOperationType to be used in DistributeObjectOps */
typedef enum DistOpsOperationType
@@ -560,13 +561,15 @@ extern List * PreprocessAlterSequenceSchemaStmt(Node *node, const char *queryStr
processUtilityContext);
extern List * PostprocessAlterSequenceSchemaStmt(Node *node, const char *queryString);
extern List * PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString,
- ProcessUtilityContext processUtilityContext);
+ ProcessUtilityContext
+ processUtilityContext);
extern List * PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString);
extern List * PreprocessAlterSequencePersistenceStmt(Node *node, const char *queryString,
ProcessUtilityContext
processUtilityContext);
extern List * PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
- ProcessUtilityContext processUtilityContext);
+ ProcessUtilityContext
+ processUtilityContext);
extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern List * SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool
@@ -639,7 +642,8 @@ extern void PrepareAlterTableStmtForConstraint(AlterTableStmt *alterTableStateme
extern List * PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
ProcessUtilityContext processUtilityContext);
extern List * PreprocessAlterTableMoveAllStmt(Node *node, const char *queryString,
- ProcessUtilityContext processUtilityContext);
+ ProcessUtilityContext
+ processUtilityContext);
extern List * PreprocessAlterTableSchemaStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern void SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStmt,
@@ -789,9 +793,9 @@ extern List * PostprocessAlterTriggerDependsStmt(Node *node, const char *querySt
extern List * PreprocessAlterTriggerDependsStmt(Node *node, const char *queryString,
ProcessUtilityContext
processUtilityContext);
-extern void AlterTriggerDependsEventExtendNames(
- AlterObjectDependsStmt *alterTriggerDependsStmt,
- char *schemaName, uint64 shardId);
+extern void AlterTriggerDependsEventExtendNames(AlterObjectDependsStmt *
+ alterTriggerDependsStmt,
+ char *schemaName, uint64 shardId);
extern void ErrorOutForTriggerIfNotSupported(Oid relationId);
extern void ErrorIfRelationHasUnsupportedTrigger(Oid relationId);
extern List * PreprocessDropTriggerStmt(Node *node, const char *queryString,
@@ -834,8 +838,8 @@ extern bool RelationIdListHasReferenceTable(List *relationIdList);
extern List * GetFKeyCreationCommandsForRelationIdList(List *relationIdList);
extern void DropRelationForeignKeys(Oid relationId, int flags);
extern void SetLocalEnableLocalReferenceForeignKeys(bool state);
-extern void ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(
- List *utilityCommandList);
+extern void ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *
+ utilityCmdList);
extern void ExecuteAndLogUtilityCommandList(List *ddlCommandList);
extern void ExecuteAndLogUtilityCommand(const char *commandString);
extern void ExecuteForeignKeyCreateCommandList(List *ddlCommandList,
diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h
index 42b41d557..0f093e087 100644
--- a/src/include/distributed/commands/utility_hook.h
+++ b/src/include/distributed/commands/utility_hook.h
@@ -112,5 +112,6 @@ extern void UndistributeDisconnectedCitusLocalTables(void);
extern void NotifyUtilityHookConstraintDropped(void);
extern void ResetConstraintDropped(void);
extern void ExecuteDistributedDDLJob(DDLJob *ddlJob);
+extern bool IsDroppedOrGenerated(Form_pg_attribute attr);
#endif /* MULTI_UTILITY_H */
diff --git a/src/include/distributed/coordinator_protocol.h b/src/include/distributed/coordinator_protocol.h
index b2170fd2e..0a72fd3f1 100644
--- a/src/include/distributed/coordinator_protocol.h
+++ b/src/include/distributed/coordinator_protocol.h
@@ -59,12 +59,13 @@
/* Remote call definitions to help with data staging and deletion */
#define WORKER_APPLY_SHARD_DDL_COMMAND \
- "SELECT worker_apply_shard_ddl_command (" UINT64_FORMAT ", %s, %s)"
+ "SELECT worker_apply_shard_ddl_command (" UINT64_FORMAT ", %s, %s)"
#define WORKER_APPLY_SHARD_DDL_COMMAND_WITHOUT_SCHEMA \
- "SELECT worker_apply_shard_ddl_command (" UINT64_FORMAT ", %s)"
+ "SELECT worker_apply_shard_ddl_command (" UINT64_FORMAT ", %s)"
#define WORKER_APPLY_INTER_SHARD_DDL_COMMAND \
- "SELECT worker_apply_inter_shard_ddl_command (" UINT64_FORMAT ", %s, " UINT64_FORMAT \
- ", %s, %s)"
+ "SELECT worker_apply_inter_shard_ddl_command (" UINT64_FORMAT \
+ ", %s, " UINT64_FORMAT \
+ ", %s, %s)"
#define SHARD_RANGE_QUERY "SELECT min(%s), max(%s) FROM %s"
#define SHARD_TABLE_SIZE_QUERY "SELECT pg_table_size(%s)"
#define SHARD_CSTORE_TABLE_SIZE_QUERY "SELECT cstore_table_size(%s)"
@@ -225,7 +226,8 @@ extern uint64 GetNextShardId(void);
extern uint64 GetNextPlacementId(void);
extern Oid ResolveRelationId(text *relationName, bool missingOk);
extern List * GetFullTableCreationCommands(Oid relationId,
- IncludeSequenceDefaults includeSequenceDefaults,
+ IncludeSequenceDefaults
+ includeSequenceDefaults,
IncludeIdentities includeIdentityDefaults,
bool creatingShellTableOnRemoteNode);
extern List * GetPostLoadTableCreationCommands(Oid relationId, bool includeIndexes,
diff --git a/src/include/distributed/distributed_planner.h b/src/include/distributed/distributed_planner.h
index 67637cd78..b0c3347be 100644
--- a/src/include/distributed/distributed_planner.h
+++ b/src/include/distributed/distributed_planner.h
@@ -119,6 +119,7 @@ typedef struct FastPathRestrictionContext
bool delayFastPathPlanning;
} FastPathRestrictionContext;
+struct DistributedPlanningContext;
typedef struct PlannerRestrictionContext
{
RelationRestrictionContext *relationRestrictionContext;
@@ -132,6 +133,18 @@ typedef struct PlannerRestrictionContext
*/
FastPathRestrictionContext *fastPathRestrictionContext;
MemoryContext memoryContext;
+
+#if PG_VERSION_NUM >= PG_VERSION_18
+
+ /*
+	 * Enables access to the distributed planning context from
+	 * planner hooks called by Postgres, so that Citus can track
+	 * changes Postgres makes to the query tree (such as expansion
+	 * of virtual columns) and reflect them in subsequent
+	 * distributed planning.
+ */
+ struct DistributedPlanningContext *planContext;
+#endif
} PlannerRestrictionContext;
typedef struct RelationShard
@@ -238,7 +251,7 @@ extern PlannedStmt * distributed_planner(Query *parse,
* in distributed queries
*/
#define LOCAL_TABLE_SUBQUERY_CTE_HINT \
- "Use CTE's or subqueries to select from local tables and use them in joins"
+ "Use CTE's or subqueries to select from local tables and use them in joins"
extern List * ExtractRangeTableEntryList(Query *query);
extern bool NeedsDistributedPlanning(Query *query);
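A rough sketch of the access pattern the new field enables; only the planContext member and its PG 18 guard come from this header, the hook body itself is hypothetical:

static void
ExampleInspectPlanContext(PlannerRestrictionContext *restrictionContext)
{
#if PG_VERSION_NUM >= PG_VERSION_18
	struct DistributedPlanningContext *planContext =
		restrictionContext->planContext;

	if (planContext != NULL)
	{
		/* e.g. re-read the query tree after Postgres expanded virtual columns */
	}
#endif
}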
diff --git a/src/include/distributed/enterprise.h b/src/include/distributed/enterprise.h
index 2ba2fa1ff..9218976fc 100644
--- a/src/include/distributed/enterprise.h
+++ b/src/include/distributed/enterprise.h
@@ -18,10 +18,10 @@
#define NOT_SUPPORTED_IN_COMMUNITY(name) \
- PG_FUNCTION_INFO_V1(name); \
- Datum name(PG_FUNCTION_ARGS) { \
- ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), \
- errmsg(# name "() is only supported on Citus Enterprise"))); \
- }
+ PG_FUNCTION_INFO_V1(name); \
+ Datum name(PG_FUNCTION_ARGS) { \
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), \
+ errmsg(# name "() is only supported on Citus Enterprise"))); \
+ }
#endif
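Usage is a single line per stubbed function; the macro expands to a V1 function that unconditionally raises ERRCODE_FEATURE_NOT_SUPPORTED. With a hypothetical function name:

NOT_SUPPORTED_IN_COMMUNITY(citus_example_enterprise_only);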
diff --git a/src/include/distributed/errormessage.h b/src/include/distributed/errormessage.h
index 7a38d513c..c9a90e7cc 100644
--- a/src/include/distributed/errormessage.h
+++ b/src/include/distributed/errormessage.h
@@ -37,8 +37,8 @@ typedef struct DeferredErrorMessage
* serialized/copied/deserialized, i.e. can be embedded in plans and such.
*/
#define DeferredError(code, message, detail, hint) \
- DeferredErrorInternal(code, message, detail, hint, __FILE__, __LINE__, \
- PG_FUNCNAME_MACRO)
+ DeferredErrorInternal(code, message, detail, hint, __FILE__, __LINE__, \
+ PG_FUNCNAME_MACRO)
DeferredErrorMessage * DeferredErrorInternal(int code, const char *message,
const char *detail, const char *hint,
@@ -54,21 +54,21 @@ DeferredErrorMessage * DeferredErrorInternal(int code, const char *message,
*/
#ifdef HAVE__BUILTIN_CONSTANT_P
#define RaiseDeferredError(error, elevel) \
- do { \
- RaiseDeferredErrorInternal(error, elevel); \
- if (__builtin_constant_p(elevel) && (elevel) >= ERROR) { \
- pg_unreachable(); } \
- } \
- while (0)
+ do { \
+ RaiseDeferredErrorInternal(error, elevel); \
+ if (__builtin_constant_p(elevel) && (elevel) >= ERROR) { \
+ pg_unreachable(); } \
+ } \
+ while (0)
#else /* !HAVE_BUILTIN_CONSTANT_P */
#define RaiseDeferredError(error, elevel) \
- do { \
- const int elevel_ = (elevel); \
- RaiseDeferredErrorInternal(error, elevel_); \
- if (elevel_ >= ERROR) { \
- pg_unreachable(); } \
- } \
- while (0)
+ do { \
+ const int elevel_ = (elevel); \
+ RaiseDeferredErrorInternal(error, elevel_); \
+ if (elevel_ >= ERROR) { \
+ pg_unreachable(); } \
+ } \
+ while (0)
#endif /* HAVE_BUILTIN_CONSTANT_P */
void RaiseDeferredErrorInternal(DeferredErrorMessage *error, int elevel);
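The intended flow, sketched with a made-up check: construct the error where the problem is detected, then raise it at a caller-chosen severity once the decision is final:

static DeferredErrorMessage *
ExampleDeferredCheck(bool queryIsSupported)
{
	if (!queryIsSupported)
	{
		/* DeferredError records __FILE__, __LINE__ and the function name */
		return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
							 "query cannot be distributed", NULL, NULL);
	}

	return NULL;
}

static void
ExampleRaiseIfNeeded(void)
{
	DeferredErrorMessage *error = ExampleDeferredCheck(false);
	if (error != NULL)
	{
		/* with a constant ERROR elevel the macro marks this unreachable */
		RaiseDeferredError(error, ERROR);
	}
}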
diff --git a/src/include/distributed/extended_op_node_utils.h b/src/include/distributed/extended_op_node_utils.h
index ef75cabec..32f020512 100644
--- a/src/include/distributed/extended_op_node_utils.h
+++ b/src/include/distributed/extended_op_node_utils.h
@@ -40,8 +40,9 @@ typedef struct ExtendedOpNodeProperties
} ExtendedOpNodeProperties;
-extern ExtendedOpNodeProperties BuildExtendedOpNodeProperties(
- MultiExtendedOp *extendedOpNode, bool hasNonDistributableAggregates);
+extern ExtendedOpNodeProperties BuildExtendedOpNodeProperties(MultiExtendedOp *
+ extendedOpNode, bool
+ hasNonDistributableAggregates);
#endif /* EXTENDED_OP_NODE_UTILS_H_ */
diff --git a/src/include/distributed/hash_helpers.h b/src/include/distributed/hash_helpers.h
index b64bfde71..363f52ee1 100644
--- a/src/include/distributed/hash_helpers.h
+++ b/src/include/distributed/hash_helpers.h
@@ -20,21 +20,21 @@
* padding bytes. This is necessary to use a type as a hash key with tag_hash.
*/
#define assert_valid_hash_key2(type, field1, field2) \
- StaticAssertDecl( \
- sizeof(type) == sizeof(((type) { 0 }).field1) \
- + sizeof(((type) { 0 }).field2), \
- # type " has padding bytes, but is used as a hash key in a simple hash");
+ StaticAssertDecl( \
+ sizeof(type) == sizeof(((type) { 0 }).field1) \
+ + sizeof(((type) { 0 }).field2), \
+ # type " has padding bytes, but is used as a hash key in a simple hash");
/*
* assert_valid_hash_key3 checks if a type that contains 3 fields contains no
* padding bytes. This is necessary to use a type as a hash key with tag_hash.
*/
#define assert_valid_hash_key3(type, field1, field2, field3) \
- StaticAssertDecl( \
- sizeof(type) == sizeof(((type) { 0 }).field1) \
- + sizeof(((type) { 0 }).field2) \
- + sizeof(((type) { 0 }).field3), \
- # type " has padding bytes, but is used as a hash key in a simple hash");
+ StaticAssertDecl( \
+ sizeof(type) == sizeof(((type) { 0 }).field1) \
+ + sizeof(((type) { 0 }).field2) \
+ + sizeof(((type) { 0 }).field3), \
+ # type " has padding bytes, but is used as a hash key in a simple hash");
extern void hash_delete_all(HTAB *htab);
@@ -44,10 +44,10 @@ extern void hash_delete_all(HTAB *htab);
*/
#define foreach_htab(var, status, htab) \
- hash_seq_init((status), (htab)); \
- for ((var) = hash_seq_search(status); \
- (var) != NULL; \
- (var) = hash_seq_search(status))
+ hash_seq_init((status), (htab)); \
+ for ((var) = hash_seq_search(status); \
+ (var) != NULL; \
+ (var) = hash_seq_search(status))
extern void foreach_htab_cleanup(void *var, HASH_SEQ_STATUS *status);
@@ -72,28 +72,28 @@ extern HTAB * CreateSimpleHashWithNameAndSizeInternal(Size keysize, Size entrysi
* returning undefined values. You can check this using assert_valid_hash_keyX.
*/
#define CreateSimpleHash(keyType, entryType) \
- CreateSimpleHashWithNameAndSize(keyType, entryType, \
- # entryType "Hash", 32)
+ CreateSimpleHashWithNameAndSize(keyType, entryType, \
+ # entryType "Hash", 32)
/*
* Same as CreateSimpleHash but allows specifying the name
*/
#define CreateSimpleHashWithName(keyType, entryType, name) \
- CreateSimpleHashWithNameAndSize(keyType, entryType, \
- name, 32)
+ CreateSimpleHashWithNameAndSize(keyType, entryType, \
+ name, 32)
/*
 * CreateSimpleHashWithSize is the same as CreateSimpleHash, but allows
 * configuring the number of elements that initially fit in the hash table.
*/
#define CreateSimpleHashWithSize(keyType, entryType, size) \
- CreateSimpleHashWithNameAndSize(keyType, entryType, \
- # entryType "Hash", size)
+ CreateSimpleHashWithNameAndSize(keyType, entryType, \
+ # entryType "Hash", size)
#define CreateSimpleHashWithNameAndSize(keyType, entryType, name, size) \
- CreateSimpleHashWithNameAndSizeInternal(sizeof(keyType), \
- sizeof(entryType), \
- name, size)
+ CreateSimpleHashWithNameAndSizeInternal(sizeof(keyType), \
+ sizeof(entryType), \
+ name, size)
/*
@@ -101,8 +101,8 @@ extern HTAB * CreateSimpleHashWithNameAndSizeInternal(Size keysize, Size entrysi
* tag_hash and stores the values in the CurrentMemoryContext.
*/
#define CreateSimpleHashSet(keyType) \
- CreateSimpleHashWithName(keyType, keyType, \
- # keyType "HashSet")
+ CreateSimpleHashWithName(keyType, keyType, \
+ # keyType "HashSet")
/*
 * CreateSimpleHashSetWithSize creates a hash set that hashes its values using
@@ -110,7 +110,7 @@ extern HTAB * CreateSimpleHashWithNameAndSizeInternal(Size keysize, Size entrysi
* specifying its number of elements.
*/
#define CreateSimpleHashSetWithSize(keyType, size) \
- CreateSimpleHashWithNameAndSize(keyType, keyType, # keyType "HashSet", size)
+ CreateSimpleHashWithNameAndSize(keyType, keyType, # keyType "HashSet", size)
/*
 * CreateSimpleHashSetWithName creates a hash set that hashes its values using the
@@ -118,7 +118,7 @@ extern HTAB * CreateSimpleHashWithNameAndSizeInternal(Size keysize, Size entrysi
* specifying its name.
*/
#define CreateSimpleHashSetWithName(keyType, name) \
- CreateSimpleHashWithName(keyType, keyType, name)
+ CreateSimpleHashWithName(keyType, keyType, name)
/*
 * CreateSimpleHashSetWithNameAndSize creates a hash set that hashes its values using the
@@ -126,7 +126,7 @@ extern HTAB * CreateSimpleHashWithNameAndSizeInternal(Size keysize, Size entrysi
* specifying its name and number of elements.
*/
#define CreateSimpleHashSetWithNameAndSize(keyType, name, size) \
- CreateSimpleHashWithNameAndSize(keyType, keyType, name, size)
+ CreateSimpleHashWithNameAndSize(keyType, keyType, name, size)
#endif
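A sketch tying the helpers together; the entry struct is hypothetical, with the key as its first field so tag_hash sees it at offset zero:

typedef struct ExampleShardCountEntry
{
	Oid relationId;      /* hash key */
	int shardCount;
} ExampleShardCountEntry;

static void
ExampleUseSimpleHash(void)
{
	HTAB *shardCountHash = CreateSimpleHash(Oid, ExampleShardCountEntry);

	Oid relationId = 16384;     /* arbitrary example oid */
	bool found = false;
	ExampleShardCountEntry *entry =
		hash_search(shardCountHash, &relationId, HASH_ENTER, &found);
	entry->shardCount = found ? entry->shardCount + 1 : 1;

	HASH_SEQ_STATUS status;
	ExampleShardCountEntry *iterEntry = NULL;
	foreach_htab(iterEntry, &status, shardCountHash)
	{
		/* visits every entry; hash_seq_search ends the scan on NULL */
	}
}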
diff --git a/src/include/distributed/intermediate_results.h b/src/include/distributed/intermediate_results.h
index ca4fa581e..b6ba8c81d 100644
--- a/src/include/distributed/intermediate_results.h
+++ b/src/include/distributed/intermediate_results.h
@@ -112,8 +112,8 @@ extern List * PartitionTasklistResults(const char *resultIdPrefix, List *selectT
int partitionColumnIndex,
CitusTableCacheEntry *distributionScheme,
bool binaryFormat);
-extern char * QueryStringForFragmentsTransfer(
- NodeToNodeFragmentsTransfer *fragmentsTransfer);
+extern char * QueryStringForFragmentsTransfer(NodeToNodeFragmentsTransfer *
+ fragmentsTransfer);
extern void ShardMinMaxValueArrays(ShardInterval **shardIntervalArray, int shardCount,
Oid intervalTypeId, ArrayType **minValueArray,
ArrayType **maxValueArray);
diff --git a/src/include/distributed/listutils.h b/src/include/distributed/listutils.h
index db9ea7ce7..f0b01b80a 100644
--- a/src/include/distributed/listutils.h
+++ b/src/include/distributed/listutils.h
@@ -51,10 +51,10 @@ typedef struct ListCellAndListWrapper
* var is NULL.
*/
#define foreach_declared_ptr(var, l) \
- for (ListCell *(var ## CellDoNotUse) = list_head(l); \
- (var ## CellDoNotUse) != NULL && \
- (((var) = lfirst(var ## CellDoNotUse)) || true); \
- var ## CellDoNotUse = lnext(l, var ## CellDoNotUse))
+ for (ListCell *(var ## CellDoNotUse) = list_head(l); \
+ (var ## CellDoNotUse) != NULL && \
+ (((var) = lfirst(var ## CellDoNotUse)) || true); \
+ var ## CellDoNotUse = lnext(l, var ## CellDoNotUse))
/*
@@ -64,10 +64,10 @@ typedef struct ListCellAndListWrapper
* For explanation of how it works see foreach_declared_ptr.
*/
#define foreach_declared_int(var, l) \
- for (ListCell *(var ## CellDoNotUse) = list_head(l); \
- (var ## CellDoNotUse) != NULL && \
- (((var) = lfirst_int(var ## CellDoNotUse)) || true); \
- var ## CellDoNotUse = lnext(l, var ## CellDoNotUse))
+ for (ListCell *(var ## CellDoNotUse) = list_head(l); \
+ (var ## CellDoNotUse) != NULL && \
+ (((var) = lfirst_int(var ## CellDoNotUse)) || true); \
+ var ## CellDoNotUse = lnext(l, var ## CellDoNotUse))
/*
@@ -77,10 +77,10 @@ typedef struct ListCellAndListWrapper
* For explanation of how it works see foreach_declared_ptr.
*/
#define foreach_declared_oid(var, l) \
- for (ListCell *(var ## CellDoNotUse) = list_head(l); \
- (var ## CellDoNotUse) != NULL && \
- (((var) = lfirst_oid(var ## CellDoNotUse)) || true); \
- var ## CellDoNotUse = lnext(l, var ## CellDoNotUse))
+ for (ListCell *(var ## CellDoNotUse) = list_head(l); \
+ (var ## CellDoNotUse) != NULL && \
+ (((var) = lfirst_oid(var ## CellDoNotUse)) || true); \
+ var ## CellDoNotUse = lnext(l, var ## CellDoNotUse))
/*
* forboth_ptr -
@@ -89,15 +89,15 @@ typedef struct ListCellAndListWrapper
* variables to store the pointer of each of the two cells in.
*/
#define forboth_ptr(var1, l1, var2, l2) \
- for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \
- *(var2 ## CellDoNotUse) = list_head(l2); \
- (var1 ## CellDoNotUse) != NULL && \
- (var2 ## CellDoNotUse) != NULL && \
- (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \
- (((var2) = lfirst(var2 ## CellDoNotUse)) || true); \
- var1 ## CellDoNotUse = lnext(l1, var1 ## CellDoNotUse), \
- var2 ## CellDoNotUse = lnext(l2, var2 ## CellDoNotUse) \
- )
+ for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \
+ *(var2 ## CellDoNotUse) = list_head(l2); \
+ (var1 ## CellDoNotUse) != NULL && \
+ (var2 ## CellDoNotUse) != NULL && \
+ (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \
+ (((var2) = lfirst(var2 ## CellDoNotUse)) || true); \
+ var1 ## CellDoNotUse = lnext(l1, var1 ## CellDoNotUse), \
+ var2 ## CellDoNotUse = lnext(l2, var2 ## CellDoNotUse) \
+ )
/*
* forboth_ptr_oid -
@@ -107,15 +107,15 @@ typedef struct ListCellAndListWrapper
* variables to store the pointer and the Oid of each of the two cells in.
*/
#define forboth_ptr_oid(var1, l1, var2, l2) \
- for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \
- *(var2 ## CellDoNotUse) = list_head(l2); \
- (var1 ## CellDoNotUse) != NULL && \
- (var2 ## CellDoNotUse) != NULL && \
- (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \
- (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \
- var1 ## CellDoNotUse = lnext(l1, var1 ## CellDoNotUse), \
- var2 ## CellDoNotUse = lnext(l2, var2 ## CellDoNotUse) \
- )
+ for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \
+ *(var2 ## CellDoNotUse) = list_head(l2); \
+ (var1 ## CellDoNotUse) != NULL && \
+ (var2 ## CellDoNotUse) != NULL && \
+ (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \
+ (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \
+ var1 ## CellDoNotUse = lnext(l1, var1 ## CellDoNotUse), \
+ var2 ## CellDoNotUse = lnext(l2, var2 ## CellDoNotUse) \
+ )
/*
* forboth_int_oid -
@@ -125,15 +125,15 @@ typedef struct ListCellAndListWrapper
* variables to store the int and the Oid of each of the two cells in.
*/
#define forboth_int_oid(var1, l1, var2, l2) \
- for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \
- *(var2 ## CellDoNotUse) = list_head(l2); \
- (var1 ## CellDoNotUse) != NULL && \
- (var2 ## CellDoNotUse) != NULL && \
- (((var1) = lfirst_int(var1 ## CellDoNotUse)) || true) && \
- (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \
- var1 ## CellDoNotUse = lnext(l1, var1 ## CellDoNotUse), \
- var2 ## CellDoNotUse = lnext(l2, var2 ## CellDoNotUse) \
- )
+ for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \
+ *(var2 ## CellDoNotUse) = list_head(l2); \
+ (var1 ## CellDoNotUse) != NULL && \
+ (var2 ## CellDoNotUse) != NULL && \
+ (((var1) = lfirst_int(var1 ## CellDoNotUse)) || true) && \
+ (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \
+ var1 ## CellDoNotUse = lnext(l1, var1 ## CellDoNotUse), \
+ var2 ## CellDoNotUse = lnext(l2, var2 ## CellDoNotUse) \
+ )
/*
* foreach_ptr_append -
@@ -157,10 +157,10 @@ typedef struct ListCellAndListWrapper
* - || true is used to always enter the loop even if var is NULL.
*/
#define foreach_ptr_append(var, l) \
- for (int var ## PositionDoNotUse = 0; \
- (var ## PositionDoNotUse) < list_length(l) && \
- (((var) = list_nth(l, var ## PositionDoNotUse)) || true); \
- var ## PositionDoNotUse ++)
+ for (int var ## PositionDoNotUse = 0; \
+ (var ## PositionDoNotUse) < list_length(l) && \
+ (((var) = list_nth(l, var ## PositionDoNotUse)) || true); \
+ var ## PositionDoNotUse++)
/* utility functions declaration shared within this module */
extern List * SortList(List *pointerList,
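These macros assign into a loop variable that the caller declares up front; a small hypothetical sketch:

static int
ExampleSumListInts(List *intList)
{
	int total = 0;

	int value = 0;
	foreach_declared_int(value, intList)
	{
		total += value;
	}

	return total;
}

static void
ExamplePairwiseWalk(List *nameList, List *relationIdList)
{
	char *name = NULL;
	Oid relationId = InvalidOid;

	forboth_ptr_oid(name, nameList, relationId, relationIdList)
	{
		/* name and relationId advance in lockstep until either list ends */
	}
}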
diff --git a/src/include/distributed/log_utils.h b/src/include/distributed/log_utils.h
index a9333a8a3..83ceb20b0 100644
--- a/src/include/distributed/log_utils.h
+++ b/src/include/distributed/log_utils.h
@@ -23,10 +23,10 @@ extern bool IsLoggableLevel(int logLevel);
#undef ereport
#define ereport(elevel, rest) \
- do { \
- int ereport_loglevel = elevel; \
- (void) (ereport_loglevel); \
- ereport_domain(elevel, TEXTDOMAIN, rest); \
- } while (0)
+ do { \
+ int ereport_loglevel = elevel; \
+ (void) (ereport_loglevel); \
+ ereport_domain(elevel, TEXTDOMAIN, rest); \
+ } while (0)
#endif /* LOG_UTILS_H */
diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h
index d3a7eaa06..49c903220 100644
--- a/src/include/distributed/metadata_cache.h
+++ b/src/include/distributed/metadata_cache.h
@@ -210,7 +210,8 @@ extern ShardPlacement * ShardPlacementForFunctionColocatedWithDistTable(
DistObjectCacheEntry *procedure, List *argumentList, Var *partitionColumn,
CitusTableCacheEntry
*cacheEntry,
- PlannedStmt *plan);
+ PlannedStmt *
+ plan);
extern bool CitusHasBeenLoaded(void);
extern bool CheckCitusVersion(int elevel);
extern bool CheckAvailableVersion(int elevel);
diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h
index 29583f01f..90a3662c6 100644
--- a/src/include/distributed/metadata_sync.h
+++ b/src/include/distributed/metadata_sync.h
@@ -189,16 +189,16 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context);
#define DELETE_ALL_COLOCATION "DELETE FROM pg_catalog.pg_dist_colocation"
#define DELETE_ALL_TENANT_SCHEMAS "DELETE FROM pg_catalog.pg_dist_schema"
#define WORKER_DROP_ALL_SHELL_TABLES \
- "CALL pg_catalog.worker_drop_all_shell_tables(%s)"
+ "CALL pg_catalog.worker_drop_all_shell_tables(%s)"
#define CITUS_INTERNAL_MARK_NODE_NOT_SYNCED \
- "SELECT citus_internal.mark_node_not_synced(%d, %d)"
+ "SELECT citus_internal.mark_node_not_synced(%d, %d)"
#define REMOVE_ALL_CITUS_TABLES_COMMAND \
- "SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition"
+ "SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition"
#define BREAK_ALL_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND \
- "SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition"
+ "SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition"
#define BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND \
- "SELECT pg_catalog.worker_drop_sequence_dependency(%s);"
+ "SELECT pg_catalog.worker_drop_sequence_dependency(%s);"
#define DISABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'off'"
#define ENABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'on'"
@@ -206,20 +206,20 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context);
#define ENABLE_METADATA_SYNC "SET citus.enable_metadata_sync TO 'on'"
#define WORKER_APPLY_SEQUENCE_COMMAND "SELECT worker_apply_sequence_command (%s,%s)"
#define UPSERT_PLACEMENT \
- "INSERT INTO pg_dist_placement " \
- "(shardid, shardstate, shardlength, " \
- "groupid, placementid) " \
- "VALUES (" UINT64_FORMAT ", 1, " UINT64_FORMAT \
- ", %d, " UINT64_FORMAT \
- ") " \
- "ON CONFLICT (shardid, groupid) DO UPDATE SET " \
- "shardstate = EXCLUDED.shardstate, " \
- "shardlength = EXCLUDED.shardlength, " \
- "placementid = EXCLUDED.placementid"
+ "INSERT INTO pg_dist_placement " \
+ "(shardid, shardstate, shardlength, " \
+ "groupid, placementid) " \
+ "VALUES (" UINT64_FORMAT ", 1, " UINT64_FORMAT \
+ ", %d, " UINT64_FORMAT \
+ ") " \
+ "ON CONFLICT (shardid, groupid) DO UPDATE SET " \
+ "shardstate = EXCLUDED.shardstate, " \
+ "shardlength = EXCLUDED.shardlength, " \
+ "placementid = EXCLUDED.placementid"
#define METADATA_SYNC_CHANNEL "metadata_sync"
#define WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES \
- "SELECT pg_catalog.worker_adjust_identity_column_seq_ranges(%s)"
+ "SELECT pg_catalog.worker_adjust_identity_column_seq_ranges(%s)"
/* controlled via GUC */
extern char *EnableManualMetadataChangesForUser;
diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h
index b72e2d396..8abbca18a 100644
--- a/src/include/distributed/metadata_utility.h
+++ b/src/include/distributed/metadata_utility.h
@@ -40,7 +40,7 @@
#define WORKER_PARTITIONED_TABLE_SIZE_FUNCTION "worker_partitioned_table_size(%s)"
#define WORKER_PARTITIONED_RELATION_SIZE_FUNCTION "worker_partitioned_relation_size(%s)"
#define WORKER_PARTITIONED_RELATION_TOTAL_SIZE_FUNCTION \
- "worker_partitioned_relation_total_size(%s)"
+ "worker_partitioned_relation_total_size(%s)"
#define SHARD_SIZES_COLUMN_COUNT (2)
@@ -302,12 +302,12 @@ typedef struct BackgroundTask
} BackgroundTask;
#define SET_NULLABLE_FIELD(ptr, field, value) \
- (ptr)->__nullable_storage.field = (value); \
- (ptr)->field = &((ptr)->__nullable_storage.field)
+ (ptr)->__nullable_storage.field = (value); \
+ (ptr)->field = &((ptr)->__nullable_storage.field)
#define UNSET_NULLABLE_FIELD(ptr, field) \
- (ptr)->field = NULL; \
- memset_struct_0((ptr)->__nullable_storage.field)
+ (ptr)->field = NULL; \
+ memset_struct_0((ptr)->__nullable_storage.field)
/* Size functions */
extern Datum citus_table_size(PG_FUNCTION_ARGS);
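The nullable-field macros assume the owning struct pairs each pointer field with backing storage in a __nullable_storage member, as BackgroundTask does. A minimal hypothetical sketch:

typedef struct ExampleTask
{
	int64 *jobId;               /* NULL means "not set" */

	struct
	{
		int64 jobId;
	} __nullable_storage;
} ExampleTask;

static void
ExampleSetAndClearJobId(ExampleTask *task)
{
	/* store 42 in the backing storage and point task->jobId at it */
	SET_NULLABLE_FIELD(task, jobId, 42);

	/* reset the pointer and zero the backing storage */
	UNSET_NULLABLE_FIELD(task, jobId);
}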
diff --git a/src/include/distributed/multi_server_executor.h b/src/include/distributed/multi_server_executor.h
index f49ef60c2..38feb0318 100644
--- a/src/include/distributed/multi_server_executor.h
+++ b/src/include/distributed/multi_server_executor.h
@@ -20,8 +20,8 @@
/* Adaptive executor repartitioning related defines */
#define WORKER_CREATE_SCHEMA_QUERY "SELECT worker_create_schema (" UINT64_FORMAT ", %s);"
#define WORKER_REPARTITION_CLEANUP_QUERY "SELECT worker_repartition_cleanup (" \
- UINT64_FORMAT \
- ");"
+ UINT64_FORMAT \
+ ");"
/* Enumeration that represents distributed executor types */
diff --git a/src/include/distributed/placement_connection.h b/src/include/distributed/placement_connection.h
index ca73f016a..3ad89703d 100644
--- a/src/include/distributed/placement_connection.h
+++ b/src/include/distributed/placement_connection.h
@@ -21,7 +21,8 @@ extern MultiConnection * StartPlacementConnection(uint32 flags,
struct ShardPlacement *placement,
const char *userName);
extern MultiConnection * GetConnectionIfPlacementAccessedInXact(int flags,
- List *placementAccessList,
+ List *
+ placementAccessList,
const char *userName);
extern MultiConnection * StartPlacementListConnection(uint32 flags,
List *placementAccessList,
diff --git a/src/include/distributed/query_pushdown_planning.h b/src/include/distributed/query_pushdown_planning.h
index 287c8da62..87ff07aeb 100644
--- a/src/include/distributed/query_pushdown_planning.h
+++ b/src/include/distributed/query_pushdown_planning.h
@@ -26,7 +26,8 @@ extern int ValuesMaterializationThreshold;
extern bool CanPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLimit);
extern bool ShouldUseSubqueryPushDown(Query *originalQuery, Query *rewrittenQuery,
- PlannerRestrictionContext *plannerRestrictionContext);
+ PlannerRestrictionContext *
+ plannerRestrictionContext);
extern bool JoinTreeContainsSubquery(Query *query);
extern bool IsNodeSubquery(Node *node);
extern bool HasEmptyJoinTree(Query *query);
@@ -37,12 +38,12 @@ extern MultiNode * SubqueryMultiNodeTree(Query *originalQuery,
Query *queryTree,
PlannerRestrictionContext *
plannerRestrictionContext);
-extern DeferredErrorMessage * DeferErrorIfUnsupportedSubqueryPushdown(
- Query *originalQuery,
- PlannerRestrictionContext
- *
- plannerRestrictionContext,
- bool plannerPhase);
+extern DeferredErrorMessage * DeferErrorIfUnsupportedSubqueryPushdown(Query *
+ originalQuery,
+ PlannerRestrictionContext
+ *
+ plannerRestrictionContext,
+ bool plannerPhase);
extern DeferredErrorMessage * DeferErrorIfCannotPushdownSubquery(Query *subqueryTree,
bool
outerMostQueryHasLimit);
diff --git a/src/include/distributed/recursive_planning.h b/src/include/distributed/recursive_planning.h
index dec8916bf..941a09074 100644
--- a/src/include/distributed/recursive_planning.h
+++ b/src/include/distributed/recursive_planning.h
@@ -31,8 +31,8 @@ typedef struct RangeTblEntryIndex
Index rteIndex;
}RangeTblEntryIndex;
-extern PlannerRestrictionContext * GetPlannerRestrictionContext(
- RecursivePlanningContext *recursivePlanningContext);
+extern PlannerRestrictionContext * GetPlannerRestrictionContext(RecursivePlanningContext *
+ recursivePlanningContext);
extern List * GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery,
PlannerRestrictionContext *
plannerRestrictionContext,
diff --git a/src/include/distributed/relation_restriction_equivalence.h b/src/include/distributed/relation_restriction_equivalence.h
index f3a7e2b94..e7d87f627 100644
--- a/src/include/distributed/relation_restriction_equivalence.h
+++ b/src/include/distributed/relation_restriction_equivalence.h
@@ -41,10 +41,10 @@ extern PlannerRestrictionContext * FilterPlannerRestrictionForQuery(
extern List * GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry,
PlannerRestrictionContext *
plannerRestrictionContext);
-extern RelationRestriction * RelationRestrictionForRelation(
- RangeTblEntry *rangeTableEntry,
- PlannerRestrictionContext *
- plannerRestrictionContext);
+extern RelationRestriction * RelationRestrictionForRelation(RangeTblEntry *
+ rangeTableEntry,
+ PlannerRestrictionContext *
+ plannerRestrictionContext);
extern JoinRestrictionContext * RemoveDuplicateJoinRestrictions(JoinRestrictionContext *
joinRestrictionContext);
diff --git a/src/include/distributed/repartition_executor.h b/src/include/distributed/repartition_executor.h
index f636877e7..9e2892697 100644
--- a/src/include/distributed/repartition_executor.h
+++ b/src/include/distributed/repartition_executor.h
@@ -20,12 +20,12 @@ extern List * GenerateTaskListWithColocatedIntermediateResults(Oid targetRelatio
Query *
modifyQueryViaCoordinatorOrRepartition,
char *resultIdPrefix);
-extern List * GenerateTaskListWithRedistributedResults(
- Query *modifyQueryViaCoordinatorOrRepartition,
- CitusTableCacheEntry *
- targetRelation,
- List **redistributedResults,
- bool useBinaryFormat);
+extern List * GenerateTaskListWithRedistributedResults(Query *
+ modifyQueryViaCoordinatorOrRepartition,
+ CitusTableCacheEntry *
+ targetRelation,
+ List **redistributedResults,
+ bool useBinaryFormat);
extern bool IsSupportedRedistributionTarget(Oid targetRelationId);
extern bool IsRedistributablePlan(Plan *selectPlan);
extern bool HasMergeNotMatchedBySource(Query *query);
diff --git a/src/include/distributed/resource_lock.h b/src/include/distributed/resource_lock.h
index 0696ef6e8..c3f322ac1 100644
--- a/src/include/distributed/resource_lock.h
+++ b/src/include/distributed/resource_lock.h
@@ -63,87 +63,87 @@ typedef enum CitusOperations
/* reuse advisory lock, but with different, unused field 4 (4)*/
#define SET_LOCKTAG_SHARD_METADATA_RESOURCE(tag, db, shardid) \
- SET_LOCKTAG_ADVISORY(tag, \
- db, \
- (uint32) ((shardid) >> 32), \
- (uint32) (shardid), \
- ADV_LOCKTAG_CLASS_CITUS_SHARD_METADATA)
+ SET_LOCKTAG_ADVISORY(tag, \
+ db, \
+ (uint32) ((shardid) >> 32), \
+ (uint32) (shardid), \
+ ADV_LOCKTAG_CLASS_CITUS_SHARD_METADATA)
#define SET_LOCKTAG_COLOCATED_SHARDS_METADATA_RESOURCE(tag, db, colocationId, \
shardIntervalIndex) \
- SET_LOCKTAG_ADVISORY(tag, \
- db, \
- (uint32) shardIntervalIndex, \
- (uint32) colocationId, \
- ADV_LOCKTAG_CLASS_CITUS_COLOCATED_SHARDS_METADATA)
+ SET_LOCKTAG_ADVISORY(tag, \
+ db, \
+ (uint32) shardIntervalIndex, \
+ (uint32) colocationId, \
+ ADV_LOCKTAG_CLASS_CITUS_COLOCATED_SHARDS_METADATA)
/* reuse advisory lock, but with different, unused field 4 (5)*/
#define SET_LOCKTAG_SHARD_RESOURCE(tag, db, shardid) \
- SET_LOCKTAG_ADVISORY(tag, \
- db, \
- (uint32) ((shardid) >> 32), \
- (uint32) (shardid), \
- ADV_LOCKTAG_CLASS_CITUS_SHARD)
+ SET_LOCKTAG_ADVISORY(tag, \
+ db, \
+ (uint32) ((shardid) >> 32), \
+ (uint32) (shardid), \
+ ADV_LOCKTAG_CLASS_CITUS_SHARD)
/* advisory lock for citus shard move/copy operations,
* also it has the database hardcoded to MyDatabaseId,
* to ensure the locks are local to each database */
#define SET_LOCKTAG_SHARD_MOVE(tag, shardid) \
- SET_LOCKTAG_ADVISORY(tag, \
- MyDatabaseId, \
- (uint32) ((shardid) >> 32), \
- (uint32) (shardid), \
- ADV_LOCKTAG_CLASS_CITUS_SHARD_MOVE)
+ SET_LOCKTAG_ADVISORY(tag, \
+ MyDatabaseId, \
+ (uint32) ((shardid) >> 32), \
+ (uint32) (shardid), \
+ ADV_LOCKTAG_CLASS_CITUS_SHARD_MOVE)
/* reuse advisory lock, but with different, unused field 4 (7)
* Also it has the database hardcoded to MyDatabaseId, to ensure the locks
* are local to each database */
#define SET_LOCKTAG_REBALANCE_COLOCATION(tag, colocationOrTableId) \
- SET_LOCKTAG_ADVISORY(tag, \
- MyDatabaseId, \
- (uint32) ((colocationOrTableId) >> 32), \
- (uint32) (colocationOrTableId), \
- ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION)
+ SET_LOCKTAG_ADVISORY(tag, \
+ MyDatabaseId, \
+ (uint32) ((colocationOrTableId) >> 32), \
+ (uint32) (colocationOrTableId), \
+ ADV_LOCKTAG_CLASS_CITUS_REBALANCE_COLOCATION)
/* reuse advisory lock, but with different, unused field 4 (13)
* Also it has the database hardcoded to MyDatabaseId, to ensure the locks
* are local to each database */
#define SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION(tag, colocationOrTableId) \
- SET_LOCKTAG_ADVISORY(tag, \
- MyDatabaseId, \
- (uint32) ((colocationOrTableId) >> 32), \
- (uint32) (colocationOrTableId), \
- ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION)
+ SET_LOCKTAG_ADVISORY(tag, \
+ MyDatabaseId, \
+ (uint32) ((colocationOrTableId) >> 32), \
+ (uint32) (colocationOrTableId), \
+ ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION)
/* advisory lock for citus operations, also it has the database hardcoded to MyDatabaseId,
* to ensure the locks are local to each database */
#define SET_LOCKTAG_CITUS_OPERATION(tag, operationId) \
- SET_LOCKTAG_ADVISORY(tag, \
- MyDatabaseId, \
- (uint32) 0, \
- (uint32) operationId, \
- ADV_LOCKTAG_CLASS_CITUS_OPERATIONS)
+ SET_LOCKTAG_ADVISORY(tag, \
+ MyDatabaseId, \
+ (uint32) 0, \
+ (uint32) operationId, \
+ ADV_LOCKTAG_CLASS_CITUS_OPERATIONS)
/* reuse advisory lock, but with different, unused field 4 (10)
* Also it has the database hardcoded to MyDatabaseId, to ensure the locks
* are local to each database */
#define SET_LOCKTAG_CLEANUP_OPERATION_ID(tag, operationId) \
- SET_LOCKTAG_ADVISORY(tag, \
- MyDatabaseId, \
- (uint32) ((operationId) >> 32), \
- (uint32) operationId, \
- ADV_LOCKTAG_CLASS_CITUS_CLEANUP_OPERATION_ID)
+ SET_LOCKTAG_ADVISORY(tag, \
+ MyDatabaseId, \
+ (uint32) ((operationId) >> 32), \
+ (uint32) operationId, \
+ ADV_LOCKTAG_CLASS_CITUS_CLEANUP_OPERATION_ID)
/* reuse advisory lock, but with different, unused field 4 (14)
* Also it has the database hardcoded to MyDatabaseId, to ensure the locks
* are local to each database */
#define SET_LOCKTAG_BACKGROUND_TASK(tag, taskId) \
- SET_LOCKTAG_ADVISORY(tag, \
- MyDatabaseId, \
- (uint32) ((taskId) >> 32), \
- (uint32) (taskId), \
- ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK)
+ SET_LOCKTAG_ADVISORY(tag, \
+ MyDatabaseId, \
+ (uint32) ((taskId) >> 32), \
+ (uint32) (taskId), \
+ ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK)
/*
* IsNodeWideObjectClass returns true if the given object class is node-wide,
@@ -171,7 +171,8 @@ IsNodeWideObjectClass(ObjectClass objectClass)
* this assertion check based on latest supported major Postgres version.
*/
StaticAssertStmt(PG_MAJORVERSION_NUM <= 18,
- "better to check if any of newly added ObjectClass'es are node-wide");
+ "better to check if any of newly added ObjectClass'es are node-wide")
+ ;
switch (objectClass)
{
@@ -182,12 +183,14 @@ IsNodeWideObjectClass(ObjectClass objectClass)
#if PG_VERSION_NUM >= PG_VERSION_16
case OCLASS_ROLE_MEMBERSHIP:
#endif
- {
- return true;
- }
+ {
+ return true;
+ }
default:
+ {
return false;
+ }
}
}
@@ -202,12 +205,12 @@ IsNodeWideObjectClass(ObjectClass objectClass)
* not node-wide, and global if it is.
*/
#define SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION(tag, objectClass, oid) \
- SET_LOCKTAG_ADVISORY(tag, \
- (uint32) (IsNodeWideObjectClass(objectClass) ? InvalidOid : \
- MyDatabaseId), \
- (uint32) objectClass, \
- (uint32) oid, \
- ADV_LOCKTAG_CLASS_CITUS_GLOBAL_DDL_SERIALIZATION)
+ SET_LOCKTAG_ADVISORY(tag, \
+ (uint32) (IsNodeWideObjectClass(objectClass) ? InvalidOid : \
+ MyDatabaseId), \
+ (uint32) objectClass, \
+ (uint32) oid, \
+ ADV_LOCKTAG_CLASS_CITUS_GLOBAL_DDL_SERIALIZATION)
/*
* DistLockConfigs are used to configure the locking behaviour of AcquireDistributedLockOnRelations
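Each of these expands to a plain advisory LOCKTAG, so acquisition goes through the regular lock manager. A sketch, assuming a blocking exclusive acquire is what the caller wants:

static void
ExampleBlockConcurrentShardMove(uint64 shardId)
{
	LOCKTAG tag;
	SET_LOCKTAG_SHARD_MOVE(tag, shardId);

	/* waits until no concurrent move/copy of this shard holds the lock */
	(void) LockAcquire(&tag, ExclusiveLock, false, false);
}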
diff --git a/src/include/distributed/shard_rebalancer.h b/src/include/distributed/shard_rebalancer.h
index 3895eced5..14fb7e2ce 100644
--- a/src/include/distributed/shard_rebalancer.h
+++ b/src/include/distributed/shard_rebalancer.h
@@ -30,10 +30,10 @@
/* Definitions for metadata update commands */
#define INSERT_SHARD_PLACEMENT_COMMAND "INSERT INTO pg_dist_shard_placement VALUES(" \
- UINT64_FORMAT ", %d, " UINT64_FORMAT ", '%s', %d)"
+ UINT64_FORMAT ", %d, " UINT64_FORMAT ", '%s', %d)"
#define DELETE_SHARD_PLACEMENT_COMMAND "DELETE FROM pg_dist_shard_placement WHERE " \
"shardid=" UINT64_FORMAT \
- " AND nodename='%s' AND nodeport=%d"
+ " AND nodename='%s' AND nodeport=%d"
/*
* Definitions for shard placement json field names. These names should match
diff --git a/src/include/distributed/shardinterval_utils.h b/src/include/distributed/shardinterval_utils.h
index ed5600a11..443d35c63 100644
--- a/src/include/distributed/shardinterval_utils.h
+++ b/src/include/distributed/shardinterval_utils.h
@@ -38,7 +38,8 @@ typedef struct SortShardIntervalContext
extern ShardInterval ** SortShardIntervalArray(ShardInterval **shardIntervalArray, int
shardCount, Oid collation,
- FmgrInfo *shardIntervalSortCompareFunction);
+ FmgrInfo *
+ shardIntervalSortCompareFunction);
extern int CompareShardIntervals(const void *leftElement, const void *rightElement,
SortShardIntervalContext *sortContext);
extern int CompareShardIntervalsById(const void *leftElement, const void *rightElement);
diff --git a/src/include/distributed/shardsplit_logical_replication.h b/src/include/distributed/shardsplit_logical_replication.h
index a7dc3485e..55372dbb0 100644
--- a/src/include/distributed/shardsplit_logical_replication.h
+++ b/src/include/distributed/shardsplit_logical_replication.h
@@ -34,10 +34,11 @@ extern List * PopulateShardSplitSubscriptionsMetadataList(HTAB *shardSplitInfoHa
List *
shardGroupSplitIntervalListList,
List *workersForPlacementList);
-extern HTAB * CreateShardSplitInfoMapForPublication(
- List *sourceColocatedShardIntervalList,
- List *shardGroupSplitIntervalListList,
- List *destinationWorkerNodesList);
+extern HTAB * CreateShardSplitInfoMapForPublication(List *
+ sourceColocatedShardIntervalList,
+ List *
+ shardGroupSplitIntervalListList,
+ List *destinationWorkerNodesList);
/* Functions to drop publisher-subscriber resources */
extern void DropAllShardSplitLeftOvers(WorkerNode *sourceNode,
diff --git a/src/include/distributed/shardsplit_shared_memory.h b/src/include/distributed/shardsplit_shared_memory.h
index d06fdca01..215df238f 100644
--- a/src/include/distributed/shardsplit_shared_memory.h
+++ b/src/include/distributed/shardsplit_shared_memory.h
@@ -74,7 +74,8 @@ void StoreShardSplitSharedMemoryHandle(dsm_handle dsmHandle);
/* Functions for creating and accessing shared memory segments consisting shard split information */
extern ShardSplitInfoSMHeader * CreateSharedMemoryForShardSplitInfo(int
shardSplitInfoCount,
- dsm_handle *dsmHandle);
+ dsm_handle *
+ dsmHandle);
extern void ReleaseSharedMemoryOfShardSplitInfo(void);
extern ShardSplitInfoSMHeader * GetShardSplitInfoSMHeader(void);
diff --git a/src/include/distributed/stats/stat_counters.h b/src/include/distributed/stats/stat_counters.h
index c673c062c..3eae60461 100644
--- a/src/include/distributed/stats/stat_counters.h
+++ b/src/include/distributed/stats/stat_counters.h
@@ -14,7 +14,7 @@
/* saved backend stats - constants */
#define SAVED_BACKEND_STATS_HASH_LOCK_TRANCHE_NAME \
- "citus_stat_counters saved backend stats hash"
+ "citus_stat_counters saved backend stats hash"
/* default value for the GUC variable */
#define ENABLE_STAT_COUNTERS_DEFAULT false
diff --git a/src/include/distributed/string_utils.h b/src/include/distributed/string_utils.h
index 1c5b4ad1a..e224f00e4 100644
--- a/src/include/distributed/string_utils.h
+++ b/src/include/distributed/string_utils.h
@@ -16,6 +16,6 @@
extern char * ConvertIntToString(int val);
#define StringStartsWith(str, prefix) \
- (strncmp(str, prefix, strlen(prefix)) == 0)
+ (strncmp(str, prefix, strlen(prefix)) == 0)
#endif /* CITUS_STRING_UTILS_H */
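For instance:

static bool
ExampleIsCitusPrefixed(const char *name)
{
	/* true for names like "citus_split_shard" */
	return StringStartsWith(name, "citus_");
}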
diff --git a/src/include/distributed/worker_shard_copy.h b/src/include/distributed/worker_shard_copy.h
index 77f57c761..b35c93834 100644
--- a/src/include/distributed/worker_shard_copy.h
+++ b/src/include/distributed/worker_shard_copy.h
@@ -16,7 +16,8 @@
extern bool EnableBinaryProtocol;
extern DestReceiver * CreateShardCopyDestReceiver(EState *executorState,
- List *destinationShardFullyQualifiedName,
+ List *
+ destinationShardFullyQualifiedName,
uint32_t destinationNodeId);
extern const char * CopyableColumnNamesFromRelationName(const char *schemaName, const
diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h
index 1b3809a0e..272b75928 100644
--- a/src/include/distributed/worker_transaction.h
+++ b/src/include/distributed/worker_transaction.h
@@ -81,8 +81,10 @@ extern bool SendOptionalCommandListToWorkerOutsideTransactionWithConnection(
List *
commandList);
extern bool SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(const
- char *nodeName,
- int32 nodePort,
+ char *
+ nodeName,
+ int32
+ nodePort,
const char *
nodeUser,
List *
@@ -100,16 +102,17 @@ extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName,
int32 nodePort,
const char *nodeUser,
List *commandList);
-extern void SendCommandListToWorkerOutsideTransactionWithConnection(
- MultiConnection *workerConnection,
- List *commandList);
+extern void SendCommandListToWorkerOutsideTransactionWithConnection(MultiConnection *
+ workerConnection,
+ List *commandList);
extern void SendCommandListToWorkerListWithBareConnections(List *workerConnections,
List *commandList);
-extern void SendMetadataCommandListToWorkerListInCoordinatedTransaction(
- List *workerNodeList,
- const char *
- nodeUser,
- List *commandList);
+extern void SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *
+ workerNodeList,
+ const char *
+ nodeUser,
+ List *
+ commandList);
extern void RemoveWorkerTransaction(const char *nodeName, int32 nodePort);
/* helper functions for worker transactions */
diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h
index a30925e4e..496538056 100644
--- a/src/include/pg_version_compat.h
+++ b/src/include/pg_version_compat.h
@@ -15,20 +15,20 @@
#if PG_VERSION_NUM >= PG_VERSION_18
#define create_foreignscan_path_compat(a, b, c, d, e, f, g, h, i, j, k) \
- create_foreignscan_path( \
- (a), /* root */ \
- (b), /* rel */ \
- (c), /* target */ \
- (d), /* rows */ \
- 0, /* disabled_nodes */ \
- (e), /* startup_cost */ \
- (f), /* total_cost */ \
- (g), /* pathkeys */ \
- (h), /* required_outer */ \
- (i), /* fdw_outerpath */ \
- (j), /* fdw_restrictinfo*/ \
- (k) /* fdw_private */ \
- )
+ create_foreignscan_path( \
+ (a), /* root */ \
+ (b), /* rel */ \
+ (c), /* target */ \
+ (d), /* rows */ \
+ 0, /* disabled_nodes */ \
+ (e), /* startup_cost */ \
+ (f), /* total_cost */ \
+ (g), /* pathkeys */ \
+ (h), /* required_outer */ \
+ (i), /* fdw_outerpath */ \
+ (j), /* fdw_restrictinfo*/ \
+ (k) /* fdw_private */ \
+ )
/* PG-18 introduced get_op_index_interpretation, old name was get_op_btree_interpretation */
#define get_op_btree_interpretation(opno) get_op_index_interpretation(opno)
@@ -38,11 +38,11 @@
#elif PG_VERSION_NUM >= PG_VERSION_17
#define create_foreignscan_path_compat(a, b, c, d, e, f, g, h, i, j, k) \
- create_foreignscan_path( \
- (a), (b), (c), (d), \
- (e), (f), \
- (g), (h), (i), (j), (k) \
- )
+ create_foreignscan_path( \
+ (a), (b), (c), (d), \
+ (e), (f), \
+ (g), (h), (i), (j), (k) \
+ )
#endif
@@ -364,7 +364,9 @@ getObjectClass(const ObjectAddress *object)
}
case TransformRelationId:
+ {
return OCLASS_TRANSFORM;
+ }
}
/* shouldn't get here */
@@ -638,13 +640,13 @@ typedef RangeTblEntry RTEPermissionInfo;
#define fcGetArgValue(fc, n) ((fc)->args[n].value)
#define fcGetArgNull(fc, n) ((fc)->args[n].isnull)
#define fcSetArgExt(fc, n, val, is_null) \
- (((fc)->args[n].isnull = (is_null)), ((fc)->args[n].value = (val)))
+ (((fc)->args[n].isnull = (is_null)), ((fc)->args[n].value = (val)))
#define fcSetArg(fc, n, value) fcSetArgExt(fc, n, value, false)
#define fcSetArgNull(fc, n) fcSetArgExt(fc, n, (Datum) 0, true)
#define CREATE_SEQUENCE_COMMAND \
- "CREATE %sSEQUENCE IF NOT EXISTS %s AS %s INCREMENT BY " INT64_FORMAT \
- " MINVALUE " INT64_FORMAT " MAXVALUE " INT64_FORMAT \
- " START WITH " INT64_FORMAT " CACHE " INT64_FORMAT " %sCYCLE"
+ "CREATE %sSEQUENCE IF NOT EXISTS %s AS %s INCREMENT BY " INT64_FORMAT \
+ " MINVALUE " INT64_FORMAT " MAXVALUE " INT64_FORMAT \
+ " START WITH " INT64_FORMAT " CACHE " INT64_FORMAT " %sCYCLE"
#endif /* PG_VERSION_COMPAT_H */
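Callers stay version-agnostic by going through the compat macro, which injects disabled_nodes = 0 on PG 18+. A hypothetical GetForeignPaths-style sketch with placeholder costs:

static void
ExampleGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid)
{
	ForeignPath *path =
		create_foreignscan_path_compat(root, baserel,
									   NULL,            /* default pathtarget */
									   baserel->rows,
									   0.0,             /* startup_cost */
									   0.0,             /* total_cost */
									   NIL,             /* no pathkeys */
									   NULL,            /* no required_outer */
									   NULL,            /* no fdw_outerpath */
									   NIL,             /* no fdw_restrictinfo */
									   NIL);            /* no fdw_private */

	add_path(baserel, (Path *) path);
}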
diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed
index 1ec8fd923..325a62ef0 100644
--- a/src/test/regress/bin/normalize.sed
+++ b/src/test/regress/bin/normalize.sed
@@ -364,9 +364,6 @@ s/(Actual[[:space:]]+Rows:[[:space:]]*)N\.N/\1N/gI
# ignore any “Disabled:” lines in test output
/^\s*Disabled:/d
-# ignore any JSON-style Disabled field
-/^\s*"Disabled":/d
-
# ignore XML <Disabled>true</Disabled> or <Disabled>false</Disabled> lines
/^\s*.*<\/Disabled>/d
# pg18 “Disabled” change end
diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15.out b/src/test/regress/expected/create_drop_database_propagation_pg15.out
index 9a501558a..e1646dac6 100644
--- a/src/test/regress/expected/create_drop_database_propagation_pg15.out
+++ b/src/test/regress/expected/create_drop_database_propagation_pg15.out
@@ -1,13 +1,3 @@
---
--- PG15
---
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
-\gset
-\if :server_version_ge_15
-\else
-\q
-\endif
-- create/drop database for pg >= 15
set citus.enable_create_database_propagation=on;
CREATE DATABASE mydatabase
diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15_0.out b/src/test/regress/expected/create_drop_database_propagation_pg15_0.out
deleted file mode 100644
index b1ed9cc5b..000000000
--- a/src/test/regress/expected/create_drop_database_propagation_pg15_0.out
+++ /dev/null
@@ -1,9 +0,0 @@
---
--- PG15
---
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
-\gset
-\if :server_version_ge_15
-\else
-\q
diff --git a/src/test/regress/expected/merge_unsupported.out b/src/test/regress/expected/merge_unsupported.out
index 62f51a679..af465d3a9 100644
--- a/src/test/regress/expected/merge_unsupported.out
+++ b/src/test/regress/expected/merge_unsupported.out
@@ -2,7 +2,6 @@ SHOW server_version \gset
SELECT CASE
WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+'
WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16'
- WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14'
ELSE 'Unsupported version'
END AS version_category;
version_category
@@ -10,12 +9,6 @@ SELECT CASE
17+
(1 row)
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
-\gset
-\if :server_version_ge_15
-\else
-\q
-\endif
--
-- MERGE test from PG community (adapted to Citus by converting all tables to Citus local)
--
diff --git a/src/test/regress/expected/merge_unsupported_0.out b/src/test/regress/expected/merge_unsupported_0.out
index b788c1670..e322a0f1e 100644
--- a/src/test/regress/expected/merge_unsupported_0.out
+++ b/src/test/regress/expected/merge_unsupported_0.out
@@ -2,7 +2,6 @@ SHOW server_version \gset
SELECT CASE
WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+'
WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16'
- WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14'
ELSE 'Unsupported version'
END AS version_category;
version_category
@@ -10,12 +9,6 @@ SELECT CASE
15_16
(1 row)
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
-\gset
-\if :server_version_ge_15
-\else
-\q
-\endif
--
-- MERGE test from PG community (adapted to Citus by converting all tables to Citus local)
--
diff --git a/src/test/regress/expected/merge_unsupported_1.out b/src/test/regress/expected/merge_unsupported_1.out
deleted file mode 100644
index 187c5d630..000000000
--- a/src/test/regress/expected/merge_unsupported_1.out
+++ /dev/null
@@ -1,17 +0,0 @@
-SHOW server_version \gset
-SELECT CASE
- WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+'
- WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16'
- WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14'
- ELSE 'Unsupported version'
- END AS version_category;
- version_category
----------------------------------------------------------------------
- 14
-(1 row)
-
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
-\gset
-\if :server_version_ge_15
-\else
-\q
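All three merge_unsupported outputs lose their 14 branch the same way. The CASE itself is a small version dispatcher: substring(..., '\d+') takes the leading digits of a version string like 17.6, so the cast yields the major version that picks the matching alternative output. A quick sanity check, runnable on any server:

  SELECT substring(current_setting('server_version'), '\d+')::int AS major_version;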
diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out
index e7f1ecac5..ca97fc0c1 100644
--- a/src/test/regress/expected/multi_explain.out
+++ b/src/test/regress/expected/multi_explain.out
@@ -6,6 +6,10 @@
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
+-- This test file has an alternative output because of the following in PG18:
+-- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a
+-- The alternative output can be deleted when we drop support for PG17
+--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16
@@ -13,6 +17,12 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
t
(1 row)
+SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18;
+ server_version_ge_18
+---------------------------------------------------------------------
+ t
+(1 row)
+
SET citus.next_shard_id TO 570000;
\a\t
SET citus.explain_distributed_queries TO on;
@@ -111,6 +121,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Node Type": "Sort",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
"Plans": [
{
@@ -120,6 +131,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Group Key": ["remote_scan.l_quantity"],
"Plans": [
{
@@ -128,6 +140,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Distributed Query": {
"Job": {
"Task Count": 2,
@@ -144,6 +157,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Group Key": ["l_quantity"],
"Plans": [
{
@@ -152,7 +166,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "lineitem_360000",
- "Alias": "lineitem"
+ "Alias": "lineitem",
+ "Disabled": false
}
]
}
@@ -1184,6 +1199,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Plans": [
{
"Node Type": "Custom Scan",
@@ -1191,6 +1207,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Distributed Query": {
"Job": {
"Task Count": 6,
@@ -1744,8 +1761,9 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
"Node Type": "Result", +
"Parallel Aware": false,+
"Async Capable": false, +
- "Actual Rows": 1, +
- "Actual Loops": 1 +
+ "Actual Rows": 1, +
+ "Actual Loops": 1, +
+ "Disabled": false +
}, +
"Triggers": [ +
] +
@@ -2170,108 +2188,69 @@ SELECT * FROM explain_pk ORDER BY 1;
ROLLBACK;
-- test EXPLAIN ANALYZE with non-text output formats
BEGIN;
-EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
-[
- {
- "Plan": {
- "Node Type": "Custom Scan",
- "Custom Plan Provider": "Citus Adaptive",
- "Parallel Aware": false,
- "Async Capable": false,
- "Actual Rows": 0,
- "Actual Loops": 1,
- "Distributed Query": {
- "Job": {
- "Task Count": 2,
- "Tasks Shown": "One of 2",
- "Tasks": [
- {
- "Node": "host=localhost port=xxxxx dbname=regression",
- "Remote Plan": [
- [
- {
- "Plan": {
- "Node Type": "ModifyTable",
- "Operation": "Insert",
- "Parallel Aware": false,
- "Async Capable": false,
- "Relation Name": "explain_pk_570013",
- "Alias": "citus_table_alias",
- "Actual Rows": 0,
- "Actual Loops": 1,
- "Plans": [
- {
- "Node Type": "Result",
- "Parent Relationship": "Outer",
- "Parallel Aware": false,
- "Async Capable": false,
- "Actual Rows": 1,
- "Actual Loops": 1
- }
- ]
- },
- "Triggers": [
- ]
- }
- ]
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
+- Plan:
+ Node Type: "Custom Scan"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Actual Rows: 0
+ Actual Loops: 1
+ Distributed Query:
+ Job:
+ Task Count: 2
+ Tasks Shown: "One of 2"
+ Tasks:
+ - Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "ModifyTable"
+ Operation: "Insert"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "explain_pk_570013"
+ Alias: "citus_table_alias"
+ Actual Rows: 0
+ Actual Loops: 1
+ Plans:
+ - Node Type: "Result"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Actual Rows: 1
+ Actual Loops: 1
+ Triggers:
- ]
- }
- ]
- }
- }
- },
- "Triggers": [
- ]
- }
-]
+ Triggers:
ROLLBACK;
-EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk;
-[
- {
- "Plan": {
- "Node Type": "Custom Scan",
- "Custom Plan Provider": "Citus Adaptive",
- "Parallel Aware": false,
- "Async Capable": false,
- "Actual Rows": 0,
- "Actual Loops": 1,
- "Distributed Query": {
- "Job": {
- "Task Count": 4,
- "Tuple data received from nodes": "0 bytes",
- "Tasks Shown": "One of 4",
- "Tasks": [
- {
- "Tuple data received from node": "0 bytes",
- "Node": "host=localhost port=xxxxx dbname=regression",
- "Remote Plan": [
- [
- {
- "Plan": {
- "Node Type": "Seq Scan",
- "Parallel Aware": false,
- "Async Capable": false,
- "Relation Name": "explain_pk_570013",
- "Alias": "explain_pk",
- "Actual Rows": 0,
- "Actual Loops": 1
- },
- "Triggers": [
- ]
- }
- ]
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) SELECT * FROM explain_pk;
+- Plan:
+ Node Type: "Custom Scan"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Actual Rows: 0
+ Actual Loops: 1
+ Distributed Query:
+ Job:
+ Task Count: 4
+ Tuple data received from nodes: "0 bytes"
+ Tasks Shown: "One of 4"
+ Tasks:
+ - Tuple data received from node: "0 bytes"
+ Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "Seq Scan"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "explain_pk_570013"
+ Alias: "explain_pk"
+ Actual Rows: 0
+ Actual Loops: 1
+ Triggers:
- ]
- }
- ]
- }
- }
- },
- "Triggers": [
- ]
- }
-]
+ Triggers:
BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
@@ -3286,6 +3265,7 @@ SELECT * FROM a;
"Async Capable": false,
"Actual Rows": 1,
"Actual Loops": 1,
+ "Disabled": false,
"Distributed Query": {
"Subplans": [
{
@@ -3300,6 +3280,7 @@ SELECT * FROM a;
"Async Capable": false,
"Actual Rows": 1,
"Actual Loops": 1,
+ "Disabled": false,
"Distributed Query": {
"Job": {
"Task Count": 1,
@@ -3321,6 +3302,7 @@ SELECT * FROM a;
"Alias": "test_subplans_570038",
"Actual Rows": 1,
"Actual Loops": 1,
+ "Disabled": false,
"Plans": [
{
"Node Type": "Result",
@@ -3328,7 +3310,8 @@ SELECT * FROM a;
"Parallel Aware": false,
"Async Capable": false,
"Actual Rows": 1,
- "Actual Loops": 1
+ "Actual Loops": 1,
+ "Disabled": false
}
]
},
@@ -3367,7 +3350,8 @@ SELECT * FROM a;
"Function Name": "read_intermediate_result",
"Alias": "intermediate_result",
"Actual Rows": 1,
- "Actual Loops": 1
+ "Actual Loops": 1,
+ "Disabled": false
},
"Triggers": [
]
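Every plan node in these JSON hunks gains a "Disabled": false entry, which is the PG18 EXPLAIN change cited in the file header. To see the field on a PG18 server, any structured-format EXPLAIN will do (a sketch; the surrounding plan shape will differ):

  EXPLAIN (COSTS OFF, FORMAT JSON) SELECT 1;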
diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out
index 60cd87316..667f932dc 100644
--- a/src/test/regress/expected/multi_explain_0.out
+++ b/src/test/regress/expected/multi_explain_0.out
@@ -6,9 +6,19 @@
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
+-- This test file has an alternative output because of the following in PG18:
+-- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a
+-- The alternative output can be deleted when we drop support for PG17
+--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18;
+ server_version_ge_18
---------------------------------------------------------------------
f
(1 row)
@@ -665,7 +675,7 @@ Aggregate
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort
- Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), events.event_time
-> Hash Join
Hash Cond: (users.composite_id = events.composite_id)
-> Seq Scan on users_1400289 users
@@ -757,7 +767,7 @@ HashAggregate
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
-> Sort
- Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone, events.event_time
-> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate
@@ -870,7 +880,7 @@ Sort
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0)
-> Sort
- Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay, events.event_time
-> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate
@@ -975,7 +985,7 @@ Limit
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort
- Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), events.event_time
-> Nested Loop Left Join
-> Limit
-> Sort
@@ -2170,108 +2180,69 @@ SELECT * FROM explain_pk ORDER BY 1;
ROLLBACK;
-- test EXPLAIN ANALYZE with non-text output formats
BEGIN;
-EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
-[
- {
- "Plan": {
- "Node Type": "Custom Scan",
- "Custom Plan Provider": "Citus Adaptive",
- "Parallel Aware": false,
- "Async Capable": false,
- "Actual Rows": 0,
- "Actual Loops": 1,
- "Distributed Query": {
- "Job": {
- "Task Count": 2,
- "Tasks Shown": "One of 2",
- "Tasks": [
- {
- "Node": "host=localhost port=xxxxx dbname=regression",
- "Remote Plan": [
- [
- {
- "Plan": {
- "Node Type": "ModifyTable",
- "Operation": "Insert",
- "Parallel Aware": false,
- "Async Capable": false,
- "Relation Name": "explain_pk_570013",
- "Alias": "citus_table_alias",
- "Actual Rows": 0,
- "Actual Loops": 1,
- "Plans": [
- {
- "Node Type": "Result",
- "Parent Relationship": "Outer",
- "Parallel Aware": false,
- "Async Capable": false,
- "Actual Rows": 1,
- "Actual Loops": 1
- }
- ]
- },
- "Triggers": [
- ]
- }
- ]
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
+- Plan:
+ Node Type: "Custom Scan"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Actual Rows: 0
+ Actual Loops: 1
+ Distributed Query:
+ Job:
+ Task Count: 2
+ Tasks Shown: "One of 2"
+ Tasks:
+ - Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "ModifyTable"
+ Operation: "Insert"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "explain_pk_570013"
+ Alias: "citus_table_alias"
+ Actual Rows: 0
+ Actual Loops: 1
+ Plans:
+ - Node Type: "Result"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Actual Rows: 1
+ Actual Loops: 1
+ Triggers:
- ]
- }
- ]
- }
- }
- },
- "Triggers": [
- ]
- }
-]
+ Triggers:
ROLLBACK;
-EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk;
-[
- {
- "Plan": {
- "Node Type": "Custom Scan",
- "Custom Plan Provider": "Citus Adaptive",
- "Parallel Aware": false,
- "Async Capable": false,
- "Actual Rows": 0,
- "Actual Loops": 1,
- "Distributed Query": {
- "Job": {
- "Task Count": 4,
- "Tuple data received from nodes": "0 bytes",
- "Tasks Shown": "One of 4",
- "Tasks": [
- {
- "Tuple data received from node": "0 bytes",
- "Node": "host=localhost port=xxxxx dbname=regression",
- "Remote Plan": [
- [
- {
- "Plan": {
- "Node Type": "Seq Scan",
- "Parallel Aware": false,
- "Async Capable": false,
- "Relation Name": "explain_pk_570013",
- "Alias": "explain_pk",
- "Actual Rows": 0,
- "Actual Loops": 1
- },
- "Triggers": [
- ]
- }
- ]
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) SELECT * FROM explain_pk;
+- Plan:
+ Node Type: "Custom Scan"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Actual Rows: 0
+ Actual Loops: 1
+ Distributed Query:
+ Job:
+ Task Count: 4
+ Tuple data received from nodes: "0 bytes"
+ Tasks Shown: "One of 4"
+ Tasks:
+ - Tuple data received from node: "0 bytes"
+ Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "Seq Scan"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "explain_pk_570013"
+ Alias: "explain_pk"
+ Actual Rows: 0
+ Actual Loops: 1
+ Triggers:
- ]
- }
- ]
- }
- }
- },
- "Triggers": [
- ]
- }
-]
+ Triggers:
BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
@@ -2407,11 +2378,16 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Tuple data received from node: 8 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
- -> Hash Join (actual rows=10 loops=1)
- Hash Cond: (ref_table.a = intermediate_result.a)
- -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
- -> Hash (actual rows=10 loops=1)
+ -> Merge Join (actual rows=10 loops=1)
+ Merge Cond: (intermediate_result.a = ref_table.a)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: intermediate_result.a
+ Sort Method: quicksort Memory: 25kB
-> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: ref_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
EXPLAIN :default_analyze_flags
SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table;
Aggregate (actual rows=1 loops=1)
@@ -2470,9 +2446,12 @@ Aggregate (actual rows=1 loops=1)
-> Aggregate (actual rows=1 loops=1)
InitPlan 1
-> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
- -> Result (actual rows=4 loops=1)
- One-Time Filter: (InitPlan 1).col1
- -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+ -> Sort (actual rows=4 loops=1)
+ Sort Key: dist_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Result (actual rows=4 loops=1)
+ One-Time Filter: (InitPlan 1).col1
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
BEGIN;
EXPLAIN :default_analyze_flags
WITH r AS (
@@ -2514,7 +2493,10 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Tuple data received from node: 8 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
- -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: intermediate_result.a2
+ Sort Method: quicksort Memory: 25kB
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
ROLLBACK;
-- https://github.com/citusdata/citus/issues/4074
prepare ref_select(int) AS select * from ref_table where 1 = $1;
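Note that the two EXPLAIN ANALYZE tests above switch from FORMAT JSON to FORMAT YAML, in both this file and multi_explain.out, so the companion .sql file presumably changed the same way. YAML carries the same fields as JSON with lighter syntax, e.g. (a sketch, not captured output):

  EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF)
    SELECT 1;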
diff --git a/src/test/regress/expected/multi_explain_1.out b/src/test/regress/expected/multi_explain_1.out
new file mode 100644
index 000000000..13434c256
--- /dev/null
+++ b/src/test/regress/expected/multi_explain_1.out
@@ -0,0 +1,3363 @@
+--
+-- MULTI_EXPLAIN
+--
+-- This test file has an alternative output because of the following in PG16:
+-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
+-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
+-- The alternative output can be deleted when we drop support for PG15
+--
+-- This test file has an alternative output because of the following in PG18:
+-- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a
+-- The alternative output can be deleted when we drop support for PG17
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
+ server_version_ge_16
+---------------------------------------------------------------------
+ f
+(1 row)
+
+SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18;
+ server_version_ge_18
+---------------------------------------------------------------------
+ f
+(1 row)
+
+SET citus.next_shard_id TO 570000;
+\a\t
+SET citus.explain_distributed_queries TO on;
+SET citus.enable_repartition_joins to ON;
+-- Ensure tuple data in explain analyze output is the same on all PG versions
+SET citus.enable_binary_protocol = TRUE;
+-- Function that parses explain output as JSON
+CREATE OR REPLACE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+CREATE OR REPLACE FUNCTION explain_analyze_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (ANALYZE TRUE, FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that parses explain output as XML
+CREATE OR REPLACE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that parses explain output as XML
+CREATE OR REPLACE FUNCTION explain_analyze_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (ANALYZE true, FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
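These helpers wrap EXPLAIN in EXECUTE format(...) so a plan can be captured into jsonb or xml and validated by parsing rather than by diffing text. Since FORMAT JSON returns an array with one object per query, a direct call can drill into the plan like this (a usage sketch, assuming the helper above is installed):

  SELECT explain_json($$SELECT 1$$) -> 0 -> 'Plan' ->> 'Node Type' AS node_type;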
+-- VACUUM related tables to ensure test outputs are stable
+VACUUM ANALYZE lineitem;
+VACUUM ANALYZE orders;
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> HashAggregate
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem
+-- Test disable hash aggregate
+SET enable_hashagg TO off;
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> GroupAggregate
+ Group Key: remote_scan.l_quantity
+ -> Sort
+ Sort Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem
+SET enable_hashagg TO on;
+-- Test JSON format
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+[
+ {
+ "Plan": {
+ "Node Type": "Sort",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Partial Mode": "Simple",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Group Key": ["remote_scan.l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Custom Scan",
+ "Parent Relationship": "Outer",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Distributed Query": {
+ "Job": {
+ "Task Count": 2,
+ "Tasks Shown": "One of 2",
+ "Tasks": [
+ {
+ "Node": "host=localhost port=xxxxx dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Partial Mode": "Simple",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Group Key": ["l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "lineitem_360000",
+ "Alias": "lineitem"
+ }
+ ]
+ }
+ }
+ ]
+
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+]
+-- Validate JSON format
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+SELECT true AS valid FROM explain_analyze_json($$
+ WITH a AS (
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity LIMIT 10)
+ SELECT count(*) FROM a
+$$);
+t
+-- Test XML format
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Sort</Node-Type>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Sort-Key>
+        <Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item>
+        <Item>remote_scan.l_quantity</Item>
+      </Sort-Key>
+      <Plans>
+        <Plan>
+          <Node-Type>Aggregate</Node-Type>
+          <Strategy>Hashed</Strategy>
+          <Partial-Mode>Simple</Partial-Mode>
+          <Parent-Relationship>Outer</Parent-Relationship>
+          <Parallel-Aware>false</Parallel-Aware>
+          <Async-Capable>false</Async-Capable>
+          <Group-Key>
+            <Item>remote_scan.l_quantity</Item>
+          </Group-Key>
+          <Plans>
+            <Plan>
+              <Node-Type>Custom Scan</Node-Type>
+              <Parent-Relationship>Outer</Parent-Relationship>
+              <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+              <Parallel-Aware>false</Parallel-Aware>
+              <Async-Capable>false</Async-Capable>
+              <Distributed-Query>
+                <Job>
+                  <Task-Count>2</Task-Count>
+                  <Tasks-Shown>One of 2</Tasks-Shown>
+                  <Tasks>
+                    <Task>
+                      <Node>host=localhost port=xxxxx dbname=regression</Node>
+                      <Remote-Plan>
+                        <explain xmlns="http://www.postgresql.org/2009/explain">
+                          <Query>
+                            <Plan>
+                              <Node-Type>Aggregate</Node-Type>
+                              <Strategy>Hashed</Strategy>
+                              <Partial-Mode>Simple</Partial-Mode>
+                              <Parallel-Aware>false</Parallel-Aware>
+                              <Async-Capable>false</Async-Capable>
+                              <Group-Key>
+                                <Item>l_quantity</Item>
+                              </Group-Key>
+                              <Plans>
+                                <Plan>
+                                  <Node-Type>Seq Scan</Node-Type>
+                                  <Parent-Relationship>Outer</Parent-Relationship>
+                                  <Parallel-Aware>false</Parallel-Aware>
+                                  <Async-Capable>false</Async-Capable>
+                                  <Relation-Name>lineitem_360000</Relation-Name>
+                                  <Alias>lineitem</Alias>
+                                </Plan>
+                              </Plans>
+                            </Plan>
+                          </Query>
+                        </explain>
+
+                      </Remote-Plan>
+                    </Task>
+                  </Tasks>
+                </Job>
+              </Distributed-Query>
+            </Plan>
+          </Plans>
+        </Plan>
+      </Plans>
+    </Plan>
+  </Query>
+</explain>
+-- Validate XML format
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+SELECT true AS valid FROM explain_analyze_xml($$
+ WITH a AS (
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity LIMIT 10)
+ SELECT count(*) FROM a
+$$);
+t
+-- Test YAML format
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+- Plan:
+ Node Type: "Sort"
+ Parallel Aware: false
+ Async Capable: false
+ Sort Key:
+ - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))"
+ - "remote_scan.l_quantity"
+ Plans:
+ - Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Partial Mode: "Simple"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Group Key:
+ - "remote_scan.l_quantity"
+ Plans:
+ - Node Type: "Custom Scan"
+ Parent Relationship: "Outer"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Distributed Query:
+ Job:
+ Task Count: 2
+ Tasks Shown: "One of 2"
+ Tasks:
+ - Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Partial Mode: "Simple"
+ Parallel Aware: false
+ Async Capable: false
+ Group Key:
+ - "l_quantity"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "lineitem_360000"
+ Alias: "lineitem"
+
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> HashAggregate
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem
+-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+SELECT public.plan_normalize_memory($Q$
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);
+Sort (actual rows=50 loops=1)
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ Sort Method: quicksort Memory: xxx
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 1800 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 900 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem (actual rows=5894 loops=1)
+-- EXPLAIN ANALYZE doesn't show worker tasks for repartition joins yet
+SET citus.shard_count TO 3;
+CREATE TABLE t1(a int, b int);
+CREATE TABLE t2(a int, b int);
+SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a');
+|
+BEGIN;
+SET LOCAL citus.enable_repartition_joins TO true;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
+Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 6
+ Tuple data received from nodes: 48 bytes
+ Tasks Shown: None, not supported for re-partition queries
+ -> MapMergeJob
+ Map Task Count: 3
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 3
+ Merge Task Count: 6
+-- Confirm repartition join in distributed subplan works
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
+WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
+SELECT count(*) from repartition;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 14 bytes
+ Result destination: Write locally
+ -> Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 6
+ Tuple data received from nodes: 48 bytes
+ Tasks Shown: None, not supported for re-partition queries
+ -> MapMergeJob
+ Map Task Count: 3
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 3
+ Merge Task Count: 6
+ Task Count: 1
+ Tuple data received from nodes: 8 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
+END;
+DROP TABLE t1, t2;
+-- Test query text output, with ANALYZE ON
+SELECT public.plan_normalize_memory($Q$
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE, BUFFERS OFF)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);
+Sort (actual rows=50 loops=1)
+ Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ Sort Method: quicksort Memory: xxx
+ -> HashAggregate (actual rows=50 loops=1)
+ Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
+ Output: remote_scan.l_quantity, remote_scan.count_quantity
+ Task Count: 2
+ Tuple data received from nodes: 1800 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT l_quantity, count(*) AS count_quantity FROM public.lineitem_360000 lineitem WHERE true GROUP BY l_quantity
+ Tuple data received from node: 900 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate (actual rows=50 loops=1)
+ Output: l_quantity, count(*)
+ Group Key: lineitem.l_quantity
+ -> Seq Scan on public.lineitem_360000 lineitem (actual rows=5894 loops=1)
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Test query text output, with ANALYZE OFF
+EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> HashAggregate
+ Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_quantity, remote_scan.count_quantity
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT l_quantity, count(*) AS count_quantity FROM public.lineitem_360000 lineitem WHERE true GROUP BY l_quantity
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_quantity, count(*)
+ Group Key: lineitem.l_quantity
+ -> Seq Scan on public.lineitem_360000 lineitem
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Test verbose
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
+Aggregate
+ Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM public.lineitem_360000 lineitem WHERE true
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
+ -> Seq Scan on public.lineitem_360000 lineitem
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Test join
+EXPLAIN (COSTS FALSE)
+ SELECT * FROM lineitem
+ JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5.0
+ ORDER BY l_quantity LIMIT 10;
+Limit
+ -> Sort
+ Sort Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ -> Sort
+ Sort Key: lineitem.l_quantity
+ -> Hash Join
+ Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
+ -> Seq Scan on lineitem_360000 lineitem
+ Filter: (l_quantity < 5.0)
+ -> Hash
+ -> Seq Scan on orders_360002 orders
+-- Test insert
+EXPLAIN (COSTS FALSE)
+ INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0);
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on lineitem_360000 citus_table_alias
+ -> Values Scan on "*VALUES*"
+-- Test update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_360000 lineitem
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+BEGIN;
+select public.explain_filter('
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
+ UPDATE lineitem
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_partkey = 0
+ ');
+Custom Scan (Citus Adaptive) (actual rows=N loops=N)
+ Task Count: N
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=N dbname=regression
+ -> Update on lineitem_360000 lineitem (actual rows=N loops=N)
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N)
+ Index Cond: (l_orderkey = N)
+ Filter: (l_partkey = N)
+ Rows Removed by Filter: N
+ROLLBACK;
+-- Test delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_360000 lineitem
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- Test zero-shard update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_orderkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 0
+ Tasks Shown: All
+-- Test zero-shard delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem
+ WHERE l_orderkey = 1 AND l_orderkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 0
+ Tasks Shown: All
+-- Test single-shard SELECT
+EXPLAIN (COSTS FALSE)
+ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey = 5)
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
+t
+-- Test CREATE TABLE ... AS
+EXPLAIN (COSTS FALSE)
+ CREATE TABLE explain_result AS
+ SELECT * FROM lineitem;
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on lineitem_360000 lineitem
+-- Test having
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
+ HAVING sum(l_quantity) > 100;
+Aggregate
+ Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
+ Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) AS worker_column_4 FROM public.lineitem_360000 lineitem WHERE true
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity)
+ -> Seq Scan on public.lineitem_360000 lineitem
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Test having without aggregate
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT l_quantity FROM lineitem
+ GROUP BY l_quantity
+ HAVING l_quantity > (100 * random());
+HashAggregate
+ Output: remote_scan.l_quantity
+ Group Key: remote_scan.l_quantity
+ Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_quantity, remote_scan.worker_column_2
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT l_quantity, l_quantity AS worker_column_2 FROM public.lineitem_360000 lineitem WHERE true GROUP BY l_quantity
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_quantity, l_quantity
+ Group Key: lineitem.l_quantity
+ -> Seq Scan on public.lineitem_360000 lineitem
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Subquery pushdown tests with explain
+EXPLAIN (COSTS OFF)
+SELECT
+ avg(array_length(events, 1)) AS event_average
+FROM
+ (SELECT
+ tenant_id,
+ user_id,
+ array_agg(event_type ORDER BY event_time) AS events
+ FROM
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ event_type,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type IN ('click', 'submit', 'pay')) AS subquery
+ GROUP BY
+ tenant_id,
+ user_id) AS subquery;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> GroupAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ -> Sort
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ -> Hash Join
+ Hash Cond: (users.composite_id = events.composite_id)
+ -> Seq Scan on users_1400289 users
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events
+ Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[]))
+SELECT success FROM run_command_on_workers('alter system set enable_nestloop to off');
+t
+t
+SELECT success FROM run_command_on_workers('alter system set enable_sort to off');
+t
+t
+SELECT success FROM run_command_on_workers('select pg_reload_conf()');
+t
+t
+-- Union and left join subquery pushdown
+EXPLAIN (COSTS OFF)
+SELECT
+ avg(array_length(events, 1)) AS event_average,
+ hasdone
+FROM
+ (SELECT
+ subquery_1.tenant_id,
+ subquery_1.user_id,
+ array_agg(event ORDER BY event_time) AS events,
+ COALESCE(hasdone, 'Has not done paying') AS hasdone
+ FROM
+ (
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ (users.composite_id) as composite_id,
+ 'action=>1'AS event,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'click')
+ UNION
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ (users.composite_id) as composite_id,
+ 'action=>2'AS event,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'submit')
+ ) AS subquery_1
+ LEFT JOIN
+ (SELECT
+ DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id,
+ (composite_id).tenant_id,
+ (composite_id).user_id,
+ 'Has done paying'::TEXT AS hasdone
+ FROM
+ events
+ WHERE
+ events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'pay') AS subquery_2
+ ON
+ subquery_1.composite_id = subquery_2.composite_id
+ GROUP BY
+ subquery_1.tenant_id,
+ subquery_1.user_id,
+ hasdone) AS subquery_top
+GROUP BY
+ hasdone;
+HashAggregate
+ Group Key: remote_scan.hasdone
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: COALESCE(subquery_2.hasdone, 'Has not done paying'::text)
+ -> GroupAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
+ -> Sort
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
+ -> Hash Left Join
+ Hash Cond: (users.composite_id = subquery_2.composite_id)
+ -> HashAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time
+ -> Append
+ -> Hash Join
+ Hash Cond: (users.composite_id = events.composite_id)
+ -> Seq Scan on users_1400289 users
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events
+ Filter: ((event_type)::text = 'click'::text)
+ -> Hash Join
+ Hash Cond: (users_1.composite_id = events_1.composite_id)
+ -> Seq Scan on users_1400289 users_1
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events_1
+ Filter: ((event_type)::text = 'submit'::text)
+ -> Hash
+ -> Subquery Scan on subquery_2
+ -> Unique
+ -> Sort
+ Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id)
+ -> Seq Scan on events_1400285 events_2
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
+-- Union, left join and having subquery pushdown
+EXPLAIN (COSTS OFF)
+ SELECT
+ avg(array_length(events, 1)) AS event_average,
+ count_pay
+ FROM (
+ SELECT
+ subquery_1.tenant_id,
+ subquery_1.user_id,
+ array_agg(event ORDER BY event_time) AS events,
+ COALESCE(count_pay, 0) AS count_pay
+ FROM
+ (
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ (users.composite_id),
+ 'action=>1'AS event,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'click')
+ UNION
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ (users.composite_id),
+ 'action=>2'AS event,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'submit')
+ ) AS subquery_1
+ LEFT JOIN
+ (SELECT
+ (composite_id).tenant_id,
+ (composite_id).user_id,
+ composite_id,
+ COUNT(*) AS count_pay
+ FROM
+ events
+ WHERE
+ events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'pay'
+ GROUP BY
+ composite_id
+ HAVING
+ COUNT(*) > 2) AS subquery_2
+ ON
+ subquery_1.composite_id = subquery_2.composite_id
+ GROUP BY
+ subquery_1.tenant_id,
+ subquery_1.user_id,
+ count_pay) AS subquery_top
+WHERE
+ array_ndims(events) > 0
+GROUP BY
+ count_pay
+ORDER BY
+ count_pay;
+Sort
+ Sort Key: remote_scan.count_pay
+ -> HashAggregate
+ Group Key: remote_scan.count_pay
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: COALESCE(subquery_2.count_pay, '0'::bigint)
+ -> GroupAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
+ Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0)
+ -> Sort
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
+ -> Hash Left Join
+ Hash Cond: (users.composite_id = subquery_2.composite_id)
+ -> HashAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time
+ -> Append
+ -> Hash Join
+ Hash Cond: (users.composite_id = events.composite_id)
+ -> Seq Scan on users_1400289 users
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events
+ Filter: ((event_type)::text = 'click'::text)
+ -> Hash Join
+ Hash Cond: (users_1.composite_id = events_1.composite_id)
+ -> Seq Scan on users_1400289 users_1
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events_1
+ Filter: ((event_type)::text = 'submit'::text)
+ -> Hash
+ -> Subquery Scan on subquery_2
+ -> HashAggregate
+ Group Key: events_2.composite_id
+ Filter: (count(*) > 2)
+ -> Seq Scan on events_1400285 events_2
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
+SELECT success FROM run_command_on_workers('alter system reset enable_nestloop');
+t
+t
+SELECT success FROM run_command_on_workers('alter system reset enable_sort');
+t
+t
+SELECT success FROM run_command_on_workers('select pg_reload_conf()');
+t
+t
+-- Lateral join subquery pushdown
+-- set subquery_pushdown due to the LIMIT in the subquery
+SET citus.subquery_pushdown to ON;
+NOTICE: Setting citus.subquery_pushdown flag is discouraged because it forces the planner to pushdown certain queries, skipping relevant correctness checks.
+DETAIL: When enabled, the planner skips many correctness checks for subqueries and pushes down the queries to shards as-is. It means that the queries are likely to return wrong results unless the user is absolutely sure that pushing down the subquery is safe. This GUC is maintained only for backward compatibility, no new users are supposed to use it. The planner is capable of pushing down as much computation as possible to the shards depending on the query.
+EXPLAIN (COSTS OFF)
+SELECT
+ tenant_id,
+ user_id,
+ user_lastseen,
+ event_array
+FROM
+ (SELECT
+ tenant_id,
+ user_id,
+ max(lastseen) as user_lastseen,
+ array_agg(event_type ORDER BY event_time) AS event_array
+ FROM
+ (SELECT
+ (composite_id).tenant_id,
+ (composite_id).user_id,
+ composite_id,
+ lastseen
+ FROM
+ users
+ WHERE
+ composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ composite_id <= '(1, 9223372036854775807)'::user_composite_type
+ ORDER BY
+ lastseen DESC
+ LIMIT
+ 10
+ ) AS subquery_top
+ LEFT JOIN LATERAL
+ (SELECT
+ event_type,
+ event_time
+ FROM
+ events
+ WHERE
+ (composite_id) = subquery_top.composite_id
+ ORDER BY
+ event_time DESC
+ LIMIT
+ 99) AS subquery_lateral
+ ON
+ true
+ GROUP BY
+ tenant_id,
+ user_id
+ ) AS shard_union
+ORDER BY
+ user_lastseen DESC
+LIMIT
+ 10;
+Limit
+ -> Sort
+ Sort Key: remote_scan.user_lastseen DESC
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ -> Sort
+ Sort Key: (max(users.lastseen)) DESC
+ -> GroupAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ -> Sort
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ -> Nested Loop Left Join
+ -> Limit
+ -> Sort
+ Sort Key: users.lastseen DESC
+ -> Seq Scan on users_1400289 users
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Limit
+ -> Sort
+ Sort Key: events.event_time DESC
+ -> Seq Scan on events_1400285 events
+ Filter: (composite_id = users.composite_id)
+RESET citus.subquery_pushdown;
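Because the GUC skips correctness checks (per the NOTICE above), scoping it to a single transaction is the safer pattern when it must be used; SET LOCAL reverts automatically at COMMIT or ROLLBACK. A sketch:

  BEGIN;
  SET LOCAL citus.subquery_pushdown TO on;  -- emits the same NOTICE
  -- run the LIMIT-bearing subquery here
  COMMIT;  -- the setting reverts here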
+-- Test all tasks output
+SET citus.explain_all_tasks TO on;
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360001 on lineitem_360001 lineitem
+ Index Cond: (l_orderkey > 9030)
+SELECT true AS valid FROM explain_xml($$
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
+t
+-- Test multi shard update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_hash_part
+ SET l_suppkey = 12;
+Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360041 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360042 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360043 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360044 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_hash_part
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 OR l_orderkey = 3;
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360041 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360042 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
+ Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
+-- Test multi shard delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem_hash_part;
+Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360041 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360042 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360043 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360044 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
+-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+SELECT public.plan_normalize_memory($Q$
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);
+Sort (actual rows=50 loops=1)
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ Sort Method: quicksort Memory: xxx
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 1800 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 900 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem (actual rows=5894 loops=1)
+ -> Task
+ Tuple data received from node: 900 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360001 lineitem (actual rows=6106 loops=1)
+SET citus.explain_all_tasks TO off;
+-- Test update with subquery
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_hash_part
+ SET l_suppkey = 12
+ FROM orders_hash_part
+ WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
+Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360041 lineitem_hash_part
+ -> Hash Join
+ Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ -> Hash
+ -> Seq Scan on orders_hash_part_360045 orders_hash_part
+-- Test delete with subquery
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem_hash_part
+ USING orders_hash_part
+ WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
+Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360041 lineitem_hash_part
+ -> Hash Join
+ Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ -> Hash
+ -> Seq Scan on orders_hash_part_360045 orders_hash_part
+-- Test task tracker
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey > 9030)
+-- Test re-partition join
+EXPLAIN (COSTS FALSE)
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 6
+ Tasks Shown: None, not supported for re-partition queries
+ -> MapMergeJob
+ Map Task Count: 6
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 2
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 1
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 1
+ Merge Task Count: 6
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+[
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Plain",
+ "Partial Mode": "Simple",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Plans": [
+ {
+ "Node Type": "Custom Scan",
+ "Parent Relationship": "Outer",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Distributed Query": {
+ "Job": {
+ "Task Count": 6,
+ "Tasks Shown": "None, not supported for re-partition queries",
+ "Dependent Jobs": [
+ {
+ "Map Task Count": 6,
+ "Merge Task Count": 6,
+ "Dependent Jobs": [
+ {
+ "Map Task Count": 2,
+ "Merge Task Count": 6
+ },
+ {
+ "Map Task Count": 1,
+ "Merge Task Count": 6
+ }
+ ]
+ },
+ {
+ "Map Task Count": 1,
+ "Merge Task Count": 6
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ }
+]
+SELECT true AS valid FROM explain_json($$
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
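+-- NOTE: explain_json (and explain_xml further down) only check that the
+-- structured EXPLAIN output is well-formed. A sketch of what such a helper
+-- could look like (hypothetical body; the test defines its own elsewhere):
+--
+--   CREATE OR REPLACE FUNCTION explain_json(query text) RETURNS jsonb AS $$
+--   DECLARE result jsonb;
+--   BEGIN
+--     -- the cast into jsonb raises an error if the output is malformed
+--     EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+--     RETURN result;
+--   END; $$ LANGUAGE plpgsql;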
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Aggregate</Node-Type>
+      <Strategy>Plain</Strategy>
+      <Partial-Mode>Simple</Partial-Mode>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Plans>
+        <Plan>
+          <Node-Type>Custom Scan</Node-Type>
+          <Parent-Relationship>Outer</Parent-Relationship>
+          <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+          <Parallel-Aware>false</Parallel-Aware>
+          <Async-Capable>false</Async-Capable>
+          <Distributed-Query>
+            <Job>
+              <Task-Count>6</Task-Count>
+              <Tasks-Shown>None, not supported for re-partition queries</Tasks-Shown>
+              <Dependent-Jobs>
+                <MapMergeJob>
+                  <Map-Task-Count>6</Map-Task-Count>
+                  <Merge-Task-Count>6</Merge-Task-Count>
+                  <Dependent-Jobs>
+                    <MapMergeJob>
+                      <Map-Task-Count>2</Map-Task-Count>
+                      <Merge-Task-Count>6</Merge-Task-Count>
+                    </MapMergeJob>
+                    <MapMergeJob>
+                      <Map-Task-Count>1</Map-Task-Count>
+                      <Merge-Task-Count>6</Merge-Task-Count>
+                    </MapMergeJob>
+                  </Dependent-Jobs>
+                </MapMergeJob>
+                <MapMergeJob>
+                  <Map-Task-Count>1</Map-Task-Count>
+                  <Merge-Task-Count>6</Merge-Task-Count>
+                </MapMergeJob>
+              </Dependent-Jobs>
+            </Job>
+          </Distributed-Query>
+        </Plan>
+      </Plans>
+    </Plan>
+  </Query>
+</explain>
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
+-- make sure that EXPLAIN works without
+-- problems for queries that involve only
+-- reference tables
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM nation
+ WHERE n_name = 'CHINA'$$);
+t
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM nation, supplier
+ WHERE nation.n_nationkey = supplier.s_nationkey$$);
+t
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT count(*)
+ FROM lineitem, orders, customer, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+- Plan:
+ Node Type: "Aggregate"
+ Strategy: "Plain"
+ Partial Mode: "Simple"
+ Parallel Aware: false
+ Async Capable: false
+ Plans:
+ - Node Type: "Custom Scan"
+ Parent Relationship: "Outer"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Distributed Query:
+ Job:
+ Task Count: 6
+ Tasks Shown: "None, not supported for re-partition queries"
+ Dependent Jobs:
+ - Map Task Count: 2
+ Merge Task Count: 6
+ - Map Task Count: 1
+ Merge Task Count: 6
+-- ensure local plans display correctly
+CREATE TABLE lineitem_clone (LIKE lineitem);
+EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone;
+Aggregate
+ -> Seq Scan on lineitem_clone
+DROP TABLE lineitem_clone;
+-- ensure distributed plans don't break
+EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_360000 lineitem
+-- ensure EXPLAIN EXECUTE doesn't crash
+PREPARE task_tracker_query AS
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
+EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey > 9030)
+PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
+EXPLAIN EXECUTE router_executor_query;
+Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
+ Index Cond: (l_orderkey = 5)
+PREPARE real_time_executor_query AS
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
+EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey > 9030)
+-- EXPLAIN EXECUTE of parametrized prepared statements is broken, but
+-- at least make sure it fails without crashing
+PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1;
+EXPLAIN EXECUTE router_executor_query_param(5);
+Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
+ Index Cond: (l_orderkey = 5)
+select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)');
+Custom Scan (Citus Adaptive) (actual rows=N loops=N)
+ Task Count: N
+ Tuple data received from nodes: N bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: N bytes
+ Node: host=localhost port=N dbname=regression
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N)
+ Index Cond: (l_orderkey = N)
+\set VERBOSITY TERSE
+PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;
+BEGIN;
+EXPLAIN (COSTS OFF) EXECUTE multi_shard_query_param(5);
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_360000 lineitem
+ -> Seq Scan on lineitem_360000 lineitem
+ROLLBACK;
+BEGIN;
+EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE multi_shard_query_param(5);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_360000 lineitem (actual rows=0 loops=1)
+ -> Seq Scan on lineitem_360000 lineitem (actual rows=5894 loops=1)
+ROLLBACK;
+\set VERBOSITY DEFAULT
+-- test explain in a transaction with alter table to test we use right connections
+BEGIN;
+CREATE TABLE explain_table(id int);
+SELECT create_distributed_table('explain_table', 'id');
+
+ALTER TABLE explain_table ADD COLUMN value int;
+ROLLBACK;
+-- test explain with local INSERT ... SELECT
+EXPLAIN (COSTS OFF)
+INSERT INTO lineitem_hash_part
+SELECT o_orderkey FROM orders_hash_part LIMIT 3;
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Limit
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ -> Seq Scan on orders_hash_part_360045 orders_hash_part
+SELECT true AS valid FROM explain_json($$
+ INSERT INTO lineitem_hash_part (l_orderkey)
+ SELECT o_orderkey FROM orders_hash_part LIMIT 3;
+$$);
+t
+EXPLAIN (COSTS OFF)
+INSERT INTO lineitem_hash_part (l_orderkey, l_quantity)
+SELECT o_orderkey, 5 FROM orders_hash_part LIMIT 3;
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Limit
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ -> Seq Scan on orders_hash_part_360045 orders_hash_part
+EXPLAIN (COSTS OFF)
+INSERT INTO lineitem_hash_part (l_orderkey)
+SELECT s FROM generate_series(1,5) s;
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Function Scan on generate_series s
+-- WHERE EXISTS forces pg12 to materialize cte
+SELECT public.explain_with_pg17_initplan_format($Q$
+EXPLAIN (COSTS OFF)
+WITH cte1 AS (SELECT s FROM generate_series(1,10) s)
+INSERT INTO lineitem_hash_part
+WITH cte1 AS (SELECT * FROM cte1 WHERE EXISTS (SELECT * FROM cte1) LIMIT 5)
+SELECT s FROM cte1 WHERE EXISTS (SELECT * FROM cte1);
+$Q$);
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Result
+ One-Time Filter: (InitPlan 4).col1
+ CTE cte1
+ -> Function Scan on generate_series s
+ CTE cte1
+ -> Limit
+ InitPlan 2
+ -> CTE Scan on cte1 cte1_1
+ -> Result
+ One-Time Filter: (InitPlan 2).col1
+ -> CTE Scan on cte1 cte1_2
+ InitPlan 4
+ -> CTE Scan on cte1 cte1_3
+ -> CTE Scan on cte1
+EXPLAIN (COSTS OFF)
+INSERT INTO lineitem_hash_part
+( SELECT s FROM generate_series(1,5) s) UNION
+( SELECT s FROM generate_series(5,10) s);
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Subquery Scan on citus_insert_select_subquery
+ -> HashAggregate
+ Group Key: s.s
+ -> Append
+ -> Function Scan on generate_series s
+ -> Function Scan on generate_series s_1
+-- explain with recursive planning
+EXPLAIN (COSTS OFF, VERBOSE true)
+WITH keys AS MATERIALIZED (
+ SELECT DISTINCT l_orderkey FROM lineitem_hash_part
+),
+series AS MATERIALIZED (
+ SELECT s FROM generate_series(1,10) s
+)
+SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey)
+ORDER BY s;
+Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_orderkey
+ -> Distributed Subplan XXX_1
+ -> HashAggregate
+ Output: remote_scan.l_orderkey
+ Group Key: remote_scan.l_orderkey
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_orderkey
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Query: SELECT DISTINCT l_orderkey FROM public.lineitem_hash_part_360041 lineitem_hash_part WHERE true
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_orderkey
+ Group Key: lineitem_hash_part.l_orderkey
+ -> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+ -> Distributed Subplan XXX_2
+ -> Function Scan on pg_catalog.generate_series s
+ Output: s
+ Function Call: generate_series(1, 10)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Query: SELECT keys.l_orderkey FROM ((SELECT intermediate_result.s FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(s integer)) series JOIN (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) keys ON ((series.s OPERATOR(pg_catalog.=) keys.l_orderkey))) ORDER BY series.s
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Merge Join
+ Output: intermediate_result_1.l_orderkey, intermediate_result.s
+ Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey)
+ -> Sort
+ Output: intermediate_result.s
+ Sort Key: intermediate_result.s
+ -> Function Scan on pg_catalog.read_intermediate_result intermediate_result
+ Output: intermediate_result.s
+ Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format)
+ -> Sort
+ Output: intermediate_result_1.l_orderkey
+ Sort Key: intermediate_result_1.l_orderkey
+ -> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1
+ Output: intermediate_result_1.l_orderkey
+ Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
+SELECT true AS valid FROM explain_json($$
+ WITH result AS (
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity
+ ),
+ series AS (
+ SELECT s FROM generate_series(1,10) s
+ )
+ SELECT * FROM result JOIN series ON (s = count_quantity) JOIN orders_hash_part ON (s = o_orderkey)
+$$);
+t
+SELECT true AS valid FROM explain_xml($$
+ WITH result AS (
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity
+ ),
+ series AS (
+ SELECT s FROM generate_series(1,10) s
+ )
+ SELECT * FROM result JOIN series ON (s = l_quantity) JOIN orders_hash_part ON (s = o_orderkey)
+$$);
+t
+--
+-- Test EXPLAIN ANALYZE udfs
+--
+\a\t
+\set default_opts '''{"costs": false, "timing": false, "summary": false}'''::jsonb
+CREATE TABLE explain_analyze_test(a int, b text);
+INSERT INTO explain_analyze_test VALUES (1, 'value 1'), (2, 'value 2'), (3, 'value 3'), (4, 'value 4');
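+-- NOTE: :default_opts is interpolated by psql before the statement is sent,
+-- so each call below is equivalent to passing the jsonb literal directly:
+--
+--   SELECT * FROM worker_save_query_explain_analyze(
+--     'SELECT 1',
+--     '{"costs": false, "timing": false, "summary": false}'::jsonb
+--   ) AS (a int);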
+-- simple select
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', :default_opts) as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Result (actual rows=1 loops=1)+
+
+(1 row)
+
+END;
+-- insert into select
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$
+ INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i $Q$,
+ :default_opts) as (a int);
+ a
+---------------------------------------------------------------------
+(0 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Insert on explain_analyze_test (actual rows=0 loops=1) +
+ -> Function Scan on generate_series i (actual rows=5 loops=1)+
+
+(1 row)
+
+ROLLBACK;
+-- select from table
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$SELECT * FROM explain_analyze_test$Q$,
+ :default_opts) as (a int, b text);
+ a | b
+---------------------------------------------------------------------
+ 1 | value 1
+ 2 | value 2
+ 3 | value 3
+ 4 | value 4
+(4 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Seq Scan on explain_analyze_test (actual rows=4 loops=1)+
+
+(1 row)
+
+ROLLBACK;
+-- insert into with returning
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$
+ INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i
+ RETURNING a, b$Q$,
+ :default_opts) as (a int, b text);
+ a | b
+---------------------------------------------------------------------
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+(5 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Insert on explain_analyze_test (actual rows=5 loops=1) +
+ -> Function Scan on generate_series i (actual rows=5 loops=1)+
+
+(1 row)
+
+ROLLBACK;
+-- delete with returning
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$
+ DELETE FROM explain_analyze_test WHERE a % 2 = 0
+ RETURNING a, b$Q$,
+ :default_opts) as (a int, b text);
+ a | b
+---------------------------------------------------------------------
+ 2 | value 2
+ 4 | value 4
+(2 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Delete on explain_analyze_test (actual rows=2 loops=1) +
+ -> Seq Scan on explain_analyze_test (actual rows=2 loops=1)+
+ Filter: ((a % 2) = 0) +
+ Rows Removed by Filter: 2 +
+
+(1 row)
+
+ROLLBACK;
+-- delete without returning
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$
+ DELETE FROM explain_analyze_test WHERE a % 2 = 0$Q$,
+ :default_opts) as (a int);
+ a
+---------------------------------------------------------------------
+(0 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Delete on explain_analyze_test (actual rows=0 loops=1) +
+ -> Seq Scan on explain_analyze_test (actual rows=2 loops=1)+
+ Filter: ((a % 2) = 0) +
+ Rows Removed by Filter: 2 +
+
+(1 row)
+
+ROLLBACK;
+-- multiple queries (should ERROR)
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1; SELECT 2', :default_opts) as (a int);
+ERROR: cannot EXPLAIN ANALYZE multiple queries
+-- error in query
+SELECT * FROM worker_save_query_explain_analyze('SELECT x', :default_opts) as (a int);
+ERROR: column "x" does not exist
+-- error in format string
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "invlaid_format"}') as (a int);
+ERROR: Invalid explain analyze format: "invlaid_format"
+-- test formats
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "text", "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Result (actual rows=1 loops=1)+
+
+(1 row)
+
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "json", "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ [ +
+ { +
+ "Plan": { +
+ "Node Type": "Result", +
+ "Parallel Aware": false,+
+ "Async Capable": false, +
+ "Actual Rows": 1, +
+ "Actual Loops": 1 +
+ }, +
+ "Triggers": [ +
+ ] +
+ } +
+ ]
+(1 row)
+
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "xml", "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ <explain xmlns="http://www.postgresql.org/2009/explain">+
+   <Query>                                               +
+     <Plan>                                              +
+       <Node-Type>Result</Node-Type>                     +
+       <Parallel-Aware>false</Parallel-Aware>            +
+       <Async-Capable>false</Async-Capable>              +
+       <Actual-Rows>1</Actual-Rows>                      +
+       <Actual-Loops>1</Actual-Loops>                    +
+     </Plan>                                             +
+     <Triggers>                                          +
+     </Triggers>                                         +
+   </Query>                                              +
+ </explain>
+(1 row)
+
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "yaml", "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ - Plan: +
+ Node Type: "Result" +
+ Parallel Aware: false+
+ Async Capable: false +
+ Actual Rows: 1 +
+ Actual Loops: 1 +
+ Triggers:
+(1 row)
+
+END;
+-- costs on, timing off
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": true}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+SELECT explain_analyze_output ~ 'Seq Scan.*\(cost=0.00.*\) \(actual rows.*\)' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- costs off, timing on
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": true, "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+SELECT explain_analyze_output ~ 'Seq Scan on explain_analyze_test \(actual time=.* rows=.* loops=1\)' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- summary on
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"timing": false, "costs": false, "summary": true}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output ~ 'Planning Time:.*Execution Time:.*' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- buffers on
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "buffers": true}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+SELECT explain_analyze_output ~ 'Buffers:' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- verbose on
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "verbose": true}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+SELECT explain_analyze_output ~ 'Output: a, b' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- make sure the saved explain output is deleted at transaction end
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT count(*) FROM worker_last_saved_explain_analyze();
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+-- should be deleted at the end of PREPARE TRANSACTION
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('UPDATE explain_analyze_test SET a=6 WHERE a=4', '{}') as (a int);
+ a
+---------------------------------------------------------------------
+(0 rows)
+
+SELECT count(*) FROM worker_last_saved_explain_analyze();
+ count
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+PREPARE TRANSACTION 'citus_0_1496350_7_0';
+SELECT count(*) FROM worker_last_saved_explain_analyze();
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+COMMIT PREPARED 'citus_0_1496350_7_0';
+-- verify execution time makes sense
+BEGIN;
+SELECT count(*) FROM worker_save_query_explain_analyze('SELECT pg_sleep(0.05)', :default_opts) as (a int);
+ count
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT execution_duration BETWEEN 30 AND 200 FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
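+-- NOTE: execution_duration is evidently reported in milliseconds: the 50ms
+-- pg_sleep above is asserted to land inside the 30-200 window. A longer
+-- sleep would need a proportionally wider check, e.g. (illustrative only):
+--
+--   SELECT execution_duration BETWEEN 80 AND 400
+--   FROM worker_last_saved_explain_analyze();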
+--
+-- verify we handle parametrized queries properly
+--
+CREATE TABLE t(a int);
+INSERT INTO t VALUES (1), (2), (3);
+-- simple case
+PREPARE save_explain AS
+SELECT $1, * FROM worker_save_query_explain_analyze('SELECT $1::int', :default_opts) as (a int);
+EXECUTE save_explain(1);
+ ?column? | a
+---------------------------------------------------------------------
+ 1 | 1
+(1 row)
+
+deallocate save_explain;
+-- Call a UDF first to make sure that we handle stacks of executorBoundParams properly.
+--
+-- The prepared statement will first call f(), which forces a new executor run with a new
+-- set of parameters. Then it calls worker_save_query_explain_analyze with a
+-- parametrized query. If we don't have the correct set of parameters here, it will fail.
+CREATE FUNCTION f() RETURNS INT
+AS $$
+PREPARE pp1 AS SELECT $1 WHERE $2 = $3;
+EXECUTE pp1(4, 5, 5);
+deallocate pp1;
+SELECT 1$$ LANGUAGE sql volatile;
+PREPARE save_explain AS
+ SELECT $1, CASE WHEN i < 2 THEN
+ f() = 1
+ ELSE
+ EXISTS(SELECT * FROM worker_save_query_explain_analyze('SELECT $1::int', :default_opts) as (a int)
+ WHERE a = 1)
+ END
+ FROM generate_series(1, 4) i;
+EXECUTE save_explain(1);
+ ?column? | exists
+---------------------------------------------------------------------
+ 1 | t
+ 1 | t
+ 1 | t
+ 1 | t
+(4 rows)
+
+deallocate save_explain;
+DROP FUNCTION f();
+DROP TABLE t;
+SELECT * FROM explain_analyze_test ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 1 | value 1
+ 2 | value 2
+ 3 | value 3
+ 6 | value 4
+(4 rows)
+
+\a\t
+--
+-- Test different cases of EXPLAIN ANALYZE
+--
+SET citus.shard_count TO 4;
+SET client_min_messages TO WARNING;
+SELECT create_distributed_table('explain_analyze_test', 'a');
+
+\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)'
+\set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)'
+-- router SELECT
+EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 11 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 11 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a = 1)
+-- multi-shard SELECT
+EXPLAIN :default_analyze_flags SELECT count(*) FROM explain_analyze_test;
+Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 32 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+-- empty router SELECT
+EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 10000;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
+ Filter: (a = 10000)
+ Rows Removed by Filter: 1
+-- empty multi-shard SELECT
+EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE b = 'does not exist';
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ Filter: (b = 'does not exist'::text)
+ Rows Removed by Filter: 1
+-- router DML
+BEGIN;
+EXPLAIN :default_analyze_flags DELETE FROM explain_analyze_test WHERE a = 1;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a = 1)
+EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'b' WHERE a = 2;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a = 2)
+SELECT * FROM explain_analyze_test ORDER BY a;
+2|b
+3|value 3
+6|value 4
+ROLLBACK;
+-- multi-shard DML
+BEGIN;
+EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'b' WHERE a IN (1, 2);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a = ANY ('{1,2}'::integer[]))
+EXPLAIN :default_analyze_flags DELETE FROM explain_analyze_test;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+SELECT * FROM explain_analyze_test ORDER BY a;
+ROLLBACK;
+-- router DML with RETURNING with empty result
+EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'something' WHERE a = 10000 RETURNING *;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
+ Filter: (a = 10000)
+ Rows Removed by Filter: 1
+-- multi-shard DML with RETURNING with empty result
+EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'something' WHERE b = 'does not exist' RETURNING *;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ Filter: (b = 'does not exist'::text)
+ Rows Removed by Filter: 1
+-- single-row insert
+BEGIN;
+EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test VALUES (5, 'value 5');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on explain_analyze_test_570009 (actual rows=0 loops=1)
+ -> Result (actual rows=1 loops=1)
+ROLLBACK;
+-- multi-row insert
+BEGIN;
+EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test VALUES (5, 'value 5'), (6, 'value 6');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on explain_analyze_test_570009 citus_table_alias (actual rows=0 loops=1)
+ -> Result (actual rows=1 loops=1)
+ROLLBACK;
+-- distributed insert/select
+BEGIN;
+EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test SELECT * FROM explain_analyze_test;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on explain_analyze_test_570009 citus_table_alias (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a IS NOT NULL)
+ROLLBACK;
+DROP TABLE explain_analyze_test;
+-- test EXPLAIN ANALYZE works fine with primary keys
+CREATE TABLE explain_pk(a int primary key, b int);
+SELECT create_distributed_table('explain_pk', 'a');
+
+BEGIN;
+EXPLAIN :default_analyze_flags INSERT INTO explain_pk VALUES (1, 2), (2, 3);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on explain_pk_570013 citus_table_alias (actual rows=0 loops=1)
+ -> Result (actual rows=1 loops=1)
+SELECT * FROM explain_pk ORDER BY 1;
+1|2
+2|3
+ROLLBACK;
+-- test EXPLAIN ANALYZE with non-text output formats
+BEGIN;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
+- Plan:
+ Node Type: "Custom Scan"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Actual Rows: 0
+ Actual Loops: 1
+ Distributed Query:
+ Job:
+ Task Count: 2
+ Tasks Shown: "One of 2"
+ Tasks:
+ - Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "ModifyTable"
+ Operation: "Insert"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "explain_pk_570013"
+ Alias: "citus_table_alias"
+ Actual Rows: 0
+ Actual Loops: 1
+ Plans:
+ - Node Type: "Result"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Actual Rows: 1
+ Actual Loops: 1
+ Triggers:
+
+ Triggers:
+ROLLBACK;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) SELECT * FROM explain_pk;
+- Plan:
+ Node Type: "Custom Scan"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Actual Rows: 0
+ Actual Loops: 1
+ Distributed Query:
+ Job:
+ Task Count: 4
+ Tuple data received from nodes: "0 bytes"
+ Tasks Shown: "One of 4"
+ Tasks:
+ - Tuple data received from node: "0 bytes"
+ Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "Seq Scan"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "explain_pk_570013"
+ Alias: "explain_pk"
+ Actual Rows: 0
+ Actual Loops: 1
+ Triggers:
+
+ Triggers:
+BEGIN;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Custom Scan</Node-Type>
+      <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Actual-Rows>0</Actual-Rows>
+      <Actual-Loops>1</Actual-Loops>
+      <Distributed-Query>
+        <Job>
+          <Task-Count>2</Task-Count>
+          <Tasks-Shown>One of 2</Tasks-Shown>
+          <Tasks>
+            <Task>
+              <Node>host=localhost port=xxxxx dbname=regression</Node>
+              <Remote-Plan>
+                <explain xmlns="http://www.postgresql.org/2009/explain">
+                  <Query>
+                    <Plan>
+                      <Node-Type>ModifyTable</Node-Type>
+                      <Operation>Insert</Operation>
+                      <Parallel-Aware>false</Parallel-Aware>
+                      <Async-Capable>false</Async-Capable>
+                      <Relation-Name>explain_pk_570013</Relation-Name>
+                      <Alias>citus_table_alias</Alias>
+                      <Actual-Rows>0</Actual-Rows>
+                      <Actual-Loops>1</Actual-Loops>
+                      <Plans>
+                        <Plan>
+                          <Node-Type>Result</Node-Type>
+                          <Parent-Relationship>Outer</Parent-Relationship>
+                          <Parallel-Aware>false</Parallel-Aware>
+                          <Async-Capable>false</Async-Capable>
+                          <Actual-Rows>1</Actual-Rows>
+                          <Actual-Loops>1</Actual-Loops>
+                        </Plan>
+                      </Plans>
+                    </Plan>
+                    <Triggers>
+                    </Triggers>
+                  </Query>
+                </explain>
+              </Remote-Plan>
+            </Task>
+          </Tasks>
+        </Job>
+      </Distributed-Query>
+    </Plan>
+    <Triggers>
+    </Triggers>
+  </Query>
+</explain>
+ROLLBACK;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) SELECT * FROM explain_pk;
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Custom Scan</Node-Type>
+      <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Actual-Rows>0</Actual-Rows>
+      <Actual-Loops>1</Actual-Loops>
+      <Distributed-Query>
+        <Job>
+          <Task-Count>4</Task-Count>
+          <Tuple-data-received-from-nodes>0 bytes</Tuple-data-received-from-nodes>
+          <Tasks-Shown>One of 4</Tasks-Shown>
+          <Tasks>
+            <Task>
+              <Tuple-data-received-from-node>0 bytes</Tuple-data-received-from-node>
+              <Node>host=localhost port=xxxxx dbname=regression</Node>
+              <Remote-Plan>
+                <explain xmlns="http://www.postgresql.org/2009/explain">
+                  <Query>
+                    <Plan>
+                      <Node-Type>Seq Scan</Node-Type>
+                      <Parallel-Aware>false</Parallel-Aware>
+                      <Async-Capable>false</Async-Capable>
+                      <Relation-Name>explain_pk_570013</Relation-Name>
+                      <Alias>explain_pk</Alias>
+                      <Actual-Rows>0</Actual-Rows>
+                      <Actual-Loops>1</Actual-Loops>
+                    </Plan>
+                    <Triggers>
+                    </Triggers>
+                  </Query>
+                </explain>
+              </Remote-Plan>
+            </Task>
+          </Tasks>
+        </Job>
+      </Distributed-Query>
+    </Plan>
+    <Triggers>
+    </Triggers>
+  </Query>
+</explain>
+DROP TABLE explain_pk;
+-- test EXPLAIN ANALYZE with CTEs and subqueries
+CREATE TABLE dist_table(a int, b int);
+SELECT create_distributed_table('dist_table', 'a');
+
+CREATE TABLE ref_table(a int);
+SELECT create_reference_table('ref_table');
+
+INSERT INTO dist_table SELECT i, i*i FROM generate_series(1, 10) i;
+INSERT INTO ref_table SELECT i FROM generate_series(1, 10) i;
+EXPLAIN :default_analyze_flags
+WITH r AS (
+ SELECT GREATEST(random(), 2) r, a FROM dist_table
+)
+SELECT count(distinct a) from r NATURAL JOIN ref_table;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 220 bytes
+ Result destination: Send to 3 nodes
+ -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 120 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 48 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 8 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Hash Join (actual rows=10 loops=1)
+ Hash Cond: (ref_table.a = intermediate_result.a)
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+ -> Hash (actual rows=10 loops=1)
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+EXPLAIN :default_analyze_flags
+SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table;
+Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 32 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Merge Join (actual rows=4 loops=1)
+ Merge Cond: (t.a = ref_table.a)
+ -> Sort (actual rows=4 loops=1)
+ Sort Key: t.a
+ Sort Method: quicksort Memory: 25kB
+ -> Subquery Scan on t (actual rows=4 loops=1)
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: ref_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+SELECT public.explain_with_pg17_initplan_format($Q$
+EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF)
+SELECT count(distinct a) FROM dist_table
+WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table);
+$Q$);
+Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 70 bytes
+ Result destination: Send to 2 nodes
+ -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 10 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Merge Join (actual rows=4 loops=1)
+ Merge Cond: (dist_table.a = ref_table.a)
+ -> Sort (actual rows=4 loops=1)
+ Sort Key: dist_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: ref_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 32 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ InitPlan 1
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
+ -> Result (actual rows=4 loops=1)
+ One-Time Filter: (InitPlan 1).col1
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+BEGIN;
+EXPLAIN :default_analyze_flags
+WITH r AS (
+ INSERT INTO dist_table SELECT a, a * a FROM dist_table
+ RETURNING a
+), s AS (
+ SELECT random() < 2, a * a a2 FROM r
+)
+SELECT count(distinct a2) FROM s;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 100 bytes
+ Result destination: Write locally
+ -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 80 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 32 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on dist_table_570017 citus_table_alias (actual rows=4 loops=1)
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+ Filter: (a IS NOT NULL)
+ -> Distributed Subplan XXX_2
+ Intermediate Data Size: 150 bytes
+ Result destination: Write locally
+ -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 50 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 50 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 8 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+ROLLBACK;
+-- https://github.com/citusdata/citus/issues/4074
+prepare ref_select(int) AS select * from ref_table where 1 = $1;
+explain :default_analyze_flags execute ref_select(1);
+Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 40 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 40 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Result (actual rows=10 loops=1)
+ One-Time Filter: (1 = $1)
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+deallocate ref_select;
+DROP TABLE ref_table, dist_table;
+-- test EXPLAIN ANALYZE with different replication factors
+SET citus.shard_count = 2;
+SET citus.shard_replication_factor = 1;
+CREATE TABLE dist_table_rep1(a int);
+SELECT create_distributed_table('dist_table_rep1', 'a');
+
+SET citus.shard_replication_factor = 2;
+CREATE TABLE dist_table_rep2(a int);
+SELECT create_distributed_table('dist_table_rep2', 'a');
+
+EXPLAIN :default_analyze_flags INSERT INTO dist_table_rep1 VALUES(1), (2), (3), (4), (10), (100) RETURNING *;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on dist_table_rep1_570022 citus_table_alias (actual rows=4 loops=1)
+ -> Values Scan on "*VALUES*" (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags SELECT * from dist_table_rep1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags INSERT INTO dist_table_rep2 VALUES(1), (2), (3), (4), (10), (100) RETURNING *;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 48 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 32 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on dist_table_rep2_570024 citus_table_alias (actual rows=4 loops=1)
+ -> Values Scan on "*VALUES*" (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags SELECT * from dist_table_rep2;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep2_570024 dist_table_rep2 (actual rows=4 loops=1)
+prepare p1 as SELECT * FROM dist_table_rep1;
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+prepare p2 AS SELECT * FROM dist_table_rep1 WHERE a = $1;
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(10);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 10)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(100);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570023 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 100)
+ Rows Removed by Filter: 1
+prepare p3 AS SELECT * FROM dist_table_rep1 WHERE a = 1;
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+DROP TABLE dist_table_rep1, dist_table_rep2;
+-- https://github.com/citusdata/citus/issues/2009
+CREATE TABLE simple (id integer, name text);
+SELECT create_distributed_table('simple', 'id');
+
+PREPARE simple_router AS SELECT *, $1 FROM simple WHERE id = 1;
+EXPLAIN :default_explain_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+deallocate simple_router;
+-- prepared multi-row insert
+PREPARE insert_query AS INSERT INTO simple VALUES ($1, 2), (2, $2);
+EXPLAIN :default_explain_flags EXECUTE insert_query(3, 4);
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on simple_570026 citus_table_alias
+ -> Result
+EXPLAIN :default_analyze_flags EXECUTE insert_query(3, 4);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on simple_570026 citus_table_alias (actual rows=0 loops=1)
+ -> Result (actual rows=1 loops=1)
+deallocate insert_query;
+-- prepared updates
+PREPARE update_query AS UPDATE simple SET name=$1 WHERE name=$2;
+EXPLAIN :default_explain_flags EXECUTE update_query('x', 'y');
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on simple_570026 simple
+ -> Seq Scan on simple_570026 simple
+ Filter: (name = 'y'::text)
+EXPLAIN :default_analyze_flags EXECUTE update_query('x', 'y');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on simple_570026 simple (actual rows=0 loops=1)
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (name = $2)
+ Rows Removed by Filter: 1
+deallocate update_query;
+-- prepared deletes
+PREPARE delete_query AS DELETE FROM simple WHERE name=$1 OR name=$2;
+EXPLAIN (COSTS OFF) EXECUTE delete_query('x', 'y');
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on simple_570026 simple
+ -> Seq Scan on simple_570026 simple
+ Filter: ((name = 'x'::text) OR (name = 'y'::text))
+EXPLAIN :default_analyze_flags EXECUTE delete_query('x', 'y');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on simple_570026 simple (actual rows=0 loops=1)
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: ((name = $1) OR (name = $2))
+ Rows Removed by Filter: 1
+deallocate delete_query;
+-- prepared distributed insert/select
+-- we don't support EXPLAIN for other types of prepared INSERT ... SELECT.
+PREPARE distributed_insert_select AS INSERT INTO simple SELECT * FROM simple WHERE name IN ($1, $2);
+EXPLAIN :default_explain_flags EXECUTE distributed_insert_select('x', 'y');
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on simple_570026 citus_table_alias
+ -> Seq Scan on simple_570026 simple
+ Filter: ((id IS NOT NULL) AND (name = ANY ('{x,y}'::text[])))
+EXPLAIN :default_analyze_flags EXECUTE distributed_insert_select('x', 'y');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on simple_570026 citus_table_alias (actual rows=0 loops=1)
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: ((id IS NOT NULL) AND (name = ANY (ARRAY[$1, $2])))
+ Rows Removed by Filter: 1
+deallocate distributed_insert_select;
+DROP TABLE simple;
+-- prepared cte
+BEGIN;
+PREPARE cte_query AS
+WITH keys AS (
+ SELECT count(*) FROM
+ (SELECT DISTINCT l_orderkey, GREATEST(random(), 2) FROM lineitem_hash_part WHERE l_quantity > $1) t
+),
+series AS (
+ SELECT s FROM generate_series(1, $2) s
+),
+delete_result AS (
+ DELETE FROM lineitem_hash_part WHERE l_quantity < $3 RETURNING *
+)
+SELECT s FROM series;
+EXPLAIN :default_explain_flags EXECUTE cte_query(2, 10, -1);
+Custom Scan (Citus Adaptive)
+ -> Distributed Subplan XXX_1
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360041 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ Filter: (l_quantity < '-1'::numeric)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Function Scan on generate_series s
+EXPLAIN :default_analyze_flags EXECUTE cte_query(2, 10, -1);
+Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 0 bytes
+ Result destination: Send to 0 nodes
+ -> Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
+ Filter: (l_quantity < '-1'::numeric)
+ Rows Removed by Filter: 2885
+ Task Count: 1
+ Tuple data received from nodes: 40 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 40 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Function Scan on generate_series s (actual rows=10 loops=1)
+ROLLBACK;
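The plan above shows Citus's recursive planning at work: a CTE that contains a modification (the `DELETE ... RETURNING`) cannot be inlined, so it runs as a separate `Distributed Subplan`, and under ANALYZE the `Intermediate Data Size` and `Result destination` lines report how many bytes the subplan materialized and where they were sent. A minimal sketch, assuming a hypothetical distributed table `dist_demo(a int)`:

```sql
-- Minimal sketch; dist_demo is a hypothetical distributed table. The
-- modifying CTE becomes a distributed subplan whose RETURNING rows are
-- materialized as an intermediate result.
EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF)
WITH removed AS (DELETE FROM dist_demo WHERE a < 0 RETURNING *)
SELECT count(*) FROM removed;
```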
+-- https://github.com/citusdata/citus/issues/2009#issuecomment-653036502
+CREATE TABLE users_table_2 (user_id int primary key, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint);
+SELECT create_reference_table('users_table_2');
+
+PREPARE p4 (int, int) AS insert into users_table_2 ( value_1, user_id) select value_1, user_id + $2 FROM users_table_2 ON CONFLICT (user_id) DO UPDATE SET value_2 = EXCLUDED.value_1 + $1;
+EXPLAIN :default_explain_flags execute p4(20,20);
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on users_table_2_570028 citus_table_alias
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: users_table_2_pkey_570028
+ -> Seq Scan on users_table_2_570028 users_table_2
+EXPLAIN :default_analyze_flags execute p4(20,20);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on users_table_2_570028 citus_table_alias (actual rows=0 loops=1)
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: users_table_2_pkey_570028
+ Tuples Inserted: 0
+ Conflicting Tuples: 0
+ -> Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1)
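`Tuples Inserted` and `Conflicting Tuples` are stock PostgreSQL EXPLAIN ANALYZE instrumentation for `INSERT ... ON CONFLICT`; the distributed plan simply surfaces them from the worker. A plain-PostgreSQL sketch (`upsert_demo` is a hypothetical name):

```sql
-- Plain-PostgreSQL sketch; upsert_demo is a hypothetical table. The second
-- insert conflicts, so the plan reports Tuples Inserted: 0 and
-- Conflicting Tuples: 1.
CREATE TABLE upsert_demo (id int PRIMARY KEY, v int);
INSERT INTO upsert_demo VALUES (1, 0);
EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF)
INSERT INTO upsert_demo VALUES (1, 1)
ON CONFLICT (id) DO UPDATE SET v = EXCLUDED.v;
DROP TABLE upsert_demo;
```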
+-- simple test to confirm we can fetch long (>4KB) plans
+EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM users_table_2 WHERE value_1::text = '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X';
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1)
+ Filter: ((value_1)::text = '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X'::text)
+DROP TABLE users_table_2;
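The oversized literal above exists to push the worker's EXPLAIN text past 4 KB, exercising the fetch path for long remote plans; the constant has to end up in the printed plan text itself. A hedged alternative sketch (`long_plan_demo` is a hypothetical name): an immutable function such as `repeat()` should be constant-folded by the planner, producing an equally long Filter line without pasting the literal into the test file.

```sql
-- Hedged sketch; long_plan_demo is a hypothetical table. repeat() is
-- immutable, so the planner should fold it into a 5000-character constant
-- and the printed Filter line becomes correspondingly long.
CREATE TABLE long_plan_demo (a text);
EXPLAIN (COSTS OFF) SELECT * FROM long_plan_demo WHERE a = repeat('0', 5000);
DROP TABLE long_plan_demo;
```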
+-- sorted explain analyze output
+CREATE TABLE explain_analyze_execution_time (a int);
+INSERT INTO explain_analyze_execution_time VALUES (2);
+SELECT create_distributed_table('explain_analyze_execution_time', 'a');
+
+-- show that we can sort the output by execution time
+-- we use the following hack to keep the test output
+-- consistent: first ingest a single row, then add a
+-- pg_sleep() call to the query. Postgres only sleeps
+-- for the shard that holds the single row, so that
+-- task is guaranteed to be slower
+set citus.explain_analyze_sort_method to "taskId";
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_execution_time_570029 explain_analyze_execution_time (actual rows=0 loops=1)
+set citus.explain_analyze_sort_method to "execution-time";
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_execution_time_570030 explain_analyze_execution_time (actual rows=1 loops=1)
+-- reset back
+reset citus.explain_analyze_sort_method;
+DROP TABLE explain_analyze_execution_time;
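`citus.explain_analyze_sort_method` controls task ordering in the output, and therefore which task `Tasks Shown: One of 2` prints: `taskId` keeps task-id order (the empty shard above), while `execution-time` puts the slowest task first (the sleeping shard that holds the row). The two values as exercised above:

```sql
-- The two sort modes exercised above, values verbatim from this test.
SET citus.explain_analyze_sort_method TO 'taskId';          -- task-id order
SET citus.explain_analyze_sort_method TO 'execution-time';  -- slowest first
RESET citus.explain_analyze_sort_method;
```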
+CREATE SCHEMA multi_explain;
+SET search_path TO multi_explain;
+-- test EXPLAIN ANALYZE when original query returns no columns
+CREATE TABLE reference_table(a int);
+SELECT create_reference_table('reference_table');
+
+INSERT INTO reference_table VALUES (1);
+EXPLAIN :default_analyze_flags SELECT FROM reference_table;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on reference_table_570031 reference_table (actual rows=1 loops=1)
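`SELECT FROM reference_table` is valid SQL: the select list may be empty, yielding rows with zero columns, and the test verifies that EXPLAIN ANALYZE can ship such zero-width tuples back from the worker. A standalone sketch (`zero_col_demo` is a hypothetical name):

```sql
-- Standalone sketch; zero_col_demo is a hypothetical table. The SELECT
-- returns one row that has no columns.
CREATE TABLE zero_col_demo (a int);
INSERT INTO zero_col_demo VALUES (1);
SELECT FROM zero_col_demo;
DROP TABLE zero_col_demo;
```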
+CREATE TABLE distributed_table_1(a int, b int);
+SELECT create_distributed_table('distributed_table_1','a');
+
+INSERT INTO distributed_table_1 values (1,1);
+select public.explain_filter('
+EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off) SELECT row_number() OVER() AS r FROM distributed_table_1
+', true);
+WindowAgg (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
+CREATE TABLE distributed_table_2(a int, b int);
+SELECT create_distributed_table('distributed_table_2','a');
+
+INSERT INTO distributed_table_2 VALUES (1,1);
+select public.explain_filter('
+EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)
+WITH r AS (SELECT row_number() OVER () AS r FROM distributed_table_1)
+SELECT * FROM distributed_table_2
+JOIN r ON (r = distributed_table_2.b)
+LIMIT 3
+', true);
+Limit (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 14 bytes
+ Result destination: Send to 2 nodes
+ -> WindowAgg (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 16 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit (actual rows=1 loops=1)
+ -> Nested Loop (actual rows=1 loops=1)
+ Join Filter: (distributed_table_2.b = intermediate_result.r)
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
+ -> Seq Scan on distributed_table_2_570034 distributed_table_2 (actual rows=1 loops=1)
+EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subquery;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on reference_table_570031 reference_table (actual rows=1 loops=1)
+PREPARE dummy_prep_stmt(int) AS SELECT FROM distributed_table_1;
+EXPLAIN :default_analyze_flags EXECUTE dummy_prep_stmt(50);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
+CREATE TYPE multi_explain.int_wrapper_type AS (int_field int);
+CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type);
+SELECT create_distributed_table('tbl', 'a');
+
+EXPLAIN :default_analyze_flags SELECT * FROM tbl;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
+PREPARE q1(int_wrapper_type) AS WITH a AS (SELECT * FROM tbl WHERE b = $1 AND a = 1 OFFSET 0) SELECT * FROM a;
+EXPLAIN (COSTS false) EXECUTE q1('(1)');
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on tbl_570036 tbl
+ Filter: ((b = '(1)'::multi_explain.int_wrapper_type) AND (a = 1))
+EXPLAIN :default_analyze_flags EXECUTE q1('(1)');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
+ Filter: ((b = $1) AND (a = 1))
+PREPARE q2(int_wrapper_type) AS WITH a AS (UPDATE tbl SET b = $1 WHERE a = 1 RETURNING *) SELECT * FROM a;
+EXPLAIN (COSTS false) EXECUTE q2('(1)');
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> CTE Scan on a
+ CTE a
+ -> Update on tbl_570036 tbl
+ -> Seq Scan on tbl_570036 tbl
+ Filter: (a = 1)
+EXPLAIN :default_analyze_flags EXECUTE q2('(1)');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> CTE Scan on a (actual rows=0 loops=1)
+ CTE a
+ -> Update on tbl_570036 tbl (actual rows=0 loops=1)
+ -> Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
+ Filter: (a = 1)
+-- EXPLAIN ANALYZE shouldn't execute SubPlans twice (bug #4212)
+SET search_path TO multi_explain;
+CREATE TABLE test_subplans (x int primary key, y int);
+SELECT create_distributed_table('test_subplans','x');
+
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
+WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
+SELECT * FROM a;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 18 bytes
+ Result destination: Write locally
+ -> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 16 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on test_subplans_570038 (actual rows=1 loops=1)
+ -> Result (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 8 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
+-- Only one row must exist
+SELECT * FROM test_subplans;
+1|2
+-- Will fail with duplicate pk
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
+WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
+SELECT * FROM a;
+ERROR: duplicate key value violates unique constraint "test_subplans_pkey_570038"
+DETAIL: Key (x)=(1) already exists.
+CONTEXT: while executing command on localhost:xxxxx
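The failing re-run is the actual regression check for bug #4212: EXPLAIN ANALYZE must execute the distributed plan, subplans included, exactly once. Had the INSERT-in-CTE subplan run twice, the first EXPLAIN ANALYZE would itself have raised the duplicate-key error. The invariant, written as a direct assertion against the table created above:

```sql
-- Direct assertion of the invariant under test: exactly one row exists
-- after a single EXPLAIN ANALYZE of the INSERT-in-CTE query.
SELECT count(*) = 1 AS subplan_ran_exactly_once FROM test_subplans;
```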
+-- Test JSON format
+TRUNCATE test_subplans;
+EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
+WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
+SELECT * FROM a;
+[
+ {
+ "Plan": {
+ "Node Type": "Custom Scan",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Actual Rows": 1,
+ "Actual Loops": 1,
+ "Distributed Query": {
+ "Subplans": [
+ {
+ "Intermediate Data Size": "18 bytes",
+ "Result destination": "Write locally",
+ "PlannedStmt": [
+ {
+ "Plan": {
+ "Node Type": "Custom Scan",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Actual Rows": 1,
+ "Actual Loops": 1,
+ "Distributed Query": {
+ "Job": {
+ "Task Count": 1,
+ "Tuple data received from nodes": "16 bytes",
+ "Tasks Shown": "All",
+ "Tasks": [
+ {
+ "Tuple data received from node": "16 bytes",
+ "Node": "host=localhost port=xxxxx dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "ModifyTable",
+ "Operation": "Insert",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "test_subplans_570038",
+ "Alias": "test_subplans_570038",
+ "Actual Rows": 1,
+ "Actual Loops": 1,
+ "Plans": [
+ {
+ "Node Type": "Result",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Actual Rows": 1,
+ "Actual Loops": 1
+ }
+ ]
+ },
+ "Triggers": [
+ ]
+ }
+ ]
+
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "Triggers": [
+ ]
+ }
+ ]
+ }
+ ],
+ "Job": {
+ "Task Count": 1,
+ "Tuple data received from nodes": "8 bytes",
+ "Tasks Shown": "All",
+ "Tasks": [
+ {
+ "Tuple data received from node": "8 bytes",
+ "Node": "host=localhost port=xxxxx dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "Function Scan",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Function Name": "read_intermediate_result",
+ "Alias": "intermediate_result",
+ "Actual Rows": 1,
+ "Actual Loops": 1
+ },
+ "Triggers": [
+ ]
+ }
+ ]
+
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "Triggers": [
+ ]
+ }
+]
+-- Only one row must exist
+SELECT * FROM test_subplans;
+1|2
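Under `FORMAT JSON`, Citus nests its information beneath a `Distributed Query` key: subplans carry their own `PlannedStmt` entries and each task embeds the worker's plan under `Remote Plan`. Since EXPLAIN returns a result set (a single `json` value in this format), the document can be captured and inspected programmatically; a hedged PL/pgSQL sketch against the table above:

```sql
-- Hedged sketch: capture the JSON plan into a variable and read one of
-- the nested keys shown above.
DO $$
DECLARE
  plan json;
BEGIN
  EXECUTE 'EXPLAIN (FORMAT JSON, COSTS OFF) SELECT * FROM test_subplans'
    INTO plan;
  RAISE NOTICE 'provider: %', plan -> 0 -> 'Plan' ->> 'Custom Plan Provider';
END
$$;
```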
+-- check that, when auto_explain with log_analyze is enabled, we do not allow local execution.
+CREATE SCHEMA test_auto_explain;
+SET search_path TO 'test_auto_explain';
+CREATE TABLE test_ref_table (key int PRIMARY KEY);
+SELECT create_reference_table('test_ref_table');
+
+LOAD 'auto_explain';
+SET auto_explain.log_min_duration = 0;
+set auto_explain.log_analyze to true;
+-- the following should not be locally executed since explain analyze is on
+select * from test_ref_table;
+DROP SCHEMA test_auto_explain CASCADE;
+SET client_min_messages TO ERROR;
+DROP SCHEMA multi_explain CASCADE;
diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out
deleted file mode 100644
index 5ff926ff6..000000000
--- a/src/test/regress/expected/multi_metadata_sync_0.out
+++ /dev/null
@@ -1,2264 +0,0 @@
---
--- MULTI_METADATA_SYNC
---
--- this test has different output for PG14 compared to PG15
--- In PG15, the public schema is owned by the pg_database_owner role
--- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
- server_version_ge_15
----------------------------------------------------------------------
- f
-(1 row)
-
--- Tests for metadata snapshot functions, metadata syncing functions and propagation of
--- metadata changes to MX tables.
--- Turn metadata sync off at first
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
-ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 2;
-SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
-\gset
-ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000;
-SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset
-SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset
--- Create the necessary test utility function
-SET citus.enable_metadata_sync TO OFF;
-CREATE FUNCTION activate_node_snapshot()
- RETURNS text[]
- LANGUAGE C STRICT
- AS 'citus';
-RESET citus.enable_metadata_sync;
-COMMENT ON FUNCTION activate_node_snapshot()
- IS 'commands to activate node snapshot';
--- Show that none of the existing tables qualify as MX tables
-SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s';
- logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
----------------------------------------------------------------------
-(0 rows)
-
--- Since the password_encryption default changed from md5 to scram-sha-256 in PG14,
--- we set it to md5 manually just for consistent test results between PG versions.
-ALTER SYSTEM SET password_encryption TO md5;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT pg_sleep(0.1);
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
-SET client_min_messages TO ERROR;
-ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword';
-RESET client_min_messages;
--- Show that, with no MX tables, activate node snapshot contains only the delete commands,
--- pg_dist_node entries, pg_dist_object entries and roles.
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
-(33 rows)
-
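`activate_node_snapshot()` returns the full command list that metadata sync would replay on a worker, so the test repeats the same probe after each DDL change and compares the sorted output; with no MX tables the snapshot reduces to catalog resets, the `pg_dist_node` contents, and role/database/schema propagation. The probe itself:

```sql
-- The probe repeated throughout this test; activate_node_snapshot() is
-- the test-only C function defined above.
SELECT unnest(activate_node_snapshot()) ORDER BY 1;
```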
--- Create a test table with constraints, a BIGSERIAL column, and a default from a user-defined sequence
-CREATE SEQUENCE user_defined_seq;
-CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq'));
-set citus.shard_count to 8;
-set citus.shard_replication_factor to 1;
-SELECT create_distributed_table('mx_test_table', 'col_1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-reset citus.shard_count;
--- Set the replication model of the test table to streaming replication so that it is
--- considered an MX table
-UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass;
--- add a single-shard table and verify the creation commands are included in the activate node snapshot
-CREATE TABLE single_shard_tbl(a int);
-SELECT create_distributed_table('single_shard_tbl', null);
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO single_shard_tbl VALUES (1);
-reset citus.shard_replication_factor;
--- Show that the created MX table and its sequences are included in the activate node snapshot
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
- ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
- ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
- ALTER TABLE public.mx_test_table OWNER TO postgres
- ALTER TABLE public.single_shard_tbl OWNER TO postgres
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
- CREATE TABLE public.single_shard_tbl (a integer) USING heap
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- DROP TABLE IF EXISTS public.mx_test_table CASCADE
- DROP TABLE IF EXISTS public.single_shard_tbl CASCADE
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT citus_internal.add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's')
- SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency('public.single_shard_tbl');
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SELECT worker_create_truncate_trigger('public.mx_test_table')
- SELECT worker_create_truncate_trigger('public.single_shard_tbl')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(61 rows)
-
--- Drop single shard table
-DROP TABLE single_shard_tbl;
--- Show that CREATE INDEX commands are included in the activate node snapshot
-CREATE INDEX mx_index ON mx_test_table(col_2);
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
- ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
- ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
- ALTER TABLE public.mx_test_table OWNER TO postgres
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- DROP TABLE IF EXISTS public.mx_test_table CASCADE
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SELECT worker_create_truncate_trigger('public.mx_test_table')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(52 rows)
-
--- Show that schema changes are included in the activate node snapshot
-CREATE SCHEMA mx_testing_schema;
-ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema;
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
- ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
- ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
- ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
- CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(54 rows)
-
--- Show that append distributed tables are not included in the activate node snapshot
-CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
-SELECT create_distributed_table('non_mx_test_table', 'col_1', 'append');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass;
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
- ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
- ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
- ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
- CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(54 rows)
-
--- Show that range distributed tables are not included in the activate node snapshot
-UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
- ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
- ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
- ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
- CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(54 rows)
-
--- Test start_metadata_sync_to_node and citus_activate_node UDFs
--- Ensure that hasmetadata=false for all nodes except for the coordinator node
-SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
--- Show that metadata cannot be synced on a secondary node
-SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset
-SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary');
- master_add_node
----------------------------------------------------------------------
- 5
-(1 row)
-
-SELECT start_metadata_sync_to_node('localhost', 8888);
- start_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
- hasmetadata
----------------------------------------------------------------------
- f
-(1 row)
-
-SELECT stop_metadata_sync_to_node('localhost', 8888);
-NOTICE: (localhost,8888) is a secondary node: to clear the metadata, you should clear metadata from the primary node
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
- hasmetadata
----------------------------------------------------------------------
- f
-(1 row)
-
--- Add a node to another cluster to make sure it's also synced
-SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
- master_add_secondary_node
----------------------------------------------------------------------
- 6
-(1 row)
-
-\c - - - :master_port
--- Run start_metadata_sync_to_node and citus_activate_node and check that they mark hasmetadata for that worker
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;
- nodeid | hasmetadata
----------------------------------------------------------------------
- 2 | t
-(1 row)
-
--- Check that the metadata has been copied to the worker
-\c - - - :worker_1_port
-SELECT * FROM pg_dist_local_group;
- groupid
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
----------------------------------------------------------------------
- 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
- 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
- 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
- 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
- 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
-(5 rows)
-
-SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
- logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
----------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
-(1 row)
-
-SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
----------------------------------------------------------------------
- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737
- mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825
- mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913
- mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1
- mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911
- mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823
- mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735
- mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647
-(8 rows)
-
-SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport | placementid
----------------------------------------------------------------------
- 1310000 | 1 | 0 | localhost | 57637 | 100000
- 1310001 | 1 | 0 | localhost | 57638 | 100001
- 1310002 | 1 | 0 | localhost | 57637 | 100002
- 1310003 | 1 | 0 | localhost | 57638 | 100003
- 1310004 | 1 | 0 | localhost | 57637 | 100004
- 1310005 | 1 | 0 | localhost | 57638 | 100005
- 1310006 | 1 | 0 | localhost | 57637 | 100006
- 1310007 | 1 | 0 | localhost | 57638 | 100007
-(8 rows)
-
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col_1 | integer |
- col_2 | text | not null
- col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass)
- col_4 | bigint | default nextval('user_defined_seq'::regclass)
-(4 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_1 | integer | col_1
-(1 row)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_testing_schema.mx_index'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_2 | text | col_2
-(1 row)
-
--- Check that pg_dist_colocation is synced
-SELECT * FROM pg_dist_colocation ORDER BY colocationid;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
----------------------------------------------------------------------
- 2 | 8 | 1 | 23 | 0
-(1 row)
-
--- Make sure that the truncate trigger has been set for the MX table on the worker
-SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
--- Make sure that citus_activate_node considers foreign key constraints
-\c - - - :master_port
--- Since we're superuser, we can set the replication model to 'streaming' to
--- create some MX tables
-SET citus.shard_replication_factor TO 1;
-CREATE SCHEMA mx_testing_schema_2;
-CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE(col1, col3));
-CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text,
- FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3));
-SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- Check that foreign key metadata exists on the worker
-\c - - - :worker_1_port
-SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass;
- Constraint | Definition
----------------------------------------------------------------------
- fk_test_2_col1_col2_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
-(1 row)
-
-\c - - - :master_port
-DROP TABLE mx_testing_schema_2.fk_test_2;
-DROP TABLE mx_testing_schema.fk_test_1;
-RESET citus.shard_replication_factor;
--- Check that repeated calls to citus_activate_node have no side effects
-\c - - - :master_port
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-\c - - - :worker_1_port
-SELECT * FROM pg_dist_local_group;
- groupid
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
----------------------------------------------------------------------
- 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
- 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
- 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
- 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
- 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
-(5 rows)
-
-SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
- logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
----------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
-(1 row)
-
-SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
----------------------------------------------------------------------
- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737
- mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825
- mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913
- mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1
- mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911
- mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823
- mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735
- mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647
-(8 rows)
-
-SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport | placementid
----------------------------------------------------------------------
- 1310000 | 1 | 0 | localhost | 57637 | 100000
- 1310001 | 1 | 0 | localhost | 57638 | 100001
- 1310002 | 1 | 0 | localhost | 57637 | 100002
- 1310003 | 1 | 0 | localhost | 57638 | 100003
- 1310004 | 1 | 0 | localhost | 57637 | 100004
- 1310005 | 1 | 0 | localhost | 57638 | 100005
- 1310006 | 1 | 0 | localhost | 57637 | 100006
- 1310007 | 1 | 0 | localhost | 57638 | 100007
-(8 rows)
-
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col_1 | integer |
- col_2 | text | not null
- col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass)
- col_4 | bigint | default nextval('user_defined_seq'::regclass)
-(4 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_1 | integer | col_1
-(1 row)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_testing_schema.mx_index'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_2 | text | col_2
-(1 row)
-
-SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
--- Make sure that citus_activate_node can be called inside a transaction and rolled back
-\c - - - :master_port
-BEGIN;
-SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-ROLLBACK;
-SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
- hasmetadata
----------------------------------------------------------------------
- f
-(1 row)
-
--- Check that the distributed table can be queried from the worker
-\c - - - :master_port
-SET citus.shard_replication_factor TO 1;
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-CREATE TABLE mx_query_test (a int, b text, c int);
-SELECT create_distributed_table('mx_query_test', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE single_shard_tbl(a int);
-SELECT create_distributed_table('single_shard_tbl', null);
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO single_shard_tbl VALUES (1);
-SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_query_test'::regclass;
- repmodel
----------------------------------------------------------------------
- s
-(1 row)
-
-INSERT INTO mx_query_test VALUES (1, 'one', 1);
-INSERT INTO mx_query_test VALUES (2, 'two', 4);
-INSERT INTO mx_query_test VALUES (3, 'three', 9);
-INSERT INTO mx_query_test VALUES (4, 'four', 16);
-INSERT INTO mx_query_test VALUES (5, 'five', 24);
-\c - - - :worker_1_port
-SELECT * FROM mx_query_test ORDER BY a;
- a | b | c
----------------------------------------------------------------------
- 1 | one | 1
- 2 | two | 4
- 3 | three | 9
- 4 | four | 16
- 5 | five | 24
-(5 rows)
-
-INSERT INTO mx_query_test VALUES (6, 'six', 36);
-UPDATE mx_query_test SET c = 25 WHERE a = 5;
-SELECT * FROM single_shard_tbl ORDER BY a;
- a
----------------------------------------------------------------------
- 1
-(1 row)
-
-INSERT INTO single_shard_tbl VALUES (2);
-\c - - - :master_port
-SELECT * FROM mx_query_test ORDER BY a;
- a | b | c
----------------------------------------------------------------------
- 1 | one | 1
- 2 | two | 4
- 3 | three | 9
- 4 | four | 16
- 5 | five | 25
- 6 | six | 36
-(6 rows)
-
-SELECT * FROM single_shard_tbl ORDER BY a;
- a
----------------------------------------------------------------------
- 1
- 2
-(2 rows)
-
-\c - - - :master_port
-DROP TABLE mx_query_test;
-DROP TABLE single_shard_tbl;
--- Check that the stop_metadata_sync_to_node function sets the node's hasmetadata to false
-\c - - - :master_port
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
- start_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port;
- hasmetadata
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port;
- hasmetadata
----------------------------------------------------------------------
- f
-(1 row)
-
--- Test DDL propagation in MX tables
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
- start_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SET citus.shard_count = 5;
-CREATE SCHEMA mx_test_schema_1;
-CREATE SCHEMA mx_test_schema_2;
--- Create MX tables
-SET citus.shard_replication_factor TO 1;
-CREATE TABLE mx_test_schema_1.mx_table_1 (col1 int UNIQUE, col2 text);
-CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 (col1);
-CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text);
-CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2);
-ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1);
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col1 | integer |
- col2 | text |
-(2 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_test_schema_1.mx_table_1_col1_key'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col1 | integer | col1
-(1 row)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_test_schema_1.mx_index_1'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col1 | integer | col1
-(1 row)
-
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col1 | integer |
- col2 | text |
-(2 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_test_schema_2.mx_index_2'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col2 | text | col2
-(1 row)
-
-SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass;
- Constraint | Definition
----------------------------------------------------------------------
- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
-(1 row)
-
-SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_distributed_table('mx_test_schema_2.mx_table_2', 'col1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- Check that created tables are marked as streaming replicated tables
-SELECT
- logicalrelid, repmodel
-FROM
- pg_dist_partition
-WHERE
- logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass
- OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass
-ORDER BY
- logicalrelid::text;
- logicalrelid | repmodel
----------------------------------------------------------------------
- mx_test_schema_1.mx_table_1 | s
- mx_test_schema_2.mx_table_2 | s
-(2 rows)
-
--- See the shards and placements of the mx tables
-SELECT
- logicalrelid, shardid, nodename, nodeport
-FROM
- pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE
- logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass
- OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass
-ORDER BY
- logicalrelid::text, shardid;
- logicalrelid | shardid | nodename | nodeport
----------------------------------------------------------------------
- mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637
- mx_test_schema_1.mx_table_1 | 1310023 | localhost | 57638
- mx_test_schema_1.mx_table_1 | 1310024 | localhost | 57637
- mx_test_schema_1.mx_table_1 | 1310025 | localhost | 57638
- mx_test_schema_1.mx_table_1 | 1310026 | localhost | 57637
- mx_test_schema_2.mx_table_2 | 1310027 | localhost | 57637
- mx_test_schema_2.mx_table_2 | 1310028 | localhost | 57638
- mx_test_schema_2.mx_table_2 | 1310029 | localhost | 57637
- mx_test_schema_2.mx_table_2 | 1310030 | localhost | 57638
- mx_test_schema_2.mx_table_2 | 1310031 | localhost | 57637
-(10 rows)
-
--- Check that metadata of MX tables exists on the metadata worker
-\c - - - :worker_1_port
--- Check that tables are created
-\dt mx_test_schema_?.mx_table_?
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- mx_test_schema_1 | mx_table_1 | table | postgres
- mx_test_schema_2 | mx_table_2 | table | postgres
-(2 rows)
-
--- Check that table metadata is created
-SELECT
- logicalrelid, repmodel
-FROM
- pg_dist_partition
-WHERE
- logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass
- OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass
-ORDER BY
- logicalrelid::text;
- logicalrelid | repmodel
----------------------------------------------------------------------
- mx_test_schema_1.mx_table_1 | s
- mx_test_schema_2.mx_table_2 | s
-(2 rows)
-
--- Check that shard and placement data are created
-SELECT
- logicalrelid, shardid, nodename, nodeport
-FROM
- pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE
- logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass
- OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass
-ORDER BY
- logicalrelid::text, shardid;
- logicalrelid | shardid | nodename | nodeport
----------------------------------------------------------------------
- mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637
- mx_test_schema_1.mx_table_1 | 1310023 | localhost | 57638
- mx_test_schema_1.mx_table_1 | 1310024 | localhost | 57637
- mx_test_schema_1.mx_table_1 | 1310025 | localhost | 57638
- mx_test_schema_1.mx_table_1 | 1310026 | localhost | 57637
- mx_test_schema_2.mx_table_2 | 1310027 | localhost | 57637
- mx_test_schema_2.mx_table_2 | 1310028 | localhost | 57638
- mx_test_schema_2.mx_table_2 | 1310029 | localhost | 57637
- mx_test_schema_2.mx_table_2 | 1310030 | localhost | 57638
- mx_test_schema_2.mx_table_2 | 1310031 | localhost | 57637
-(10 rows)
-
--- Check that metadata of MX tables doesn't exist on the non-metadata worker
-\c - - - :worker_2_port
-\d mx_test_schema_1.mx_table_1
-\d mx_test_schema_2.mx_table_2
-SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_test_schema%';
- logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
----------------------------------------------------------------------
-(0 rows)
-
-SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_test_schema%';
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
----------------------------------------------------------------------
-(0 rows)
-
-SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport | placementid
----------------------------------------------------------------------
-(0 rows)
-
--- Check that CREATE INDEX statement is propagated
-\c - - - :master_port
-SET client_min_messages TO 'ERROR';
-CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1);
-ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1);
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_test_schema_2.mx_index_3'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col1 | integer | col1
-(1 row)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_test_schema_2.mx_table_2_col1_key'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col1 | integer | col1
-(1 row)
-
--- Check that DROP INDEX statement is propagated
-\c - - - :master_port
-DROP INDEX mx_test_schema_2.mx_index_3;
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_test_schema_2.mx_index_3'::regclass;
-ERROR: relation "mx_test_schema_2.mx_index_3" does not exist
--- Check that ALTER TABLE statements are propagated
-\c - - - :master_port
-ALTER TABLE mx_test_schema_1.mx_table_1 ADD COLUMN col3 NUMERIC;
-ALTER TABLE mx_test_schema_1.mx_table_1 ALTER COLUMN col3 SET DATA TYPE INT;
-ALTER TABLE
- mx_test_schema_1.mx_table_1
-ADD CONSTRAINT
- mx_fk_constraint
-FOREIGN KEY
- (col1)
-REFERENCES
- mx_test_schema_2.mx_table_2(col1);
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col1 | integer |
- col2 | text |
- col3 | integer |
-(3 rows)
-
-SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
- Constraint | Definition
----------------------------------------------------------------------
- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1)
-(1 row)
-
--- Check that foreign key constraint with NOT VALID works as well
-\c - - - :master_port
-ALTER TABLE mx_test_schema_1.mx_table_1 DROP CONSTRAINT mx_fk_constraint;
-ALTER TABLE
- mx_test_schema_1.mx_table_1
-ADD CONSTRAINT
- mx_fk_constraint_2
-FOREIGN KEY
- (col1)
-REFERENCES
- mx_test_schema_2.mx_table_2(col1)
-NOT VALID;
-\c - - - :worker_1_port
-SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
- Constraint | Definition
----------------------------------------------------------------------
- mx_fk_constraint_2 | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1)
-(1 row)
-
--- Check that the update_distributed_table_colocation call propagates the changes to the workers
-\c - - - :master_port
-SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset
-ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
-SET citus.shard_count TO 7;
-SET citus.shard_replication_factor TO 1;
-CREATE TABLE mx_colocation_test_1 (a int);
-SELECT create_distributed_table('mx_colocation_test_1', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE mx_colocation_test_2 (a int);
-SELECT create_distributed_table('mx_colocation_test_2', 'a', colocate_with:='none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- Reset the colocation IDs of the test tables
-DELETE FROM
- pg_dist_colocation
-WHERE EXISTS (
- SELECT 1
- FROM pg_dist_partition
- WHERE
- colocationid = pg_dist_partition.colocationid
- AND pg_dist_partition.logicalrelid = 'mx_colocation_test_1'::regclass);
--- Check the colocation IDs of the created tables
-SELECT
- logicalrelid, colocationid
-FROM
- pg_dist_partition
-WHERE
- logicalrelid = 'mx_colocation_test_1'::regclass
- OR logicalrelid = 'mx_colocation_test_2'::regclass
-ORDER BY logicalrelid::text;
- logicalrelid | colocationid
----------------------------------------------------------------------
- mx_colocation_test_1 | 10000
- mx_colocation_test_2 | 10001
-(2 rows)
-
--- Update colocation and see the changes on the master and the worker
-SELECT update_distributed_table_colocation('mx_colocation_test_1', colocate_with => 'mx_colocation_test_2');
- update_distributed_table_colocation
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT
- logicalrelid, colocationid
-FROM
- pg_dist_partition
-WHERE
- logicalrelid = 'mx_colocation_test_1'::regclass
- OR logicalrelid = 'mx_colocation_test_2'::regclass
-ORDER BY
- logicalrelid::text;
- logicalrelid | colocationid
----------------------------------------------------------------------
- mx_colocation_test_1 | 10001
- mx_colocation_test_2 | 10001
-(2 rows)
-
-\c - - - :worker_1_port
-SELECT
- logicalrelid, colocationid
-FROM
- pg_dist_partition
-WHERE
- logicalrelid = 'mx_colocation_test_1'::regclass
- OR logicalrelid = 'mx_colocation_test_2'::regclass
-ORDER BY
- logicalrelid::text;
- logicalrelid | colocationid
----------------------------------------------------------------------
- mx_colocation_test_1 | 10001
- mx_colocation_test_2 | 10001
-(2 rows)
-
-\c - - - :master_port
--- Check that DROP TABLE on MX tables works
-DROP TABLE mx_colocation_test_1;
-DROP TABLE mx_colocation_test_2;
-\d mx_colocation_test_1
-\d mx_colocation_test_2
-\c - - - :worker_1_port
-\d mx_colocation_test_1
-\d mx_colocation_test_2
--- Check that a dropped MX table can be recreated
-\c - - - :master_port
-SET citus.shard_count TO 7;
-SET citus.shard_replication_factor TO 1;
-CREATE TABLE mx_temp_drop_test (a int);
-SELECT create_distributed_table('mx_temp_drop_test', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass;
- logicalrelid | repmodel
----------------------------------------------------------------------
- mx_temp_drop_test | s
-(1 row)
-
-DROP TABLE mx_temp_drop_test;
-CREATE TABLE mx_temp_drop_test (a int);
-SELECT create_distributed_table('mx_temp_drop_test', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass;
- logicalrelid | repmodel
----------------------------------------------------------------------
- mx_temp_drop_test | s
-(1 row)
-
-DROP TABLE mx_temp_drop_test;
--- Check that MX tables can be created with SERIAL columns
-\c - - - :master_port
-SET citus.shard_count TO 3;
-SET citus.shard_replication_factor TO 1;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
--- sync table with serial column after create_distributed_table
-CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL);
-SELECT create_distributed_table('mx_table_with_small_sequence', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-DROP TABLE mx_table_with_small_sequence;
--- Show that create_distributed_table works with a serial column
-CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL);
-SELECT create_distributed_table('mx_table_with_small_sequence', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO mx_table_with_small_sequence VALUES (0);
-\c - - - :worker_1_port
--- Insert doesn't work because the defaults are of type int and smallint
-INSERT INTO mx_table_with_small_sequence VALUES (1), (3);
-ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint
-\c - - - :master_port
-SET citus.shard_replication_factor TO 1;
--- Create an MX table with BIGSERIAL sequences
-CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL);
-SELECT create_distributed_table('mx_table_with_sequence', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO mx_table_with_sequence VALUES (0);
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- a | integer |
- b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass)
- c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass)
-(3 rows)
-
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_b_seq | sequence | postgres
-(1 row)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_c_seq | sequence | postgres
-(1 row)
-
--- Check that the sequences are created on the metadata worker as well
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- a | integer |
- b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass)
- c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass)
-(3 rows)
-
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_b_seq | sequence | postgres
-(1 row)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_c_seq | sequence | postgres
-(1 row)
-
--- Insert works because the defaults are of type bigint
-INSERT INTO mx_table_with_sequence VALUES (1), (3);
--- check that pg_depend records exist on the worker
-SELECT refobjsubid FROM pg_depend
-WHERE objid = 'mx_table_with_sequence_b_seq'::regclass AND refobjid = 'mx_table_with_sequence'::regclass;
- refobjsubid
----------------------------------------------------------------------
- 2
-(1 row)
-
-SELECT refobjsubid FROM pg_depend
-WHERE objid = 'mx_table_with_sequence_c_seq'::regclass AND refobjid = 'mx_table_with_sequence'::regclass;
- refobjsubid
----------------------------------------------------------------------
- 3
-(1 row)
-
--- Check that the sequences on the worker have their own space
-SELECT nextval('mx_table_with_sequence_b_seq');
- nextval
----------------------------------------------------------------------
- 281474976710659
-(1 row)
-
-SELECT nextval('mx_table_with_sequence_c_seq');
- nextval
----------------------------------------------------------------------
- 281474976710659
-(1 row)
-
--- Check that adding a new metadata node sets the sequence space correctly
-\c - - - :master_port
-SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-\c - - - :worker_2_port
-SELECT groupid FROM pg_dist_local_group;
- groupid
----------------------------------------------------------------------
- 2
-(1 row)
-
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- a | integer |
- b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass)
- c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass)
-(3 rows)
-
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_b_seq | sequence | postgres
-(1 row)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_c_seq | sequence | postgres
-(1 row)
-
-SELECT nextval('mx_table_with_sequence_b_seq');
- nextval
----------------------------------------------------------------------
- 562949953421313
-(1 row)
-
-SELECT nextval('mx_table_with_sequence_c_seq');
- nextval
----------------------------------------------------------------------
- 562949953421313
-(1 row)
-
--- Insert doesn't work because the defaults are of type int and smallint
-INSERT INTO mx_table_with_small_sequence VALUES (2), (4);
-ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint
--- Insert works because the defaults are of type bigint
-INSERT INTO mx_table_with_sequence VALUES (2), (4);
--- Check that dropping the mx table with sequences works as expected
-\c - - - :master_port
--- check our small sequence values
-SELECT a, b, c FROM mx_table_with_small_sequence ORDER BY a,b,c;
- a | b | c
----------------------------------------------------------------------
- 0 | 1 | 1
-(1 row)
-
--- check our bigint sequence values
-SELECT a, b, c FROM mx_table_with_sequence ORDER BY a,b,c;
- a | b | c
----------------------------------------------------------------------
- 0 | 1 | 1
- 1 | 281474976710657 | 281474976710657
- 2 | 562949953421314 | 562949953421314
- 3 | 281474976710658 | 281474976710658
- 4 | 562949953421315 | 562949953421315
-(5 rows)
-
--- Check that dropping the mx table with sequences works as expected
-DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence;
-\d mx_table_with_sequence
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
--- Check that the sequences are dropped from the workers
-\c - - - :worker_1_port
-\d mx_table_with_sequence
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
--- Check that the sequences are dropped from the workers
-\c - - - :worker_2_port
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
--- Check that MX sequences play well with non-superusers
-\c - - - :master_port
--- Remove a node so that shards and sequences won't be created on table creation. Therefore,
--- we can test that citus_activate_node can actually create the sequence with the
--- proper owner
-CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement;
-CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition;
-CREATE TABLE pg_dist_object_temp AS SELECT * FROM pg_catalog.pg_dist_object;
-DELETE FROM pg_dist_placement;
-DELETE FROM pg_dist_partition;
-DELETE FROM pg_catalog.pg_dist_object;
-SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
-SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
--- the master user needs superuser permissions to change the replication model
-CREATE USER mx_user WITH SUPERUSER;
-\c - mx_user - :master_port
--- Create an mx table as a different user
-CREATE TABLE mx_table (a int, b BIGSERIAL);
-SET citus.shard_replication_factor TO 1;
-SELECT create_distributed_table('mx_table', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-\c - postgres - :master_port
-SELECT master_add_node('localhost', :worker_2_port);
- master_add_node
----------------------------------------------------------------------
- 7
-(1 row)
-
-\c - mx_user - :worker_1_port
-SELECT nextval('mx_table_b_seq');
- nextval
----------------------------------------------------------------------
- 281474976710657
-(1 row)
-
-INSERT INTO mx_table (a) VALUES (37);
-INSERT INTO mx_table (a) VALUES (38);
-SELECT * FROM mx_table ORDER BY a;
- a | b
----------------------------------------------------------------------
- 37 | 281474976710658
- 38 | 281474976710659
-(2 rows)
-
-\c - mx_user - :worker_2_port
-SELECT nextval('mx_table_b_seq');
- nextval
----------------------------------------------------------------------
- 1125899906842625
-(1 row)
-
-INSERT INTO mx_table (a) VALUES (39);
-INSERT INTO mx_table (a) VALUES (40);
-SELECT * FROM mx_table ORDER BY a;
- a | b
----------------------------------------------------------------------
- 37 | 281474976710658
- 38 | 281474976710659
- 39 | 1125899906842626
- 40 | 1125899906842627
-(4 rows)
-
-\c - mx_user - :master_port
-DROP TABLE mx_table;
--- put the metadata back into a consistent state
-\c - postgres - :master_port
-INSERT INTO pg_dist_placement SELECT * FROM pg_dist_placement_temp;
-INSERT INTO pg_dist_partition SELECT * FROM pg_dist_partition_temp;
-INSERT INTO pg_catalog.pg_dist_object SELECT * FROM pg_dist_object_temp ON CONFLICT ON CONSTRAINT pg_dist_object_pkey DO NOTHING;
-DROP TABLE pg_dist_placement_temp;
-DROP TABLE pg_dist_partition_temp;
-DROP TABLE pg_dist_object_temp;
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
-\c - - - :worker_1_port
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
-\c - - - :worker_2_port
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
-\c - - - :master_port
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-DROP USER mx_user;
--- Check that create_reference_table creates the metadata on workers
-\c - - - :master_port
-CREATE TABLE mx_ref (col_1 int, col_2 text);
-SELECT create_reference_table('mx_ref');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
--- make sure that adding/removing nodes doesn't cause
--- multiple colocation entries for reference tables
-SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-\dt mx_ref
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_ref | table | postgres
-(1 row)
-
-\c - - - :worker_1_port
-\dt mx_ref
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_ref | table | postgres
-(1 row)
-
-SELECT
- logicalrelid, partmethod, repmodel, shardid, placementid, nodename, nodeport
-FROM
- pg_dist_partition
- NATURAL JOIN pg_dist_shard
- NATURAL JOIN pg_dist_shard_placement
-WHERE
- logicalrelid = 'mx_ref'::regclass
-ORDER BY
- nodeport;
- logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport
----------------------------------------------------------------------
- mx_ref | n | t | 1310074 | 100074 | localhost | 57636
- mx_ref | n | t | 1310074 | 100075 | localhost | 57637
- mx_ref | n | t | 1310074 | 100076 | localhost | 57638
-(3 rows)
-
-SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset
--- make sure we have the pg_dist_colocation record on the worker
-SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
--- Check that DDL commands are propagated to reference tables on workers
-\c - - - :master_port
-ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0;
-CREATE INDEX mx_ref_index ON mx_ref(col_1);
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col_1 | integer |
- col_2 | text |
- col_3 | numeric | default 0
-(3 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_ref_index'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_1 | integer | col_1
-(1 row)
-
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col_1 | integer |
- col_2 | text |
- col_3 | numeric | default 0
-(3 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_ref_index'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_1 | integer | col_1
-(1 row)
-
--- Check that metadata is cleaned up successfully upon drop table
-\c - - - :master_port
-DROP TABLE mx_ref;
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_ref_index'::regclass;
-ERROR: relation "mx_ref_index" does not exist
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_ref_index'::regclass;
-ERROR: relation "mx_ref_index" does not exist
-SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
----------------------------------------------------------------------
-(0 rows)
-
-SELECT * FROM pg_dist_shard_placement WHERE shardid=:ref_table_shardid;
- shardid | shardstate | shardlength | nodename | nodeport | placementid
----------------------------------------------------------------------
-(0 rows)
-
--- Check that master_add_node propagates the metadata about new placements of a reference table
-\c - - - :master_port
-SELECT groupid AS old_worker_2_group
- FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
-CREATE TABLE tmp_placement AS
- SELECT * FROM pg_dist_placement WHERE groupid = :old_worker_2_group;
-DELETE FROM pg_dist_placement
- WHERE groupid = :old_worker_2_group;
-SELECT master_remove_node('localhost', :worker_2_port);
-WARNING: could not find any shard placements for shardId 1310001
-WARNING: could not find any shard placements for shardId 1310023
-WARNING: could not find any shard placements for shardId 1310028
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE mx_ref (col_1 int, col_2 text);
-SELECT create_reference_table('mx_ref');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT shardid, nodename, nodeport
-FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE logicalrelid='mx_ref'::regclass;
- shardid | nodename | nodeport
----------------------------------------------------------------------
- 1310075 | localhost | 57636
- 1310075 | localhost | 57637
-(2 rows)
-
-\c - - - :worker_1_port
-SELECT shardid, nodename, nodeport
-FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE logicalrelid='mx_ref'::regclass;
- shardid | nodename | nodeport
----------------------------------------------------------------------
- 1310075 | localhost | 57636
- 1310075 | localhost | 57637
-(2 rows)
-
-\c - - - :master_port
-SET client_min_messages TO ERROR;
-SELECT master_add_node('localhost', :worker_2_port);
- master_add_node
----------------------------------------------------------------------
- 8
-(1 row)
-
-RESET client_min_messages;
-SELECT shardid, nodename, nodeport
-FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE logicalrelid='mx_ref'::regclass
-ORDER BY shardid, nodeport;
- shardid | nodename | nodeport
----------------------------------------------------------------------
- 1310075 | localhost | 57636
- 1310075 | localhost | 57637
-(2 rows)
-
-\c - - - :worker_1_port
-SELECT shardid, nodename, nodeport
-FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE logicalrelid='mx_ref'::regclass
-ORDER BY shardid, nodeport;
- shardid | nodename | nodeport
----------------------------------------------------------------------
- 1310075 | localhost | 57636
- 1310075 | localhost | 57637
-(2 rows)
-
--- Get the metadata back into a consistent state
-\c - - - :master_port
-INSERT INTO pg_dist_placement (SELECT * FROM tmp_placement);
-DROP TABLE tmp_placement;
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
-\c - - - :worker_1_port
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
--- Confirm that shouldhaveshards is 'true'
-\c - - - :master_port
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- t
-(1 row)
-
-\c - postgres - :worker_1_port
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- t
-(1 row)
-
--- Check that setting shouldhaveshards to false is correctly transferred to other mx nodes
-\c - - - :master_port
-SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false);
- master_set_node_property
----------------------------------------------------------------------
-
-(1 row)
-
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- f
-(1 row)
-
-\c - postgres - :worker_1_port
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- f
-(1 row)
-
--- Check that setting shouldhaveshards to true is correctly transferred to other mx nodes
-\c - postgres - :master_port
-SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', true);
- master_set_node_property
----------------------------------------------------------------------
-
-(1 row)
-
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- t
-(1 row)
-
-\c - postgres - :worker_1_port
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- t
-(1 row)
-
-\c - - - :master_port
---
--- Check that metadata commands error out if any nodes are out-of-sync
---
--- increase metadata_sync intervals to avoid metadata sync while we test
-ALTER SYSTEM SET citus.metadata_sync_interval TO 300000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 300000;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-SET citus.shard_replication_factor TO 1;
-CREATE TABLE dist_table_1(a int);
-SELECT create_distributed_table('dist_table_1', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-UPDATE pg_dist_node SET metadatasynced=false WHERE nodeport=:worker_1_port;
-SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport=:worker_1_port;
- hasmetadata | metadatasynced
----------------------------------------------------------------------
- t | f
-(1 row)
-
-CREATE TABLE dist_table_2(a int);
-SELECT create_distributed_table('dist_table_2', 'a');
-ERROR: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If the node is up, wait until metadata gets synced to it and try again.
-SELECT create_reference_table('dist_table_2');
-ERROR: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If the node is up, wait until metadata gets synced to it and try again.
-ALTER TABLE dist_table_1 ADD COLUMN b int;
-ERROR: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If the node is up, wait until metadata gets synced to it and try again.
-SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
-ERROR: disabling the first worker node in the metadata is not allowed
-DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations.
-HINT: You can force disabling node, SELECT citus_disable_node('localhost', 57637, synchronous:=true);
-CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)"
-PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM
-SELECT citus_disable_node_and_wait('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:xxxxx because it contains the only shard placement for shard xxxxx
-DETAIL: One of the table(s) that prevents the operation from completing successfully is mx_testing_schema.mx_test_table
-HINT: To proceed, either drop the tables or use the undistribute_table() function to convert them to local tables
-CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)"
-PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM
-SELECT master_remove_node('localhost', :worker_1_port);
-ERROR: cannot remove or disable the node localhost:xxxxx because it contains the only shard placement for shard xxxxx
-DETAIL: One of the table(s) that prevents the operation from completing successfully is mx_testing_schema.mx_test_table
-HINT: To proceed, either drop the tables or use the undistribute_table() function to convert them to local tables
-SELECT master_remove_node('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:xxxxx because it contains the only shard placement for shard xxxxx
-DETAIL: One of the table(s) that prevents the operation from completing successfully is mx_testing_schema.mx_test_table
-HINT: To proceed, either drop the tables or use the undistribute_table() function to convert them to local tables
--- master_update_node should succeed
-SELECT nodeid AS worker_2_nodeid FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-SELECT master_update_node(:worker_2_nodeid, 'localhost', 4444);
- master_update_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT master_update_node(:worker_2_nodeid, 'localhost', :worker_2_port);
- master_update_node
----------------------------------------------------------------------
-
-(1 row)
-
-ALTER SYSTEM SET citus.metadata_sync_interval TO DEFAULT;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO DEFAULT;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
--- make sure that all the nodes have valid metadata before moving forward
-SELECT wait_until_metadata_sync(60000);
- wait_until_metadata_sync
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT master_add_node('localhost', :worker_2_port);
- master_add_node
----------------------------------------------------------------------
- 8
-(1 row)
-
-CREATE SEQUENCE mx_test_sequence_0;
-CREATE SEQUENCE mx_test_sequence_1;
--- test create_distributed_table
-CREATE TABLE test_table (id int DEFAULT nextval('mx_test_sequence_0'));
-SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- shouldn't work since it's the partition column
-ALTER TABLE test_table ALTER COLUMN id SET DEFAULT nextval('mx_test_sequence_1');
-ERROR: cannot execute ALTER TABLE command involving partition column
--- test different plausible commands
-ALTER TABLE test_table ADD COLUMN id2 int DEFAULT nextval('mx_test_sequence_1');
-ALTER TABLE test_table ALTER COLUMN id2 DROP DEFAULT;
-ALTER TABLE test_table ALTER COLUMN id2 SET DEFAULT nextval('mx_test_sequence_1');
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
- ALTER SEQUENCE public.mx_test_sequence_0 OWNER TO postgres
- ALTER SEQUENCE public.mx_test_sequence_1 OWNER TO postgres
- ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
- ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_fk_constraint_2 FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) NOT VALID
- ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_table_1_col1_key UNIQUE (col1)
- ALTER TABLE mx_test_schema_1.mx_table_1 OWNER TO postgres
- ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
- ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1)
- ALTER TABLE mx_test_schema_2.mx_table_2 OWNER TO postgres
- ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
- ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
- ALTER TABLE public.dist_table_1 OWNER TO postgres
- ALTER TABLE public.mx_ref OWNER TO postgres
- ALTER TABLE public.test_table OWNER TO postgres
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
- CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 USING btree (col1)
- CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 USING btree (col2)
- CREATE SCHEMA IF NOT EXISTS mx_test_schema_1 AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS mx_test_schema_2 AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS mx_testing_schema_2 AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- CREATE TABLE mx_test_schema_1.mx_table_1 (col1 integer, col2 text, col3 integer) USING heap
- CREATE TABLE mx_test_schema_2.mx_table_2 (col1 integer, col2 text) USING heap
- CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
- CREATE TABLE public.dist_table_1 (a integer) USING heap
- CREATE TABLE public.mx_ref (col_1 integer, col_2 text) USING heap
- CREATE TABLE public.test_table (id integer DEFAULT worker_nextval('public.mx_test_sequence_0'::regclass), id2 integer DEFAULT worker_nextval('public.mx_test_sequence_1'::regclass)) USING heap
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- DROP TABLE IF EXISTS mx_test_schema_1.mx_table_1 CASCADE
- DROP TABLE IF EXISTS mx_test_schema_2.mx_table_2 CASCADE
- DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
- DROP TABLE IF EXISTS public.dist_table_1 CASCADE
- DROP TABLE IF EXISTS public.mx_ref CASCADE
- DROP TABLE IF EXISTS public.test_table CASCADE
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (5, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(6, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(8, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal.add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's')
- SELECT citus_internal.add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's')
- SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT citus_internal.add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's')
- SELECT citus_internal.add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't')
- SELECT citus_internal.add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's')
- SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_1.mx_table_1');
- SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_2.mx_table_2');
- SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency('public.dist_table_1');
- SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_ref');
- SELECT pg_catalog.worker_drop_sequence_dependency('public.test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_0 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_1 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SELECT worker_create_truncate_trigger('mx_test_schema_1.mx_table_1')
- SELECT worker_create_truncate_trigger('mx_test_schema_2.mx_table_2')
- SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
- SELECT worker_create_truncate_trigger('public.dist_table_1')
- SELECT worker_create_truncate_trigger('public.mx_ref')
- SELECT worker_create_truncate_trigger('public.test_table')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(118 rows)
-
--- shouldn't work since test_table is MX
-ALTER TABLE test_table ADD COLUMN id3 bigserial;
-ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers
--- shouldn't work since a DEFAULT nextval('..') subcommand must be the only subcommand
-ALTER TABLE test_table ADD COLUMN id4 int DEFAULT nextval('mx_test_sequence_1') CHECK (id4 > 0);
-ERROR: cannot execute ADD COLUMN .. DEFAULT nextval('..') command with other subcommands/constraints
-HINT: You can issue each subcommand separately
-ALTER TABLE test_table ADD COLUMN id4 int, ADD COLUMN id5 int DEFAULT nextval('mx_test_sequence_1');
-ERROR: cannot execute ADD COLUMN .. DEFAULT nextval('..') command with other subcommands/constraints
-HINT: You can issue each subcommand separately
-ALTER TABLE test_table ALTER COLUMN id1 SET DEFAULT nextval('mx_test_sequence_1'), ALTER COLUMN id2 DROP DEFAULT;
-ERROR: cannot execute ALTER COLUMN .. SET DEFAULT nextval('..') command with other subcommands
-HINT: You can issue each subcommand separately
-ALTER TABLE test_table ADD COLUMN id4 bigserial CHECK (id4 > 0);
-ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers
-\c - - - :worker_1_port
-\ds
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_test_sequence_0 | sequence | postgres
- public | mx_test_sequence_1 | sequence | postgres
- public | mx_test_table_col_3_seq | sequence | postgres
- public | sequence_rollback | sequence | postgres
- public | sequence_rollback(citus_backup_0) | sequence | postgres
- public | user_defined_seq | sequence | postgres
-(6 rows)
-
-\c - - - :master_port
-CREATE SEQUENCE local_sequence;
--- verify that DROP SEQUENCE will propagate the command to workers for
--- the distributed sequences mx_test_sequence_0 and mx_test_sequence_1
-DROP SEQUENCE mx_test_sequence_0, mx_test_sequence_1, local_sequence CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to default value for column id2 of table test_table
-drop cascades to default value for column id of table test_table
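--- local_sequence was never distributed, so its DROP is applied only on the
--- coordinator. An illustrative way to list the distributed sequences before
--- such a DROP (a sketch, assuming the pg_dist_object layout used above):
--- SELECT d.objid::regclass FROM pg_catalog.pg_dist_object d
--- JOIN pg_class c ON c.oid = d.objid AND c.relkind = 'S'
--- WHERE d.classid = 'pg_class'::regclass;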
-\c - - - :worker_1_port
-\ds
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_test_table_col_3_seq | sequence | postgres
- public | sequence_rollback | sequence | postgres
- public | sequence_rollback(citus_backup_0) | sequence | postgres
- public | user_defined_seq | sequence | postgres
-(4 rows)
-
-\c - - - :master_port
-DROP TABLE test_table CASCADE;
--- Cleanup
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-DROP TABLE mx_test_schema_2.mx_table_2 CASCADE;
-NOTICE: drop cascades to constraint mx_fk_constraint_2 on table mx_test_schema_1.mx_table_1
-DROP TABLE mx_test_schema_1.mx_table_1 CASCADE;
-DROP TABLE mx_testing_schema.mx_test_table;
-DROP TABLE mx_ref;
-DROP TABLE dist_table_1, dist_table_2;
-SET client_min_messages TO ERROR;
-SET citus.enable_ddl_propagation TO off; -- for enterprise
-CREATE USER non_super_metadata_user;
-SET citus.enable_ddl_propagation TO on;
-RESET client_min_messages;
-SELECT run_command_on_workers('CREATE USER non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
-(2 rows)
-
-GRANT EXECUTE ON FUNCTION start_metadata_sync_to_node(text,int) TO non_super_metadata_user;
-GRANT EXECUTE ON FUNCTION stop_metadata_sync_to_node(text,int,bool) TO non_super_metadata_user;
-GRANT ALL ON pg_dist_node TO non_super_metadata_user;
-GRANT ALL ON pg_dist_local_group TO non_super_metadata_user;
-GRANT ALL ON SCHEMA citus TO non_super_metadata_user;
-GRANT INSERT ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user;
-GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user;
-GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user;
-GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user;
-GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user;
-SELECT run_command_on_workers('GRANT ALL ON pg_dist_node TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT ALL ON pg_dist_local_group TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT ALL ON SCHEMA citus TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('ALTER SEQUENCE user_defined_seq OWNER TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,"ALTER SEQUENCE")
- (localhost,57638,t,"ALTER SEQUENCE")
-(2 rows)
-
-SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SET ROLE non_super_metadata_user;
--- user must be a superuser to stop/start metadata sync
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-ERROR: operation is not allowed
-HINT: Run the command with a superuser.
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-ERROR: operation is not allowed
-HINT: Run the command with a superuser.
-RESET ROLE;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
- start_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-RESET citus.shard_count;
-RESET citus.shard_replication_factor;
-ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id;
-ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id;
-ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
-ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
--- Activate them at the end
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
diff --git a/src/test/regress/expected/multi_mx_explain.out b/src/test/regress/expected/multi_mx_explain.out
index beb374d23..46b163712 100644
--- a/src/test/regress/expected/multi_mx_explain.out
+++ b/src/test/regress/expected/multi_mx_explain.out
@@ -86,6 +86,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Node Type": "Sort",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
"Plans": [
{
@@ -95,6 +96,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Group Key": ["remote_scan.l_quantity"],
"Plans": [
{
@@ -103,6 +105,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Distributed Query": {
"Job": {
"Task Count": 16,
@@ -119,6 +122,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Group Key": ["l_quantity"],
"Plans": [
{
@@ -127,7 +131,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "lineitem_mx_1220052",
- "Alias": "lineitem_mx"
+ "Alias": "lineitem_mx",
+ "Disabled": false
}
]
}
@@ -553,6 +558,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Plans": [
{
"Node Type": "Custom Scan",
@@ -560,6 +566,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Distributed Query": {
"Job": {
"Task Count": 16,
@@ -576,6 +583,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Plans": [
{
"Node Type": "Hash Join",
@@ -583,6 +591,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Async Capable": false,
"Join Type": "Inner",
+ "Disabled": false,
"Inner Unique": false,
"Hash Cond": "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)",
"Plans": [
@@ -592,6 +601,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Async Capable": false,
"Join Type": "Inner",
+ "Disabled": false,
"Inner Unique": false,
"Hash Cond": "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)",
"Plans": [
@@ -601,13 +611,15 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "supplier_mx_1220087",
- "Alias": "supplier_mx"
+ "Alias": "supplier_mx",
+ "Disabled": false
},
{
"Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Plans": [
{
"Node Type": "Seq Scan",
@@ -615,7 +627,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "lineitem_mx_1220052",
- "Alias": "lineitem_mx"
+ "Alias": "lineitem_mx",
+ "Disabled": false
}
]
}
@@ -626,6 +639,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parent Relationship": "Inner",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Plans": [
{
"Node Type": "Hash Join",
@@ -633,6 +647,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Async Capable": false,
"Join Type": "Inner",
+ "Disabled": false,
"Inner Unique": false,
"Hash Cond": "(customer_mx.c_custkey = orders_mx.o_custkey)",
"Plans": [
@@ -642,13 +657,15 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "customer_mx_1220084",
- "Alias": "customer_mx"
+ "Alias": "customer_mx",
+ "Disabled": false
},
{
"Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false,
"Async Capable": false,
+ "Disabled": false,
"Plans": [
{
"Node Type": "Seq Scan",
@@ -656,7 +673,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "orders_mx_1220068",
- "Alias": "orders_mx"
+ "Alias": "orders_mx",
+ "Disabled": false
}
]
}
diff --git a/src/test/regress/expected/multi_mx_explain_0.out b/src/test/regress/expected/multi_mx_explain_0.out
new file mode 100644
index 000000000..beb374d23
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_explain_0.out
@@ -0,0 +1,934 @@
+--
+-- MULTI_MX_EXPLAIN
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
+\c - - - :worker_1_port
+\c - - - :worker_2_port
+\c - - - :master_port
+\a\t
+SET citus.explain_distributed_queries TO on;
+VACUUM ANALYZE lineitem_mx;
+VACUUM ANALYZE orders_mx;
+VACUUM ANALYZE customer_mx;
+VACUUM ANALYZE supplier_mx;
+\c - - - :worker_1_port
+-- Function that runs EXPLAIN and returns its output as JSON
+SET citus.enable_metadata_sync TO OFF;
+CREATE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that runs EXPLAIN and returns its output as XML
+CREATE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+\c - - - :worker_2_port
+-- Function that runs EXPLAIN and returns its output as JSON
+SET citus.enable_metadata_sync TO OFF;
+CREATE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that runs EXPLAIN and returns its output as XML
+CREATE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+RESET citus.enable_metadata_sync;
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> HashAggregate
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+-- Test JSON format
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+[
+ {
+ "Plan": {
+ "Node Type": "Sort",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Partial Mode": "Simple",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Group Key": ["remote_scan.l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Custom Scan",
+ "Parent Relationship": "Outer",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Distributed Query": {
+ "Job": {
+ "Task Count": 16,
+ "Tasks Shown": "One of 16",
+ "Tasks": [
+ {
+ "Node": "host=localhost port=xxxxx dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Partial Mode": "Simple",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Group Key": ["l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "lineitem_mx_1220052",
+ "Alias": "lineitem_mx"
+ }
+ ]
+ }
+ }
+ ]
+
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+]
+-- Validate JSON format
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+\c - - - :worker_1_port
+-- Test XML format
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Sort</Node-Type>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Sort-Key>
+        <Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item>
+        <Item>remote_scan.l_quantity</Item>
+      </Sort-Key>
+      <Plans>
+        <Plan>
+          <Node-Type>Aggregate</Node-Type>
+          <Strategy>Hashed</Strategy>
+          <Partial-Mode>Simple</Partial-Mode>
+          <Parent-Relationship>Outer</Parent-Relationship>
+          <Parallel-Aware>false</Parallel-Aware>
+          <Async-Capable>false</Async-Capable>
+          <Group-Key>
+            <Item>remote_scan.l_quantity</Item>
+          </Group-Key>
+          <Plans>
+            <Plan>
+              <Node-Type>Custom Scan</Node-Type>
+              <Parent-Relationship>Outer</Parent-Relationship>
+              <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+              <Parallel-Aware>false</Parallel-Aware>
+              <Async-Capable>false</Async-Capable>
+              <Distributed-Query>
+                <Job>
+                  <Task-Count>16</Task-Count>
+                  <Tasks-Shown>One of 16</Tasks-Shown>
+                  <Tasks>
+                    <Task>
+                      <Node>host=localhost port=xxxxx dbname=regression</Node>
+                      <Remote-Plan>
+                        <explain xmlns="http://www.postgresql.org/2009/explain">
+                          <Query>
+                            <Plan>
+                              <Node-Type>Aggregate</Node-Type>
+                              <Strategy>Hashed</Strategy>
+                              <Partial-Mode>Simple</Partial-Mode>
+                              <Parallel-Aware>false</Parallel-Aware>
+                              <Async-Capable>false</Async-Capable>
+                              <Group-Key>
+                                <Item>l_quantity</Item>
+                              </Group-Key>
+                              <Plans>
+                                <Plan>
+                                  <Node-Type>Seq Scan</Node-Type>
+                                  <Parent-Relationship>Outer</Parent-Relationship>
+                                  <Parallel-Aware>false</Parallel-Aware>
+                                  <Async-Capable>false</Async-Capable>
+                                  <Relation-Name>lineitem_mx_1220052</Relation-Name>
+                                  <Alias>lineitem_mx</Alias>
+                                </Plan>
+                              </Plans>
+                            </Plan>
+                          </Query>
+                        </explain>
+                      </Remote-Plan>
+                    </Task>
+                  </Tasks>
+                </Job>
+              </Distributed-Query>
+            </Plan>
+          </Plans>
+        </Plan>
+      </Plans>
+    </Plan>
+  </Query>
+</explain>
+-- Validate XML format
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+-- Test YAML format
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+- Plan:
+ Node Type: "Sort"
+ Parallel Aware: false
+ Async Capable: false
+ Sort Key:
+ - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))"
+ - "remote_scan.l_quantity"
+ Plans:
+ - Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Partial Mode: "Simple"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Group Key:
+ - "remote_scan.l_quantity"
+ Plans:
+ - Node Type: "Custom Scan"
+ Parent Relationship: "Outer"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Distributed Query:
+ Job:
+ Task Count: 16
+ Tasks Shown: "One of 16"
+ Tasks:
+ - Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Partial Mode: "Simple"
+ Parallel Aware: false
+ Async Capable: false
+ Group Key:
+ - "l_quantity"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "lineitem_mx_1220052"
+ Alias: "lineitem_mx"
+
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> HashAggregate
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+\c - - - :worker_2_port
+-- Test verbose
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx;
+Aggregate
+ Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM public.lineitem_mx_1220052 lineitem_mx WHERE true
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
+ -> Seq Scan on public.lineitem_mx_1220052 lineitem_mx
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
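The verbose plan above also documents how `avg()` is distributed: the aggregate is not shippable as-is, so each worker returns `sum(l_quantity)` and `count(l_quantity)` and the coordinator divides the combined partials. A self-contained illustration of that combine step (the per-worker numbers are made up):

```sql
-- Each row stands in for one worker's partial aggregate.
WITH per_worker(worker_sum, worker_count) AS (
    VALUES (10.0::numeric, 4::bigint),
           (26.0::numeric, 8::bigint)
)
SELECT sum(worker_sum) / sum(worker_count) AS global_avg  -- 36.0 / 12 = 3.0
FROM per_worker;
```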
+-- Test join
+EXPLAIN (COSTS FALSE)
+ SELECT * FROM lineitem_mx
+ JOIN orders_mx ON l_orderkey = o_orderkey AND l_quantity < 5.0
+ ORDER BY l_quantity LIMIT 10;
+Limit
+ -> Sort
+ Sort Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ -> Sort
+ Sort Key: lineitem_mx.l_quantity
+ -> Hash Join
+ Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey)
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+ Filter: (l_quantity < 5.0)
+ -> Hash
+ -> Seq Scan on orders_mx_1220068 orders_mx
+-- Test insert
+EXPLAIN (COSTS FALSE)
+ INSERT INTO lineitem_mx VALUES(1,0);
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on lineitem_mx_1220052
+ -> Result
+-- Test update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_mx
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_mx_1220052 lineitem_mx
+ -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- Test delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem_mx
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_mx_1220052 lineitem_mx
+ -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- make the outputs more consistent
+VACUUM ANALYZE lineitem_mx;
+VACUUM ANALYZE orders_mx;
+VACUUM ANALYZE customer_mx;
+VACUUM ANALYZE supplier_mx;
+-- Test single-shard SELECT
+EXPLAIN (COSTS FALSE)
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5;
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Index Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx
+ Index Cond: (l_orderkey = 5)
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
+t
+-- Test CREATE TABLE ... AS
+EXPLAIN (COSTS FALSE)
+ CREATE TABLE explain_result AS
+ SELECT * FROM lineitem_mx;
+Custom Scan (Citus Adaptive)
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+-- Test all tasks output
+SET citus.explain_all_tasks TO on;
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 16
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220053 on lineitem_mx_1220053 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220054 on lineitem_mx_1220054 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220056 on lineitem_mx_1220056 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220057 on lineitem_mx_1220057 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220058 on lineitem_mx_1220058 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220059 on lineitem_mx_1220059 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220060 on lineitem_mx_1220060 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220061 on lineitem_mx_1220061 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220062 on lineitem_mx_1220062 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220063 on lineitem_mx_1220063 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220064 on lineitem_mx_1220064 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220065 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220066 on lineitem_mx_1220066 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220067 on lineitem_mx_1220067 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+SELECT true AS valid FROM explain_xml($$
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
+t
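`citus.explain_all_tasks` is a regular GUC, so when the full task list is only wanted for a single statement it can be scoped with `SET LOCAL` instead of being flipped globally; a sketch:

```sql
BEGIN;
SET LOCAL citus.explain_all_tasks TO on;
EXPLAIN (COSTS FALSE)
  SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
COMMIT;  -- the setting reverts with the transaction
```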
+-- Test single task output with explain_all_tasks off
+SET citus.explain_all_tasks TO off;
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
+ Index Cond: (l_orderkey > 9030)
+-- Test re-partition join
+EXPLAIN (COSTS FALSE)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Hash Join
+ Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey)
+ -> Hash Join
+ Hash Cond: (supplier_mx.s_suppkey = lineitem_mx.l_suppkey)
+ -> Seq Scan on supplier_mx_1220087 supplier_mx
+ -> Hash
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+ -> Hash
+ -> Hash Join
+ Hash Cond: (customer_mx.c_custkey = orders_mx.o_custkey)
+ -> Seq Scan on customer_mx_1220084 customer_mx
+ -> Hash
+ -> Seq Scan on orders_mx_1220068 orders_mx
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+[
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Plain",
+ "Partial Mode": "Simple",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Plans": [
+ {
+ "Node Type": "Custom Scan",
+ "Parent Relationship": "Outer",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Distributed Query": {
+ "Job": {
+ "Task Count": 16,
+ "Tasks Shown": "One of 16",
+ "Tasks": [
+ {
+ "Node": "host=localhost port=xxxxx dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Plain",
+ "Partial Mode": "Simple",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Plans": [
+ {
+ "Node Type": "Hash Join",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Join Type": "Inner",
+ "Inner Unique": false,
+ "Hash Cond": "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)",
+ "Plans": [
+ {
+ "Node Type": "Hash Join",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Join Type": "Inner",
+ "Inner Unique": false,
+ "Hash Cond": "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)",
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "supplier_mx_1220087",
+ "Alias": "supplier_mx"
+ },
+ {
+ "Node Type": "Hash",
+ "Parent Relationship": "Inner",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "lineitem_mx_1220052",
+ "Alias": "lineitem_mx"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Node Type": "Hash",
+ "Parent Relationship": "Inner",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Plans": [
+ {
+ "Node Type": "Hash Join",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Join Type": "Inner",
+ "Inner Unique": false,
+ "Hash Cond": "(customer_mx.c_custkey = orders_mx.o_custkey)",
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "customer_mx_1220084",
+ "Alias": "customer_mx"
+ },
+ {
+ "Node Type": "Hash",
+ "Parent Relationship": "Inner",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "orders_mx_1220068",
+ "Alias": "orders_mx"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ ]
+
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ }
+]
+SELECT true AS valid FROM explain_json($$
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Aggregate</Node-Type>
+      <Strategy>Plain</Strategy>
+      <Partial-Mode>Simple</Partial-Mode>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Plans>
+        <Plan>
+          <Node-Type>Custom Scan</Node-Type>
+          <Parent-Relationship>Outer</Parent-Relationship>
+          <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+          <Parallel-Aware>false</Parallel-Aware>
+          <Async-Capable>false</Async-Capable>
+          <Distributed-Query>
+            <Job>
+              <Task-Count>16</Task-Count>
+              <Tasks-Shown>One of 16</Tasks-Shown>
+              <Tasks>
+                <Task>
+                  <Node>host=localhost port=xxxxx dbname=regression</Node>
+                  <Remote-Plan>
+                    <explain xmlns="http://www.postgresql.org/2009/explain">
+                      <Query>
+                        <Plan>
+                          <Node-Type>Aggregate</Node-Type>
+                          <Strategy>Plain</Strategy>
+                          <Partial-Mode>Simple</Partial-Mode>
+                          <Parallel-Aware>false</Parallel-Aware>
+                          <Async-Capable>false</Async-Capable>
+                          <Plans>
+                            <Plan>
+                              <Node-Type>Hash Join</Node-Type>
+                              <Parent-Relationship>Outer</Parent-Relationship>
+                              <Parallel-Aware>false</Parallel-Aware>
+                              <Async-Capable>false</Async-Capable>
+                              <Join-Type>Inner</Join-Type>
+                              <Inner-Unique>false</Inner-Unique>
+                              <Hash-Cond>(lineitem_mx.l_orderkey = orders_mx.o_orderkey)</Hash-Cond>
+                              <Plans>
+                                <Plan>
+                                  <Node-Type>Hash Join</Node-Type>
+                                  <Parent-Relationship>Outer</Parent-Relationship>
+                                  <Parallel-Aware>false</Parallel-Aware>
+                                  <Async-Capable>false</Async-Capable>
+                                  <Join-Type>Inner</Join-Type>
+                                  <Inner-Unique>false</Inner-Unique>
+                                  <Hash-Cond>(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)</Hash-Cond>
+                                  <Plans>
+                                    <Plan>
+                                      <Node-Type>Seq Scan</Node-Type>
+                                      <Parent-Relationship>Outer</Parent-Relationship>
+                                      <Parallel-Aware>false</Parallel-Aware>
+                                      <Async-Capable>false</Async-Capable>
+                                      <Relation-Name>supplier_mx_1220087</Relation-Name>
+                                      <Alias>supplier_mx</Alias>
+                                    </Plan>
+                                    <Plan>
+                                      <Node-Type>Hash</Node-Type>
+                                      <Parent-Relationship>Inner</Parent-Relationship>
+                                      <Parallel-Aware>false</Parallel-Aware>
+                                      <Async-Capable>false</Async-Capable>
+                                      <Plans>
+                                        <Plan>
+                                          <Node-Type>Seq Scan</Node-Type>
+                                          <Parent-Relationship>Outer</Parent-Relationship>
+                                          <Parallel-Aware>false</Parallel-Aware>
+                                          <Async-Capable>false</Async-Capable>
+                                          <Relation-Name>lineitem_mx_1220052</Relation-Name>
+                                          <Alias>lineitem_mx</Alias>
+                                        </Plan>
+                                      </Plans>
+                                    </Plan>
+                                  </Plans>
+                                </Plan>
+                                <Plan>
+                                  <Node-Type>Hash</Node-Type>
+                                  <Parent-Relationship>Inner</Parent-Relationship>
+                                  <Parallel-Aware>false</Parallel-Aware>
+                                  <Async-Capable>false</Async-Capable>
+                                  <Plans>
+                                    <Plan>
+                                      <Node-Type>Hash Join</Node-Type>
+                                      <Parent-Relationship>Outer</Parent-Relationship>
+                                      <Parallel-Aware>false</Parallel-Aware>
+                                      <Async-Capable>false</Async-Capable>
+                                      <Join-Type>Inner</Join-Type>
+                                      <Inner-Unique>false</Inner-Unique>
+                                      <Hash-Cond>(customer_mx.c_custkey = orders_mx.o_custkey)</Hash-Cond>
+                                      <Plans>
+                                        <Plan>
+                                          <Node-Type>Seq Scan</Node-Type>
+                                          <Parent-Relationship>Outer</Parent-Relationship>
+                                          <Parallel-Aware>false</Parallel-Aware>
+                                          <Async-Capable>false</Async-Capable>
+                                          <Relation-Name>customer_mx_1220084</Relation-Name>
+                                          <Alias>customer_mx</Alias>
+                                        </Plan>
+                                        <Plan>
+                                          <Node-Type>Hash</Node-Type>
+                                          <Parent-Relationship>Inner</Parent-Relationship>
+                                          <Parallel-Aware>false</Parallel-Aware>
+                                          <Async-Capable>false</Async-Capable>
+                                          <Plans>
+                                            <Plan>
+                                              <Node-Type>Seq Scan</Node-Type>
+                                              <Parent-Relationship>Outer</Parent-Relationship>
+                                              <Parallel-Aware>false</Parallel-Aware>
+                                              <Async-Capable>false</Async-Capable>
+                                              <Relation-Name>orders_mx_1220068</Relation-Name>
+                                              <Alias>orders_mx</Alias>
+                                            </Plan>
+                                          </Plans>
+                                        </Plan>
+                                      </Plans>
+                                    </Plan>
+                                  </Plans>
+                                </Plan>
+                              </Plans>
+                            </Plan>
+                          </Plans>
+                        </Plan>
+                      </Query>
+                    </explain>
+                  </Remote-Plan>
+                </Task>
+              </Tasks>
+            </Job>
+          </Distributed-Query>
+        </Plan>
+      </Plans>
+    </Plan>
+  </Query>
+</explain>
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+- Plan:
+ Node Type: "Aggregate"
+ Strategy: "Plain"
+ Partial Mode: "Simple"
+ Parallel Aware: false
+ Async Capable: false
+ Plans:
+ - Node Type: "Custom Scan"
+ Parent Relationship: "Outer"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Distributed Query:
+ Job:
+ Task Count: 16
+ Tasks Shown: "One of 16"
+ Tasks:
+ - Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "Aggregate"
+ Strategy: "Plain"
+ Partial Mode: "Simple"
+ Parallel Aware: false
+ Async Capable: false
+ Plans:
+ - Node Type: "Hash Join"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Join Type: "Inner"
+ Inner Unique: false
+ Hash Cond: "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)"
+ Plans:
+ - Node Type: "Hash Join"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Join Type: "Inner"
+ Inner Unique: false
+ Hash Cond: "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "supplier_mx_1220087"
+ Alias: "supplier_mx"
+ - Node Type: "Hash"
+ Parent Relationship: "Inner"
+ Parallel Aware: false
+ Async Capable: false
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "lineitem_mx_1220052"
+ Alias: "lineitem_mx"
+ - Node Type: "Hash"
+ Parent Relationship: "Inner"
+ Parallel Aware: false
+ Async Capable: false
+ Plans:
+ - Node Type: "Hash Join"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Join Type: "Inner"
+ Inner Unique: false
+ Hash Cond: "(customer_mx.c_custkey = orders_mx.o_custkey)"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "customer_mx_1220084"
+ Alias: "customer_mx"
+ - Node Type: "Hash"
+ Parent Relationship: "Inner"
+ Parallel Aware: false
+ Async Capable: false
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "orders_mx_1220068"
+ Alias: "orders_mx"
+
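All four formats nest the worker-side plan under a "Remote Plan" key, so the JSON variant can be post-processed with the usual jsonb operators. A runnable sketch against a pruned stand-in for the output shape shown above:

```sql
-- Navigate top plan -> Custom Scan -> Distributed Query -> Job.
SELECT ('[{"Plan": {"Plans": [{"Distributed Query":
          {"Job": {"Task Count": 16}}}]}}]'::jsonb)
       -> 0 -> 'Plan' -> 'Plans' -> 0
       -> 'Distributed Query' -> 'Job' -> 'Task Count' AS task_count;  -- 16
```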
diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out
deleted file mode 100644
index 62271f9a7..000000000
--- a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out
+++ /dev/null
@@ -1,167 +0,0 @@
---
--- MULTI_MX_INSERT_SELECT_REPARTITION
---
--- Test behaviour of repartitioned INSERT ... SELECT in MX setup
---
--- This test file has an alternative output because of the change in the
--- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
--- The alternative output can be deleted when we drop support for PG14
---
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
- server_version_ge_15
----------------------------------------------------------------------
- f
-(1 row)
-
-CREATE SCHEMA multi_mx_insert_select_repartition;
-SET search_path TO multi_mx_insert_select_repartition;
-SET citus.next_shard_id TO 4213581;
-SET citus.shard_replication_factor TO 1;
-SET citus.shard_count TO 4;
-CREATE TABLE source_table(a int, b int);
-SELECT create_distributed_table('source_table', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO source_table SELECT floor(i/4), i*i FROM generate_series(1, 20) i;
-SET citus.shard_count TO 3;
-CREATE TABLE target_table(a int, b int);
-SELECT create_distributed_table('target_table', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE FUNCTION square(int) RETURNS INT
- AS $$ SELECT $1 * $1 $$
- LANGUAGE SQL;
-select create_distributed_function('square(int)');
-NOTICE: procedure multi_mx_insert_select_repartition.square is already distributed
-DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands
- create_distributed_function
----------------------------------------------------------------------
-
-(1 row)
-
-select public.colocate_proc_with_table('square', 'source_table'::regclass, 0);
- colocate_proc_with_table
----------------------------------------------------------------------
-
-(1 row)
-
--- Test along with function delegation
--- function delegation only happens for "SELECT f()", and we don't use
--- repartitioned INSERT/SELECT when task count is 1, so the following
--- should go via coordinator
-EXPLAIN (costs off) INSERT INTO target_table(a) SELECT square(4);
- QUERY PLAN
----------------------------------------------------------------------
- Custom Scan (Citus INSERT ... SELECT)
- INSERT/SELECT method: pull to coordinator
- -> Result
-(3 rows)
-
-INSERT INTO target_table(a) SELECT square(4);
-SELECT * FROM target_table;
- a | b
----------------------------------------------------------------------
- 16 |
-(1 row)
-
-TRUNCATE target_table;
---
--- Test repartitioned INSERT/SELECT from MX worker
---
-\c - - - :worker_1_port
-SET search_path TO multi_mx_insert_select_repartition;
-EXPLAIN (costs off) INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a;
- QUERY PLAN
----------------------------------------------------------------------
- Custom Scan (Citus INSERT ... SELECT)
- INSERT/SELECT method: repartition
- -> Custom Scan (Citus Adaptive)
- Task Count: 4
- Tasks Shown: One of 4
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> HashAggregate
- Group Key: a
- -> Seq Scan on source_table_4213581 source_table
-(10 rows)
-
-INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a;
-SET citus.log_local_commands to on;
--- INSERT .. SELECT via repartitioning with local execution
-BEGIN;
- select count(*) from source_table WHERE a = 1;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE (a OPERATOR(pg_catalog.=) 1)
- count
----------------------------------------------------------------------
- 4
-(1 row)
-
- -- we omit the "SELECT bytes FROM fetch_intermediate_results..." line since it is flaky
- SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%';
- insert into target_table SELECT a*2 FROM source_table RETURNING a;
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0
-NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a
-NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a
- a
----------------------------------------------------------------------
- 0
- 0
- 0
- 2
- 2
- 2
- 2
- 4
- 4
- 4
- 4
- 6
- 6
- 6
- 6
- 8
- 8
- 8
- 8
- 10
-(20 rows)
-
-ROLLBACK;
-BEGIN;
- select count(*) from source_table WHERE a = 1;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE (a OPERATOR(pg_catalog.=) 1)
- count
----------------------------------------------------------------------
- 4
-(1 row)
-
- insert into target_table SELECT a FROM source_table LIMIT 10;
-NOTICE: executing the command locally: SELECT a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true LIMIT '10'::bigint
-NOTICE: executing the command locally: SELECT a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true LIMIT '10'::bigint
-NOTICE: executing the copy locally for shard xxxxx
-ROLLBACK;
-\c - - - :master_port
-SET search_path TO multi_mx_insert_select_repartition;
-SELECT * FROM target_table ORDER BY a;
- a | b
----------------------------------------------------------------------
- 0 | 9
- 1 | 49
- 2 | 121
- 3 | 225
- 4 | 361
- 5 | 400
-(6 rows)
-
-RESET client_min_messages;
-\set VERBOSITY terse
-DROP SCHEMA multi_mx_insert_select_repartition CASCADE;
-NOTICE: drop cascades to 3 other objects
diff --git a/src/test/regress/expected/pg17.out b/src/test/regress/expected/pg17.out
index b93e790b0..3fd185045 100644
--- a/src/test/regress/expected/pg17.out
+++ b/src/test/regress/expected/pg17.out
@@ -2149,81 +2149,61 @@ CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR o
Planning Time: N.N
(1 row)
-select public.explain_filter('explain (memory, analyze, buffers false, format json) select * from int8_tbl i8');
-NOTICE: issuing SELECT * FROM worker_save_query_explain_analyze('SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true', '{"verbose": false, "costs": true, "buffers": false, "wal": false, "memory": true, "serialize": "none", "timing": true, "summary": true, "format": "JSON"}') AS (field_0 bigint, field_1 bigint)
+select public.explain_filter('explain (memory, analyze, buffers false, format yaml) select * from int8_tbl i8');
+NOTICE: issuing SELECT * FROM worker_save_query_explain_analyze('SELECT q1, q2 FROM pg17.int8_tbl_12242024 i8 WHERE true', '{"verbose": false, "costs": true, "buffers": false, "wal": false, "memory": true, "serialize": "none", "timing": true, "summary": true, "format": "YAML"}') AS (field_0 bigint, field_1 bigint)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
CONTEXT: PL/pgSQL function public.explain_filter(text,boolean) line XX at FOR over EXECUTE statement
explain_filter
---------------------------------------------------------------------
- [ +
- { +
- "Plan": { +
- "Node Type": "Custom Scan", +
- "Custom Plan Provider": "Citus Adaptive", +
- "Parallel Aware": false, +
- "Async Capable": false, +
- "Startup Cost": N.N, +
- "Total Cost": N.N, +
- "Plan Rows": N, +
- "Plan Width": N, +
- "Actual Startup Time": N.N, +
- "Actual Total Time": N.N, +
- "Actual Rows": N, +
- "Actual Loops": N, +
- "Distributed Query": { +
- "Job": { +
- "Task Count": N, +
- "Tuple data received from nodes": "N bytes", +
- "Tasks Shown": "One of N", +
- "Tasks": [ +
- { +
- "Tuple data received from node": "N bytes", +
- "Node": "host=localhost port=N dbname=regression",+
- "Remote Plan": [ +
- [ +
- { +
- "Plan": { +
- "Node Type": "Seq Scan", +
- "Parallel Aware": false, +
- "Async Capable": false, +
- "Relation Name": "int8_tbl_12242024", +
- "Alias": "i8", +
- "Startup Cost": N.N, +
- "Total Cost": N.N, +
- "Plan Rows": N, +
- "Plan Width": N, +
- "Actual Startup Time": N.N, +
- "Actual Total Time": N.N, +
- "Actual Rows": N, +
- "Actual Loops": N +
- }, +
- "Planning": { +
- "Memory Used": N, +
- "Memory Allocated": N +
- }, +
- "Planning Time": N.N, +
- "Triggers": [ +
- ], +
- "Execution Time": N.N +
- } +
- ] +
- +
- ] +
- } +
- ] +
- } +
- } +
- }, +
- "Planning": { +
- "Memory Used": N, +
- "Memory Allocated": N +
- }, +
- "Planning Time": N.N, +
- "Triggers": [ +
- ], +
- "Execution Time": N.N +
- } +
- ]
+ - Plan: +
+ Node Type: "Custom Scan" +
+ Custom Plan Provider: "Citus Adaptive" +
+ Parallel Aware: false +
+ Async Capable: false +
+ Startup Cost: N.N +
+ Total Cost: N.N +
+ Plan Rows: N +
+ Plan Width: N +
+ Actual Startup Time: N.N +
+ Actual Total Time: N.N +
+ Actual Rows: N +
+ Actual Loops: N +
+ Distributed Query: +
+ Job: +
+ Task Count: N +
+ Tuple data received from nodes: "N bytes" +
+ Tasks Shown: "One of N" +
+ Tasks: +
+ - Tuple data received from node: "N bytes" +
+ Node: "host=localhost port=N dbname=regression"+
+ Remote Plan: +
+ - Plan: +
+ Node Type: "Seq Scan" +
+ Parallel Aware: false +
+ Async Capable: false +
+ Relation Name: "int8_tbl_12242024" +
+ Alias: "i8" +
+ Startup Cost: N.N +
+ Total Cost: N.N +
+ Plan Rows: N +
+ Plan Width: N +
+ Actual Startup Time: N.N +
+ Actual Total Time: N.N +
+ Actual Rows: N +
+ Actual Loops: N +
+ Planning: +
+ Memory Used: N +
+ Memory Allocated: N +
+ Planning Time: N.N +
+ Triggers: +
+ Execution Time: N.N +
+ +
+ Planning: +
+ Memory Used: N +
+ Memory Allocated: N +
+ Planning Time: N.N +
+ Triggers: +
+ Execution Time: N.N
(1 row)
prepare int8_query as select * from int8_tbl i8;
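The hunk only changes the requested output format from JSON to YAML; PG17's MEMORY option composes with the other EXPLAIN flags in any format. The plain form of the statement that `explain_filter` wraps above:

```sql
EXPLAIN (MEMORY, ANALYZE, BUFFERS FALSE, FORMAT YAML)
SELECT * FROM int8_tbl i8;
```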
diff --git a/src/test/regress/expected/pg18.out b/src/test/regress/expected/pg18.out
index 174da2457..e303104b2 100644
--- a/src/test/regress/expected/pg18.out
+++ b/src/test/regress/expected/pg18.out
@@ -1070,6 +1070,1385 @@ CREATE MATERIALIZED VIEW copytest_mv AS
SELECT create_distributed_table('copytest_mv', 'id');
ERROR: copytest_mv is not a regular, foreign or partitioned table
-- After that, any command on the materialized view is outside Citus support.
+-- PG18: verify publish_generated_columns is preserved for distributed tables
+-- https://github.com/postgres/postgres/commit/7054186c4
+\c - - - :master_port
+CREATE SCHEMA pg18_publication;
+SET search_path TO pg18_publication;
+-- table with a stored generated column
+CREATE TABLE gen_pub_tab (
+ id int primary key,
+ a int,
+ b int GENERATED ALWAYS AS (a * 10) STORED
+);
+-- make it distributed so CREATE PUBLICATION goes through Citus metadata/DDL path
+SELECT create_distributed_table('gen_pub_tab', 'id', colocate_with := 'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- publication using the new PG18 option: stored
+CREATE PUBLICATION pub_gen_cols_stored
+ FOR TABLE gen_pub_tab
+ WITH (publish = 'insert, update', publish_generated_columns = stored);
+-- second publication explicitly using "none" for completeness
+CREATE PUBLICATION pub_gen_cols_none
+ FOR TABLE gen_pub_tab
+ WITH (publish = 'insert, update', publish_generated_columns = none);
+-- On coordinator: pubgencols must be 's' and 'n' respectively
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+ pubname | pubgencols
+---------------------------------------------------------------------
+ pub_gen_cols_none | n
+ pub_gen_cols_stored | s
+(2 rows)
+
+-- On worker 1: both publications must exist and keep pubgencols in sync
+\c - - - :worker_1_port
+SET search_path TO pg18_publication;
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+ pubname | pubgencols
+---------------------------------------------------------------------
+ pub_gen_cols_none | n
+ pub_gen_cols_stored | s
+(2 rows)
+
+-- On worker 2: same check
+\c - - - :worker_2_port
+SET search_path TO pg18_publication;
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+ pubname | pubgencols
+---------------------------------------------------------------------
+ pub_gen_cols_none | n
+ pub_gen_cols_stored | s
+(2 rows)
+
+-- Now verify ALTER PUBLICATION .. SET (publish_generated_columns = none)
+-- propagates to workers as well.
+\c - - - :master_port
+SET search_path TO pg18_publication;
+ALTER PUBLICATION pub_gen_cols_stored
+ SET (publish_generated_columns = none);
+-- coordinator: both publications should now have pubgencols = 'n'
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+ pubname | pubgencols
+---------------------------------------------------------------------
+ pub_gen_cols_none | n
+ pub_gen_cols_stored | n
+(2 rows)
+
+-- worker 1: pubgencols must match coordinator
+\c - - - :worker_1_port
+SET search_path TO pg18_publication;
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+ pubname | pubgencols
+---------------------------------------------------------------------
+ pub_gen_cols_none | n
+ pub_gen_cols_stored | n
+(2 rows)
+
+-- worker 2: same check
+\c - - - :worker_2_port
+SET search_path TO pg18_publication;
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+ pubname | pubgencols
+---------------------------------------------------------------------
+ pub_gen_cols_none | n
+ pub_gen_cols_stored | n
+(2 rows)
+
+-- Column list precedence test: Citus must preserve both prattrs and pubgencols
+\c - - - :master_port
+SET search_path TO pg18_publication;
+-- Case 1: column list explicitly includes the generated column, flag = none
+CREATE PUBLICATION pub_gen_cols_list_includes_b
+ FOR TABLE gen_pub_tab (id, a, b)
+ WITH (publish_generated_columns = none);
+-- Case 2: column list excludes the generated column, flag = stored
+CREATE PUBLICATION pub_gen_cols_list_excludes_b
+ FOR TABLE gen_pub_tab (id, a)
+ WITH (publish_generated_columns = stored);
+-- Helper: show pubname, pubgencols, and column list (prattrs) for gen_pub_tab
+SELECT p.pubname,
+ p.pubgencols,
+ r.prattrs
+FROM pg_publication p
+JOIN pg_publication_rel r ON p.oid = r.prpubid
+JOIN pg_class c ON c.oid = r.prrelid
+WHERE p.pubname IN ('pub_gen_cols_list_includes_b',
+ 'pub_gen_cols_list_excludes_b')
+ AND c.relname = 'gen_pub_tab'
+ORDER BY p.pubname;
+ pubname | pubgencols | prattrs
+---------------------------------------------------------------------
+ pub_gen_cols_list_excludes_b | s | 1 2
+ pub_gen_cols_list_includes_b | n | 1 2 3
+(2 rows)
+
+-- worker 1: must see the same pubgencols + prattrs
+\c - - - :worker_1_port
+SET search_path TO pg18_publication;
+SELECT p.pubname,
+ p.pubgencols,
+ r.prattrs
+FROM pg_publication p
+JOIN pg_publication_rel r ON p.oid = r.prpubid
+JOIN pg_class c ON c.oid = r.prrelid
+WHERE p.pubname IN ('pub_gen_cols_list_includes_b',
+ 'pub_gen_cols_list_excludes_b')
+ AND c.relname = 'gen_pub_tab'
+ORDER BY p.pubname;
+ pubname | pubgencols | prattrs
+---------------------------------------------------------------------
+ pub_gen_cols_list_excludes_b | s | 1 2
+ pub_gen_cols_list_includes_b | n | 1 2 3
+(2 rows)
+
+-- worker 2: same check
+\c - - - :worker_2_port
+SET search_path TO pg18_publication;
+SELECT p.pubname,
+ p.pubgencols,
+ r.prattrs
+FROM pg_publication p
+JOIN pg_publication_rel r ON p.oid = r.prpubid
+JOIN pg_class c ON c.oid = r.prrelid
+WHERE p.pubname IN ('pub_gen_cols_list_includes_b',
+ 'pub_gen_cols_list_excludes_b')
+ AND c.relname = 'gen_pub_tab'
+ORDER BY p.pubname;
+ pubname | pubgencols | prattrs
+---------------------------------------------------------------------
+ pub_gen_cols_list_excludes_b | s | 1 2
+ pub_gen_cols_list_includes_b | n | 1 2 3
+(2 rows)
+
+-- back to coordinator for subsequent tests / cleanup
+\c - - - :master_port
+SET search_path TO pg18_publication;
+DROP PUBLICATION pub_gen_cols_stored;
+DROP PUBLICATION pub_gen_cols_none;
+DROP PUBLICATION pub_gen_cols_list_includes_b;
+DROP PUBLICATION pub_gen_cols_list_excludes_b;
+DROP SCHEMA pg18_publication CASCADE;
+NOTICE: drop cascades to table gen_pub_tab
+SET search_path TO pg18_nn;
+-- END: PG18: verify publish_generated_columns is preserved for distributed tables
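The checks above compare the raw catalog codes directly. A more readable variant, assuming PG18's single-character encoding in `pg_publication.pubgencols` ('n' = none, 's' = stored):

```sql
SELECT pubname,
       CASE pubgencols
           WHEN 'n' THEN 'none'
           WHEN 's' THEN 'stored'
           ELSE pubgencols::text  -- defensive; no other codes expected
       END AS publish_generated_columns
FROM pg_publication
ORDER BY pubname;
```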
+-- PG18 Feature: FOREIGN KEY constraints can be specified as NOT ENFORCED
+-- PG18 commit: https://github.com/postgres/postgres/commit/eec0040c4
+CREATE TABLE customers(
+ customer_id INT GENERATED ALWAYS AS IDENTITY,
+ customer_name VARCHAR(255) NOT NULL,
+ PRIMARY KEY(customer_id)
+);
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('customers', 'customer_id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE TABLE contacts(
+ contact_id INT GENERATED ALWAYS AS IDENTITY,
+ customer_id INT,
+ contact_name VARCHAR(255) NOT NULL,
+ phone VARCHAR(15),
+ email VARCHAR(100),
+ CONSTRAINT fk_customer
+ FOREIGN KEY(customer_id)
+ REFERENCES customers(customer_id)
+ ON DELETE CASCADE NOT ENFORCED
+);
+-- The foreign key constraint is propagated to worker nodes.
+SELECT create_distributed_table('contacts', 'customer_id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
+WHERE conrelid = 'contacts'::regclass AND conname = 'fk_customer';
+ Definition
+---------------------------------------------------------------------
+ FOREIGN KEY (customer_id) REFERENCES customers(customer_id) ON DELETE CASCADE NOT ENFORCED
+(1 row)
+
+INSERT INTO customers(customer_name)
+VALUES('BlueBird Inc'),
+ ('Dolphin LLC');
+INSERT INTO contacts(customer_id, contact_name, phone, email)
+VALUES(1,'John Doe','(408)-111-1234','john.doe@example.com'),
+ (1,'Jane Doe','(408)-111-1235','jane.doe@example.com'),
+ (2,'David Wright','(408)-222-1234','david.wright@example.com');
+DELETE FROM customers WHERE customer_name = 'Dolphin LLC';
+-- After deleting 'Dolphin LLC' from customers, the corresponding contact
+-- 'David Wright' is not deleted from contacts because the foreign key is
+-- NOT ENFORCED.
+SELECT * FROM contacts ORDER BY contact_id;
+ contact_id | customer_id | contact_name | phone | email
+---------------------------------------------------------------------
+ 1 | 1 | John Doe | (408)-111-1234 | john.doe@example.com
+ 2 | 1 | Jane Doe | (408)-111-1235 | jane.doe@example.com
+ 3 | 2 | David Wright | (408)-222-1234 | david.wright@example.com
+(3 rows)
+
+-- Test that ALTER TABLE .. ADD CONSTRAINT .. FOREIGN KEY .. NOT ENFORCED
+-- is propagated to worker nodes. First drop the foreign key:
+ALTER TABLE contacts DROP CONSTRAINT fk_customer;
+SELECT pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
+WHERE conrelid = 'contacts'::regclass AND conname = 'fk_customer';
+ Definition
+---------------------------------------------------------------------
+(0 rows)
+
+-- Now add the foreign key constraint back with NOT ENFORCED.
+ALTER TABLE contacts ADD CONSTRAINT fk_customer
+ FOREIGN KEY(customer_id)
+ REFERENCES customers(customer_id)
+ ON DELETE CASCADE NOT ENFORCED;
+-- The foreign key is propagated to worker nodes.
+SELECT pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
+WHERE conrelid = 'contacts'::regclass AND conname = 'fk_customer';
+ Definition
+---------------------------------------------------------------------
+ FOREIGN KEY (customer_id) REFERENCES customers(customer_id) ON DELETE CASCADE NOT ENFORCED
+(1 row)
+
+DELETE FROM customers WHERE customer_name = 'BlueBird Inc';
+-- The customers table is now empty, but the contacts table keeps its rows
+-- because the foreign key is NOT ENFORCED.
+SELECT * FROM customers ORDER BY customer_id;
+ customer_id | customer_name
+---------------------------------------------------------------------
+(0 rows)
+
+SELECT * FROM contacts ORDER BY contact_id;
+ contact_id | customer_id | contact_name | phone | email
+---------------------------------------------------------------------
+ 1 | 1 | John Doe | (408)-111-1234 | john.doe@example.com
+ 2 | 1 | Jane Doe | (408)-111-1235 | jane.doe@example.com
+ 3 | 2 | David Wright | (408)-222-1234 | david.wright@example.com
+(3 rows)
+
+-- ALTER TABLE .. ALTER CONSTRAINT is not supported in Citus,
+-- so the following command should fail
+ALTER TABLE contacts ALTER CONSTRAINT fk_customer ENFORCED;
+ERROR: alter table command is currently unsupported
+DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
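`pg_get_constraintdef()` already renders the NOT ENFORCED clause above; the same state can also be read as a boolean, assuming the `pg_constraint.conenforced` column that the PG18 commits cited here introduce:

```sql
-- Expect conenforced = f for fk_customer after the ALTERs above.
SELECT conname, conenforced
FROM pg_constraint
WHERE conrelid = 'contacts'::regclass
ORDER BY conname;
```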
+-- PG18 Feature: ENFORCED / NOT ENFORCED check constraints
+-- PG18 commit: https://github.com/postgres/postgres/commit/ca87c415e
+-- In Citus, CHECK constraints are propagated on promoting a postgres table
+-- to a citus table, on adding a new CHECK constraint to a citus table, and
+-- on adding a node to a citus cluster. Postgres does not support altering a
+-- check constraint's enforcement status, so Citus does not either.
+CREATE TABLE NE_CHECK_TBL (x int, y int,
+ CONSTRAINT CHECK_X CHECK (x > 3) NOT ENFORCED,
+ CONSTRAINT CHECK_Y CHECK (y < 20) ENFORCED
+);
+SET citus.next_shard_id TO 4754044;
+SELECT create_distributed_table('ne_check_tbl', 'x');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- CHECK_X is NOT ENFORCED, so these inserts should succeed
+INSERT INTO NE_CHECK_TBL (x) VALUES (5), (4), (3), (2), (6), (1);
+SELECT x FROM NE_CHECK_TBL ORDER BY x;
+ x
+---------------------------------------------------------------------
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+(6 rows)
+
+-- CHECK_Y is ENFORCED, so this insert should fail
+INSERT INTO NE_CHECK_TBL (x, y) VALUES (1, 15), (2, 25), (3, 10), (4, 30);
+ERROR: new row for relation "ne_check_tbl_4754045" violates check constraint "check_y"
+DETAIL: Failing row contains (4, 30).
+CONTEXT: while executing command on localhost:xxxxx
+-- Test adding new constraints with enforcement status
+ALTER TABLE NE_CHECK_TBL
+ ADD CONSTRAINT CHECK_Y2 CHECK (y > 10) NOT ENFORCED;
+-- CHECK_Y2 is NOT ENFORCED, so these inserts should succeed
+INSERT INTO NE_CHECK_TBL (x, y) VALUES (1, 8), (2, 9), (3, 10), (4, 11);
+SELECT x, y FROM NE_CHECK_TBL ORDER BY x, y;
+ x | y
+---------------------------------------------------------------------
+ 1 | 8
+ 1 |
+ 2 | 9
+ 2 |
+ 3 | 10
+ 3 |
+ 4 | 11
+ 4 |
+ 5 |
+ 6 |
+(10 rows)
+
+ALTER TABLE NE_CHECK_TBL
+ ADD CONSTRAINT CHECK_X2 CHECK (x < 10) ENFORCED;
+-- CHECK_X2 is ENFORCED, so these inserts should fail
+INSERT INTO NE_CHECK_TBL (x) VALUES (5), (15), (8), (12);
+ERROR: new row for relation "ne_check_tbl_4754044" violates check constraint "check_x2_4754044"
+DETAIL: Failing row contains (15, null).
+CONTEXT: while executing command on localhost:xxxxx
+-- PG18 Feature: Generated Virtual Columns
+-- PG18 commit: https://github.com/postgres/postgres/commit/83ea6c540
+-- Verify that generated virtual columns are supported on distributed tables.
+CREATE TABLE v_reading (
+ celsius DECIMAL(5,2),
+ farenheit DECIMAL(6, 2) GENERATED ALWAYS AS (celsius * 9/5 + 32) VIRTUAL,
+ created_at TIMESTAMPTZ DEFAULT now(),
+ device_id INT
+);
+-- The "cannot distribute on a generated column" restriction (#4616) also
+-- applies to VIRTUAL columns.
+SELECT create_distributed_table('v_reading', 'farenheit');
+ERROR: cannot distribute relation: v_reading
+DETAIL: Distribution column must not use GENERATED ALWAYS AS (...) VIRTUAL.
+SELECT create_distributed_table('v_reading', 'device_id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO v_reading (celsius, device_id) VALUES (0, 1), (100, 1), (37.5, 2), (25, 2), (-40, 3);
+SELECT device_id, celsius, farenheit FROM v_reading ORDER BY device_id;
+ device_id | celsius | farenheit
+---------------------------------------------------------------------
+ 1 | 0.00 | 32.00
+ 1 | 100.00 | 212.00
+ 2 | 37.50 | 99.50
+ 2 | 25.00 | 77.00
+ 3 | -40.00 | -40.00
+(5 rows)
+
+ALTER TABLE v_reading ADD COLUMN kelvin DECIMAL(6, 2) GENERATED ALWAYS AS (celsius + 273.15) VIRTUAL;
+SELECT device_id, celsius, kelvin FROM v_reading ORDER BY device_id, celsius;
+ device_id | celsius | kelvin
+---------------------------------------------------------------------
+ 1 | 0.00 | 273.15
+ 1 | 100.00 | 373.15
+ 2 | 25.00 | 298.15
+ 2 | 37.50 | 310.65
+ 3 | -40.00 | 233.15
+(5 rows)
+
+-- Show all columns that are generated
+ SELECT s.relname, a.attname, a.attgenerated
+ FROM pg_class s
+ JOIN pg_attribute a ON a.attrelid=s.oid
+ WHERE s.relname LIKE 'v_reading%' and attgenerated::int != 0
+ ORDER BY 1,2;
+ relname | attname | attgenerated
+---------------------------------------------------------------------
+ v_reading | farenheit | v
+ v_reading | kelvin | v
+(2 rows)
+
+-- Generated columns are virtual by default - repeat the test without VIRTUAL keyword
+CREATE TABLE d_reading (
+ celsius DECIMAL(5,2),
+ farenheit DECIMAL(6, 2) GENERATED ALWAYS AS (celsius * 9/5 + 32),
+ created_at TIMESTAMPTZ DEFAULT now(),
+ device_id INT
+);
+SELECT create_distributed_table('d_reading', 'farenheit');
+ERROR: cannot distribute relation: d_reading
+DETAIL: Distribution column must not use GENERATED ALWAYS AS (...) VIRTUAL.
+SELECT create_distributed_table('d_reading', 'device_id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO d_reading (celsius, device_id) VALUES (0, 1), (100, 1), (37.5, 2), (25, 2), (-40, 3);
+SELECT device_id, celsius, farenheit FROM d_reading ORDER BY device_id;
+ device_id | celsius | farenheit
+---------------------------------------------------------------------
+ 1 | 0.00 | 32.00
+ 1 | 100.00 | 212.00
+ 2 | 37.50 | 99.50
+ 2 | 25.00 | 77.00
+ 3 | -40.00 | -40.00
+(5 rows)
+
+ALTER TABLE d_reading ADD COLUMN kelvin DECIMAL(6, 2) GENERATED ALWAYS AS (celsius + 273.15) VIRTUAL;
+SELECT device_id, celsius, kelvin FROM d_reading ORDER BY device_id, celsius;
+ device_id | celsius | kelvin
+---------------------------------------------------------------------
+ 1 | 0.00 | 273.15
+ 1 | 100.00 | 373.15
+ 2 | 25.00 | 298.15
+ 2 | 37.50 | 310.65
+ 3 | -40.00 | 233.15
+(5 rows)
+
+-- Show all columns that are generated
+ SELECT s.relname, a.attname, a.attgenerated
+ FROM pg_class s
+ JOIN pg_attribute a ON a.attrelid=s.oid
+ WHERE s.relname LIKE 'd_reading%' and attgenerated::int != 0
+ ORDER BY 1,2;
+ relname | attname | attgenerated
+---------------------------------------------------------------------
+ d_reading | farenheit | v
+ d_reading | kelvin | v
+(2 rows)
+
+-- COPY implementation needs to handle GENERATED ALWAYS AS (...) VIRTUAL columns.
+\COPY d_reading FROM STDIN WITH DELIMITER ','
+SELECT device_id, count(device_id) as count, round(avg(celsius), 2) as avg, min(farenheit), max(farenheit)
+FROM d_reading
+GROUP BY device_id
+ORDER BY count DESC;
+ device_id | count | avg | min | max
+---------------------------------------------------------------------
+ 1 | 12 | 20.00 | 32.00 | 212.00
+ 5 | 10 | 13.20 | 33.80 | 73.40
+ 2 | 2 | 31.25 | 77.00 | 99.50
+ 3 | 1 | -40.00 | -40.00 | -40.00
+(4 rows)
+
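The `\COPY` above only ships the base columns (`celsius`, `device_id`); virtual columns are computed at read time and can never be supplied by INSERT or COPY. A sketch of the failure mode, with the error text quoted from stock PostgreSQL's generated-column message (an assumption, not output from this test run):

```sql
INSERT INTO v_reading (celsius, farenheit, device_id)
VALUES (10, 50.0, 1);
-- ERROR:  cannot insert a non-DEFAULT value into column "farenheit"
-- DETAIL:  Column "farenheit" is a generated column.
```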
+-- Test GROUP BY on tables with generated virtual columns - this requires
+-- special case handling in distributed planning. Test it out on some
+-- queries involving joins and set operations.
+SELECT device_id, max(kelvin) as Kel
+FROM v_reading
+WHERE (device_id, celsius) NOT IN (SELECT device_id, max(celsius) FROM v_reading GROUP BY device_id)
+GROUP BY device_id
+ORDER BY device_id ASC;
+ device_id | kel
+---------------------------------------------------------------------
+ 1 | 273.15
+ 2 | 298.15
+(2 rows)
+
+SELECT device_id, round(AVG( (d_farenheit + v_farenheit) / 2), 2) as Avg_Far
+FROM (SELECT *
+ FROM (SELECT device_id, round(AVG(farenheit),2) as d_farenheit
+ FROM d_reading
+ GROUP BY device_id) AS subq
+ RIGHT JOIN (SELECT device_id, MAX(farenheit) AS v_farenheit
+ FROM d_reading
+ GROUP BY device_id) AS subq2
+ USING (device_id)
+ ) AS finalq
+GROUP BY device_id
+ORDER BY device_id ASC;
+ device_id | avg_far
+---------------------------------------------------------------------
+ 1 | 140.00
+ 2 | 93.88
+ 3 | -40.00
+ 5 | 64.58
+(4 rows)
+
+SELECT device_id, MAX(farenheit) as farenheit
+FROM
+((SELECT device_id, round(AVG(farenheit),2) as farenheit
+ FROM d_reading
+ GROUP BY device_id)
+UNION ALL (SELECT device_id, MAX(farenheit) AS farenheit
+ FROM d_reading
+ GROUP BY device_id) ) AS unioned
+GROUP BY device_id
+ORDER BY device_id ASC;
+ device_id | farenheit
+---------------------------------------------------------------------
+ 1 | 212.00
+ 2 | 99.50
+ 3 | -40.00
+ 5 | 73.40
+(4 rows)
+
+SELECT device_id, MAX(farenheit) as farenheit
+FROM
+((SELECT device_id, round(AVG(farenheit),2) as farenheit
+ FROM d_reading
+ GROUP BY device_id)
+INTERSECT (SELECT device_id, MAX(farenheit) AS farenheit
+ FROM d_reading
+ GROUP BY device_id) ) AS intersected
+GROUP BY device_id
+ORDER BY device_id ASC;
+ device_id | farenheit
+---------------------------------------------------------------------
+ 3 | -40.00
+(1 row)
+
+SELECT device_id, MAX(farenheit) as farenheit
+FROM
+((SELECT device_id, round(AVG(farenheit),2) as farenheit
+ FROM d_reading
+ GROUP BY device_id)
+EXCEPT (SELECT device_id, MAX(farenheit) AS farenheit
+ FROM d_reading
+ GROUP BY device_id) ) AS excepted
+GROUP BY device_id
+ORDER BY device_id ASC;
+ device_id | farenheit
+---------------------------------------------------------------------
+ 1 | 68.00
+ 2 | 88.25
+ 5 | 55.76
+(3 rows)
+
+-- Ensure that UDFs such as alter_distributed_table, undistribute_table
+-- and add_local_table_to_metadata work fine with VIRTUAL columns. For
+-- this, the PR #4616 changes are extended to handle VIRTUAL columns in
+-- addition to STORED columns.
+CREATE TABLE generated_stored_dist (
+ col_1 int,
+ "col\'_2" text,
+ col_3 text generated always as (UPPER("col\'_2")) virtual
+);
+SELECT create_distributed_table ('generated_stored_dist', 'col_1');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO generated_stored_dist VALUES (1, 'text_1'), (2, 'text_2');
+SELECT * FROM generated_stored_dist ORDER BY 1,2,3;
+ col_1 | col\'_2 | col_3
+---------------------------------------------------------------------
+ 1 | text_1 | TEXT_1
+ 2 | text_2 | TEXT_2
+(2 rows)
+
+INSERT INTO generated_stored_dist VALUES (1, 'text_1'), (2, 'text_2');
+SELECT alter_distributed_table('generated_stored_dist', shard_count := 5, cascade_to_colocated := false);
+NOTICE: creating a new table for pg18_nn.generated_stored_dist
+NOTICE: moving the data of pg18_nn.generated_stored_dist
+NOTICE: dropping the old pg18_nn.generated_stored_dist
+NOTICE: renaming the new table to pg18_nn.generated_stored_dist
+ alter_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT * FROM generated_stored_dist ORDER BY 1,2,3;
+ col_1 | col\'_2 | col_3
+---------------------------------------------------------------------
+ 1 | text_1 | TEXT_1
+ 1 | text_1 | TEXT_1
+ 2 | text_2 | TEXT_2
+ 2 | text_2 | TEXT_2
+(4 rows)
+
+CREATE TABLE generated_stored_local (
+ col_1 int,
+ "col\'_2" text,
+ col_3 text generated always as (UPPER("col\'_2")) stored
+);
+SELECT citus_add_local_table_to_metadata('generated_stored_local');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO generated_stored_local VALUES (1, 'text_1'), (2, 'text_2');
+SELECT * FROM generated_stored_local ORDER BY 1,2,3;
+ col_1 | col\'_2 | col_3
+---------------------------------------------------------------------
+ 1 | text_1 | TEXT_1
+ 2 | text_2 | TEXT_2
+(2 rows)
+
+SELECT create_distributed_table ('generated_stored_local', 'col_1');
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg18_nn.generated_stored_local$$)
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO generated_stored_local VALUES (1, 'text_1'), (2, 'text_2');
+SELECT * FROM generated_stored_local ORDER BY 1,2,3;
+ col_1 | col\'_2 | col_3
+---------------------------------------------------------------------
+ 1 | text_1 | TEXT_1
+ 1 | text_1 | TEXT_1
+ 2 | text_2 | TEXT_2
+ 2 | text_2 | TEXT_2
+(4 rows)
+
+CREATE TABLE generated_stored_ref (
+ col_1 int,
+ col_2 int,
+ col_3 int generated always as (col_1+col_2) virtual,
+ col_4 int,
+ col_5 int generated always as (col_4*2-col_1) virtual
+);
+SELECT create_reference_table ('generated_stored_ref');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO generated_stored_ref (col_1, col_4) VALUES (1,2), (11,12);
+INSERT INTO generated_stored_ref (col_1, col_2, col_4) VALUES (100,101,102), (200,201,202);
+SELECT * FROM generated_stored_ref ORDER BY 1,2,3,4,5;
+ col_1 | col_2 | col_3 | col_4 | col_5
+---------------------------------------------------------------------
+ 1 | | | 2 | 3
+ 11 | | | 12 | 13
+ 100 | 101 | 201 | 102 | 104
+ 200 | 201 | 401 | 202 | 204
+(4 rows)
+
+BEGIN;
+ SELECT undistribute_table('generated_stored_ref');
+NOTICE: creating a new table for pg18_nn.generated_stored_ref
+NOTICE: moving the data of pg18_nn.generated_stored_ref
+NOTICE: dropping the old pg18_nn.generated_stored_ref
+NOTICE: renaming the new table to pg18_nn.generated_stored_ref
+ undistribute_table
+---------------------------------------------------------------------
+
+(1 row)
+
+ INSERT INTO generated_stored_ref (col_1, col_4) VALUES (11,12), (21,22);
+ INSERT INTO generated_stored_ref (col_1, col_2, col_4) VALUES (200,201,202), (300,301,302);
+ SELECT * FROM generated_stored_ref ORDER BY 1,2,3,4,5;
+ col_1 | col_2 | col_3 | col_4 | col_5
+---------------------------------------------------------------------
+ 1 | | | 2 | 3
+ 11 | | | 12 | 13
+ 11 | | | 12 | 13
+ 21 | | | 22 | 23
+ 100 | 101 | 201 | 102 | 104
+ 200 | 201 | 401 | 202 | 204
+ 200 | 201 | 401 | 202 | 204
+ 300 | 301 | 601 | 302 | 304
+(8 rows)
+
+ROLLBACK;
+BEGIN;
+ -- drop some of the columns not having "generated always as virtual" expressions
+ SET client_min_messages TO WARNING;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_1 CASCADE;
+ RESET client_min_messages;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_4;
+ -- show that undistribute_table works fine
+ SELECT undistribute_table('generated_stored_ref');
+NOTICE: creating a new table for pg18_nn.generated_stored_ref
+NOTICE: moving the data of pg18_nn.generated_stored_ref
+NOTICE: dropping the old pg18_nn.generated_stored_ref
+NOTICE: renaming the new table to pg18_nn.generated_stored_ref
+ undistribute_table
+---------------------------------------------------------------------
+
+(1 row)
+
+ INSERT INTO generated_stored_ref VALUES (5);
+ SELECT * FROM generated_stored_REF ORDER BY 1;
+ col_2
+---------------------------------------------------------------------
+ 5
+ 101
+ 201
+
+
+(5 rows)
+
+ROLLBACK;
+BEGIN;
+ -- now drop all columns
+ ALTER TABLE generated_stored_ref DROP COLUMN col_3;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_5;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_1;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_2;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_4;
+ -- show that undistribute_table works fine
+ SELECT undistribute_table('generated_stored_ref');
+NOTICE: creating a new table for pg18_nn.generated_stored_ref
+NOTICE: moving the data of pg18_nn.generated_stored_ref
+NOTICE: dropping the old pg18_nn.generated_stored_ref
+NOTICE: renaming the new table to pg18_nn.generated_stored_ref
+ undistribute_table
+---------------------------------------------------------------------
+
+(1 row)
+
+ SELECT * FROM generated_stored_ref;
+--
+(4 rows)
+
+ROLLBACK;
+-- PG18 Feature: VACUUM/ANALYZE support ONLY to limit processing to the parent.
+-- For Citus, ensure ONLY does not trigger shard propagation.
+-- PG18 commit: https://github.com/postgres/postgres/commit/62ddf7ee9
+CREATE SCHEMA pg18_vacuum_part;
+SET search_path TO pg18_vacuum_part;
+CREATE TABLE vac_analyze_only (a int);
+SELECT create_distributed_table('vac_analyze_only', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO vac_analyze_only VALUES (1), (2), (3);
+-- ANALYZE (no ONLY) should recurse into shard placements
+ANALYZE vac_analyze_only;
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part;
+SELECT coalesce(max(last_analyze), 'epoch'::timestamptz) AS analyze_before_only
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part'
+ AND relname LIKE 'vac_analyze_only_%'
+\gset
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part;
+-- ANALYZE ONLY should not recurse into shard placements
+ANALYZE ONLY vac_analyze_only;
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part;
+SELECT max(last_analyze) = :'analyze_before_only'::timestamptz
+ AS analyze_only_skipped
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part'
+ AND relname LIKE 'vac_analyze_only_%';
+ analyze_only_skipped
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part;
+-- VACUUM (no ONLY) should recurse into shard placements
+VACUUM vac_analyze_only;
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part;
+SELECT coalesce(max(last_vacuum), 'epoch'::timestamptz) AS vacuum_before_only
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part'
+ AND relname LIKE 'vac_analyze_only_%'
+\gset
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part;
+-- VACUUM ONLY should not recurse into shard placements
+VACUUM ONLY vac_analyze_only;
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part;
+SELECT max(last_vacuum) = :'vacuum_before_only'::timestamptz
+ AS vacuum_only_skipped
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part'
+ AND relname LIKE 'vac_analyze_only_%';
+ vacuum_only_skipped
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part;
+DROP SCHEMA pg18_vacuum_part CASCADE;
+NOTICE: drop cascades to table vac_analyze_only
+SET search_path TO pg18_nn;
+-- END PG18 Feature: VACUUM/ANALYZE support ONLY to limit processing to the parent
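ONLY is ordinary PG18 VACUUM/ANALYZE syntax and composes with the parenthesized option list, so the recursion behavior verified above carries over to combined forms; a sketch against the table from this section:

```sql
VACUUM (ANALYZE) ONLY vac_analyze_only;  -- parent relation only
VACUUM (ANALYZE) vac_analyze_only;       -- also recurses into shard placements
```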
+-- PG18 Feature: VACUUM/ANALYZE ONLY on a partitioned distributed table
+-- Ensure Citus does not recurse into shard placements when ONLY is used
+-- on the partitioned parent.
+-- PG18 commit: https://github.com/postgres/postgres/commit/62ddf7ee9
+CREATE SCHEMA pg18_vacuum_part_dist;
+SET search_path TO pg18_vacuum_part_dist;
+SET citus.shard_count = 2;
+SET citus.shard_replication_factor = 1;
+CREATE TABLE part_dist (id int, v int) PARTITION BY RANGE (id);
+CREATE TABLE part_dist_1 PARTITION OF part_dist FOR VALUES FROM (1) TO (100);
+CREATE TABLE part_dist_2 PARTITION OF part_dist FOR VALUES FROM (100) TO (200);
+SELECT create_distributed_table('part_dist', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO part_dist
+SELECT g, g FROM generate_series(1, 199) g;
+-- ANALYZE (no ONLY) should recurse into partitions and shard placements
+ANALYZE part_dist;
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part_dist;
+SELECT coalesce(max(last_analyze), 'epoch'::timestamptz) AS analyze_before_only
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part_dist'
+ AND relname LIKE 'part_dist_%'
+\gset
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part_dist;
+-- ANALYZE ONLY should not recurse into shard placements
+ANALYZE ONLY part_dist;
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part_dist;
+SELECT max(last_analyze) = :'analyze_before_only'::timestamptz
+ AS analyze_only_partitioned_skipped
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part_dist'
+ AND relname LIKE 'part_dist_%';
+ analyze_only_partitioned_skipped
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part_dist;
+-- VACUUM (no ONLY) should recurse into partitions and shard placements
+VACUUM part_dist;
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part_dist;
+SELECT coalesce(max(last_vacuum), 'epoch'::timestamptz) AS vacuum_before_only
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part_dist'
+ AND relname LIKE 'part_dist_%'
+\gset
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part_dist;
+-- VACUUM ONLY parent: core warns and does no work; Citus must not
+-- propagate to shard placements.
+VACUUM ONLY part_dist;
+WARNING: VACUUM ONLY of partitioned table "part_dist" has no effect
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part_dist;
+SELECT max(last_vacuum) = :'vacuum_before_only'::timestamptz
+ AS vacuum_only_partitioned_skipped
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part_dist'
+ AND relname LIKE 'part_dist_%';
+ vacuum_only_partitioned_skipped
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part_dist;
+DROP SCHEMA pg18_vacuum_part_dist CASCADE;
+NOTICE: drop cascades to table part_dist
+SET search_path TO pg18_nn;
+-- END PG18 Feature: VACUUM/ANALYZE ONLY on a partitioned distributed table
+-- PG18 Feature: text search with nondeterministic collations
+-- PG18 commit: https://github.com/postgres/postgres/commit/329304c90
+-- This test verifies that the PG18 regression tests also hold for Citus
+-- tables; Citus simply passes the collation info and the text search
+-- queries through to the worker shards.
+CREATE COLLATION ignore_accents (provider = icu, locale = '@colStrength=primary;colCaseLevel=yes', deterministic = false);
+NOTICE: using standard form "und-u-kc-ks-level1" for ICU locale "@colStrength=primary;colCaseLevel=yes"
+-- nondeterministic collations
+CREATE COLLATION ctest_det (provider = icu, locale = '', deterministic = true);
+NOTICE: using standard form "und" for ICU locale ""
+CREATE COLLATION ctest_nondet (provider = icu, locale = '', deterministic = false);
+NOTICE: using standard form "und" for ICU locale ""
+CREATE TABLE strtest1 (a int, b text);
+SELECT create_distributed_table('strtest1', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO strtest1 VALUES (1, U&'zy\00E4bc');
+INSERT INTO strtest1 VALUES (2, U&'zy\0061\0308bc');
+INSERT INTO strtest1 VALUES (3, U&'ab\00E4cd');
+INSERT INTO strtest1 VALUES (4, U&'ab\0061\0308cd');
+INSERT INTO strtest1 VALUES (5, U&'ab\00E4cd');
+INSERT INTO strtest1 VALUES (6, U&'ab\0061\0308cd');
+INSERT INTO strtest1 VALUES (7, U&'ab\00E4cd');
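+-- (rows 1-2 and rows 3-7 each spell the same grapheme two ways: the
+-- precomposed U&'\00E4' versus the decomposed U&'\0061\0308'. The
+-- deterministic collation tells the spellings apart while the
+-- nondeterministic one equates them, which explains the 1-row versus
+-- 2-row results below.)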
+SELECT * FROM strtest1 WHERE b = 'zyäbc' COLLATE ctest_det ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 1 | zyäbc
+(1 row)
+
+SELECT * FROM strtest1 WHERE b = 'zyäbc' COLLATE ctest_nondet ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 1 | zyäbc
+ 2 | zyäbc
+(2 rows)
+
+SELECT strpos(b COLLATE ctest_det, 'bc') FROM strtest1 ORDER BY a;
+ strpos
+---------------------------------------------------------------------
+ 4
+ 5
+ 0
+ 0
+ 0
+ 0
+ 0
+(7 rows)
+
+SELECT strpos(b COLLATE ctest_nondet, 'bc') FROM strtest1 ORDER BY a;
+ strpos
+---------------------------------------------------------------------
+ 4
+ 5
+ 0
+ 0
+ 0
+ 0
+ 0
+(7 rows)
+
+SELECT replace(b COLLATE ctest_det, U&'\00E4b', 'X') FROM strtest1 ORDER BY a;
+ replace
+---------------------------------------------------------------------
+ zyXc
+ zyäbc
+ abäcd
+ abäcd
+ abäcd
+ abäcd
+ abäcd
+(7 rows)
+
+SELECT replace(b COLLATE ctest_nondet, U&'\00E4b', 'X') FROM strtest1 ORDER BY a;
+ replace
+---------------------------------------------------------------------
+ zyXc
+ zyXc
+ abäcd
+ abäcd
+ abäcd
+ abäcd
+ abäcd
+(7 rows)
+
+SELECT a, split_part(b COLLATE ctest_det, U&'\00E4b', 2) FROM strtest1 ORDER BY a;
+ a | split_part
+---------------------------------------------------------------------
+ 1 | c
+ 2 |
+ 3 |
+ 4 |
+ 5 |
+ 6 |
+ 7 |
+(7 rows)
+
+SELECT a, split_part(b COLLATE ctest_nondet, U&'\00E4b', 2) FROM strtest1 ORDER BY a;
+ a | split_part
+---------------------------------------------------------------------
+ 1 | c
+ 2 | c
+ 3 |
+ 4 |
+ 5 |
+ 6 |
+ 7 |
+(7 rows)
+
+SELECT a, split_part(b COLLATE ctest_det, U&'\00E4b', -1) FROM strtest1 ORDER BY a;
+ a | split_part
+---------------------------------------------------------------------
+ 1 | c
+ 2 | zyäbc
+ 3 | abäcd
+ 4 | abäcd
+ 5 | abäcd
+ 6 | abäcd
+ 7 | abäcd
+(7 rows)
+
+SELECT a, split_part(b COLLATE ctest_nondet, U&'\00E4b', -1) FROM strtest1 ORDER BY a;
+ a | split_part
+---------------------------------------------------------------------
+ 1 | c
+ 2 | c
+ 3 | abäcd
+ 4 | abäcd
+ 5 | abäcd
+ 6 | abäcd
+ 7 | abäcd
+(7 rows)
+
+SELECT a, string_to_array(b COLLATE ctest_det, U&'\00E4b') FROM strtest1 ORDER BY a;
+ a | string_to_array
+---------------------------------------------------------------------
+ 1 | {zy,c}
+ 2 | {zyäbc}
+ 3 | {abäcd}
+ 4 | {abäcd}
+ 5 | {abäcd}
+ 6 | {abäcd}
+ 7 | {abäcd}
+(7 rows)
+
+SELECT a, string_to_array(b COLLATE ctest_nondet, U&'\00E4b') FROM strtest1 ORDER BY a;
+ a | string_to_array
+---------------------------------------------------------------------
+ 1 | {zy,c}
+ 2 | {zy,c}
+ 3 | {abäcd}
+ 4 | {abäcd}
+ 5 | {abäcd}
+ 6 | {abäcd}
+ 7 | {abäcd}
+(7 rows)
+
+SELECT * FROM strtest1 WHERE b LIKE 'zyäbc' COLLATE ctest_det ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 1 | zyäbc
+(1 row)
+
+SELECT * FROM strtest1 WHERE b LIKE 'zyäbc' COLLATE ctest_nondet ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 1 | zyäbc
+ 2 | zyäbc
+(2 rows)
+
+CREATE TABLE strtest2 (a int, b text);
+SELECT create_distributed_table('strtest2', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO strtest2 VALUES (1, 'cote'), (2, 'côte'), (3, 'coté'), (4, 'côté');
+CREATE TABLE strtest2nfd (a int, b text);
+SELECT create_distributed_table('strtest2nfd', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO strtest2nfd VALUES (1, 'cote'), (2, 'côte'), (3, 'coté'), (4, 'côté');
+UPDATE strtest2nfd SET b = normalize(b, nfd);
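+-- (normalize(..., nfd) rewrites precomposed characters into a base
+-- character plus combining marks, e.g. U&'\00F4' becomes U&'o\0302',
+-- so strtest2 and strtest2nfd hold canonically equivalent rows that
+-- differ at the byte level.)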
+-- This shows why replace should be greedy. Otherwise, in the NFD
+-- case, the match would stop before the decomposed accents, which
+-- would leave the accents in the results.
+SELECT a, b, replace(b COLLATE ignore_accents, 'co', 'ma') FROM strtest2 ORDER BY a, b;
+ a | b | replace
+---------------------------------------------------------------------
+ 1 | cote | mate
+ 2 | côte | mate
+ 3 | coté | maté
+ 4 | côté | maté
+(4 rows)
+
+SELECT a, b, replace(b COLLATE ignore_accents, 'co', 'ma') FROM strtest2nfd ORDER BY a, b;
+ a | b | replace
+---------------------------------------------------------------------
+ 1 | cote | mate
+ 2 | côte | mate
+ 3 | coté | maté
+ 4 | côté | maté
+(4 rows)
+
+-- PG18 Feature: LIKE support for non-deterministic collations
+-- PG18 commit: https://github.com/postgres/postgres/commit/85b7efa1c
+-- As with text search under non-deterministic collations, we verify that
+-- Citus passes LIKE with a non-deterministic collation through to the
+-- worker shards and that the queries return the expected results.
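+-- A hedged sketch of what pass-through implies, reusing rows inserted
+-- above (not executed as part of this test):
+--   SELECT U&'\0061\0308bc' LIKE U&'\00E4bc' COLLATE ctest_nondet;  -- t
+--   SELECT U&'\0061\0308bc' LIKE U&'\00E4bc' COLLATE ctest_det;     -- f
+-- i.e. literal parts of the pattern compare under the collation's notion
+-- of equality on the shards, just as on a plain PG18 table.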
+INSERT INTO strtest1 VALUES (8, U&'abc');
+INSERT INTO strtest1 VALUES (9, 'abc');
+SELECT a, b FROM strtest1
+WHERE b LIKE 'abc' COLLATE ctest_det
+ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+(2 rows)
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'a\bc' COLLATE ctest_det
+ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+(2 rows)
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'abc' COLLATE ctest_nondet
+ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+(2 rows)
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'a\bc' COLLATE ctest_nondet
+ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+(2 rows)
+
+CREATE COLLATION case_insensitive (provider = icu, locale = '@colStrength=secondary', deterministic = false);
+NOTICE: using standard form "und-u-ks-level2" for ICU locale "@colStrength=secondary"
+SELECT a, b FROM strtest1
+WHERE b LIKE 'ABC' COLLATE case_insensitive
+ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+(2 rows)
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'ABC%' COLLATE case_insensitive
+ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+(2 rows)
+
+INSERT INTO strtest1 VALUES (10, U&'\00E4bc');
+INSERT INTO strtest1 VALUES (12, U&'\0061\0308bc');
+SELECT * FROM strtest1
+WHERE b LIKE 'äbc' COLLATE ctest_det
+ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 10 | äbc
+(1 row)
+
+SELECT * FROM strtest1
+WHERE b LIKE 'äbc' COLLATE ctest_nondet
+ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 10 | äbc
+ 12 | äbc
+(2 rows)
+
+-- Tests with ignore_accents collation. Taken from
+-- PG18 regress tests and applied to a Citus table.
+INSERT INTO strtest1 VALUES (10, U&'\0061\0308bc');
+INSERT INTO strtest1 VALUES (11, U&'\00E4bc');
+INSERT INTO strtest1 VALUES (12, U&'cb\0061\0308');
+INSERT INTO strtest1 VALUES (13, U&'\0308bc');
+INSERT INTO strtest1 VALUES (14, 'foox');
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4_c' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+(6 rows)
+
+-- and in reverse:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\0061\0308_c' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+(6 rows)
+
+-- inner % matches b:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4%c' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+(6 rows)
+
+-- inner %% matches b then zero:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4%%c' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+(6 rows)
+
+-- inner %% matches b then zero:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'c%%\00E4' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 12 | cbä
+(1 row)
+
+-- a single trailing _ cannot match the two codepoints that form one grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'cb_' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+(0 rows)
+
+-- trailing __ matches two codepoints that form one grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'cb__' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 12 | cbä
+(1 row)
+
+-- leading % matches zero:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'%\00E4bc' COLLATE ignore_accents
+ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 1 | zyäbc
+ 2 | zyäbc
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+(8 rows)
+
+-- leading % matches zero (with later %):
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'%\00E4%c' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 1 | zyäbc
+ 2 | zyäbc
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+(8 rows)
+
+-- trailing % matches zero:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4bc%' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+(6 rows)
+
+-- trailing % matches zero (with previous %):
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4%c%' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 3 | abäcd
+ 4 | abäcd
+ 5 | abäcd
+ 6 | abäcd
+ 7 | abäcd
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+(11 rows)
+
+-- _ versus two codepoints that form one grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'_bc' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+ 13 | ̈bc
+(7 rows)
+
+-- (actually this matches because ignore_accents treats the lone
+-- combining accent as ignorable, so U&'\0308bc' equals 'bc')
+SELECT a, b FROM strtest1
+WHERE b = 'bc' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 13 | ̈bc
+(1 row)
+
+-- __ matches two codepoints that form one grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'__bc' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 10 | äbc
+ 12 | äbc
+(2 rows)
+
+-- _ matches one codepoint that forms half a grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'_\0308bc' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+ 8 | abc
+ 9 | abc
+ 10 | äbc
+ 10 | äbc
+ 11 | äbc
+ 12 | äbc
+ 13 | ̈bc
+(7 rows)
+
+-- doesn't match: after _ consumes the base character, \00e4 cannot
+-- match the lone combining \0308
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'_\00e4bc' COLLATE ignore_accents ORDER BY a, b;
+ a | b
+---------------------------------------------------------------------
+(0 rows)
+
+-- escape character at end of pattern
+SELECT a, b FROM strtest1
+WHERE b LIKE 'foo\' COLLATE ignore_accents ORDER BY a, b;
+ERROR: LIKE pattern must not end with escape character
+CONTEXT: while executing command on localhost:xxxxx
+DROP TABLE strtest1;
+DROP COLLATION ignore_accents;
+DROP COLLATION ctest_det;
+DROP COLLATION ctest_nondet;
+DROP COLLATION case_insensitive;
-- cleanup with minimum verbosity
SET client_min_messages TO ERROR;
RESET search_path;
diff --git a/src/test/regress/expected/publication_0.out b/src/test/regress/expected/publication_0.out
deleted file mode 100644
index e768a1d41..000000000
--- a/src/test/regress/expected/publication_0.out
+++ /dev/null
@@ -1,276 +0,0 @@
-CREATE SCHEMA publication;
-CREATE SCHEMA "publication-1";
-SET search_path TO publication;
-SET citus.shard_replication_factor TO 1;
-CREATE OR REPLACE FUNCTION activate_node_snapshot()
- RETURNS text[]
- LANGUAGE C STRICT
- AS 'citus';
-COMMENT ON FUNCTION activate_node_snapshot()
- IS 'commands to activate node snapshot';
-\c - - - :worker_1_port
-SET citus.enable_ddl_propagation TO off;
-CREATE OR REPLACE FUNCTION activate_node_snapshot()
- RETURNS text[]
- LANGUAGE C STRICT
- AS 'citus';
-COMMENT ON FUNCTION activate_node_snapshot()
- IS 'commands to activate node snapshot';
-\c - - - :worker_2_port
-SET citus.enable_ddl_propagation TO off;
-CREATE OR REPLACE FUNCTION activate_node_snapshot()
- RETURNS text[]
- LANGUAGE C STRICT
- AS 'citus';
-COMMENT ON FUNCTION activate_node_snapshot()
- IS 'commands to activate node snapshot';
--- create some publications with conflicting names on worker node
--- publication will be different from coordinator
-CREATE PUBLICATION "pub-all";
--- publication will be same as coordinator
-CREATE PUBLICATION "pub-all-insertupdateonly" FOR ALL TABLES WITH (publish = 'insert, update');;
-\c - - - :master_port
-SET search_path TO publication;
-SET citus.shard_replication_factor TO 1;
--- do not create publications on worker 2 initially
-SELECT citus_remove_node('localhost', :worker_2_port);
- citus_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
--- create a non-distributed publication
-SET citus.enable_ddl_propagation TO off;
-CREATE PUBLICATION pubnotdistributed WITH (publish = 'delete');
-RESET citus.enable_ddl_propagation;
-ALTER PUBLICATION pubnotdistributed SET (publish = 'truncate');
--- create regular, distributed publications
-CREATE PUBLICATION pubempty;
-CREATE PUBLICATION pubinsertonly WITH (publish = 'insert');
-CREATE PUBLICATION "pub-all" FOR ALL TABLES;
-CREATE PUBLICATION "pub-all-insertupdateonly" FOR ALL TABLES WITH (publish = 'insert, update');
--- add worker 2 with publications
-SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- Check publications on all the nodes; if we see the same publication name twice, its definition differs
--- Note that publications are special in the sense that the coordinator object might differ from
--- worker objects due to the presence of regular tables.
-SELECT DISTINCT c FROM (
- SELECT unnest(result::text[]) c
- FROM run_command_on_workers($$
- SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' ORDER BY 1) s$$)
- ORDER BY c) s;
- c
----------------------------------------------------------------------
- SELECT worker_create_or_replace_object('CREATE PUBLICATION "pub-all" FOR ALL TABLES WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
- SELECT worker_create_or_replace_object('CREATE PUBLICATION "pub-all-insertupdateonly" FOR ALL TABLES WITH (publish_via_partition_root = ''false'', publish = ''insert, update'')');
- SELECT worker_create_or_replace_object('CREATE PUBLICATION pubempty WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
- SELECT worker_create_or_replace_object('CREATE PUBLICATION pubinsertonly WITH (publish_via_partition_root = ''false'', publish = ''insert'')');
-(4 rows)
-
-CREATE TABLE test (x int primary key, y int, "column-1" int, doc xml);
-CREATE TABLE "test-pubs" (x int primary key, y int, "column-1" int);
-CREATE TABLE "publication-1"."test-pubs" (x int primary key, y int, "column-1" int);
--- various operations on a publication with only local tables
-CREATE PUBLICATION pubtables_orig FOR TABLE test, "test-pubs", "publication-1"."test-pubs" WITH (publish = 'insert, truncate');
-ALTER PUBLICATION pubtables_orig DROP TABLE test;
-ALTER PUBLICATION pubtables_orig ADD TABLE test;
--- publication will be empty on worker nodes, since all tables are local
-SELECT DISTINCT c FROM (
- SELECT unnest(result::text[]) c
- FROM run_command_on_workers($$
- SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubtables%' ORDER BY 1) s$$)
- ORDER BY c) s;
- c
----------------------------------------------------------------------
- SELECT worker_create_or_replace_object('CREATE PUBLICATION pubtables_orig WITH (publish_via_partition_root = ''false'', publish = ''insert, truncate'')');
-(1 row)
-
--- distribute a table and create a tenant schema, creating a mixed publication
-SELECT create_distributed_table('test','x', colocate_with := 'none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SET citus.enable_schema_based_sharding TO ON;
-CREATE SCHEMA citus_schema_1;
-CREATE TABLE citus_schema_1.test (x int primary key, y int, "column-1" int, doc xml);
-SET citus.enable_schema_based_sharding TO OFF;
-ALTER PUBLICATION pubtables_orig ADD TABLE citus_schema_1.test;
--- some generic operations
-ALTER PUBLICATION pubtables_orig RENAME TO pubtables;
-ALTER PUBLICATION pubtables SET (publish = 'insert, update, delete');
-ALTER PUBLICATION pubtables OWNER TO postgres;
-ALTER PUBLICATION pubtables SET (publish = 'inert, update, delete');
-ERROR: unrecognized "publish" value: "inert"
-ALTER PUBLICATION pubtables ADD TABLE notexist;
-ERROR: relation "notexist" does not exist
--- operations with a distributed table
-ALTER PUBLICATION pubtables DROP TABLE test;
-ALTER PUBLICATION pubtables ADD TABLE test;
-ALTER PUBLICATION pubtables SET TABLE test, "test-pubs", "publication-1"."test-pubs", citus_schema_1.test;
--- operations with a tenant schema table
-ALTER PUBLICATION pubtables DROP TABLE citus_schema_1.test;
-ALTER PUBLICATION pubtables ADD TABLE citus_schema_1.test;
-ALTER PUBLICATION pubtables SET TABLE test, "test-pubs", "publication-1"."test-pubs", citus_schema_1.test;
--- operations with a local table in a mixed publication
-ALTER PUBLICATION pubtables DROP TABLE "test-pubs";
-ALTER PUBLICATION pubtables ADD TABLE "test-pubs";
-SELECT create_distributed_table('"test-pubs"', 'x');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- test and test-pubs will show up in worker nodes
-SELECT DISTINCT c FROM (
- SELECT unnest(result::text[]) c
- FROM run_command_on_workers($$
- SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubtables%' ORDER BY 1) s$$)
- ORDER BY c) s;
- c
----------------------------------------------------------------------
- SELECT worker_create_or_replace_object('CREATE PUBLICATION pubtables FOR TABLE publication.test, citus_schema_1.test, publication."test-pubs" WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete'')');
-(1 row)
-
--- operations with a strangely named distributed table in a mixed publication
-ALTER PUBLICATION pubtables DROP TABLE "test-pubs";
-ALTER PUBLICATION pubtables ADD TABLE "test-pubs";
--- create a publication with distributed and local tables
-DROP PUBLICATION pubtables;
-CREATE PUBLICATION pubtables FOR TABLE test, "test-pubs", "publication-1"."test-pubs", citus_schema_1.test;
--- change distributed tables
-SELECT alter_distributed_table('test', shard_count := 5, cascade_to_colocated := true);
-NOTICE: creating a new table for publication.test
-NOTICE: moving the data of publication.test
-NOTICE: dropping the old publication.test
-NOTICE: renaming the new table to publication.test
-NOTICE: creating a new table for publication."test-pubs"
-NOTICE: moving the data of publication."test-pubs"
-NOTICE: dropping the old publication."test-pubs"
-NOTICE: renaming the new table to publication."test-pubs"
- alter_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT undistribute_table('test');
-NOTICE: creating a new table for publication.test
-NOTICE: moving the data of publication.test
-NOTICE: dropping the old publication.test
-NOTICE: renaming the new table to publication.test
- undistribute_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_add_local_table_to_metadata('test');
- citus_add_local_table_to_metadata
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_distributed_table_concurrently('test', 'x');
- create_distributed_table_concurrently
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT undistribute_table('"test-pubs"');
-NOTICE: creating a new table for publication."test-pubs"
-NOTICE: moving the data of publication."test-pubs"
-NOTICE: dropping the old publication."test-pubs"
-NOTICE: renaming the new table to publication."test-pubs"
- undistribute_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_reference_table('"test-pubs"');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
--- publications are unchanged despite various transformations
-SELECT DISTINCT c FROM (
- SELECT unnest(result::text[]) c
- FROM run_command_on_workers($$
- SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubtables%' ORDER BY 1) s$$)
- ORDER BY c) s;
- c
----------------------------------------------------------------------
- SELECT worker_create_or_replace_object('CREATE PUBLICATION pubtables FOR TABLE citus_schema_1.test, publication.test, publication."test-pubs" WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
-(1 row)
-
--- partitioned table
-CREATE TABLE testpub_partitioned (a int, b text, c text) PARTITION BY RANGE (a);
-CREATE TABLE testpub_partitioned_0 PARTITION OF testpub_partitioned FOR VALUES FROM (1) TO (10);
-ALTER TABLE testpub_partitioned_0 ADD PRIMARY KEY (a);
-ALTER TABLE testpub_partitioned_0 REPLICA IDENTITY USING INDEX testpub_partitioned_0_pkey;
-CREATE TABLE testpub_partitioned_1 PARTITION OF testpub_partitioned FOR VALUES FROM (11) TO (20);
-ALTER TABLE testpub_partitioned_1 ADD PRIMARY KEY (a);
-ALTER TABLE testpub_partitioned_1 REPLICA IDENTITY USING INDEX testpub_partitioned_1_pkey;
-CREATE PUBLICATION pubpartitioned FOR TABLE testpub_partitioned WITH (publish_via_partition_root = 'true');
-SELECT create_distributed_table('testpub_partitioned', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT DISTINCT c FROM (
- SELECT unnest(result::text[]) c
- FROM run_command_on_workers($$
- SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubpartitioned%' ORDER BY 1) s$$)
- ORDER BY c) s;
- c
----------------------------------------------------------------------
- SELECT worker_create_or_replace_object('CREATE PUBLICATION pubpartitioned FOR TABLE publication.testpub_partitioned WITH (publish_via_partition_root = ''true'', publish = ''insert, update, delete, truncate'')');
-(1 row)
-
-DROP PUBLICATION pubpartitioned;
-CREATE PUBLICATION pubpartitioned FOR TABLE testpub_partitioned WITH (publish_via_partition_root = 'true');
--- add a partition
-ALTER PUBLICATION pubpartitioned ADD TABLE testpub_partitioned_1;
-SELECT DISTINCT c FROM (
- SELECT unnest(result::text[]) c
- FROM run_command_on_workers($$
- SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLIATION%' AND c LIKE '%pubpartitioned%' ORDER BY 1) s$$)
- ORDER BY c) s;
-ERROR: malformed array literal: ""
-DETAIL: Array value must start with "{" or dimension information.
--- make sure we can sync all the publication metadata
-SELECT start_metadata_sync_to_all_nodes();
- start_metadata_sync_to_all_nodes
----------------------------------------------------------------------
- t
-(1 row)
-
-DROP PUBLICATION pubempty;
-DROP PUBLICATION pubtables;
-DROP PUBLICATION pubinsertonly;
-DROP PUBLICATION "pub-all-insertupdateonly";
-DROP PUBLICATION "pub-all";
-DROP PUBLICATION pubpartitioned;
-DROP PUBLICATION pubnotdistributed;
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
-\gset
-\if :server_version_ge_15
-\else
-SET client_min_messages TO ERROR;
-DROP SCHEMA publication CASCADE;
-DROP SCHEMA "publication-1" CASCADE;
-DROP SCHEMA citus_schema_1 CASCADE;
-SELECT public.wait_for_resource_cleanup();
- wait_for_resource_cleanup
----------------------------------------------------------------------
-
-(1 row)
-
-\q
diff --git a/src/test/regress/expected/single_node_0.out b/src/test/regress/expected/single_node_0.out
deleted file mode 100644
index a94c02951..000000000
--- a/src/test/regress/expected/single_node_0.out
+++ /dev/null
@@ -1,2582 +0,0 @@
---
--- SINGLE_NODE
---
--- This test file has an alternative output because of the change in the
--- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
--- The alternative output can be deleted when we drop support for PG14
---
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
- server_version_ge_15
----------------------------------------------------------------------
- f
-(1 row)
-
-CREATE SCHEMA single_node;
-SET search_path TO single_node;
-SET citus.shard_count TO 4;
-SET citus.shard_replication_factor TO 1;
-SET citus.next_shard_id TO 90630500;
--- Ensure tuple data in explain analyze output is the same on all PG versions
-SET citus.enable_binary_protocol = TRUE;
--- do not cache any connections for now; we will enable caching again soon
-ALTER SYSTEM SET citus.max_cached_conns_per_worker TO 0;
--- adding the coordinator as inactive is disallowed
-SELECT 1 FROM master_add_inactive_node('localhost', :master_port, groupid => 0);
-ERROR: coordinator node cannot be added as inactive node
--- before adding a node we are not officially a coordinator
-SELECT citus_is_coordinator();
- citus_is_coordinator
----------------------------------------------------------------------
- f
-(1 row)
-
--- idempotently add node to allow this test to run without add_coordinator
-SET client_min_messages TO WARNING;
-SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- after adding a node we are officially a coordinator
-SELECT citus_is_coordinator();
- citus_is_coordinator
----------------------------------------------------------------------
- t
-(1 row)
-
--- coordinator cannot be disabled
-SELECT 1 FROM citus_disable_node('localhost', :master_port);
-ERROR: cannot change "isactive" field of the coordinator node
-RESET client_min_messages;
-SELECT 1 FROM master_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT count(*) FROM pg_dist_node;
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
--- there are no workers now, but we should still be able to create Citus tables
--- force local execution when creating the index
-ALTER SYSTEM SET citus.local_shared_pool_size TO -1;
--- Postmaster might not ack SIGHUP signal sent by pg_reload_conf() immediately,
--- so we need to sleep for some amount of time to do our best to ensure that
--- postmaster reflects GUC changes.
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT pg_sleep(0.1);
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE failover_to_local (a int);
-SELECT create_distributed_table('failover_to_local', 'a', shard_count=>32);
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE INDEX CONCURRENTLY ON failover_to_local(a);
-WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state.
-If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object,
-if applicable, and then re-attempt the original command.
-ERROR: the total number of connections on the server is more than max_connections(100)
-HINT: Consider using a higher value for max_connections
--- reset global GUC changes
-ALTER SYSTEM RESET citus.local_shared_pool_size;
-ALTER SYSTEM RESET citus.max_cached_conns_per_worker;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-CREATE TABLE single_node_nullkey_c1(a int, b int);
-SELECT create_distributed_table('single_node_nullkey_c1', null, colocate_with=>'none', distribution_type=>null);
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE single_node_nullkey_c2(a int, b int);
-SELECT create_distributed_table('single_node_nullkey_c2', null, colocate_with=>'none', distribution_type=>null);
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- created on different colocation groups ..
-SELECT
-(
- SELECT colocationid FROM pg_dist_partition
- WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass
-)
-!=
-(
- SELECT colocationid FROM pg_dist_partition
- WHERE logicalrelid = 'single_node.single_node_nullkey_c2'::regclass
-);
- ?column?
----------------------------------------------------------------------
- t
-(1 row)
-
--- .. but both are associated to coordinator
-SELECT groupid = 0 FROM pg_dist_placement
-WHERE shardid = (
- SELECT shardid FROM pg_dist_shard
- WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass
-);
- ?column?
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT groupid = 0 FROM pg_dist_placement
-WHERE shardid = (
- SELECT shardid FROM pg_dist_shard
- WHERE logicalrelid = 'single_node.single_node_nullkey_c2'::regclass
-);
- ?column?
----------------------------------------------------------------------
- t
-(1 row)
-
--- try creating a single-shard table from a shard relation
-SELECT shardid AS round_robin_test_c1_shard_id FROM pg_dist_shard WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass \gset
-SELECT create_distributed_table('single_node_nullkey_c1_' || :round_robin_test_c1_shard_id , null, colocate_with=>'none', distribution_type=>null);
-ERROR: relation "single_node_nullkey_c1_90630532" is a shard relation
--- create a tenant schema on single node setup
-SET citus.enable_schema_based_sharding TO ON;
-CREATE SCHEMA tenant_1;
-CREATE TABLE tenant_1.tbl_1 (a int);
--- verify that we recorded tenant_1 in pg_dist_schema
-SELECT COUNT(*)=1 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'tenant_1';
- ?column?
----------------------------------------------------------------------
- t
-(1 row)
-
--- verify that tenant_1.tbl_1 is recorded in pg_dist_partition, as a single-shard table
-SELECT COUNT(*)=1 FROM pg_dist_partition
-WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND
- partmethod = 'n' AND repmodel = 's' AND colocationid IS NOT NULL;
- ?column?
----------------------------------------------------------------------
- t
-(1 row)
-
-RESET citus.enable_schema_based_sharding;
--- Test lazy conversion from Citus local to single-shard tables
--- and reference tables, on single node. This means that no shard
--- replication should be needed.
-CREATE TABLE ref_table_conversion_test (
- a int PRIMARY KEY
-);
-SELECT citus_add_local_table_to_metadata('ref_table_conversion_test');
- citus_add_local_table_to_metadata
----------------------------------------------------------------------
-
-(1 row)
-
--- save old shardid and placementid
-SELECT get_shard_id_for_distribution_column('single_node.ref_table_conversion_test') AS ref_table_conversion_test_old_shard_id \gset
-SELECT placementid AS ref_table_conversion_test_old_coord_placement_id FROM pg_dist_placement WHERE shardid = :ref_table_conversion_test_old_shard_id \gset
-SELECT create_reference_table('ref_table_conversion_test');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT public.verify_pg_dist_partition_for_reference_table('single_node.ref_table_conversion_test');
- verify_pg_dist_partition_for_reference_table
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT public.verify_shard_placements_for_reference_table('single_node.ref_table_conversion_test',
- :ref_table_conversion_test_old_shard_id,
- :ref_table_conversion_test_old_coord_placement_id);
- verify_shard_placements_for_reference_table
----------------------------------------------------------------------
- t
-(1 row)
-
-CREATE TABLE single_shard_conversion_test_1 (
- int_col_1 int PRIMARY KEY,
- text_col_1 text UNIQUE,
- int_col_2 int
-);
-SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_1');
- citus_add_local_table_to_metadata
----------------------------------------------------------------------
-
-(1 row)
-
--- save old shardid
-SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_1') AS single_shard_conversion_test_1_old_shard_id \gset
-SELECT create_distributed_table('single_shard_conversion_test_1', null, colocate_with=>'none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_1');
- verify_pg_dist_partition_for_single_shard_table
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_1', :single_shard_conversion_test_1_old_shard_id, true);
- verify_shard_placement_for_single_shard_table
----------------------------------------------------------------------
- t
-(1 row)
-
-CREATE TABLE single_shard_conversion_test_2 (
- int_col_1 int
-);
-SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_2');
- citus_add_local_table_to_metadata
----------------------------------------------------------------------
-
-(1 row)
-
--- save old shardid
-SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_2') AS single_shard_conversion_test_2_old_shard_id \gset
-SELECT create_distributed_table('single_shard_conversion_test_2', null, colocate_with=>'none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_2');
- verify_pg_dist_partition_for_single_shard_table
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_2', :single_shard_conversion_test_2_old_shard_id, true);
- verify_shard_placement_for_single_shard_table
----------------------------------------------------------------------
- t
-(1 row)
-
--- make sure that they're created on different colocation groups
-SELECT
-(
- SELECT colocationid FROM pg_dist_partition
- WHERE logicalrelid = 'single_node.single_shard_conversion_test_1'::regclass
-)
-!=
-(
- SELECT colocationid FROM pg_dist_partition
- WHERE logicalrelid = 'single_node.single_shard_conversion_test_2'::regclass
-);
- ?column?
----------------------------------------------------------------------
- t
-(1 row)
-
-SET client_min_messages TO WARNING;
-DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2, ref_table_conversion_test, single_shard_conversion_test_1, single_shard_conversion_test_2;
-DROP SCHEMA tenant_1 CASCADE;
-RESET client_min_messages;
--- so that we don't have to update the rest of the test output
-SET citus.next_shard_id TO 90630500;
-CREATE TABLE ref(x int, y int);
-SELECT create_reference_table('ref');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node;
- groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced
----------------------------------------------------------------------
- 0 | localhost | 57636 | t | t | t | t
-(1 row)
-
-DROP TABLE ref;
--- remove the coordinator to try again with create_reference_table
-SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0;
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE loc(x int, y int);
-SELECT citus_add_local_table_to_metadata('loc');
- citus_add_local_table_to_metadata
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node;
- groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced
----------------------------------------------------------------------
- 0 | localhost | 57636 | t | t | t | t
-(1 row)
-
-DROP TABLE loc;
--- remove the coordinator to try again with create_distributed_table
-SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0;
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
--- verify the coordinator gets auto added with the localhost guc
-ALTER SYSTEM SET citus.local_hostname TO '127.0.0.1'; --although not a hostname, should work for connecting locally
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT pg_sleep(.1); -- wait to make sure the config has changed before running the GUC
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE test(x int, y int);
-SELECT create_distributed_table('test','x');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node;
- groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced
----------------------------------------------------------------------
- 0 | 127.0.0.1 | 57636 | t | t | t | t
-(1 row)
-
-DROP TABLE test;
--- remove the coordinator to try again
-SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0;
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-ALTER SYSTEM RESET citus.local_hostname;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT pg_sleep(.1); -- wait to make sure the config has changed before running the GUC
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE test(x int, y int);
-SELECT create_distributed_table('test','x');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node;
- groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced
----------------------------------------------------------------------
- 0 | localhost | 57636 | t | t | t | t
-(1 row)
-
-BEGIN;
- -- we should not enable MX for this temporary node just because
- -- it'd spawn a bg worker targeting this node
- -- and that changes the connection count specific tests
- -- here
- SET LOCAL citus.enable_metadata_sync TO OFF;
- -- cannot add workers with specific IP as long as I have a placeholder coordinator record
- SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port);
-ERROR: cannot add a worker node when the coordinator hostname is set to localhost
-DETAIL: Worker nodes need to be able to connect to the coordinator to transfer data.
-HINT: Use SELECT citus_set_coordinator_host('') to configure the coordinator hostname
-COMMIT;
-BEGIN;
- -- we should not enable MX for this temporary node just because
- -- it'd spawn a bg worker targeting this node
- -- and that changes the connection count specific tests
- -- here
- SET LOCAL citus.enable_metadata_sync TO OFF;
- -- adding localhost workers is ok
- SELECT 1 FROM master_add_node('localhost', :worker_1_port);
-NOTICE: shards are still on the coordinator after adding the new node
-HINT: Use SELECT rebalance_table_shards(); to balance shards data between workers and coordinator or SELECT citus_drain_node('localhost',57636); to permanently move shards away from the coordinator.
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-COMMIT;
--- we don't need this node anymore
-SELECT 1 FROM master_remove_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- set the coordinator host to something different than localhost
-SELECT 1 FROM citus_set_coordinator_host('127.0.0.1');
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-BEGIN;
- -- we should not enable MX for this temporary node just because
- -- it'd spawn a bg worker targeting this node
- -- and that changes the connection count specific tests
- -- here
- SET LOCAL citus.enable_metadata_sync TO OFF;
- -- adding workers with specific IP is ok now
- SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port);
-NOTICE: shards are still on the coordinator after adding the new node
-HINT: Use SELECT rebalance_table_shards(); to balance shards data between workers and coordinator or SELECT citus_drain_node('127.0.0.1',57636); to permanently move shards away from the coordinator.
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-COMMIT;
--- we don't need this node anymore
-SELECT 1 FROM master_remove_node('127.0.0.1', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- set the coordinator host back to localhost for the remainder of tests
-SELECT 1 FROM citus_set_coordinator_host('localhost');
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- the shouldhaveshards setting should not really matter for a single node
-SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-CREATE TYPE new_type AS (n int, m text);
-CREATE TABLE test_2(x int, y int, z new_type);
-SELECT create_distributed_table('test_2','x');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE ref(a int, b int);
-SELECT create_reference_table('ref');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE local(c int, d int);
-CREATE TABLE public.another_schema_table(a int, b int);
-SELECT create_distributed_table('public.another_schema_table', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE non_binary_copy_test (key int PRIMARY KEY, value new_type);
-SELECT create_distributed_table('non_binary_copy_test', 'key');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO non_binary_copy_test SELECT i, (i, 'citus9.5')::new_type FROM generate_series(0,1000)i;
--- Confirm the basics work
-INSERT INTO test VALUES (1, 2), (3, 4), (5, 6), (2, 7), (4, 5);
-SELECT * FROM test WHERE x = 1;
- x | y
----------------------------------------------------------------------
- 1 | 2
-(1 row)
-
-SELECT count(*) FROM test;
- count
----------------------------------------------------------------------
- 5
-(1 row)
-
-SELECT * FROM test ORDER BY x;
- x | y
----------------------------------------------------------------------
- 1 | 2
- 2 | 7
- 3 | 4
- 4 | 5
- 5 | 6
-(5 rows)
-
-UPDATE test SET y = y + 1 RETURNING *;
- x | y
----------------------------------------------------------------------
- 1 | 3
- 2 | 8
- 3 | 5
- 4 | 6
- 5 | 7
-(5 rows)
-
-WITH cte_1 AS (UPDATE test SET y = y - 1 RETURNING *) SELECT * FROM cte_1 ORDER BY 1,2;
- x | y
----------------------------------------------------------------------
- 1 | 2
- 2 | 7
- 3 | 4
- 4 | 5
- 5 | 6
-(5 rows)
-
--- show that we can filter remote commands
--- given the default citus.grep_remote_commands, we log all commands
-SET citus.log_local_commands to true;
-SELECT count(*) FROM public.another_schema_table WHERE a = 1;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 1)
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
--- grep matches all commands
-SET citus.grep_remote_commands TO "%%";
-SELECT count(*) FROM public.another_schema_table WHERE a = 1;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 1)
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
--- only filter a specific shard for the local execution
-BEGIN;
- SET LOCAL citus.grep_remote_commands TO "%90630515%";
- SELECT count(*) FROM public.another_schema_table;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE true
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
- -- match nothing
- SET LOCAL citus.grep_remote_commands TO '%nothing%';
- SELECT count(*) FROM public.another_schema_table;
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-COMMIT;
--- only filter a specific shard for the remote execution
-BEGIN;
- SET LOCAL citus.enable_local_execution TO FALSE;
- SET LOCAL citus.grep_remote_commands TO '%90630515%';
- SET LOCAL citus.log_remote_commands TO ON;
- SELECT count(*) FROM public.another_schema_table;
-NOTICE: issuing SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE true
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
- -- match nothing
- SET LOCAL citus.grep_remote_commands TO '%nothing%';
- SELECT count(*) FROM public.another_schema_table;
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-COMMIT;
-RESET citus.log_local_commands;
-RESET citus.grep_remote_commands;
--- Test upsert with constraint
-CREATE TABLE upsert_test
-(
- part_key int UNIQUE,
- other_col int,
- third_col int
-);
--- distribute the table
-SELECT create_distributed_table('upsert_test', 'part_key');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- do a regular insert
-INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1), (2, 2) RETURNING *;
- part_key | other_col | third_col
----------------------------------------------------------------------
- 1 | 1 |
- 2 | 2 |
-(2 rows)
-
-SET citus.log_remote_commands to true;
--- observe that there is a conflict and the following query does nothing
-INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT DO NOTHING RETURNING *;
-NOTICE: executing the command locally: INSERT INTO single_node.upsert_test_90630523 AS citus_table_alias (part_key, other_col) VALUES (1, 1) ON CONFLICT DO NOTHING RETURNING part_key, other_col, third_col
- part_key | other_col | third_col
----------------------------------------------------------------------
-(0 rows)
-
--- same as the above with different syntax
-INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO NOTHING RETURNING *;
-NOTICE: executing the command locally: INSERT INTO single_node.upsert_test_90630523 AS citus_table_alias (part_key, other_col) VALUES (1, 1) ON CONFLICT(part_key) DO NOTHING RETURNING part_key, other_col, third_col
- part_key | other_col | third_col
----------------------------------------------------------------------
-(0 rows)
-
--- again the same query with another syntax
-INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *;
-NOTICE: executing the command locally: INSERT INTO single_node.upsert_test_90630523 AS citus_table_alias (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key_90630523 DO NOTHING RETURNING part_key, other_col, third_col
- part_key | other_col | third_col
----------------------------------------------------------------------
-(0 rows)
-
-BEGIN;
--- force local execution
-SELECT count(*) FROM upsert_test WHERE part_key = 1;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.upsert_test_90630523 upsert_test WHERE (part_key OPERATOR(pg_catalog.=) 1)
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-SET citus.log_remote_commands to false;
--- multi-shard pushdown query that goes through local execution
-INSERT INTO upsert_test (part_key, other_col) SELECT part_key, other_col FROM upsert_test ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *;
- part_key | other_col | third_col
----------------------------------------------------------------------
-(0 rows)
-
--- multi-shard pull-to-coordinator query that goes through local execution
-INSERT INTO upsert_test (part_key, other_col) SELECT part_key, other_col FROM upsert_test LIMIT 100 ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *;
- part_key | other_col | third_col
----------------------------------------------------------------------
-(0 rows)
-
-COMMIT;
--- to test citus local tables
-select undistribute_table('upsert_test');
-NOTICE: creating a new table for single_node.upsert_test
-NOTICE: moving the data of single_node.upsert_test
-NOTICE: dropping the old single_node.upsert_test
-NOTICE: renaming the new table to single_node.upsert_test
- undistribute_table
----------------------------------------------------------------------
-
-(1 row)
-
--- create citus local table
-select citus_add_local_table_to_metadata('upsert_test');
- citus_add_local_table_to_metadata
----------------------------------------------------------------------
-
-(1 row)
-
--- test the constraint with local execution
-INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *;
- part_key | other_col | third_col
----------------------------------------------------------------------
-(0 rows)
-
-DROP TABLE upsert_test;
-CREATE TABLE relation_tracking_table_1(id int, nonid int);
-SELECT create_distributed_table('relation_tracking_table_1', 'id', colocate_with := 'none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO relation_tracking_table_1 select generate_series(6, 10000, 1), 0;
-CREATE or REPLACE function foo()
-returns setof relation_tracking_table_1
-AS $$
-BEGIN
-RETURN query select * from relation_tracking_table_1 order by 1 limit 10;
-end;
-$$ language plpgsql;
-CREATE TABLE relation_tracking_table_2 (id int, nonid int);
--- use the relation-access in this session
-select foo();
- foo
----------------------------------------------------------------------
- (6,0)
- (7,0)
- (8,0)
- (9,0)
- (10,0)
- (11,0)
- (12,0)
- (13,0)
- (14,0)
- (15,0)
-(10 rows)
-
--- we should be able to use sequential mode, as the previous multi-shard
--- relation access has been cleaned-up
-BEGIN;
-SET LOCAL citus.multi_shard_modify_mode TO sequential;
-INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0;
-SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none');
-NOTICE: Copying data from local table...
-NOTICE: copying the data has completed
-DETAIL: The local data in the table is no longer visible, but is still on disk.
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$single_node.relation_tracking_table_2$$)
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT count(*) FROM relation_tracking_table_2;
- count
----------------------------------------------------------------------
- 995
-(1 row)
-
-ROLLBACK;
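--- (citus.multi_shard_modify_mode = sequential runs shard commands over a
--- single connection, one shard at a time; Citus only allows switching to it
--- if the transaction has not already accessed shards in parallel, which is
--- why the relation-access cleanup noted above matters.)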
-BEGIN;
-INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0;
-SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none');
-NOTICE: Copying data from local table...
-NOTICE: copying the data has completed
-DETAIL: The local data in the table is no longer visible, but is still on disk.
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$single_node.relation_tracking_table_2$$)
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT count(*) FROM relation_tracking_table_2;
- count
----------------------------------------------------------------------
- 995
-(1 row)
-
-COMMIT;
-SET client_min_messages TO ERROR;
-DROP TABLE relation_tracking_table_2, relation_tracking_table_1 CASCADE;
-RESET client_min_messages;
-CREATE SCHEMA "Quoed.Schema";
-SET search_path TO "Quoed.Schema";
-CREATE TABLE "long_constraint_upsert\_test"
-(
- part_key int,
- other_col int,
- third_col int,
- CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" UNIQUE (part_key)
-);
-NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted "
--- distribute the table and create shards
-SELECT create_distributed_table('"long_constraint_upsert\_test"', 'part_key');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO "long_constraint_upsert\_test" (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" DO NOTHING RETURNING *;
-NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted "
- part_key | other_col | third_col
----------------------------------------------------------------------
- 1 | 1 |
-(1 row)
-
-ALTER TABLE "long_constraint_upsert\_test" RENAME TO simple_table_name;
-INSERT INTO simple_table_name (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" DO NOTHING RETURNING *;
-NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted "
- part_key | other_col | third_col
----------------------------------------------------------------------
-(0 rows)
-
--- this is currently not supported, but once it is,
--- make sure that the following query also works fine
-ALTER TABLE simple_table_name RENAME CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" TO simple_constraint_name;
-NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted "
-ERROR: renaming constraints belonging to distributed tables is currently unsupported
---INSERT INTO simple_table_name (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT simple_constraint_name DO NOTHING RETURNING *;
-SET search_path TO single_node;
-SET client_min_messages TO ERROR;
-DROP SCHEMA "Quoed.Schema" CASCADE;
-RESET client_min_messages;
--- test partitioned index creation with long name
-CREATE TABLE test_index_creation1
-(
- tenant_id integer NOT NULL,
- timeperiod timestamp without time zone NOT NULL,
- field1 integer NOT NULL,
- inserted_utc timestamp without time zone NOT NULL DEFAULT now(),
- PRIMARY KEY(tenant_id, timeperiod)
-) PARTITION BY RANGE (timeperiod);
-CREATE TABLE test_index_creation1_p2020_09_26
-PARTITION OF test_index_creation1 FOR VALUES FROM ('2020-09-26 00:00:00') TO ('2020-09-27 00:00:00');
-CREATE TABLE test_index_creation1_p2020_09_27
-PARTITION OF test_index_creation1 FOR VALUES FROM ('2020-09-27 00:00:00') TO ('2020-09-28 00:00:00');
-select create_distributed_table('test_index_creation1', 'tenant_id');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- should be able to create indexes with INCLUDE/WHERE
-CREATE INDEX ix_test_index_creation5 ON test_index_creation1
- USING btree(tenant_id, timeperiod)
- INCLUDE (field1) WHERE (tenant_id = 100);
--- test if indexes are created
-SELECT 1 AS created WHERE EXISTS(SELECT * FROM pg_indexes WHERE indexname LIKE '%test_index_creation%');
- created
----------------------------------------------------------------------
- 1
-(1 row)
-
--- test citus size functions in transaction blocks with modifications
-CREATE TABLE test_citus_size_func (a int);
-SELECT create_distributed_table('test_citus_size_func', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO test_citus_size_func VALUES(1), (2);
-BEGIN;
- -- DDL with citus_table_size
- ALTER TABLE test_citus_size_func ADD COLUMN newcol INT;
- SELECT citus_table_size('test_citus_size_func');
-ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications
-ROLLBACK;
-BEGIN;
- -- DDL with citus_relation_size
- ALTER TABLE test_citus_size_func ADD COLUMN newcol INT;
- SELECT citus_relation_size('test_citus_size_func');
-ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications
-ROLLBACK;
-BEGIN;
- -- DDL with citus_total_relation_size
- ALTER TABLE test_citus_size_func ADD COLUMN newcol INT;
- SELECT citus_total_relation_size('test_citus_size_func');
-ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications
-ROLLBACK;
-BEGIN;
- -- single shard insert with citus_table_size
- INSERT INTO test_citus_size_func VALUES (3);
- SELECT citus_table_size('test_citus_size_func');
-ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications
-ROLLBACK;
-BEGIN;
- -- multi shard modification with citus_table_size
- INSERT INTO test_citus_size_func SELECT * FROM test_citus_size_func;
- SELECT citus_table_size('test_citus_size_func');
-ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications
-ROLLBACK;
-BEGIN;
- -- single shard insert with citus_relation_size
- INSERT INTO test_citus_size_func VALUES (3);
- SELECT citus_relation_size('test_citus_size_func');
-ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications
-ROLLBACK;
-BEGIN;
- -- multi shard modification with citus_relation_size
- INSERT INTO test_citus_size_func SELECT * FROM test_citus_size_func;
- SELECT citus_relation_size('test_citus_size_func');
-ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications
-ROLLBACK;
-BEGIN;
- -- single shard insert with citus_total_relation_size
- INSERT INTO test_citus_size_func VALUES (3);
- SELECT citus_total_relation_size('test_citus_size_func');
-ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications
-ROLLBACK;
-BEGIN;
- -- multi shard modification with citus_total_relation_size
- INSERT INTO test_citus_size_func SELECT * FROM test_citus_size_func;
- SELECT citus_total_relation_size('test_citus_size_func');
-ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications
-ROLLBACK;
--- we should be able to limit intermediate results
-BEGIN;
- SET LOCAL citus.max_intermediate_result_size TO 0;
- WITH cte_1 AS (SELECT * FROM test OFFSET 0) SELECT * FROM cte_1;
-ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 0 kB)
-DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into one place.
-HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
-ROLLBACK;
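--- Not part of the captured output: a minimal sketch of applying the HINT
--- above ('10MB' is an arbitrary illustrative value):
--- SET citus.max_intermediate_result_size TO '10MB';  -- raise the cap
--- SET citus.max_intermediate_result_size TO -1;      -- disable the check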
--- the first cte (cte_1) does not exceed the limit
--- but the second (cte_2) exceeds it, so we error out
-BEGIN;
- SET LOCAL citus.max_intermediate_result_size TO '1kB';
- INSERT INTO test SELECT i,i from generate_series(0,1000)i;
- -- only pulls 1 row, should not hit the limit
- WITH cte_1 AS (SELECT * FROM test LIMIT 1) SELECT count(*) FROM cte_1;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
- -- cte_1 only pulls 1 row, but cte_2 pulls all rows
- WITH cte_1 AS (SELECT * FROM test LIMIT 1),
- cte_2 AS (SELECT * FROM test OFFSET 0)
- SELECT count(*) FROM cte_1, cte_2;
-ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 1 kB)
-DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into one place.
-HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
-ROLLBACK;
--- single shard and multi-shard delete
--- inside a transaction block
-BEGIN;
- DELETE FROM test WHERE y = 5;
- INSERT INTO test VALUES (4, 5);
- DELETE FROM test WHERE x = 1;
- INSERT INTO test VALUES (1, 2);
-COMMIT;
-CREATE INDEX single_node_i1 ON test(x);
-CREATE INDEX single_node_i2 ON test(x,y);
-REINDEX SCHEMA single_node;
-REINDEX SCHEMA CONCURRENTLY single_node;
--- keep one of the indexes
--- drop the other, with and without tx blocks
-BEGIN;
- DROP INDEX single_node_i2;
-ROLLBACK;
-DROP INDEX single_node_i2;
--- change the schema with and without a TX block
-BEGIN;
- ALTER TABLE public.another_schema_table SET SCHEMA single_node;
-ROLLBACK;
-ALTER TABLE public.another_schema_table SET SCHEMA single_node;
-BEGIN;
- TRUNCATE test;
- SELECT * FROM test;
- x | y
----------------------------------------------------------------------
-(0 rows)
-
-ROLLBACK;
-VACUUM test;
-VACUUM test, test_2;
-VACUUM ref, test;
-VACUUM ANALYZE test(x);
-ANALYZE ref;
-ANALYZE test_2;
-VACUUM local;
-VACUUM local, ref, test, test_2;
-VACUUM FULL test, ref;
-BEGIN;
- ALTER TABLE test ADD COLUMN z INT DEFAULT 66;
- SELECT count(*) FROM test WHERE z = 66;
- count
----------------------------------------------------------------------
- 5
-(1 row)
-
-ROLLBACK;
--- explain analyze should work on a single node
-EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
- SELECT * FROM test;
- QUERY PLAN
----------------------------------------------------------------------
- Custom Scan (Citus Adaptive) (actual rows=5 loops=1)
- Task Count: 4
- Tuple data received from nodes: 40 bytes
- Tasks Shown: One of 4
- -> Task
- Tuple data received from node: 16 bytes
- Node: host=localhost port=xxxxx dbname=regression
- -> Seq Scan on test_90630506 test (actual rows=2 loops=1)
-(8 rows)
-
--- common utility command
-SELECT pg_size_pretty(citus_relation_size('test'::regclass));
- pg_size_pretty
----------------------------------------------------------------------
- 24 kB
-(1 row)
-
--- basic view queries
-CREATE VIEW single_node_view AS
- SELECT count(*) as cnt FROM test t1 JOIN test t2 USING (x);
-SELECT * FROM single_node_view;
- cnt
----------------------------------------------------------------------
- 5
-(1 row)
-
-SELECT * FROM single_node_view, test WHERE test.x = single_node_view.cnt;
- cnt | x | y
----------------------------------------------------------------------
- 5 | 5 | 6
-(1 row)
-
--- copy in/out
-BEGIN;
- COPY test(x) FROM PROGRAM 'seq 32';
- SELECT count(*) FROM test;
- count
----------------------------------------------------------------------
- 37
-(1 row)
-
- COPY (SELECT count(DISTINCT x) FROM test) TO STDOUT;
-32
- INSERT INTO test SELECT i,i FROM generate_series(0,100)i;
-ROLLBACK;
--- master_create_empty_shard on coordinator
-BEGIN;
-CREATE TABLE append_table (a INT, b INT);
-SELECT create_distributed_table('append_table','a','append');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT master_create_empty_shard('append_table');
-NOTICE: Creating placements for the append partitioned tables on the coordinator is not supported, skipping coordinator ...
-ERROR: could only create 0 of 1 of required shard replicas
-END;
--- alter table inside a tx block
-BEGIN;
- ALTER TABLE test ADD COLUMN z single_node.new_type;
- INSERT INTO test VALUES (99, 100, (1, 'onder')::new_type) RETURNING *;
- x | y | z
----------------------------------------------------------------------
- 99 | 100 | (1,onder)
-(1 row)
-
-ROLLBACK;
--- prepared statements with custom types
-PREPARE single_node_prepare_p1(int, int, new_type) AS
- INSERT INTO test_2 VALUES ($1, $2, $3);
-EXECUTE single_node_prepare_p1(1, 1, (95, 'citus9.5')::new_type);
-EXECUTE single_node_prepare_p1(2 ,2, (94, 'citus9.4')::new_type);
-EXECUTE single_node_prepare_p1(3 ,2, (93, 'citus9.3')::new_type);
-EXECUTE single_node_prepare_p1(4 ,2, (92, 'citus9.2')::new_type);
-EXECUTE single_node_prepare_p1(5 ,2, (91, 'citus9.1')::new_type);
-EXECUTE single_node_prepare_p1(6 ,2, (90, 'citus9.0')::new_type);
-PREPARE use_local_query_cache(int) AS SELECT count(*) FROM test_2 WHERE x = $1;
-EXECUTE use_local_query_cache(1);
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-EXECUTE use_local_query_cache(1);
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-EXECUTE use_local_query_cache(1);
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-EXECUTE use_local_query_cache(1);
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-EXECUTE use_local_query_cache(1);
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-SET client_min_messages TO DEBUG2;
--- the 6th execution will go through the planner
--- the 7th execution will skip the planner as it uses the cache
-EXECUTE use_local_query_cache(1);
-DEBUG: Deferred pruning for a fast-path router query
-DEBUG: Creating router plan
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-EXECUTE use_local_query_cache(1);
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-RESET client_min_messages;
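--- (Background, not captured output: PostgreSQL's plan cache uses custom
--- plans for the first five executions of a prepared statement and considers
--- a generic plan from the sixth onward; that matches the DEBUG lines showing
--- up only on the 6th execution, while the 7th reuses the cached plan.)
--- A quick way to inspect the cached plan from psql:
--- EXPLAIN EXECUTE use_local_query_cache(1);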
--- partitioned tables should be fine; adding for completeness
-CREATE TABLE collections_list (
- key bigint,
- ts timestamptz DEFAULT now(),
- collection_id integer,
- value numeric,
- PRIMARY KEY(key, collection_id)
-) PARTITION BY LIST (collection_id );
-SELECT create_distributed_table('collections_list', 'key');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE collections_list_0
- PARTITION OF collections_list (key, ts, collection_id, value)
- FOR VALUES IN ( 0 );
-CREATE TABLE collections_list_1
- PARTITION OF collections_list (key, ts, collection_id, value)
- FOR VALUES IN ( 1 );
-INSERT INTO collections_list SELECT i, '2011-01-01', i % 2, i * i FROM generate_series(0, 100) i;
-SELECT count(*) FROM collections_list WHERE key < 10 AND collection_id = 1;
- count
----------------------------------------------------------------------
- 5
-(1 row)
-
-SELECT count(*) FROM collections_list_0 WHERE key < 10 AND collection_id = 1;
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-SELECT count(*) FROM collections_list_1 WHERE key = 11;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-ALTER TABLE collections_list DROP COLUMN ts;
-SELECT * FROM collections_list, collections_list_0 WHERE collections_list.key=collections_list_0.key ORDER BY 1 DESC,2 DESC,3 DESC,4 DESC LIMIT 1;
- key | collection_id | value | key | collection_id | value
----------------------------------------------------------------------
- 100 | 0 | 10000 | 100 | 0 | 10000
-(1 row)
-
--- test hash distribution using INSERT with generate_series() function
-CREATE OR REPLACE FUNCTION part_hashint4_noop(value int4, seed int8)
-RETURNS int8 AS $$
-SELECT value + seed;
-$$ LANGUAGE SQL IMMUTABLE;
-CREATE OPERATOR CLASS part_test_int4_ops
-FOR TYPE int4
-USING HASH AS
-operator 1 =,
-function 2 part_hashint4_noop(int4, int8);
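--- (part_hashint4_noop is deliberately not a real hash: it returns
--- value + seed, so rows below are routed to hash partitions in a fully
--- predictable way, keeping the expected output stable.)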
-CREATE TABLE hash_parted (
- a int,
- b int
-) PARTITION BY HASH (a part_test_int4_ops);
-CREATE TABLE hpart0 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 0);
-CREATE TABLE hpart1 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 1);
-CREATE TABLE hpart2 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 2);
-CREATE TABLE hpart3 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 3);
--- Disable metadata sync since citus doesn't support distributing
--- operator classes for now.
-SET citus.enable_metadata_sync TO OFF;
-SELECT create_distributed_table('hash_parted ', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO hash_parted VALUES (1, generate_series(1, 10));
-SELECT * FROM hash_parted ORDER BY 1, 2;
- a | b
----------------------------------------------------------------------
- 1 | 1
- 1 | 2
- 1 | 3
- 1 | 4
- 1 | 5
- 1 | 6
- 1 | 7
- 1 | 8
- 1 | 9
- 1 | 10
-(10 rows)
-
-ALTER TABLE hash_parted DETACH PARTITION hpart0;
-ALTER TABLE hash_parted DETACH PARTITION hpart1;
-ALTER TABLE hash_parted DETACH PARTITION hpart2;
-ALTER TABLE hash_parted DETACH PARTITION hpart3;
-RESET citus.enable_metadata_sync;
--- test a range-partitioned table with no partitions created, inserting with generate_series()
--- this should error out even in plain PG, since no partition of relation "parent_tab" is found for the row;
--- in Citus it errors out because it fails to evaluate the partition key in the insert
-CREATE TABLE parent_tab (id int) PARTITION BY RANGE (id);
-SELECT create_distributed_table('parent_tab', 'id');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO parent_tab VALUES (generate_series(0, 3));
-ERROR: failed to evaluate partition key in insert
-HINT: try using constant values for partition column
--- now it should work
-CREATE TABLE parent_tab_1_2 PARTITION OF parent_tab FOR VALUES FROM (1) to (2);
-ALTER TABLE parent_tab ADD COLUMN b int;
-INSERT INTO parent_tab VALUES (1, generate_series(0, 3));
-SELECT * FROM parent_tab ORDER BY 1, 2;
- id | b
----------------------------------------------------------------------
- 1 | 0
- 1 | 1
- 1 | 2
- 1 | 3
-(4 rows)
-
--- make sure that parallel accesses work correctly
-SET citus.force_max_query_parallelization TO ON;
-SELECT * FROM test_2 ORDER BY 1 DESC;
- x | y | z
----------------------------------------------------------------------
- 6 | 2 | (90,citus9.0)
- 5 | 2 | (91,citus9.1)
- 4 | 2 | (92,citus9.2)
- 3 | 2 | (93,citus9.3)
- 2 | 2 | (94,citus9.4)
- 1 | 1 | (95,citus9.5)
-(6 rows)
-
-DELETE FROM test_2 WHERE y = 1000 RETURNING *;
- x | y | z
----------------------------------------------------------------------
-(0 rows)
-
-RESET citus.force_max_query_parallelization;
-BEGIN;
- INSERT INTO test_2 VALUES (7 ,2, (83, 'citus8.3')::new_type);
- SAVEPOINT s1;
- INSERT INTO test_2 VALUES (9 ,1, (82, 'citus8.2')::new_type);
- SAVEPOINT s2;
- ROLLBACK TO SAVEPOINT s1;
- SELECT * FROM test_2 WHERE z = (83, 'citus8.3')::new_type OR z = (82, 'citus8.2')::new_type;
- x | y | z
----------------------------------------------------------------------
- 7 | 2 | (83,citus8.3)
-(1 row)
-
- RELEASE SAVEPOINT s1;
-COMMIT;
-SELECT * FROM test_2 WHERE z = (83, 'citus8.3')::new_type OR z = (82, 'citus8.2')::new_type;
- x | y | z
----------------------------------------------------------------------
- 7 | 2 | (83,citus8.3)
-(1 row)
-
--- final query reads only an intermediate result
--- we want PG 11/12/13 to behave consistently: the CTEs should be MATERIALIZED
-WITH cte_1 AS (SELECT * FROM test_2) SELECT * FROM cte_1 ORDER BY 1,2;
- x | y | z
----------------------------------------------------------------------
- 1 | 1 | (95,citus9.5)
- 2 | 2 | (94,citus9.4)
- 3 | 2 | (93,citus9.3)
- 4 | 2 | (92,citus9.2)
- 5 | 2 | (91,citus9.1)
- 6 | 2 | (90,citus9.0)
- 7 | 2 | (83,citus8.3)
-(7 rows)
-
--- final query is router query
-WITH cte_1 AS (SELECT * FROM test_2) SELECT * FROM cte_1, test_2 WHERE test_2.x = cte_1.x AND test_2.x = 7 ORDER BY 1,2;
- x | y | z | x | y | z
----------------------------------------------------------------------
- 7 | 2 | (83,citus8.3) | 7 | 2 | (83,citus8.3)
-(1 row)
-
--- final query is a distributed query
-WITH cte_1 AS (SELECT * FROM test_2) SELECT * FROM cte_1, test_2 WHERE test_2.x = cte_1.x AND test_2.y != 2 ORDER BY 1,2;
- x | y | z | x | y | z
----------------------------------------------------------------------
- 1 | 1 | (95,citus9.5) | 1 | 1 | (95,citus9.5)
-(1 row)
-
--- query pushdown should work
-SELECT
- *
-FROM
- (SELECT x, count(*) FROM test_2 GROUP BY x) as foo,
- (SELECT x, count(*) FROM test_2 GROUP BY x) as bar
-WHERE
- foo.x = bar.x
-ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC
-LIMIT 1;
- x | count | x | count
----------------------------------------------------------------------
- 7 | 1 | 7 | 1
-(1 row)
-
--- make sure that foreign keys work fine
-ALTER TABLE test_2 ADD CONSTRAINT first_pkey PRIMARY KEY (x);
-ALTER TABLE test ADD CONSTRAINT foreign_key FOREIGN KEY (x) REFERENCES test_2(x) ON DELETE CASCADE;
--- show that delete on test_2 cascades to test
-SELECT * FROM test WHERE x = 5;
- x | y
----------------------------------------------------------------------
- 5 | 6
-(1 row)
-
-DELETE FROM test_2 WHERE x = 5;
-SELECT * FROM test WHERE x = 5;
- x | y
----------------------------------------------------------------------
-(0 rows)
-
-INSERT INTO test_2 VALUES (5 ,2, (91, 'citus9.1')::new_type);
-INSERT INTO test VALUES (5, 6);
-INSERT INTO ref VALUES (1, 2), (5, 6), (7, 8);
-SELECT count(*) FROM ref;
- count
----------------------------------------------------------------------
- 3
-(1 row)
-
-SELECT * FROM ref ORDER BY a;
- a | b
----------------------------------------------------------------------
- 1 | 2
- 5 | 6
- 7 | 8
-(3 rows)
-
-SELECT * FROM test, ref WHERE x = a ORDER BY x;
- x | y | a | b
----------------------------------------------------------------------
- 1 | 2 | 1 | 2
- 5 | 6 | 5 | 6
-(2 rows)
-
-INSERT INTO local VALUES (1, 2), (3, 4), (7, 8);
-SELECT count(*) FROM local;
- count
----------------------------------------------------------------------
- 3
-(1 row)
-
-SELECT * FROM local ORDER BY c;
- c | d
----------------------------------------------------------------------
- 1 | 2
- 3 | 4
- 7 | 8
-(3 rows)
-
-SELECT * FROM ref, local WHERE a = c ORDER BY a;
- a | b | c | d
----------------------------------------------------------------------
- 1 | 2 | 1 | 2
- 7 | 8 | 7 | 8
-(2 rows)
-
--- Check repartition joins are supported
-SET citus.enable_repartition_joins TO ON;
-SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
- x | y | x | y
----------------------------------------------------------------------
- 2 | 7 | 1 | 2
- 4 | 5 | 3 | 4
- 5 | 6 | 4 | 5
-(3 rows)
-
-SET citus.enable_single_hash_repartition_joins TO ON;
-SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
- x | y | x | y
----------------------------------------------------------------------
- 2 | 7 | 1 | 2
- 4 | 5 | 3 | 4
- 5 | 6 | 4 | 5
-(3 rows)
-
-SET search_path TO public;
-SET citus.enable_single_hash_repartition_joins TO OFF;
-SELECT * FROM single_node.test t1, single_node.test t2 WHERE t1.x = t2.y ORDER BY t1.x;
- x | y | x | y
----------------------------------------------------------------------
- 2 | 7 | 1 | 2
- 4 | 5 | 3 | 4
- 5 | 6 | 4 | 5
-(3 rows)
-
-SET citus.enable_single_hash_repartition_joins TO ON;
-SELECT * FROM single_node.test t1, single_node.test t2 WHERE t1.x = t2.y ORDER BY t1.x;
- x | y | x | y
----------------------------------------------------------------------
- 2 | 7 | 1 | 2
- 4 | 5 | 3 | 4
- 5 | 6 | 4 | 5
-(3 rows)
-
-SET search_path TO single_node;
-SET citus.task_assignment_policy TO 'round-robin';
-SET citus.enable_single_hash_repartition_joins TO ON;
-SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
- x | y | x | y
----------------------------------------------------------------------
- 2 | 7 | 1 | 2
- 4 | 5 | 3 | 4
- 5 | 6 | 4 | 5
-(3 rows)
-
-SET citus.task_assignment_policy TO 'greedy';
-SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
- x | y | x | y
----------------------------------------------------------------------
- 2 | 7 | 1 | 2
- 4 | 5 | 3 | 4
- 5 | 6 | 4 | 5
-(3 rows)
-
-SET citus.task_assignment_policy TO 'first-replica';
-SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
- x | y | x | y
----------------------------------------------------------------------
- 2 | 7 | 1 | 2
- 4 | 5 | 3 | 4
- 5 | 6 | 4 | 5
-(3 rows)
-
-RESET citus.enable_repartition_joins;
-RESET citus.enable_single_hash_repartition_joins;
--- INSERT SELECT router
-BEGIN;
-INSERT INTO test(x, y) SELECT x, y FROM test WHERE x = 1;
-SELECT count(*) from test;
- count
----------------------------------------------------------------------
- 6
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT pushdown
-BEGIN;
-INSERT INTO test(x, y) SELECT x, y FROM test;
-SELECT count(*) from test;
- count
----------------------------------------------------------------------
- 10
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT analytical query
-BEGIN;
-INSERT INTO test(x, y) SELECT count(x), max(y) FROM test;
-SELECT count(*) from test;
- count
----------------------------------------------------------------------
- 6
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT repartition
-BEGIN;
-INSERT INTO test(x, y) SELECT y, x FROM test;
-SELECT count(*) from test;
- count
----------------------------------------------------------------------
- 10
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT from reference table into distributed
-BEGIN;
-INSERT INTO test(x, y) SELECT a, b FROM ref;
-SELECT count(*) from test;
- count
----------------------------------------------------------------------
- 8
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT from local table into distributed
-BEGIN;
-INSERT INTO test(x, y) SELECT c, d FROM local;
-SELECT count(*) from test;
- count
----------------------------------------------------------------------
- 8
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT from distributed table to reference table
-BEGIN;
-INSERT INTO ref(a, b) SELECT x, y FROM test;
-SELECT count(*) from ref;
- count
----------------------------------------------------------------------
- 8
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT from local table to reference table
-BEGIN;
-INSERT INTO ref(a, b) SELECT c, d FROM local;
-SELECT count(*) from ref;
- count
----------------------------------------------------------------------
- 6
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT from distributed table to local table
-BEGIN;
-INSERT INTO local(c, d) SELECT x, y FROM test;
-SELECT count(*) from local;
- count
----------------------------------------------------------------------
- 8
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT from reference table to local table
-BEGIN;
-INSERT INTO local(c, d) SELECT a, b FROM ref;
-SELECT count(*) from local;
- count
----------------------------------------------------------------------
- 6
-(1 row)
-
-ROLLBACK;
--- Confirm that dummy placements work
-SELECT count(*) FROM test WHERE false;
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
- count
----------------------------------------------------------------------
-(0 rows)
-
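--- (With WHERE false every shard is pruned away; Citus still plans the query
--- against a synthetic "dummy" placement, so plain aggregates keep returning
--- the usual zero-shard result shape seen above.)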
--- Confirm that they work with round-robin task assignment policy
-SET citus.task_assignment_policy TO 'round-robin';
-SELECT count(*) FROM test WHERE false;
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
- count
----------------------------------------------------------------------
-(0 rows)
-
-RESET citus.task_assignment_policy;
-SELECT count(*) FROM test;
- count
----------------------------------------------------------------------
- 5
-(1 row)
-
--- INSERT SELECT from distributed table to reference table
-BEGIN;
-INSERT INTO ref(a, b) SELECT x, y FROM test;
-SELECT count(*) from ref;
- count
----------------------------------------------------------------------
- 8
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT from local table to reference table
-BEGIN;
-INSERT INTO ref(a, b) SELECT c, d FROM local;
-SELECT count(*) from ref;
- count
----------------------------------------------------------------------
- 6
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT from distributed table to local table
-BEGIN;
-INSERT INTO local(c, d) SELECT x, y FROM test;
-SELECT count(*) from local;
- count
----------------------------------------------------------------------
- 8
-(1 row)
-
-ROLLBACK;
--- INSERT SELECT from reference table to local table
-BEGIN;
-INSERT INTO local(c, d) SELECT a, b FROM ref;
-SELECT count(*) from local;
- count
----------------------------------------------------------------------
- 6
-(1 row)
-
-ROLLBACK;
--- queries that fail on the shards should be handled
--- nicely
-SELECT x/0 FROM test;
-ERROR: division by zero
-CONTEXT: while executing command on localhost:xxxxx
--- Add "fake" pg_dist_transaction records and run recovery
--- to show that they are recovered
--- Temporarily disable automatic 2PC recovery
-ALTER SYSTEM SET citus.recover_2pc_interval TO -1;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-BEGIN;
-CREATE TABLE should_commit (value int);
-PREPARE TRANSACTION 'citus_0_should_commit';
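--- Not captured output: at this point the prepared transaction would be
--- visible in the standard pg_prepared_xacts catalog view, e.g.
--- SELECT gid FROM pg_prepared_xacts;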
--- zero is the coordinator's group id, so we can hard code it
-INSERT INTO pg_dist_transaction VALUES (0, 'citus_0_should_commit');
-SELECT recover_prepared_transactions();
- recover_prepared_transactions
----------------------------------------------------------------------
- 1
-(1 row)
-
--- the table should be visible
-SELECT * FROM should_commit;
- value
----------------------------------------------------------------------
-(0 rows)
-
--- restore the original setting
-ALTER SYSTEM RESET citus.recover_2pc_interval;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-RESET citus.task_executor_type;
--- make sure undistribute table works fine
-ALTER TABLE test DROP CONSTRAINT foreign_key;
-SELECT undistribute_table('test_2');
-NOTICE: creating a new table for single_node.test_2
-NOTICE: moving the data of single_node.test_2
-NOTICE: dropping the old single_node.test_2
-NOTICE: renaming the new table to single_node.test_2
- undistribute_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT * FROM pg_dist_partition WHERE logicalrelid = 'test_2'::regclass;
- logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
----------------------------------------------------------------------
-(0 rows)
-
-CREATE TABLE reference_table_1 (col_1 INT UNIQUE, col_2 INT UNIQUE, UNIQUE (col_2, col_1));
-SELECT create_reference_table('reference_table_1');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE distributed_table_1 (col_1 INT UNIQUE);
-SELECT create_distributed_table('distributed_table_1', 'col_1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE citus_local_table_1 (col_1 INT UNIQUE);
-SELECT citus_add_local_table_to_metadata('citus_local_table_1');
- citus_add_local_table_to_metadata
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE partitioned_table_1 (col_1 INT UNIQUE, col_2 INT) PARTITION BY RANGE (col_1);
-CREATE TABLE partitioned_table_1_100_200 PARTITION OF partitioned_table_1 FOR VALUES FROM (100) TO (200);
-CREATE TABLE partitioned_table_1_200_300 PARTITION OF partitioned_table_1 FOR VALUES FROM (200) TO (300);
-SELECT create_distributed_table('partitioned_table_1', 'col_1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-ALTER TABLE citus_local_table_1 ADD CONSTRAINT fkey_1 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_2);
-ALTER TABLE reference_table_1 ADD CONSTRAINT fkey_2 FOREIGN KEY (col_2) REFERENCES reference_table_1(col_1);
-ALTER TABLE distributed_table_1 ADD CONSTRAINT fkey_3 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_1);
-ALTER TABLE citus_local_table_1 ADD CONSTRAINT fkey_4 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_2);
-ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_5 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_2);
-SELECT undistribute_table('partitioned_table_1', cascade_via_foreign_keys=>true);
-NOTICE: converting the partitions of single_node.partitioned_table_1
-NOTICE: creating a new table for single_node.partitioned_table_1
-NOTICE: dropping the old single_node.partitioned_table_1
-NOTICE: renaming the new table to single_node.partitioned_table_1
-NOTICE: creating a new table for single_node.reference_table_1
-NOTICE: moving the data of single_node.reference_table_1
-NOTICE: dropping the old single_node.reference_table_1
-NOTICE: renaming the new table to single_node.reference_table_1
-NOTICE: creating a new table for single_node.distributed_table_1
-NOTICE: moving the data of single_node.distributed_table_1
-NOTICE: dropping the old single_node.distributed_table_1
-NOTICE: renaming the new table to single_node.distributed_table_1
-NOTICE: creating a new table for single_node.citus_local_table_1
-NOTICE: moving the data of single_node.citus_local_table_1
-NOTICE: dropping the old single_node.citus_local_table_1
-NOTICE: renaming the new table to single_node.citus_local_table_1
-NOTICE: creating a new table for single_node.partitioned_table_1_100_200
-NOTICE: moving the data of single_node.partitioned_table_1_100_200
-NOTICE: dropping the old single_node.partitioned_table_1_100_200
-NOTICE: renaming the new table to single_node.partitioned_table_1_100_200
-NOTICE: creating a new table for single_node.partitioned_table_1_200_300
-NOTICE: moving the data of single_node.partitioned_table_1_200_300
-NOTICE: dropping the old single_node.partitioned_table_1_200_300
-NOTICE: renaming the new table to single_node.partitioned_table_1_200_300
- undistribute_table
----------------------------------------------------------------------
-
-(1 row)
-
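--- (cascade_via_foreign_keys=>true converts every table reachable through
--- the foreign-key graph in one call, which is why all of the relations
--- above, partitions included, get rewritten.)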
-CREATE TABLE local_table_1 (col_1 INT UNIQUE);
-CREATE TABLE local_table_2 (col_1 INT UNIQUE);
-CREATE TABLE local_table_3 (col_1 INT UNIQUE);
-ALTER TABLE local_table_2 ADD CONSTRAINT fkey_6 FOREIGN KEY (col_1) REFERENCES local_table_1(col_1);
-ALTER TABLE local_table_3 ADD CONSTRAINT fkey_7 FOREIGN KEY (col_1) REFERENCES local_table_1(col_1);
-ALTER TABLE local_table_1 ADD CONSTRAINT fkey_8 FOREIGN KEY (col_1) REFERENCES local_table_1(col_1);
-SELECT citus_add_local_table_to_metadata('local_table_2', cascade_via_foreign_keys=>true);
- citus_add_local_table_to_metadata
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE PROCEDURE call_delegation(x int) LANGUAGE plpgsql AS $$
-BEGIN
- INSERT INTO test (x) VALUES ($1);
-END;$$;
-SELECT * FROM pg_dist_node;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
----------------------------------------------------------------------
- 5 | 0 | localhost | 57636 | default | t | t | primary | default | t | t
-(1 row)
-
-SELECT create_distributed_function('call_delegation(int)', '$1', 'test');
- create_distributed_function
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE FUNCTION function_delegation(int) RETURNS void AS $$
-BEGIN
-UPDATE test SET y = y + 1 WHERE x < $1;
-END;
-$$ LANGUAGE plpgsql;
-SELECT create_distributed_function('function_delegation(int)', '$1', 'test');
- create_distributed_function
----------------------------------------------------------------------
-
-(1 row)
-
-SET client_min_messages TO DEBUG1;
-CALL call_delegation(1);
-DEBUG: not pushing down procedure to the same node
-SELECT function_delegation(1);
-DEBUG: not pushing down function to the same node
- function_delegation
----------------------------------------------------------------------
-
-(1 row)
-
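--- (On a single node the coordinator is also the only worker, so delegating
--- the call would land on the same node; Citus notices this and simply runs
--- the routine locally, hence the DEBUG messages above.)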
-SET client_min_messages TO WARNING;
-DROP TABLE test CASCADE;
-CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_client_backend_count()
- RETURNS bigint
- LANGUAGE C STRICT
- AS 'citus', $$get_all_active_client_backend_count$$;
--- set the cached connections to zero
--- and execute a distributed query so that
--- we end up with zero cached connections afterwards
-ALTER SYSTEM SET citus.max_cached_conns_per_worker TO 0;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
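--- (citus.max_cached_conns_per_worker caps how many worker connections a
--- session keeps open between transactions; at 0 they are closed at
--- transaction end, which the backend counts below rely on.)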
--- disable deadlock detection and re-trigger 2PC recovery
--- once more when citus.max_cached_conns_per_worker is zero
--- so that we can be sure that the connections established for the
--- maintenance daemon are closed properly.
--- this is to prevent random failures in the tests (otherwise, we
--- might see connections established for these operations)
-ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1;
-ALTER SYSTEM SET citus.recover_2pc_interval TO '1ms';
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT pg_sleep(0.1);
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
--- now that the last 2PC recovery is done, we're good to disable it
-ALTER SYSTEM SET citus.recover_2pc_interval TO '-1';
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
--- test alter_distributed_table UDF
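--- (alter_distributed_table can change shard_count, distribution_column and
--- colocate_with; with cascade_to_colocated:=true a shard_count change is
--- applied to the whole colocation group, as exercised below.)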
-CREATE TABLE adt_table (a INT, b INT);
-CREATE TABLE adt_col (a INT UNIQUE, b INT);
-CREATE TABLE adt_ref (a INT REFERENCES adt_col(a));
-SELECT create_distributed_table('adt_table', 'a', colocate_with:='none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_distributed_table('adt_col', 'a', colocate_with:='adt_table');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_distributed_table('adt_ref', 'a', colocate_with:='adt_table');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO adt_table VALUES (1, 2), (3, 4), (5, 6);
-INSERT INTO adt_col VALUES (3, 4), (5, 6), (7, 8);
-INSERT INTO adt_ref VALUES (3), (5);
-SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'adt%';
- table_name | citus_table_type | distribution_column | shard_count
----------------------------------------------------------------------
- adt_col | distributed | a | 4
- adt_ref | distributed | a | 4
- adt_table | distributed | a | 4
-(3 rows)
-
-SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables WHERE table_name::text LIKE 'adt%' GROUP BY colocation_id ORDER BY 1;
- Colocation Groups
----------------------------------------------------------------------
- adt_col, adt_ref, adt_table
-(1 row)
-
-SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
- WHERE (conrelid::regclass::text = 'adt_col' OR confrelid::regclass::text = 'adt_col') ORDER BY 1;
- Referencing Table | Definition
----------------------------------------------------------------------
- adt_col | UNIQUE (a)
- adt_ref | FOREIGN KEY (a) REFERENCES adt_col(a)
-(2 rows)
-
-SELECT alter_distributed_table('adt_table', shard_count:=6, cascade_to_colocated:=true);
- alter_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'adt%';
- table_name | citus_table_type | distribution_column | shard_count
----------------------------------------------------------------------
- adt_col | distributed | a | 6
- adt_ref | distributed | a | 6
- adt_table | distributed | a | 6
-(3 rows)
-
-SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables WHERE table_name::text LIKE 'adt%' GROUP BY colocation_id ORDER BY 1;
- Colocation Groups
----------------------------------------------------------------------
- adt_col, adt_ref, adt_table
-(1 row)
-
-SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
- WHERE (conrelid::regclass::text = 'adt_col' OR confrelid::regclass::text = 'adt_col') ORDER BY 1;
- Referencing Table | Definition
----------------------------------------------------------------------
- adt_col | UNIQUE (a)
- adt_ref | FOREIGN KEY (a) REFERENCES adt_col(a)
-(2 rows)
-
-SELECT alter_distributed_table('adt_table', distribution_column:='b', colocate_with:='none');
- alter_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'adt%';
- table_name | citus_table_type | distribution_column | shard_count
----------------------------------------------------------------------
- adt_col | distributed | a | 6
- adt_ref | distributed | a | 6
- adt_table | distributed | b | 6
-(3 rows)
-
-SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables WHERE table_name::text LIKE 'adt%' GROUP BY colocation_id ORDER BY 1;
- Colocation Groups
----------------------------------------------------------------------
- adt_col, adt_ref
- adt_table
-(2 rows)
-
-SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
- WHERE (conrelid::regclass::text = 'adt_col' OR confrelid::regclass::text = 'adt_col') ORDER BY 1;
- Referencing Table | Definition
----------------------------------------------------------------------
- adt_col | UNIQUE (a)
- adt_ref | FOREIGN KEY (a) REFERENCES adt_col(a)
-(2 rows)
-
-SELECT * FROM adt_table ORDER BY 1;
- a | b
----------------------------------------------------------------------
- 1 | 2
- 3 | 4
- 5 | 6
-(3 rows)
-
-SELECT * FROM adt_col ORDER BY 1;
- a | b
----------------------------------------------------------------------
- 3 | 4
- 5 | 6
- 7 | 8
-(3 rows)
-
-SELECT * FROM adt_ref ORDER BY 1;
- a
----------------------------------------------------------------------
- 3
- 5
-(2 rows)
-
--- make sure that COPY (e.g., INSERT .. SELECT) and
--- alter_distributed_table work in the same TX
-BEGIN;
-SET LOCAL citus.enable_local_execution=OFF;
-INSERT INTO adt_table SELECT x, x+1 FROM generate_series(1, 1000) x;
-SELECT alter_distributed_table('adt_table', distribution_column:='a');
- alter_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-ROLLBACK;
-BEGIN;
-INSERT INTO adt_table SELECT x, x+1 FROM generate_series(1, 1000) x;
-SELECT alter_distributed_table('adt_table', distribution_column:='a');
- alter_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT COUNT(*) FROM adt_table;
- count
----------------------------------------------------------------------
- 1003
-(1 row)
-
-END;
-SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text = 'adt_table';
- table_name | citus_table_type | distribution_column | shard_count
----------------------------------------------------------------------
- adt_table | distributed | a | 6
-(1 row)
-
-\c - - - :master_port
--- sometimes Postgres is a little slow to terminate the backends
--- even if PQfinish is sent. So, to prevent any flaky tests, sleep
-SELECT pg_sleep(0.1);
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
--- since max_cached_conns_per_worker == 0 at this point, the
--- backend(s) that execute on the shards will be terminated
--- so show that there are no internal backends
-SET search_path TO single_node;
-SET citus.next_shard_id TO 90730500;
-SELECT count(*) from should_commit;
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'citus_internal%';
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-SELECT get_all_active_client_backend_count();
- get_all_active_client_backend_count
----------------------------------------------------------------------
- 1
-(1 row)
-
-BEGIN;
- SET LOCAL citus.shard_count TO 32;
- SET LOCAL citus.force_max_query_parallelization TO ON;
- SET LOCAL citus.enable_local_execution TO false;
- CREATE TABLE test (a int);
- SET citus.shard_replication_factor TO 1;
- SELECT create_distributed_table('test', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
- SELECT count(*) FROM test;
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
- -- now, we should have 32 additional connections
- SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'citus_internal%';
- count
----------------------------------------------------------------------
- 32
-(1 row)
-
- -- single external connection
- SELECT get_all_active_client_backend_count();
- get_all_active_client_backend_count
----------------------------------------------------------------------
- 1
-(1 row)
-
-ROLLBACK;
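--- (citus.force_max_query_parallelization opens one connection per shard;
--- with citus.shard_count set to 32 above, that accounts exactly for the 32
--- internal backends counted inside the transaction.)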
-\c - - - :master_port
-SET search_path TO single_node;
-SET citus.next_shard_id TO 90830500;
--- simulate that even if there are no connection slots
--- available to connect, Citus can switch to local execution
-SET citus.force_max_query_parallelization TO false;
-SET citus.log_remote_commands TO ON;
-ALTER SYSTEM SET citus.local_shared_pool_size TO -1;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT pg_sleep(0.1);
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
-SET citus.executor_slow_start_interval TO 10;
-SELECT count(*) from another_schema_table;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630515 another_schema_table WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630516 another_schema_table WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630517 another_schema_table WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630518 another_schema_table WHERE true
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-UPDATE another_schema_table SET b = b;
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = b
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = b
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = b
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = b
--- INSERT .. SELECT pushdown and INSERT .. SELECT via repartitioning
--- note that we ignore INSERT .. SELECT via the coordinator as it relies on
--- the COPY command
-INSERT INTO another_schema_table SELECT * FROM another_schema_table;
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE (a IS NOT NULL)
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE (a IS NOT NULL)
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE (a IS NOT NULL)
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE (a IS NOT NULL)
-INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table;
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630515_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630515_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630516_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630516_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630517_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630517_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630518_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630518_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
--- multi-row INSERTs
-INSERT INTO another_schema_table VALUES (1,1), (2,2), (3,3), (4,4), (5,5),(6,6),(7,7);
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) VALUES (1,1), (5,5)
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) VALUES (3,3), (4,4), (7,7)
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) VALUES (6,6)
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) VALUES (2,2)
--- INSERT..SELECT with re-partitioning when using local execution
-BEGIN;
-INSERT INTO another_schema_table VALUES (1,100);
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 (a, b) VALUES (1, 100)
-INSERT INTO another_schema_table VALUES (2,100);
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 (a, b) VALUES (2, 100)
-INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table;
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630515_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630515_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630516_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630516_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630517_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630517_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630518_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630518_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630516_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_2,repartitioned_results_xxxxx_from_90630517_to_2,repartitioned_results_xxxxx_from_90630518_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630518_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer)
-SELECT * FROM another_schema_table WHERE a = 100 ORDER BY b;
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 100) ORDER BY b
- a | b
----------------------------------------------------------------------
- 100 | 1
- 100 | 2
-(2 rows)
-
-ROLLBACK;
--- intermediate results
-WITH cte_1 AS (SELECT * FROM another_schema_table LIMIT 1000)
- SELECT count(*) FROM cte_1;
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '1000'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '1000'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '1000'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '1000'::bigint
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1
- count
----------------------------------------------------------------------
- 7
-(1 row)
-
--- this is to get ready for the next tests
-TRUNCATE another_schema_table;
-NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE
-NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE
-NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE
-NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE
--- copy can use local execution even if there is no connection available
-COPY another_schema_table(a) FROM PROGRAM 'seq 32';
-NOTICE: executing the copy locally for shard xxxxx
-CONTEXT: COPY another_schema_table, line 1: "1"
-NOTICE: executing the copy locally for shard xxxxx
-CONTEXT: COPY another_schema_table, line 2: "2"
-NOTICE: executing the copy locally for shard xxxxx
-CONTEXT: COPY another_schema_table, line 3: "3"
-NOTICE: executing the copy locally for shard xxxxx
-CONTEXT: COPY another_schema_table, line 6: "6"
--- INSERT .. SELECT with co-located intermediate results
-SET citus.log_remote_commands to false;
-CREATE UNIQUE INDEX another_schema_table_pk ON another_schema_table(a);
-SET citus.log_local_commands to true;
-INSERT INTO another_schema_table SELECT * FROM another_schema_table LIMIT 10000 ON CONFLICT(a) DO NOTHING;
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '10000'::bigint
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING
-INSERT INTO another_schema_table SELECT * FROM another_schema_table ORDER BY a LIMIT 10 ON CONFLICT(a) DO UPDATE SET b = EXCLUDED.b + 1 RETURNING *;
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b
- a | b
----------------------------------------------------------------------
- 1 |
- 2 |
- 3 |
- 4 |
- 5 |
- 6 |
- 7 |
- 8 |
- 9 |
- 10 |
-(10 rows)
-
--- INSERT .. SELECT with co-located intermediate result for non-binary input
-WITH cte_1 AS
-(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING value)
-SELECT count(*) FROM cte_1;
-NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(value single_node.new_type)) cte_1
- count
----------------------------------------------------------------------
- 1001
-(1 row)
-
--- test with NULL columns
-ALTER TABLE non_binary_copy_test ADD COLUMN z INT;
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630519, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630520, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630521, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630522, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
-WITH cte_1 AS
-(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING z)
-SELECT bool_and(z is null) FROM cte_1;
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
-NOTICE: executing the command locally: SELECT bool_and((z IS NULL)) AS bool_and FROM (SELECT intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(z integer)) cte_1
- bool_and
----------------------------------------------------------------------
- t
-(1 row)
-
--- test with type coercion (int -> text) and also NULL values with coercion
-WITH cte_1 AS
-(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING key, z)
-SELECT count(DISTINCT key::text), count(DISTINCT z::text) FROM cte_1;
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z
-NOTICE: executing the command locally: SELECT count(DISTINCT (key)::text) AS count, count(DISTINCT (z)::text) AS count FROM (SELECT intermediate_result.key, intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, z integer)) cte_1
- count | count
----------------------------------------------------------------------
- 1001 | 0
-(1 row)
-
--- test disabling drop and truncate for known shards
-SET citus.shard_replication_factor TO 1;
-CREATE TABLE test_disabling_drop_and_truncate (a int);
-SELECT create_distributed_table('test_disabling_drop_and_truncate', 'a');
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830500, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830500, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830501, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830501, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830502, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830502, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830503, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830503, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres')
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SET citus.enable_manual_changes_to_shards TO off;
--- these should error out
-DROP TABLE test_disabling_drop_and_truncate_90830500;
-ERROR: cannot modify "test_disabling_drop_and_truncate_90830500" because it is a shard of a distributed table
-HINT: Use the distributed table or set citus.enable_manual_changes_to_shards to on to modify shards directly
-TRUNCATE TABLE test_disabling_drop_and_truncate_90830500;
-ERROR: cannot modify "test_disabling_drop_and_truncate_90830500" because it is a shard of a distributed table
-HINT: Use the distributed table or set citus.enable_manual_changes_to_shards to on to modify shards directly
-RESET citus.enable_manual_changes_to_shards;
--- these should work as expected
-TRUNCATE TABLE test_disabling_drop_and_truncate_90830500;
-DROP TABLE test_disabling_drop_and_truncate_90830500;
-DROP TABLE test_disabling_drop_and_truncate;
--- test creating distributed or reference tables from shards
-CREATE TABLE test_creating_distributed_relation_table_from_shard (a int);
-SELECT create_distributed_table('test_creating_distributed_relation_table_from_shard', 'a');
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830504, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830504, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830505, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830505, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830506, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830506, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830507, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830507, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres')
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- these should error because shards cannot be used to:
--- create distributed table
-SELECT create_distributed_table('test_creating_distributed_relation_table_from_shard_90830504', 'a');
-ERROR: relation "test_creating_distributed_relation_table_from_shard_90830504" is a shard relation
--- create reference table
-SELECT create_reference_table('test_creating_distributed_relation_table_from_shard_90830504');
-ERROR: relation "test_creating_distributed_relation_table_from_shard_90830504" is a shard relation
-RESET citus.shard_replication_factor;
-DROP TABLE test_creating_distributed_relation_table_from_shard;
--- let's flush the copy often to make sure everything is fine
-SET citus.local_copy_flush_threshold TO 1;
-TRUNCATE another_schema_table;
-NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE
-NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE
-NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE
-NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE
-INSERT INTO another_schema_table(a) SELECT i from generate_Series(0,10000)i;
-NOTICE: executing the copy locally for shard xxxxx
-NOTICE: executing the copy locally for shard xxxxx
-NOTICE: executing the copy locally for shard xxxxx
-NOTICE: executing the copy locally for shard xxxxx
-WITH cte_1 AS
-(INSERT INTO another_schema_table SELECT * FROM another_schema_table ORDER BY a LIMIT 10000 ON CONFLICT(a) DO NOTHING RETURNING *)
-SELECT count(*) FROM cte_1;
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
-NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-WITH cte_1 AS
-(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING z)
-SELECT bool_and(z is null) FROM cte_1;
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
-NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z
-NOTICE: executing the command locally: SELECT bool_and((z IS NULL)) AS bool_and FROM (SELECT intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(z integer)) cte_1
- bool_and
----------------------------------------------------------------------
- t
-(1 row)
-
-RESET citus.local_copy_flush_threshold;
-CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC()
-RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus',
-$$coordinated_transaction_should_use_2PC$$;
--- a multi-shard/single-shard select that is failed over to local
--- execution doesn't start a 2PC
-BEGIN;
- SELECT count(*) FROM another_schema_table;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630515 another_schema_table WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630516 another_schema_table WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630517 another_schema_table WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630518 another_schema_table WHERE true
- count
----------------------------------------------------------------------
- 10001
-(1 row)
-
- SELECT count(*) FROM another_schema_table WHERE a = 1;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630515 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 1)
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
- WITH cte_1 as (SELECT * FROM another_schema_table LIMIT 10)
- SELECT count(*) FROM cte_1;
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '10'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '10'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '10'::bigint
-NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '10'::bigint
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1
- count
----------------------------------------------------------------------
- 10
-(1 row)
-
- WITH cte_1 as (SELECT * FROM another_schema_table WHERE a = 1 LIMIT 10)
- SELECT count(*) FROM cte_1;
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT another_schema_table.a, another_schema_table.b FROM single_node.another_schema_table_90630515 another_schema_table WHERE (another_schema_table.a OPERATOR(pg_catalog.=) 1) LIMIT 10) cte_1
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
- SELECT coordinated_transaction_should_use_2PC();
- coordinated_transaction_should_use_2pc
----------------------------------------------------------------------
- f
-(1 row)
-
-ROLLBACK;
--- same without a transaction block
-WITH cte_1 AS (SELECT count(*) as cnt FROM another_schema_table LIMIT 1000),
- cte_2 AS (SELECT coordinated_transaction_should_use_2PC() as enabled_2pc)
-SELECT cnt, enabled_2pc FROM cte_1, cte_2;
-NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '1000'::bigint
-NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '1000'::bigint
-NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '1000'::bigint
-NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '1000'::bigint
-NOTICE: executing the command locally: SELECT cte_1.cnt, cte_2.enabled_2pc FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) cte_1, (SELECT intermediate_result.enabled_2pc FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(enabled_2pc boolean)) cte_2
- cnt | enabled_2pc
----------------------------------------------------------------------
- 10001 | f
-(1 row)
-
--- a multi-shard modification that is failed over to local
--- execution starts a 2PC
-BEGIN;
- UPDATE another_schema_table SET b = b + 1;
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1)
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1)
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1)
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1)
- SELECT coordinated_transaction_should_use_2PC();
- coordinated_transaction_should_use_2pc
----------------------------------------------------------------------
- t
-(1 row)
-
-ROLLBACK;
--- a multi-shard modification that is failed over to local
--- execution starts a 2PC
-BEGIN;
- WITH cte_1 AS (UPDATE another_schema_table SET b = b + 1 RETURNING *)
- SELECT count(*) FROM cte_1;
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1
- count
----------------------------------------------------------------------
- 10001
-(1 row)
-
- SELECT coordinated_transaction_should_use_2PC();
- coordinated_transaction_should_use_2pc
----------------------------------------------------------------------
- t
-(1 row)
-
-ROLLBACK;
--- same without transaction block
-WITH cte_1 AS (UPDATE another_schema_table SET b = b + 1 RETURNING *)
-SELECT coordinated_transaction_should_use_2PC();
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b
-NOTICE: executing the command locally: SELECT single_node.coordinated_transaction_should_use_2pc() AS coordinated_transaction_should_use_2pc
- coordinated_transaction_should_use_2pc
----------------------------------------------------------------------
- t
-(1 row)
-
--- a single-shard modification that is failed over to local
--- starts 2PC execution
-BEGIN;
- UPDATE another_schema_table SET b = b + 1 WHERE a = 1;
-NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) WHERE (a OPERATOR(pg_catalog.=) 1)
- SELECT coordinated_transaction_should_use_2PC();
- coordinated_transaction_should_use_2pc
----------------------------------------------------------------------
- t
-(1 row)
-
-ROLLBACK;
--- if the local execution is disabled, we cannot failover to
--- local execution and the queries would fail
-SET citus.enable_local_execution TO false;
-SELECT count(*) from another_schema_table;
-ERROR: the total number of connections on the server is more than max_connections(100)
-HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true;
-UPDATE another_schema_table SET b = b;
-ERROR: the total number of connections on the server is more than max_connections(100)
-HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true;
-INSERT INTO another_schema_table SELECT * FROM another_schema_table;
-ERROR: the total number of connections on the server is more than max_connections(100)
-HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true;
-INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table;
-ERROR: the total number of connections on the server is more than max_connections(100)
-HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true;
-WITH cte_1 AS (SELECT * FROM another_schema_table LIMIT 1000)
- SELECT count(*) FROM cte_1;
-ERROR: the total number of connections on the server is more than max_connections(100)
-HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true;
-INSERT INTO another_schema_table VALUES (1,1), (2,2), (3,3), (4,4), (5,5),(6,6),(7,7);
-ERROR: the total number of connections on the server is more than max_connections(100)
-HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true;
--- copy fails if local execution is disabled and there is no connection slot
-COPY another_schema_table(a) FROM PROGRAM 'seq 32';
-ERROR: could not find an available connection
-HINT: Set citus.max_shared_pool_size TO -1 to let COPY command finish
-CONTEXT: COPY another_schema_table, line 1: "1"
--- set the values to originals back
-ALTER SYSTEM RESET citus.max_cached_conns_per_worker;
-ALTER SYSTEM RESET citus.distributed_deadlock_detection_factor;
-ALTER SYSTEM RESET citus.recover_2pc_interval;
-ALTER SYSTEM RESET citus.local_shared_pool_size;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
--- suppress notices
-SET client_min_messages TO error;
--- cannot remove coordinator since a reference table exists on coordinator and no other worker nodes are added
-SELECT 1 FROM master_remove_node('localhost', :master_port);
-ERROR:  cannot remove or disable the node localhost:xxxxx because it contains the only shard placement for shard xxxxx
-DETAIL:  One of the table(s) that prevents the operation from completing successfully is single_node.ref
-HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
--- Cleanup
-DROP SCHEMA single_node CASCADE;
--- Remove the coordinator again
-SELECT 1 FROM master_remove_node('localhost', :master_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- restart nodeid sequence so that multi_cluster_management still has the same
--- nodeids
-ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 1;
diff --git a/src/test/regress/sql/create_drop_database_propagation_pg15.sql b/src/test/regress/sql/create_drop_database_propagation_pg15.sql
index 40d1b9e09..4f57f9112 100644
--- a/src/test/regress/sql/create_drop_database_propagation_pg15.sql
+++ b/src/test/regress/sql/create_drop_database_propagation_pg15.sql
@@ -1,14 +1,3 @@
---
--- PG15
---
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
-\gset
-\if :server_version_ge_15
-\else
-\q
-\endif
-
-- create/drop database for pg >= 15
set citus.enable_create_database_propagation=on;
diff --git a/src/test/regress/sql/merge_unsupported.sql b/src/test/regress/sql/merge_unsupported.sql
index ef95e01ea..9903fd6a5 100644
--- a/src/test/regress/sql/merge_unsupported.sql
+++ b/src/test/regress/sql/merge_unsupported.sql
@@ -1,18 +1,9 @@
-
-
SHOW server_version \gset
SELECT CASE
WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+'
WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16'
- WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14'
ELSE 'Unsupported version'
END AS version_category;
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
-\gset
-\if :server_version_ge_15
-\else
-\q
-\endif
--
-- MERGE test from PG community (adapted to Citus by converting all tables to Citus local)
diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql
index 408130fde..1c4841c6f 100644
--- a/src/test/regress/sql/multi_explain.sql
+++ b/src/test/regress/sql/multi_explain.sql
@@ -6,8 +6,13 @@
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
+-- This test file has an alternative output because of the following in PG18:
+-- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a
+-- The alternative output can be deleted when we drop support for PG17
+--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
+SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18;
SET citus.next_shard_id TO 570000;
@@ -930,10 +935,10 @@ ROLLBACK;
-- test EXPLAIN ANALYZE with non-text output formats
BEGIN;
-EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
ROLLBACK;
-EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON, BUFFERS OFF) SELECT * FROM explain_pk;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) SELECT * FROM explain_pk;
BEGIN;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
diff --git a/src/test/regress/sql/pg17.sql b/src/test/regress/sql/pg17.sql
index b88cde1c1..57edc1571 100644
--- a/src/test/regress/sql/pg17.sql
+++ b/src/test/regress/sql/pg17.sql
@@ -1292,7 +1292,7 @@ SET citus.grep_remote_commands TO '%12242024%';
select public.explain_filter('explain (memory) select * from int8_tbl i8');
select public.explain_filter('explain (memory, analyze, buffers false) select * from int8_tbl i8');
select public.explain_filter('explain (memory, summary, format yaml) select * from int8_tbl i8');
-select public.explain_filter('explain (memory, analyze, buffers false, format json) select * from int8_tbl i8');
+select public.explain_filter('explain (memory, analyze, buffers false, format yaml) select * from int8_tbl i8');
prepare int8_query as select * from int8_tbl i8;
select public.explain_filter('explain (memory) execute int8_query');
diff --git a/src/test/regress/sql/pg18.sql b/src/test/regress/sql/pg18.sql
index af077bf4c..399d62f26 100644
--- a/src/test/regress/sql/pg18.sql
+++ b/src/test/regress/sql/pg18.sql
@@ -632,6 +632,828 @@ CREATE MATERIALIZED VIEW copytest_mv AS
SELECT create_distributed_table('copytest_mv', 'id');
-- After that, any command on the materialized view is outside Citus support.
+-- PG18: verify publish_generated_columns is preserved for distributed tables
+-- https://github.com/postgres/postgres/commit/7054186c4
+\c - - - :master_port
+CREATE SCHEMA pg18_publication;
+SET search_path TO pg18_publication;
+
+-- table with a stored generated column
+CREATE TABLE gen_pub_tab (
+ id int primary key,
+ a int,
+ b int GENERATED ALWAYS AS (a * 10) STORED
+);
+
+-- make it distributed so CREATE PUBLICATION goes through Citus metadata/DDL path
+SELECT create_distributed_table('gen_pub_tab', 'id', colocate_with := 'none');
+
+-- publication using the new PG18 option: stored
+CREATE PUBLICATION pub_gen_cols_stored
+ FOR TABLE gen_pub_tab
+ WITH (publish = 'insert, update', publish_generated_columns = stored);
+
+-- second publication explicitly using "none" for completeness
+CREATE PUBLICATION pub_gen_cols_none
+ FOR TABLE gen_pub_tab
+ WITH (publish = 'insert, update', publish_generated_columns = none);
+
+-- On coordinator: pubgencols must be 's' and 'n' respectively
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
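+
+-- Illustrative only (not something this test asserts): decode the
+-- single-letter pubgencols flag, which PG18 stores as 's' (stored)
+-- or 'n' (none).
+SELECT pubname,
+       CASE pubgencols WHEN 's' THEN 'stored' WHEN 'n' THEN 'none' END
+           AS publish_generated_columns
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;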
+
+-- On worker 1: both publications must exist and keep pubgencols in sync
+\c - - - :worker_1_port
+SET search_path TO pg18_publication;
+
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+
+-- On worker 2: same check
+\c - - - :worker_2_port
+SET search_path TO pg18_publication;
+
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+
+-- Now verify ALTER PUBLICATION .. SET (publish_generated_columns = none)
+-- propagates to workers as well.
+
+\c - - - :master_port
+SET search_path TO pg18_publication;
+
+ALTER PUBLICATION pub_gen_cols_stored
+ SET (publish_generated_columns = none);
+
+-- coordinator: both publications should now have pubgencols = 'n'
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+
+-- worker 1: pubgencols must match coordinator
+\c - - - :worker_1_port
+SET search_path TO pg18_publication;
+
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+
+-- worker 2: same check
+\c - - - :worker_2_port
+SET search_path TO pg18_publication;
+
+SELECT pubname, pubgencols
+FROM pg_publication
+WHERE pubname IN ('pub_gen_cols_stored', 'pub_gen_cols_none')
+ORDER BY pubname;
+
+-- Column list precedence test: Citus must preserve both prattrs and pubgencols
+
+\c - - - :master_port
+SET search_path TO pg18_publication;
+
+-- Case 1: column list explicitly includes the generated column, flag = none
+CREATE PUBLICATION pub_gen_cols_list_includes_b
+ FOR TABLE gen_pub_tab (id, a, b)
+ WITH (publish_generated_columns = none);
+
+-- Case 2: column list excludes the generated column, flag = stored
+CREATE PUBLICATION pub_gen_cols_list_excludes_b
+ FOR TABLE gen_pub_tab (id, a)
+ WITH (publish_generated_columns = stored);
+
+-- Helper: show pubname, pubgencols, and column list (prattrs) for gen_pub_tab
+SELECT p.pubname,
+ p.pubgencols,
+ r.prattrs
+FROM pg_publication p
+JOIN pg_publication_rel r ON p.oid = r.prpubid
+JOIN pg_class c ON c.oid = r.prrelid
+WHERE p.pubname IN ('pub_gen_cols_list_includes_b',
+ 'pub_gen_cols_list_excludes_b')
+ AND c.relname = 'gen_pub_tab'
+ORDER BY p.pubname;
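+
+-- Illustrative only: resolve the prattrs attribute numbers to column names,
+-- so the two column lists above are human readable.
+SELECT p.pubname, a.attname
+FROM pg_publication p
+JOIN pg_publication_rel r ON p.oid = r.prpubid
+JOIN pg_attribute a ON a.attrelid = r.prrelid AND a.attnum = ANY (r.prattrs)
+WHERE p.pubname IN ('pub_gen_cols_list_includes_b',
+                    'pub_gen_cols_list_excludes_b')
+ORDER BY p.pubname, a.attnum;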
+
+-- worker 1: must see the same pubgencols + prattrs
+\c - - - :worker_1_port
+SET search_path TO pg18_publication;
+
+SELECT p.pubname,
+ p.pubgencols,
+ r.prattrs
+FROM pg_publication p
+JOIN pg_publication_rel r ON p.oid = r.prpubid
+JOIN pg_class c ON c.oid = r.prrelid
+WHERE p.pubname IN ('pub_gen_cols_list_includes_b',
+ 'pub_gen_cols_list_excludes_b')
+ AND c.relname = 'gen_pub_tab'
+ORDER BY p.pubname;
+
+-- worker 2: same check
+\c - - - :worker_2_port
+SET search_path TO pg18_publication;
+
+SELECT p.pubname,
+ p.pubgencols,
+ r.prattrs
+FROM pg_publication p
+JOIN pg_publication_rel r ON p.oid = r.prpubid
+JOIN pg_class c ON c.oid = r.prrelid
+WHERE p.pubname IN ('pub_gen_cols_list_includes_b',
+ 'pub_gen_cols_list_excludes_b')
+ AND c.relname = 'gen_pub_tab'
+ORDER BY p.pubname;
+
+-- back to coordinator for subsequent tests / cleanup
+\c - - - :master_port
+SET search_path TO pg18_publication;
+DROP PUBLICATION pub_gen_cols_stored;
+DROP PUBLICATION pub_gen_cols_none;
+DROP PUBLICATION pub_gen_cols_list_includes_b;
+DROP PUBLICATION pub_gen_cols_list_excludes_b;
+DROP SCHEMA pg18_publication CASCADE;
+SET search_path TO pg18_nn;
+-- END: PG18: verify publish_generated_columns is preserved for distributed tables
+
+-- PG18 Feature: FOREIGN KEY constraints can be specified as NOT ENFORCED
+-- PG18 commit: https://github.com/postgres/postgres/commit/eec0040c4
+CREATE TABLE customers(
+ customer_id INT GENERATED ALWAYS AS IDENTITY,
+ customer_name VARCHAR(255) NOT NULL,
+ PRIMARY KEY(customer_id)
+);
+
+SET citus.shard_replication_factor TO 1;
+
+SELECT create_distributed_table('customers', 'customer_id');
+
+CREATE TABLE contacts(
+ contact_id INT GENERATED ALWAYS AS IDENTITY,
+ customer_id INT,
+ contact_name VARCHAR(255) NOT NULL,
+ phone VARCHAR(15),
+ email VARCHAR(100),
+ CONSTRAINT fk_customer
+ FOREIGN KEY(customer_id)
+ REFERENCES customers(customer_id)
+ ON DELETE CASCADE NOT ENFORCED
+);
+
+-- The foreign key constraint is propagated to worker nodes.
+SELECT create_distributed_table('contacts', 'customer_id');
+
+SELECT pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
+WHERE conrelid = 'contacts'::regclass AND conname = 'fk_customer';
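+
+-- Illustrative only: the propagated definition can also be inspected on the
+-- workers with run_command_on_workers(); shard-level constraints carry a
+-- shard id suffix, hence the LIKE pattern.
+SELECT run_command_on_workers(
+  $$SELECT pg_get_constraintdef(oid, true) FROM pg_constraint
+    WHERE conname LIKE 'fk_customer%' LIMIT 1$$);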
+
+INSERT INTO customers(customer_name)
+VALUES('BlueBird Inc'),
+ ('Dolphin LLC');
+
+INSERT INTO contacts(customer_id, contact_name, phone, email)
+VALUES(1,'John Doe','(408)-111-1234','john.doe@example.com'),
+ (1,'Jane Doe','(408)-111-1235','jane.doe@example.com'),
+ (2,'David Wright','(408)-222-1234','david.wright@example.com');
+
+DELETE FROM customers WHERE customer_name = 'Dolphin LLC';
+
+-- After deleting 'Dolphin LLC' from customers, the corresponding contact
+-- 'David Wright' is not deleted from contacts, due to the NOT ENFORCED
+-- foreign key.
+SELECT * FROM contacts ORDER BY contact_id;
+
+-- Test that ALTER TABLE .. ADD CONSTRAINT .. FOREIGN KEY .. NOT ENFORCED
+-- is propagated to worker nodes. First drop the foreign key:
+ALTER TABLE contacts DROP CONSTRAINT fk_customer;
+
+SELECT pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
+WHERE conrelid = 'contacts'::regclass AND conname = 'fk_customer';
+
+-- Now add the foreign key constraint back with NOT ENFORCED.
+ALTER TABLE contacts ADD CONSTRAINT fk_customer
+ FOREIGN KEY(customer_id)
+ REFERENCES customers(customer_id)
+ ON DELETE CASCADE NOT ENFORCED;
+
+-- The foreign key is propagated to worker nodes.
+SELECT pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
+WHERE conrelid = 'contacts'::regclass AND conname = 'fk_customer';
+
+DELETE FROM customers WHERE customer_name = 'BlueBird Inc';
+
+-- The customers table is now empty, but the contacts table still contains
+-- all of its rows due to the NOT ENFORCED foreign key.
+SELECT * FROM customers ORDER BY customer_id;
+SELECT * FROM contacts ORDER BY contact_id;
+
+-- ALTER TABLE .. ALTER CONSTRAINT is not supported in Citus,
+-- so the following command should fail
+ALTER TABLE contacts ALTER CONSTRAINT fk_customer ENFORCED;
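+
+-- A workaround sketch for the unsupported command above, left as a comment
+-- because re-validating would fail on the orphaned contacts rows: drop the
+-- constraint and re-add it as ENFORCED (the default), both of which Citus
+-- propagates:
+--   ALTER TABLE contacts DROP CONSTRAINT fk_customer;
+--   ALTER TABLE contacts ADD CONSTRAINT fk_customer
+--     FOREIGN KEY (customer_id) REFERENCES customers(customer_id)
+--     ON DELETE CASCADE;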
+
+-- PG18 Feature: ENFORCED / NOT ENFORCED check constraints
+-- PG18 commit: https://github.com/postgres/postgres/commit/ca87c415e
+
+-- In Citus, CHECK constraints are propagated on promoting a postgres table
+-- to a citus table, on adding a new CHECK constraint to a citus table, and
+-- on adding a node to a citus cluster. Postgres does not support altering a
+-- check constraint's enforcement status, so Citus does not either.
+
+CREATE TABLE NE_CHECK_TBL (x int, y int,
+ CONSTRAINT CHECK_X CHECK (x > 3) NOT ENFORCED,
+ CONSTRAINT CHECK_Y CHECK (y < 20) ENFORCED
+);
+
+SET citus.next_shard_id TO 4754044;
+SELECT create_distributed_table('ne_check_tbl', 'x');
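+
+-- Illustrative only: PG18 records the enforcement status in
+-- pg_constraint.conenforced (true = ENFORCED, false = NOT ENFORCED).
+SELECT conname, conenforced FROM pg_constraint
+WHERE conrelid = 'ne_check_tbl'::regclass AND contype = 'c'
+ORDER BY conname;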
+
+-- CHECK_X is NOT ENFORCED, so these inserts should succeed
+INSERT INTO NE_CHECK_TBL (x) VALUES (5), (4), (3), (2), (6), (1);
+SELECT x FROM NE_CHECK_TBL ORDER BY x;
+
+-- CHECK_Y is ENFORCED, so this insert should fail
+INSERT INTO NE_CHECK_TBL (x, y) VALUES (1, 15), (2, 25), (3, 10), (4, 30);
+
+-- Test adding new constraints with enforcement status
+ALTER TABLE NE_CHECK_TBL
+ ADD CONSTRAINT CHECK_Y2 CHECK (y > 10) NOT ENFORCED;
+
+-- CHECK_Y2 is NOT ENFORCED, so these inserts should succeed
+INSERT INTO NE_CHECK_TBL (x, y) VALUES (1, 8), (2, 9), (3, 10), (4, 11);
+SELECT x, y FROM NE_CHECK_TBL ORDER BY x, y;
+
+ALTER TABLE NE_CHECK_TBL
+ ADD CONSTRAINT CHECK_X2 CHECK (x < 10) ENFORCED;
+
+-- CHECK_X2 is ENFORCED, so these inserts should fail
+INSERT INTO NE_CHECK_TBL (x) VALUES (5), (15), (8), (12);
+
+-- PG18 Feature: Generated Virtual Columns
+-- PG18 commit: https://github.com/postgres/postgres/commit/83ea6c540
+
+-- Verify that generated virtual columns are supported on distributed tables.
+CREATE TABLE v_reading (
+ celsius DECIMAL(5,2),
+ farenheit DECIMAL(6, 2) GENERATED ALWAYS AS (celsius * 9/5 + 32) VIRTUAL,
+ created_at TIMESTAMPTZ DEFAULT now(),
+ device_id INT
+);
+
+-- The restriction that the distribution column cannot be a generated
+-- column (#4616) also applies to VIRTUAL columns.
+SELECT create_distributed_table('v_reading', 'farenheit');
+
+SELECT create_distributed_table('v_reading', 'device_id');
+
+INSERT INTO v_reading (celsius, device_id) VALUES (0, 1), (100, 1), (37.5, 2), (25, 2), (-40, 3);
+
+SELECT device_id, celsius, farenheit FROM v_reading ORDER BY device_id;
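+
+-- Illustrative only: as with STORED generated columns, a VIRTUAL column
+-- cannot be written directly, so this insert should error out.
+INSERT INTO v_reading (celsius, farenheit, device_id) VALUES (10, 50, 4);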
+
+ALTER TABLE v_reading ADD COLUMN kelvin DECIMAL(6, 2) GENERATED ALWAYS AS (celsius + 273.15) VIRTUAL;
+SELECT device_id, celsius, kelvin FROM v_reading ORDER BY device_id, celsius;
+
+-- Show all columns that are generated
+SELECT s.relname, a.attname, a.attgenerated
+FROM pg_class s
+JOIN pg_attribute a ON a.attrelid = s.oid
+WHERE s.relname LIKE 'v_reading%' AND a.attgenerated::int != 0
+ORDER BY 1,2;
+
+-- Generated columns are virtual by default in PG18 - repeat the test without
+-- the VIRTUAL keyword
+CREATE TABLE d_reading (
+ celsius DECIMAL(5,2),
+ farenheit DECIMAL(6, 2) GENERATED ALWAYS AS (celsius * 9/5 + 32),
+ created_at TIMESTAMPTZ DEFAULT now(),
+ device_id INT
+);
+
+SELECT create_distributed_table('d_reading', 'farenheit');
+
+SELECT create_distributed_table('d_reading', 'device_id');
+
+INSERT INTO d_reading (celsius, device_id) VALUES (0, 1), (100, 1), (37.5, 2), (25, 2), (-40, 3);
+
+SELECT device_id, celsius, farenheit FROM d_reading ORDER BY device_id;
+
+ALTER TABLE d_reading ADD COLUMN kelvin DECIMAL(6, 2) GENERATED ALWAYS AS (celsius + 273.15) VIRTUAL;
+SELECT device_id, celsius, kelvin FROM d_reading ORDER BY device_id, celsius;
+
+-- Show all columns that are generated
+SELECT s.relname, a.attname, a.attgenerated
+FROM pg_class s
+JOIN pg_attribute a ON a.attrelid = s.oid
+WHERE s.relname LIKE 'd_reading%' AND a.attgenerated::int != 0
+ORDER BY 1, 2;
+
+-- COPY implementation needs to handle GENERATED ALWAYS AS (...) VIRTUAL columns.
+\COPY d_reading FROM STDIN WITH DELIMITER ','
+3.00,2025-11-24 09:46:17.390872+00,1
+6.00,2025-11-24 09:46:17.390872+00,5
+2.00,2025-11-24 09:46:17.390872+00,1
+22.00,2025-11-24 09:46:17.390872+00,5
+15.00,2025-11-24 09:46:17.390872+00,1
+13.00,2025-11-24 09:46:17.390872+00,5
+27.00,2025-11-24 09:46:17.390872+00,1
+14.00,2025-11-24 09:46:17.390872+00,5
+2.00,2025-11-24 09:46:17.390872+00,1
+23.00,2025-11-24 09:46:17.390872+00,5
+22.00,2025-11-24 09:46:17.390872+00,1
+3.00,2025-11-24 09:46:17.390872+00,5
+2.00,2025-11-24 09:46:17.390872+00,1
+7.00,2025-11-24 09:46:17.390872+00,5
+6.00,2025-11-24 09:46:17.390872+00,1
+21.00,2025-11-24 09:46:17.390872+00,5
+30.00,2025-11-24 09:46:17.390872+00,1
+1.00,2025-11-24 09:46:17.390872+00,5
+31.00,2025-11-24 09:46:17.390872+00,1
+22.00,2025-11-24 09:46:17.390872+00,5
+\.
+
+SELECT device_id, count(device_id) as count, round(avg(celsius), 2) as avg, min(farenheit), max(farenheit)
+FROM d_reading
+GROUP BY device_id
+ORDER BY count DESC;
+
+-- Test GROUP BY on tables with generated virtual columns - this requires
+-- special-case handling in distributed planning. Test it out on some
+-- queries involving joins and set operations.
+
+SELECT device_id, max(kelvin) as Kel
+FROM v_reading
+WHERE (device_id, celsius) NOT IN (SELECT device_id, max(celsius) FROM v_reading GROUP BY device_id)
+GROUP BY device_id
+ORDER BY device_id ASC;
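+
+-- A peek at the distributed plan for an aggregate over a virtual column
+-- (a sketch; COSTS OFF keeps the output stable, but the exact plan shape
+-- depends on configuration):
+EXPLAIN (COSTS OFF)
+SELECT device_id, max(kelvin) FROM v_reading GROUP BY device_id;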
+
+SELECT device_id, round(AVG( (d_farenheit + v_farenheit) / 2), 2) as Avg_Far
+FROM (SELECT *
+ FROM (SELECT device_id, round(AVG(farenheit),2) as d_farenheit
+ FROM d_reading
+ GROUP BY device_id) AS subq
+ RIGHT JOIN (SELECT device_id, MAX(farenheit) AS v_farenheit
+ FROM d_reading
+ GROUP BY device_id) AS subq2
+ USING (device_id)
+ ) AS finalq
+GROUP BY device_id
+ORDER BY device_id ASC;
+
+SELECT device_id, MAX(farenheit) as farenheit
+FROM
+((SELECT device_id, round(AVG(farenheit),2) as farenheit
+ FROM d_reading
+ GROUP BY device_id)
+UNION ALL (SELECT device_id, MAX(farenheit) AS farenheit
+ FROM d_reading
+ GROUP BY device_id) ) AS unioned
+GROUP BY device_id
+ORDER BY device_id ASC;
+
+SELECT device_id, MAX(farenheit) as farenheit
+FROM
+((SELECT device_id, round(AVG(farenheit),2) as farenheit
+ FROM d_reading
+ GROUP BY device_id)
+INTERSECT (SELECT device_id, MAX(farenheit) AS farenheit
+ FROM d_reading
+ GROUP BY device_id) ) AS intersected
+GROUP BY device_id
+ORDER BY device_id ASC;
+
+SELECT device_id, MAX(farenheit) as farenheit
+FROM
+((SELECT device_id, round(AVG(farenheit),2) as farenheit
+ FROM d_reading
+ GROUP BY device_id)
+EXCEPT (SELECT device_id, MAX(farenheit) AS farenheit
+ FROM d_reading
+ GROUP BY device_id) ) AS excepted
+GROUP BY device_id
+ORDER BY device_id ASC;
+
+-- Ensure that UDFs such as alter_distributed_table, undistribute_table
+-- and add_local_table_to_metadata work fine with VIRTUAL columns. For
+-- this, the changes from PR #4616 are extended to handle VIRTUAL columns
+-- in addition to STORED columns.
+
+CREATE TABLE generated_stored_dist (
+ col_1 int,
+ "col\'_2" text,
+ col_3 text generated always as (UPPER("col\'_2")) virtual
+);
+
+SELECT create_distributed_table('generated_stored_dist', 'col_1');
+
+INSERT INTO generated_stored_dist VALUES (1, 'text_1'), (2, 'text_2');
+SELECT * FROM generated_stored_dist ORDER BY 1,2,3;
+
+INSERT INTO generated_stored_dist VALUES (1, 'text_1'), (2, 'text_2');
+SELECT alter_distributed_table('generated_stored_dist', shard_count := 5, cascade_to_colocated := false);
+SELECT * FROM generated_stored_dist ORDER BY 1,2,3;
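+
+-- Verify the new shard count took effect; a sketch against Citus'
+-- pg_dist_shard metadata table (5 rows expected after the alter above):
+SELECT count(*) AS shard_count FROM pg_dist_shard
+WHERE logicalrelid = 'generated_stored_dist'::regclass;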
+
+CREATE TABLE generated_stored_local (
+ col_1 int,
+ "col\'_2" text,
+ col_3 text generated always as (UPPER("col\'_2")) stored
+);
+
+SELECT citus_add_local_table_to_metadata('generated_stored_local');
+
+INSERT INTO generated_stored_local VALUES (1, 'text_1'), (2, 'text_2');
+SELECT * FROM generated_stored_local ORDER BY 1,2,3;
+
+SELECT create_distributed_table('generated_stored_local', 'col_1');
+
+INSERT INTO generated_stored_local VALUES (1, 'text_1'), (2, 'text_2');
+SELECT * FROM generated_stored_local ORDER BY 1,2,3;
+
+CREATE TABLE generated_stored_ref (
+ col_1 int,
+ col_2 int,
+ col_3 int generated always as (col_1+col_2) virtual,
+ col_4 int,
+ col_5 int generated always as (col_4*2-col_1) virtual
+);
+
+SELECT create_reference_table('generated_stored_ref');
+
+INSERT INTO generated_stored_ref (col_1, col_4) VALUES (1,2), (11,12);
+INSERT INTO generated_stored_ref (col_1, col_2, col_4) VALUES (100,101,102), (200,201,202);
+
+SELECT * FROM generated_stored_ref ORDER BY 1,2,3,4,5;
+
+BEGIN;
+ SELECT undistribute_table('generated_stored_ref');
+ INSERT INTO generated_stored_ref (col_1, col_4) VALUES (11,12), (21,22);
+ INSERT INTO generated_stored_ref (col_1, col_2, col_4) VALUES (200,201,202), (300,301,302);
+ SELECT * FROM generated_stored_ref ORDER BY 1,2,3,4,5;
+ROLLBACK;
+
+BEGIN;
+	-- drop some of the columns that do not have "generated always as virtual"
+	-- expressions; CASCADE also drops the generated columns that depend on col_1
+ SET client_min_messages TO WARNING;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_1 CASCADE;
+ RESET client_min_messages;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_4;
+
+ -- show that undistribute_table works fine
+ SELECT undistribute_table('generated_stored_ref');
+ INSERT INTO generated_stored_ref VALUES (5);
+	SELECT * FROM generated_stored_ref ORDER BY 1;
+ROLLBACK;
+
+BEGIN;
+ -- now drop all columns
+ ALTER TABLE generated_stored_ref DROP COLUMN col_3;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_5;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_1;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_2;
+ ALTER TABLE generated_stored_ref DROP COLUMN col_4;
+
+ -- show that undistribute_table works fine
+ SELECT undistribute_table('generated_stored_ref');
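+
+	-- Confirm the table left Citus metadata; a sketch against the
+	-- pg_dist_partition catalog (0 rows expected after undistribute):
+	SELECT count(*) AS remaining_metadata FROM pg_dist_partition
+	WHERE logicalrelid = 'generated_stored_ref'::regclass;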
+
+ SELECT * FROM generated_stored_ref;
+ROLLBACK;
+
+-- PG18 Feature: VACUUM/ANALYZE support ONLY to limit processing to the parent.
+-- For Citus, ensure ONLY does not trigger shard propagation.
+-- PG18 commit: https://github.com/postgres/postgres/commit/62ddf7ee9
+CREATE SCHEMA pg18_vacuum_part;
+SET search_path TO pg18_vacuum_part;
+
+CREATE TABLE vac_analyze_only (a int);
+SELECT create_distributed_table('vac_analyze_only', 'a');
+INSERT INTO vac_analyze_only VALUES (1), (2), (3);
+
+-- ANALYZE (no ONLY) should recurse into shard placements
+ANALYZE vac_analyze_only;
+
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part;
+
+SELECT coalesce(max(last_analyze), 'epoch'::timestamptz) AS analyze_before_only
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part'
+ AND relname LIKE 'vac_analyze_only_%'
+\gset
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part;
+
+-- ANALYZE ONLY should not recurse into shard placements
+ANALYZE ONLY vac_analyze_only;
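+
+-- The coordinator's local shell relation should still be analyzed by the
+-- ONLY variant; a sketch relying on pg_stat_user_tables, like the
+-- worker-side checks (stats reporting can lag, so treat this as
+-- illustrative):
+SELECT last_analyze IS NOT NULL AS parent_analyzed_locally
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part' AND relname = 'vac_analyze_only';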
+
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part;
+
+SELECT max(last_analyze) = :'analyze_before_only'::timestamptz
+ AS analyze_only_skipped
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part'
+ AND relname LIKE 'vac_analyze_only_%';
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part;
+
+-- VACUUM (no ONLY) should recurse into shard placements
+VACUUM vac_analyze_only;
+
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part;
+
+SELECT coalesce(max(last_vacuum), 'epoch'::timestamptz) AS vacuum_before_only
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part'
+ AND relname LIKE 'vac_analyze_only_%'
+\gset
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part;
+
+-- VACUUM ONLY should not recurse into shard placements
+VACUUM ONLY vac_analyze_only;
+
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part;
+
+SELECT max(last_vacuum) = :'vacuum_before_only'::timestamptz
+ AS vacuum_only_skipped
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part'
+ AND relname LIKE 'vac_analyze_only_%';
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part;
+
+DROP SCHEMA pg18_vacuum_part CASCADE;
+SET search_path TO pg18_nn;
+
+-- END PG18 Feature: VACUUM/ANALYZE support ONLY to limit processing to the parent
+
+-- PG18 Feature: VACUUM/ANALYZE ONLY on a partitioned distributed table
+-- Ensure Citus does not recurse into shard placements when ONLY is used
+-- on the partitioned parent.
+-- PG18 commit: https://github.com/postgres/postgres/commit/62ddf7ee9
+CREATE SCHEMA pg18_vacuum_part_dist;
+SET search_path TO pg18_vacuum_part_dist;
+
+SET citus.shard_count = 2;
+SET citus.shard_replication_factor = 1;
+
+CREATE TABLE part_dist (id int, v int) PARTITION BY RANGE (id);
+CREATE TABLE part_dist_1 PARTITION OF part_dist FOR VALUES FROM (1) TO (100);
+CREATE TABLE part_dist_2 PARTITION OF part_dist FOR VALUES FROM (100) TO (200);
+
+SELECT create_distributed_table('part_dist', 'id');
+
+INSERT INTO part_dist
+SELECT g, g FROM generate_series(1, 199) g;
+
+-- ANALYZE (no ONLY) should recurse into partitions and shard placements
+ANALYZE part_dist;
+
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part_dist;
+
+SELECT coalesce(max(last_analyze), 'epoch'::timestamptz) AS analyze_before_only
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part_dist'
+ AND relname LIKE 'part_dist_%'
+\gset
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part_dist;
+
+-- ANALYZE ONLY should not recurse into shard placements
+ANALYZE ONLY part_dist;
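+
+-- ONLY on a partitioned parent still gathers inheritance statistics for
+-- the parent itself; a sketch on the coordinator (pg_stat reporting for
+-- partitioned parents can lag, so this is illustrative rather than a hard
+-- guarantee):
+SELECT last_analyze IS NOT NULL AS parent_analyzed_locally
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part_dist' AND relname = 'part_dist';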
+
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part_dist;
+
+SELECT max(last_analyze) = :'analyze_before_only'::timestamptz
+ AS analyze_only_partitioned_skipped
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part_dist'
+ AND relname LIKE 'part_dist_%';
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part_dist;
+
+-- VACUUM (no ONLY) should recurse into partitions and shard placements
+VACUUM part_dist;
+
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part_dist;
+
+SELECT coalesce(max(last_vacuum), 'epoch'::timestamptz) AS vacuum_before_only
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part_dist'
+ AND relname LIKE 'part_dist_%'
+\gset
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part_dist;
+
+-- VACUUM ONLY parent: core warns and does no work; Citus must not
+-- propagate to shard placements.
+VACUUM ONLY part_dist;
+
+\c - - - :worker_1_port
+SET search_path TO pg18_vacuum_part_dist;
+
+SELECT max(last_vacuum) = :'vacuum_before_only'::timestamptz
+ AS vacuum_only_partitioned_skipped
+FROM pg_stat_user_tables
+WHERE schemaname = 'pg18_vacuum_part_dist'
+ AND relname LIKE 'part_dist_%';
+
+\c - - - :master_port
+SET search_path TO pg18_vacuum_part_dist;
+
+DROP SCHEMA pg18_vacuum_part_dist CASCADE;
+SET search_path TO pg18_nn;
+
+-- END PG18 Feature: VACUUM/ANALYZE ONLY on partitioned distributed table
+
+-- PG18 Feature: string search functions with nondeterministic collations
+-- PG18 commit: https://github.com/postgres/postgres/commit/329304c90
+
+-- This section verifies that the upstream PG18 tests behave the same on
+-- Citus tables; Citus simply propagates the collation info and pushes the
+-- string search queries down to the worker shards.
+
+CREATE COLLATION ignore_accents (provider = icu, locale = '@colStrength=primary;colCaseLevel=yes', deterministic = false);
+-- deterministic and nondeterministic collations for comparison
+CREATE COLLATION ctest_det (provider = icu, locale = '', deterministic = true);
+CREATE COLLATION ctest_nondet (provider = icu, locale = '', deterministic = false);
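+
+-- Citus propagates CREATE COLLATION to the workers; a quick sketch using
+-- the run_command_on_workers helper to confirm the collations exist there:
+SELECT run_command_on_workers($$
+  SELECT count(*) FROM pg_collation
+  WHERE collname IN ('ignore_accents', 'ctest_det', 'ctest_nondet')
+$$);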
+
+CREATE TABLE strtest1 (a int, b text);
+SELECT create_distributed_table('strtest1', 'a');
+
+INSERT INTO strtest1 VALUES (1, U&'zy\00E4bc');
+INSERT INTO strtest1 VALUES (2, U&'zy\0061\0308bc');
+INSERT INTO strtest1 VALUES (3, U&'ab\00E4cd');
+INSERT INTO strtest1 VALUES (4, U&'ab\0061\0308cd');
+INSERT INTO strtest1 VALUES (5, U&'ab\00E4cd');
+INSERT INTO strtest1 VALUES (6, U&'ab\0061\0308cd');
+INSERT INTO strtest1 VALUES (7, U&'ab\00E4cd');
+
+SELECT * FROM strtest1 WHERE b = 'zyäbc' COLLATE ctest_det ORDER BY a;
+SELECT * FROM strtest1 WHERE b = 'zyäbc' COLLATE ctest_nondet ORDER BY a;
+
+SELECT strpos(b COLLATE ctest_det, 'bc') FROM strtest1 ORDER BY a;
+SELECT strpos(b COLLATE ctest_nondet, 'bc') FROM strtest1 ORDER BY a;
+
+SELECT replace(b COLLATE ctest_det, U&'\00E4b', 'X') FROM strtest1 ORDER BY a;
+SELECT replace(b COLLATE ctest_nondet, U&'\00E4b', 'X') FROM strtest1 ORDER BY a;
+
+SELECT a, split_part(b COLLATE ctest_det, U&'\00E4b', 2) FROM strtest1 ORDER BY a;
+SELECT a, split_part(b COLLATE ctest_nondet, U&'\00E4b', 2) FROM strtest1 ORDER BY a;
+SELECT a, split_part(b COLLATE ctest_det, U&'\00E4b', -1) FROM strtest1 ORDER BY a;
+SELECT a, split_part(b COLLATE ctest_nondet, U&'\00E4b', -1) FROM strtest1 ORDER BY a;
+
+SELECT a, string_to_array(b COLLATE ctest_det, U&'\00E4b') FROM strtest1 ORDER BY a;
+SELECT a, string_to_array(b COLLATE ctest_nondet, U&'\00E4b') FROM strtest1 ORDER BY a;
+
+SELECT * FROM strtest1 WHERE b LIKE 'zyäbc' COLLATE ctest_det ORDER BY a;
+SELECT * FROM strtest1 WHERE b LIKE 'zyäbc' COLLATE ctest_nondet ORDER BY a;
+
+CREATE TABLE strtest2 (a int, b text);
+SELECT create_distributed_table('strtest2', 'a');
+INSERT INTO strtest2 VALUES (1, 'cote'), (2, 'côte'), (3, 'coté'), (4, 'côté');
+
+CREATE TABLE strtest2nfd (a int, b text);
+SELECT create_distributed_table('strtest2nfd', 'a');
+INSERT INTO strtest2nfd VALUES (1, 'cote'), (2, 'côte'), (3, 'coté'), (4, 'côté');
+
+UPDATE strtest2nfd SET b = normalize(b, nfd);
+
+-- This shows why replace should be greedy. Otherwise, in the NFD
+-- case, the match would stop before the decomposed accents, which
+-- would leave the accents in the results.
+SELECT a, b, replace(b COLLATE ignore_accents, 'co', 'ma') FROM strtest2 ORDER BY a, b;
+SELECT a, b, replace(b COLLATE ignore_accents, 'co', 'ma') FROM strtest2nfd ORDER BY a, b;
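+
+-- Sanity check that the strtest2nfd copy really is decomposed; a sketch
+-- using the IS NFC NORMALIZED predicate (the accented rows should report
+-- false after the NFD rewrite above):
+SELECT a, b IS NFC NORMALIZED AS is_nfc FROM strtest2nfd ORDER BY a;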
+
+-- PG18 Feature: LIKE support for non-deterministic collations
+-- PG18 commit: https://github.com/postgres/postgres/commit/85b7efa1c
+
+-- As with the nondeterministic-collation string search tests above, we
+-- verify that LIKE with a nondeterministic collation is passed through by
+-- Citus and that the queries return the expected results.
+
+INSERT INTO strtest1 VALUES (8, U&'abc');
+INSERT INTO strtest1 VALUES (9, 'abc');
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'abc' COLLATE ctest_det
+ORDER BY a;
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'a\bc' COLLATE ctest_det
+ORDER BY a;
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'abc' COLLATE ctest_nondet
+ORDER BY a;
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'a\bc' COLLATE ctest_nondet
+ORDER BY a;
+
+CREATE COLLATION case_insensitive (provider = icu, locale = '@colStrength=secondary', deterministic = false);
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'ABC' COLLATE case_insensitive
+ORDER BY a;
+
+SELECT a, b FROM strtest1
+WHERE b LIKE 'ABC%' COLLATE case_insensitive
+ORDER BY a;
+
+INSERT INTO strtest1 VALUES (10, U&'\00E4bc');
+INSERT INTO strtest1 VALUES (12, U&'\0061\0308bc');
+
+SELECT * FROM strtest1
+WHERE b LIKE 'äbc' COLLATE ctest_det
+ORDER BY a;
+
+SELECT * FROM strtest1
+WHERE b LIKE 'äbc' COLLATE ctest_nondet
+ORDER BY a;
+
+-- Tests with ignore_accents collation. Taken from
+-- PG18 regress tests and applied to a Citus table.
+
+INSERT INTO strtest1 VALUES (10, U&'\0061\0308bc');
+INSERT INTO strtest1 VALUES (11, U&'\00E4bc');
+INSERT INTO strtest1 VALUES (12, U&'cb\0061\0308');
+INSERT INTO strtest1 VALUES (13, U&'\0308bc');
+INSERT INTO strtest1 VALUES (14, 'foox');
+
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4_c' COLLATE ignore_accents ORDER BY a, b;
+-- and in reverse:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\0061\0308_c' COLLATE ignore_accents ORDER BY a, b;
+-- inner % matches b:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4%c' COLLATE ignore_accents ORDER BY a, b;
+-- inner %% matches b then zero:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4%%c' COLLATE ignore_accents ORDER BY a, b;
+-- inner %% matches b then zero:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'c%%\00E4' COLLATE ignore_accents ORDER BY a, b;
+-- trailing _ matches two codepoints that form one grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'cb_' COLLATE ignore_accents ORDER BY a, b;
+-- trailing __ matches two codepoints that form one grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'cb__' COLLATE ignore_accents ORDER BY a, b;
+-- leading % matches zero:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'%\00E4bc' COLLATE ignore_accents ORDER BY a, b;
+-- leading % matches zero (with later %):
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'%\00E4%c' COLLATE ignore_accents ORDER BY a, b;
+-- trailing % matches zero:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4bc%' COLLATE ignore_accents ORDER BY a, b;
+-- trailing % matches zero (with previous %):
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'\00E4%c%' COLLATE ignore_accents ORDER BY a, b;
+-- _ versus two codepoints that form one grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'_bc' COLLATE ignore_accents ORDER BY a, b;
+-- (actually this matches because U&'\0308bc' = 'bc' under this collation,
+-- as the next query shows)
+SELECT a, b FROM strtest1
+WHERE b = 'bc' COLLATE ignore_accents ORDER BY a, b;
+-- __ matches two codepoints that form one grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'__bc' COLLATE ignore_accents ORDER BY a, b;
+-- _ matches one codepoint that forms half a grapheme:
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'_\0308bc' COLLATE ignore_accents ORDER BY a, b;
+-- doesn't match because U&'\00E4' does not compare equal to a lone U&'\0308'
+SELECT a, b FROM strtest1
+WHERE b LIKE U&'_\00e4bc' COLLATE ignore_accents ORDER BY a, b;
+-- escape character at end of pattern
+SELECT a, b FROM strtest1
+WHERE b LIKE 'foo\' COLLATE ignore_accents ORDER BY a, b;
+
+DROP TABLE strtest1;
+DROP COLLATION ignore_accents;
+DROP COLLATION ctest_det;
+DROP COLLATION ctest_nondet;
+DROP COLLATION case_insensitive;
+
-- cleanup with minimum verbosity
SET client_min_messages TO ERROR;
RESET search_path;