From 4b295cc857bda0a90ce0c3bed6a26bd4a7e448c9 Mon Sep 17 00:00:00 2001 From: zhjwpku Date: Mon, 22 Jan 2024 21:55:14 +0800 Subject: [PATCH 01/16] Simplify CitusNewNode (#7434) postgres refactored newNode() in PG 17, the main point for doing this is that the original trick is no longer necessary for modern compilers[1]. This does the same for Citus. This should have no backward compatibility issues since it just replaces palloc0fast with palloc0. This is good for forward compatibility since palloc0fast no longer exists in PG 17. [1] https://www.postgresql.org/message-id/b51f1fa7-7e6a-4ecc-936d-90a8a1659e7c@iki.fi --- src/include/distributed/citus_nodes.h | 39 ++++++++------------------- 1 file changed, 11 insertions(+), 28 deletions(-) diff --git a/src/include/distributed/citus_nodes.h b/src/include/distributed/citus_nodes.h index 888133a89..16df367aa 100644 --- a/src/include/distributed/citus_nodes.h +++ b/src/include/distributed/citus_nodes.h @@ -92,38 +92,21 @@ CitusNodeTagI(Node *node) return ((CitusNode*)(node))->citus_tag; } -/* - * Postgres's nodes/nodes.h has more information on why we do this. - */ -#ifdef __GNUC__ /* Citus variant of newNode(), don't use directly. 
*/ -#define CitusNewNode(size, tag) \ -({ CitusNode *_result; \ - AssertMacro((size) >= sizeof(CitusNode)); /* need the tag, at least */ \ - _result = (CitusNode *) palloc0fast(size); \ - _result->extensible.type = T_ExtensibleNode; \ - _result->extensible.extnodename = CitusNodeTagNames[tag - CITUS_NODE_TAG_START]; \ - _result->citus_tag =(int) (tag); \ - _result; \ -}) +static inline CitusNode * +CitusNewNode(size_t size, CitusNodeTag tag) +{ + CitusNode *result; -#else - -extern CitusNode *newCitusNodeMacroHolder; - -#define CitusNewNode(size, tag) \ -( \ - AssertMacro((size) >= sizeof(CitusNode)), /* need the tag, at least */ \ - newCitusNodeMacroHolder = (CitusNode *) palloc0fast(size), \ - newCitusNodeMacroHolder->extensible.type = T_ExtensibleNode, \ - newCitusNodeMacroHolder->extensible.extnodename = CitusNodeTagNames[tag - CITUS_NODE_TAG_START], \ - newCitusNodeMacroHolder->citus_tag =(int) (tag), \ - newCitusNodeMacroHolder \ -) - -#endif + Assert(size >= sizeof(CitusNode)); /* need the ExtensibleNode and the tag, at least */ + result = (CitusNode *) palloc0(size); + result->extensible.type = T_ExtensibleNode; + result->extensible.extnodename = CitusNodeTagNames[tag - CITUS_NODE_TAG_START]; + result->citus_tag = (int) (tag); + return result; +} /* * IsA equivalent that compares node tags, including Citus-specific nodes. From ee11492a0ed080b4c669460ae523cb437ab2faeb Mon Sep 17 00:00:00 2001 From: eaydingol <60466783+eaydingol@users.noreply.github.com> Date: Mon, 22 Jan 2024 17:32:49 +0300 Subject: [PATCH 02/16] Generate qualified relation name (#7427) This change refactors the code by using generate_qualified_relation_name from id instead of using a sequence of functions to generate the relation name. 
Fixes #6602 --- .../distributed/commands/alter_table.c | 75 +++++++------------ .../citus_add_local_table_to_metadata.c | 8 +- .../commands/create_distributed_table.c | 5 +- src/backend/distributed/commands/multi_copy.c | 8 +- src/backend/distributed/commands/view.c | 4 +- .../executor/insert_select_executor.c | 7 +- .../distributed/operations/shard_transfer.c | 6 +- .../distributed/worker/worker_drop_protocol.c | 6 +- 8 files changed, 36 insertions(+), 83 deletions(-) diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index a81f23ad6..030dbbe78 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -209,12 +209,9 @@ static void ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommand static bool HasAnyGeneratedStoredColumns(Oid relationId); static List * GetNonGeneratedStoredColumnNameList(Oid relationId); static void CheckAlterDistributedTableConversionParameters(TableConversionState *con); -static char * CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaName, - char *sequenceName, - char *sourceSchemaName, - char *sourceName, - char *targetSchemaName, - char *targetName); +static char * CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequeceName, + char *qualifiedSourceName, + char *qualifiedTargetName); static void ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid); static char * CreateMaterializedViewDDLCommand(Oid matViewOid); static char * GetAccessMethodForMatViewIfExists(Oid viewOid); @@ -791,13 +788,15 @@ ConvertTableInternal(TableConversionState *con) justBeforeDropCommands = lappend(justBeforeDropCommands, detachFromParentCommand); } + char *qualifiedRelationName = quote_qualified_identifier(con->schemaName, + con->relationName); + if (PartitionedTable(con->relationId)) { if (!con->suppressNoticeMessages) { ereport(NOTICE, (errmsg("converting the partitions of %s", - 
quote_qualified_identifier(con->schemaName, - con->relationName)))); + qualifiedRelationName))); } List *partitionList = PartitionList(con->relationId); @@ -870,9 +869,7 @@ ConvertTableInternal(TableConversionState *con) if (!con->suppressNoticeMessages) { - ereport(NOTICE, (errmsg("creating a new table for %s", - quote_qualified_identifier(con->schemaName, - con->relationName)))); + ereport(NOTICE, (errmsg("creating a new table for %s", qualifiedRelationName))); } TableDDLCommand *tableCreationCommand = NULL; @@ -999,8 +996,6 @@ ConvertTableInternal(TableConversionState *con) { continue; } - char *qualifiedRelationName = quote_qualified_identifier(con->schemaName, - con->relationName); TableConversionParameters cascadeParam = { .relationId = colocatedTableId, @@ -1750,9 +1745,7 @@ CreateMaterializedViewDDLCommand(Oid matViewOid) { StringInfo query = makeStringInfo(); - char *viewName = get_rel_name(matViewOid); - char *schemaName = get_namespace_name(get_rel_namespace(matViewOid)); - char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); + char *qualifiedViewName = generate_qualified_relation_name(matViewOid); /* here we need to get the access method of the view to recreate it */ char *accessMethodName = GetAccessMethodForMatViewIfExists(matViewOid); @@ -1801,9 +1794,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, bool suppressNoticeMessages) { char *sourceName = get_rel_name(sourceId); - char *targetName = get_rel_name(targetId); - Oid schemaId = get_rel_namespace(sourceId); - char *schemaName = get_namespace_name(schemaId); + char *qualifiedSourceName = generate_qualified_relation_name(sourceId); + char *qualifiedTargetName = generate_qualified_relation_name(targetId); StringInfo query = makeStringInfo(); @@ -1811,8 +1803,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, { if (!suppressNoticeMessages) { - ereport(NOTICE, (errmsg("moving the data of %s", - 
quote_qualified_identifier(schemaName, sourceName)))); + ereport(NOTICE, (errmsg("moving the data of %s", qualifiedSourceName))); } if (!HasAnyGeneratedStoredColumns(sourceId)) @@ -1822,8 +1813,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, * "INSERT INTO .. SELECT *"". */ appendStringInfo(query, "INSERT INTO %s SELECT * FROM %s", - quote_qualified_identifier(schemaName, targetName), - quote_qualified_identifier(schemaName, sourceName)); + qualifiedTargetName, qualifiedSourceName); } else { @@ -1838,9 +1828,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, char *insertColumnString = StringJoin(nonStoredColumnNameList, ','); appendStringInfo(query, "INSERT INTO %s (%s) OVERRIDING SYSTEM VALUE SELECT %s FROM %s", - quote_qualified_identifier(schemaName, targetName), - insertColumnString, insertColumnString, - quote_qualified_identifier(schemaName, sourceName)); + qualifiedTargetName, insertColumnString, + insertColumnString, qualifiedSourceName); } ExecuteQueryViaSPI(query->data, SPI_OK_INSERT); @@ -1864,14 +1853,11 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, */ if (ShouldSyncTableMetadata(targetId)) { - Oid sequenceSchemaOid = get_rel_namespace(sequenceOid); - char *sequenceSchemaName = get_namespace_name(sequenceSchemaOid); - char *sequenceName = get_rel_name(sequenceOid); + char *qualifiedSequenceName = generate_qualified_relation_name(sequenceOid); char *workerChangeSequenceDependencyCommand = - CreateWorkerChangeSequenceDependencyCommand(sequenceSchemaName, - sequenceName, - schemaName, sourceName, - schemaName, targetName); + CreateWorkerChangeSequenceDependencyCommand(qualifiedSequenceName, + qualifiedSourceName, + qualifiedTargetName); SendCommandToWorkersWithMetadata(workerChangeSequenceDependencyCommand); } else if (ShouldSyncTableMetadata(sourceId)) @@ -1894,25 +1880,23 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, if (!suppressNoticeMessages) { 
- ereport(NOTICE, (errmsg("dropping the old %s", - quote_qualified_identifier(schemaName, sourceName)))); + ereport(NOTICE, (errmsg("dropping the old %s", qualifiedSourceName))); } resetStringInfo(query); appendStringInfo(query, "DROP %sTABLE %s CASCADE", IsForeignTable(sourceId) ? "FOREIGN " : "", - quote_qualified_identifier(schemaName, sourceName)); + qualifiedSourceName); ExecuteQueryViaSPI(query->data, SPI_OK_UTILITY); if (!suppressNoticeMessages) { - ereport(NOTICE, (errmsg("renaming the new table to %s", - quote_qualified_identifier(schemaName, sourceName)))); + ereport(NOTICE, (errmsg("renaming the new table to %s", qualifiedSourceName))); } resetStringInfo(query); appendStringInfo(query, "ALTER TABLE %s RENAME TO %s", - quote_qualified_identifier(schemaName, targetName), + qualifiedTargetName, quote_identifier(sourceName)); ExecuteQueryViaSPI(query->data, SPI_OK_UTILITY); } @@ -2172,18 +2156,13 @@ CheckAlterDistributedTableConversionParameters(TableConversionState *con) * worker_change_sequence_dependency query with the parameters. 
*/ static char * -CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaName, char *sequenceName, - char *sourceSchemaName, char *sourceName, - char *targetSchemaName, char *targetName) +CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequeceName, + char *qualifiedSourceName, + char *qualifiedTargetName) { - char *qualifiedSchemaName = quote_qualified_identifier(sequenceSchemaName, - sequenceName); - char *qualifiedSourceName = quote_qualified_identifier(sourceSchemaName, sourceName); - char *qualifiedTargetName = quote_qualified_identifier(targetSchemaName, targetName); - StringInfo query = makeStringInfo(); appendStringInfo(query, "SELECT worker_change_sequence_dependency(%s, %s, %s)", - quote_literal_cstr(qualifiedSchemaName), + quote_literal_cstr(qualifiedSequeceName), quote_literal_cstr(qualifiedSourceName), quote_literal_cstr(qualifiedTargetName)); diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c index d95cdd353..93f1e7d28 100644 --- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c +++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c @@ -1160,9 +1160,7 @@ DropIdentitiesOnTable(Oid relationId) if (attributeForm->attidentity) { - char *tableName = get_rel_name(relationId); - char *schemaName = get_namespace_name(get_rel_namespace(relationId)); - char *qualifiedTableName = quote_qualified_identifier(schemaName, tableName); + char *qualifiedTableName = generate_qualified_relation_name(relationId); StringInfo dropCommand = makeStringInfo(); @@ -1222,9 +1220,7 @@ DropViewsOnTable(Oid relationId) Oid viewId = InvalidOid; foreach_oid(viewId, reverseOrderedViews) { - char *viewName = get_rel_name(viewId); - char *schemaName = get_namespace_name(get_rel_namespace(viewId)); - char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); + char *qualifiedViewName = 
generate_qualified_relation_name(viewId); StringInfo dropCommand = makeStringInfo(); appendStringInfo(dropCommand, "DROP %sVIEW IF EXISTS %s", diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 9f3975a1e..5ec6d6dd7 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -1323,10 +1323,7 @@ CreateCitusTable(Oid relationId, CitusTableType tableType, { List *partitionList = PartitionList(relationId); Oid partitionRelationId = InvalidOid; - Oid namespaceId = get_rel_namespace(relationId); - char *schemaName = get_namespace_name(namespaceId); - char *relationName = get_rel_name(relationId); - char *parentRelationName = quote_qualified_identifier(schemaName, relationName); + char *parentRelationName = generate_qualified_relation_name(relationId); /* * when there are many partitions, each call to CreateDistributedTable diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index c69e33f94..0284ea64d 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -2547,12 +2547,8 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu if (columnNulls[partitionColumnIndex]) { - Oid relationId = copyDest->distributedRelationId; - char *relationName = get_rel_name(relationId); - Oid schemaOid = get_rel_namespace(relationId); - char *schemaName = get_namespace_name(schemaOid); - char *qualifiedTableName = quote_qualified_identifier(schemaName, - relationName); + char *qualifiedTableName = generate_qualified_relation_name( + copyDest->distributedRelationId); ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("the partition column of table %s cannot be NULL", diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c index 
0c39be4ca..9689b9267 100644 --- a/src/backend/distributed/commands/view.c +++ b/src/backend/distributed/commands/view.c @@ -392,9 +392,7 @@ CreateViewDDLCommand(Oid viewOid) static void AppendQualifiedViewNameToCreateViewCommand(StringInfo buf, Oid viewOid) { - char *viewName = get_rel_name(viewOid); - char *schemaName = get_namespace_name(get_rel_namespace(viewOid)); - char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); + char *qualifiedViewName = generate_qualified_relation_name(viewOid); appendStringInfo(buf, "%s ", qualifiedViewName); } diff --git a/src/backend/distributed/executor/insert_select_executor.c b/src/backend/distributed/executor/insert_select_executor.c index f5fbb3f78..a8dc1fa5a 100644 --- a/src/backend/distributed/executor/insert_select_executor.c +++ b/src/backend/distributed/executor/insert_select_executor.c @@ -143,15 +143,10 @@ NonPushableInsertSelectExecScan(CustomScanState *node) targetRelation->partitionColumn); if (distributionColumnIndex == -1) { - char *relationName = get_rel_name(targetRelationId); - Oid schemaOid = get_rel_namespace(targetRelationId); - char *schemaName = get_namespace_name(schemaOid); - ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg( "the partition column of table %s should have a value", - quote_qualified_identifier(schemaName, - relationName)))); + generate_qualified_relation_name(targetRelationId)))); } TargetEntry *selectPartitionTE = list_nth(selectQuery->targetList, diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c index 7d6747caf..805ef39d7 100644 --- a/src/backend/distributed/operations/shard_transfer.c +++ b/src/backend/distributed/operations/shard_transfer.c @@ -1945,11 +1945,7 @@ ConstructQualifiedShardName(ShardInterval *shardInterval) static List * RecreateTableDDLCommandList(Oid relationId) { - const char *relationName = get_rel_name(relationId); - Oid relationSchemaId = 
get_rel_namespace(relationId); - const char *relationSchemaName = get_namespace_name(relationSchemaId); - const char *qualifiedRelationName = quote_qualified_identifier(relationSchemaName, - relationName); + const char *qualifiedRelationName = generate_qualified_relation_name(relationId); StringInfo dropCommand = makeStringInfo(); diff --git a/src/backend/distributed/worker/worker_drop_protocol.c b/src/backend/distributed/worker/worker_drop_protocol.c index 6d7b5326a..280de4493 100644 --- a/src/backend/distributed/worker/worker_drop_protocol.c +++ b/src/backend/distributed/worker/worker_drop_protocol.c @@ -170,14 +170,10 @@ WorkerDropDistributedTable(Oid relationId) */ if (!IsAnyObjectAddressOwnedByExtension(list_make1(distributedTableObject), NULL)) { - char *relName = get_rel_name(relationId); - Oid schemaId = get_rel_namespace(relationId); - char *schemaName = get_namespace_name(schemaId); - StringInfo dropCommand = makeStringInfo(); appendStringInfo(dropCommand, "DROP%sTABLE %s CASCADE", IsForeignTable(relationId) ? " FOREIGN " : " ", - quote_qualified_identifier(schemaName, relName)); + generate_qualified_relation_name(relationId)); Node *dropCommandNode = ParseTreeNode(dropCommand->data); From 72fbea20c49c6de74f0c5005f77efce9f3a54178 Mon Sep 17 00:00:00 2001 From: Marco Slot Date: Tue, 23 Jan 2024 11:55:03 +0100 Subject: [PATCH 03/16] Replace spurious strdup with pstrdup (#7440) Not sure why we never found this using valgrind, but using strdup will cause memory leaks because the pointer is not tracked in a memory context. 
--- src/backend/distributed/planner/query_colocation_checker.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c index 827a0286c..d9b1aad5a 100644 --- a/src/backend/distributed/planner/query_colocation_checker.c +++ b/src/backend/distributed/planner/query_colocation_checker.c @@ -433,7 +433,7 @@ CreateTargetEntryForColumn(Form_pg_attribute attributeTuple, Index rteIndex, attributeTuple->atttypmod, attributeTuple->attcollation, 0); TargetEntry *targetEntry = makeTargetEntry((Expr *) targetColumn, resno, - strdup(attributeTuple->attname.data), false); + pstrdup(attributeTuple->attname.data), false); return targetEntry; } From 9683bef2ecf624b54a42bbd94ed4f4c4e765aa7b Mon Sep 17 00:00:00 2001 From: Jelte Fennema-Nio Date: Tue, 23 Jan 2024 13:28:26 +0100 Subject: [PATCH 04/16] Replace more spurious strdups with pstrdups (#7441) DESCRIPTION: Remove a few small memory leaks In #7440 one instance of a strdup was removed. But there were a few more. This removes the ones that are left over, or adds a comment why strdup is on purpose. 
--- src/backend/distributed/commands/extension.c | 8 ++++---- .../distributed/connection/connection_configuration.c | 4 ++++ .../distributed/planner/query_colocation_checker.c | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 36267ff66..2ead0c58a 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -776,7 +776,7 @@ PreprocessCreateExtensionStmtForCitusColumnar(Node *parsetree) /*create extension citus version xxx*/ if (newVersionValue) { - char *newVersion = strdup(defGetString(newVersionValue)); + char *newVersion = pstrdup(defGetString(newVersionValue)); versionNumber = GetExtensionVersionNumber(newVersion); } @@ -796,7 +796,7 @@ PreprocessCreateExtensionStmtForCitusColumnar(Node *parsetree) Oid citusOid = get_extension_oid("citus", true); if (citusOid != InvalidOid) { - char *curCitusVersion = strdup(get_extension_version(citusOid)); + char *curCitusVersion = pstrdup(get_extension_version(citusOid)); int curCitusVersionNum = GetExtensionVersionNumber(curCitusVersion); if (curCitusVersionNum < 1110) { @@ -891,7 +891,7 @@ PreprocessAlterExtensionCitusStmtForCitusColumnar(Node *parseTree) if (newVersionValue) { char *newVersion = defGetString(newVersionValue); - double newVersionNumber = GetExtensionVersionNumber(strdup(newVersion)); + double newVersionNumber = GetExtensionVersionNumber(pstrdup(newVersion)); /*alter extension citus update to version >= 11.1-1, and no citus_columnar installed */ if (newVersionNumber >= 1110 && citusColumnarOid == InvalidOid) @@ -935,7 +935,7 @@ PostprocessAlterExtensionCitusStmtForCitusColumnar(Node *parseTree) if (newVersionValue) { char *newVersion = defGetString(newVersionValue); - double newVersionNumber = GetExtensionVersionNumber(strdup(newVersion)); + double newVersionNumber = GetExtensionVersionNumber(pstrdup(newVersion)); if (newVersionNumber >= 1110 
&& citusColumnarOid != InvalidOid) { /*upgrade citus, after "ALTER EXTENSION citus update to xxx" updates citus_columnar Y to version Z. */ diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c index 57069f698..c52254f9c 100644 --- a/src/backend/distributed/connection/connection_configuration.c +++ b/src/backend/distributed/connection/connection_configuration.c @@ -123,6 +123,10 @@ AddConnParam(const char *keyword, const char *value) errmsg("ConnParams arrays bound check failed"))); } + /* + * Don't use pstrdup here to avoid being tied to a memory context, we free + * these later using ResetConnParams + */ ConnParams.keywords[ConnParams.size] = strdup(keyword); ConnParams.values[ConnParams.size] = strdup(value); ConnParams.size++; diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c index d9b1aad5a..bef91618e 100644 --- a/src/backend/distributed/planner/query_colocation_checker.c +++ b/src/backend/distributed/planner/query_colocation_checker.c @@ -449,7 +449,7 @@ CreateTargetEntryForNullCol(Form_pg_attribute attributeTuple, int resno) attributeTuple->attcollation); char *resName = attributeTuple->attname.data; TargetEntry *targetEntry = - makeTargetEntry(nullExpr, resno, strdup(resName), false); + makeTargetEntry(nullExpr, resno, pstrdup(resName), false); return targetEntry; } From 11d7c273523c5893f195455c839551f9999bc9c9 Mon Sep 17 00:00:00 2001 From: Teja Mupparti Date: Tue, 23 Jan 2024 10:24:45 -0800 Subject: [PATCH 05/16] Fix assertions in other PG versions too, the original fix is in PR-7379 --- src/backend/distributed/deparser/ruleutils_14.c | 11 +++++++++-- src/backend/distributed/deparser/ruleutils_16.c | 11 +++++++++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/backend/distributed/deparser/ruleutils_14.c b/src/backend/distributed/deparser/ruleutils_14.c index 
01b74eab1..88948cff5 100644 --- a/src/backend/distributed/deparser/ruleutils_14.c +++ b/src/backend/distributed/deparser/ruleutils_14.c @@ -1526,8 +1526,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, /* Assert we processed the right number of columns */ #ifdef USE_ASSERT_CHECKING - while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) - i++; + for (int col_index = 0; col_index < colinfo->num_cols; col_index++) + { + /* + * In the above processing-loops, "i" advances only if + * the column is not new, check if this is a new column. + */ + if (colinfo->is_new_col[col_index]) + i++; + } Assert(i == colinfo->num_cols); Assert(j == nnewcolumns); #endif diff --git a/src/backend/distributed/deparser/ruleutils_16.c b/src/backend/distributed/deparser/ruleutils_16.c index 10373e487..7f2a41d75 100644 --- a/src/backend/distributed/deparser/ruleutils_16.c +++ b/src/backend/distributed/deparser/ruleutils_16.c @@ -1580,8 +1580,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, /* Assert we processed the right number of columns */ #ifdef USE_ASSERT_CHECKING - while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) - i++; + for (int col_index = 0; col_index < colinfo->num_cols; col_index++) + { + /* + * In the above processing-loops, "i" advances only if + * the column is not new, check if this is a new column. + */ + if (colinfo->is_new_col[col_index]) + i++; + } Assert(i == colinfo->num_cols); Assert(j == nnewcolumns); #endif From 863713e9b7aed4762ed9851fea0dbf68bcdab323 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?= Date: Wed, 24 Jan 2024 09:00:19 +0300 Subject: [PATCH 06/16] Refactors ExtendedTaskList methods (#7372) ExecuteTaskListIntoTupleDestWithParam and ExecuteTaskListIntoTupleDest are nearly the same. 
I parameterized and made a reusable structure here --------- Co-authored-by: Onur Tirtir --- .../distributed/executor/adaptive_executor.c | 53 ++++++++++++------- 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c index 1b0277f2e..e912f418d 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -727,6 +727,11 @@ static uint64 MicrosecondsBetweenTimestamps(instr_time startTime, instr_time end static int WorkerPoolCompare(const void *lhsKey, const void *rhsKey); static void SetAttributeInputMetadata(DistributedExecution *execution, ShardCommandExecution *shardCommandExecution); +static ExecutionParams * CreateDefaultExecutionParams(RowModifyLevel modLevel, + List *taskList, + TupleDestination *tupleDest, + bool expectResults, + ParamListInfo paramListInfo); /* @@ -1013,14 +1018,14 @@ ExecuteTaskListOutsideTransaction(RowModifyLevel modLevel, List *taskList, /* - * ExecuteTaskListIntoTupleDestWithParam is a proxy to ExecuteTaskListExtended() which uses - * bind params from executor state, and with defaults for some of the arguments. + * CreateDefaultExecutionParams returns execution params based on given (possibly null) + * bind params (presumably from executor state) with defaults for some of the arguments. 
*/ -uint64 -ExecuteTaskListIntoTupleDestWithParam(RowModifyLevel modLevel, List *taskList, - TupleDestination *tupleDest, - bool expectResults, - ParamListInfo paramListInfo) +static ExecutionParams * +CreateDefaultExecutionParams(RowModifyLevel modLevel, List *taskList, + TupleDestination *tupleDest, + bool expectResults, + ParamListInfo paramListInfo) { int targetPoolSize = MaxAdaptiveExecutorPoolSize; bool localExecutionSupported = true; @@ -1034,6 +1039,24 @@ ExecuteTaskListIntoTupleDestWithParam(RowModifyLevel modLevel, List *taskList, executionParams->tupleDestination = tupleDest; executionParams->paramListInfo = paramListInfo; + return executionParams; +} + + +/* + * ExecuteTaskListIntoTupleDestWithParam is a proxy to ExecuteTaskListExtended() which uses + * bind params from executor state, and with defaults for some of the arguments. + */ +uint64 +ExecuteTaskListIntoTupleDestWithParam(RowModifyLevel modLevel, List *taskList, + TupleDestination *tupleDest, + bool expectResults, + ParamListInfo paramListInfo) +{ + ExecutionParams *executionParams = CreateDefaultExecutionParams(modLevel, taskList, + tupleDest, + expectResults, + paramListInfo); return ExecuteTaskListExtended(executionParams); } @@ -1047,17 +1070,11 @@ ExecuteTaskListIntoTupleDest(RowModifyLevel modLevel, List *taskList, TupleDestination *tupleDest, bool expectResults) { - int targetPoolSize = MaxAdaptiveExecutorPoolSize; - bool localExecutionSupported = true; - ExecutionParams *executionParams = CreateBasicExecutionParams( - modLevel, taskList, targetPoolSize, localExecutionSupported - ); - - executionParams->xactProperties = DecideTransactionPropertiesForTaskList( - modLevel, taskList, false); - executionParams->expectResults = expectResults; - executionParams->tupleDestination = tupleDest; - + ParamListInfo paramListInfo = NULL; + ExecutionParams *executionParams = CreateDefaultExecutionParams(modLevel, taskList, + tupleDest, + expectResults, + paramListInfo); return 
ExecuteTaskListExtended(executionParams); } From 3ffb831beb420d7d0794600d3e4b3106897e0ae1 Mon Sep 17 00:00:00 2001 From: Gokhan Gulbiz Date: Wed, 24 Jan 2024 11:50:49 +0300 Subject: [PATCH 07/16] Update contributing docs (#7447) This is a minor change to use a generic name instead of our legacy CI provider name in the contributing documentation. --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index eaec55c3e..e1900642d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -175,7 +175,7 @@ that are missing in earlier minor versions. ### Following our coding conventions -CircleCI will automatically reject any PRs which do not follow our coding +CI pipeline will automatically reject any PRs which do not follow our coding conventions. The easiest way to ensure your PR adheres to those conventions is to use the [citus_indent](https://github.com/citusdata/tools/tree/develop/uncrustify) tool. This tool uses `uncrustify` under the hood. From 1cb2e1e4e8d31601f5b250f90b02d71a8a02e82e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Halil=20Ozan=20Akg=C3=BCl?= Date: Wed, 24 Jan 2024 12:57:54 +0300 Subject: [PATCH 08/16] Fixes create user queries from Citus non-main databases with other users (#7442) This PR makes the connections to other nodes for `mark_object_distributed` use the same user as `execute_command_on_remote_nodes_as_user` so they'll use the same connection. 
--- src/backend/distributed/commands/utility_hook.c | 5 +++-- src/backend/distributed/metadata/distobject.c | 16 +++++++++++----- .../sql/downgrades/citus--12.2-1--12.1-1.sql | 2 +- .../sql/udfs/mark_object_distributed/12.2-1.sql | 4 ++-- .../sql/udfs/mark_object_distributed/latest.sql | 4 ++-- .../distributed/transaction/worker_transaction.c | 6 +----- src/include/distributed/metadata/distobject.h | 3 ++- src/include/distributed/worker_transaction.h | 4 ++++ .../citus_tests/test/test_other_databases.py | 4 ++-- src/test/regress/expected/multi_extension.out | 2 +- src/test/regress/expected/other_databases.out | 1 + .../expected/upgrade_list_citus_objects.out | 2 +- src/test/regress/sql/other_databases.sql | 1 + 13 files changed, 32 insertions(+), 22 deletions(-) diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index c2155383a..68af4b7b5 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -92,7 +92,7 @@ #define START_MANAGEMENT_TRANSACTION \ "SELECT citus_internal.start_management_transaction('%lu')" #define MARK_OBJECT_DISTRIBUTED \ - "SELECT citus_internal.mark_object_distributed(%d, %s, %d)" + "SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)" bool EnableDDLPropagation = true; /* ddl propagation is enabled */ @@ -1636,7 +1636,8 @@ RunPostprocessMainDBCommand(Node *parsetree) MARK_OBJECT_DISTRIBUTED, AuthIdRelationId, quote_literal_cstr(createRoleStmt->role), - roleOid); + roleOid, + quote_literal_cstr(CurrentUserName())); RunCitusMainDBQuery(mainDBQuery->data); } } diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index 1d07be8c3..007d07bdc 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -67,7 +67,8 @@ PG_FUNCTION_INFO_V1(master_unmark_object_distributed); /* * mark_object_distributed adds an object to 
pg_dist_object - * in all of the nodes. + * in all of the nodes, for the connections to the other nodes this function + * uses the user passed. */ Datum mark_object_distributed(PG_FUNCTION_ARGS) @@ -81,6 +82,8 @@ mark_object_distributed(PG_FUNCTION_ARGS) Oid objectId = PG_GETARG_OID(2); ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*objectAddress, classId, objectId); + text *connectionUserText = PG_GETARG_TEXT_P(3); + char *connectionUser = text_to_cstring(connectionUserText); /* * This function is called when a query is run from a Citus non-main database. @@ -88,7 +91,8 @@ mark_object_distributed(PG_FUNCTION_ARGS) * 2PC still works. */ bool useConnectionForLocalQuery = true; - MarkObjectDistributedWithName(objectAddress, objectName, useConnectionForLocalQuery); + MarkObjectDistributedWithName(objectAddress, objectName, useConnectionForLocalQuery, + connectionUser); PG_RETURN_VOID(); } @@ -193,7 +197,8 @@ void MarkObjectDistributed(const ObjectAddress *distAddress) { bool useConnectionForLocalQuery = false; - MarkObjectDistributedWithName(distAddress, "", useConnectionForLocalQuery); + MarkObjectDistributedWithName(distAddress, "", useConnectionForLocalQuery, + CurrentUserName()); } @@ -204,7 +209,7 @@ MarkObjectDistributed(const ObjectAddress *distAddress) */ void MarkObjectDistributedWithName(const ObjectAddress *distAddress, char *objectName, - bool useConnectionForLocalQuery) + bool useConnectionForLocalQuery, char *connectionUser) { if (!CitusHasBeenLoaded()) { @@ -234,7 +239,8 @@ MarkObjectDistributedWithName(const ObjectAddress *distAddress, char *objectName { char *workerPgDistObjectUpdateCommand = CreatePgDistObjectEntryCommand(distAddress, objectName); - SendCommandToRemoteNodesWithMetadata(workerPgDistObjectUpdateCommand); + SendCommandToRemoteMetadataNodesParams(workerPgDistObjectUpdateCommand, + connectionUser, 0, NULL, NULL); } } diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql 
b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql index f889a0095..20d85444f 100644 --- a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql @@ -15,7 +15,7 @@ DROP FUNCTION citus_internal.execute_command_on_remote_nodes_as_user( ); DROP FUNCTION citus_internal.mark_object_distributed( - classId Oid, objectName text, objectId Oid + classId Oid, objectName text, objectId Oid, connectionUser text ); DROP FUNCTION citus_internal.commit_management_command_2pc(); diff --git a/src/backend/distributed/sql/udfs/mark_object_distributed/12.2-1.sql b/src/backend/distributed/sql/udfs/mark_object_distributed/12.2-1.sql index ee2c5e7e8..25d35c028 100644 --- a/src/backend/distributed/sql/udfs/mark_object_distributed/12.2-1.sql +++ b/src/backend/distributed/sql/udfs/mark_object_distributed/12.2-1.sql @@ -1,7 +1,7 @@ -CREATE OR REPLACE FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid) +CREATE OR REPLACE FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text) RETURNS VOID LANGUAGE C AS 'MODULE_PATHNAME', $$mark_object_distributed$$; -COMMENT ON FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid) +COMMENT ON FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text) IS 'adds an object to pg_dist_object on all nodes'; diff --git a/src/backend/distributed/sql/udfs/mark_object_distributed/latest.sql b/src/backend/distributed/sql/udfs/mark_object_distributed/latest.sql index ee2c5e7e8..25d35c028 100644 --- a/src/backend/distributed/sql/udfs/mark_object_distributed/latest.sql +++ b/src/backend/distributed/sql/udfs/mark_object_distributed/latest.sql @@ -1,7 +1,7 @@ -CREATE OR REPLACE FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid) +CREATE OR REPLACE FUNCTION 
citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text) RETURNS VOID LANGUAGE C AS 'MODULE_PATHNAME', $$mark_object_distributed$$; -COMMENT ON FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid) +COMMENT ON FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text) IS 'adds an object to pg_dist_object on all nodes'; diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 9c8563de0..c6fcee107 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -36,10 +36,6 @@ #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" -static void SendCommandToRemoteMetadataNodesParams(const char *command, - const char *user, int parameterCount, - const Oid *parameterTypes, - const char *const *parameterValues); static void SendBareCommandListToMetadataNodesInternal(List *commandList, TargetWorkerSet targetWorkerSet); static void SendCommandToMetadataWorkersParams(const char *command, @@ -209,7 +205,7 @@ SendCommandListToRemoteNodesWithMetadata(List *commands) * SendCommandToWorkersParamsInternal() that can be used to send commands * to remote metadata nodes. 
*/ -static void +void SendCommandToRemoteMetadataNodesParams(const char *command, const char *user, int parameterCount, const Oid *parameterTypes, diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h index 13f38178b..e98e6ee86 100644 --- a/src/include/distributed/metadata/distobject.h +++ b/src/include/distributed/metadata/distobject.h @@ -24,7 +24,8 @@ extern bool IsAnyObjectDistributed(const List *addresses); extern bool ClusterHasDistributedFunctionWithDistArgument(void); extern void MarkObjectDistributed(const ObjectAddress *distAddress); extern void MarkObjectDistributedWithName(const ObjectAddress *distAddress, char *name, - bool useConnectionForLocalQuery); + bool useConnectionForLocalQuery, + char *connectionUser); extern void MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress); extern void MarkObjectDistributedLocally(const ObjectAddress *distAddress); extern void UnmarkObjectDistributed(const ObjectAddress *address); diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h index b9a855828..1b3809a0e 100644 --- a/src/include/distributed/worker_transaction.h +++ b/src/include/distributed/worker_transaction.h @@ -68,6 +68,10 @@ extern void SendCommandToWorkersAsUser(TargetWorkerSet targetWorkerSet, const char *nodeUser, const char *command); extern void SendCommandToWorkerAsUser(const char *nodeName, int32 nodePort, const char *nodeUser, const char *command); +extern void SendCommandToRemoteMetadataNodesParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues); extern bool SendOptionalCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort, const char *nodeUser, diff --git a/src/test/regress/citus_tests/test/test_other_databases.py b/src/test/regress/citus_tests/test/test_other_databases.py index 925b065a7..494301692 100644 --- 
a/src/test/regress/citus_tests/test/test_other_databases.py +++ b/src/test/regress/citus_tests/test/test_other_databases.py @@ -22,7 +22,7 @@ def test_main_commited_outer_not_yet(cluster): "SELECT citus_internal.execute_command_on_remote_nodes_as_user('CREATE USER u1;', 'postgres')" ) cur2.execute( - "SELECT citus_internal.mark_object_distributed(1260, 'u1', 123123)" + "SELECT citus_internal.mark_object_distributed(1260, 'u1', 123123, 'postgres')" ) cur2.execute("COMMIT") @@ -133,7 +133,7 @@ def test_main_commited_outer_aborted(cluster): "SELECT citus_internal.execute_command_on_remote_nodes_as_user('CREATE USER u2;', 'postgres')" ) cur2.execute( - "SELECT citus_internal.mark_object_distributed(1260, 'u2', 321321)" + "SELECT citus_internal.mark_object_distributed(1260, 'u2', 321321, 'postgres')" ) cur2.execute("COMMIT") diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 60e283800..57dbe3e75 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1424,7 +1424,7 @@ SELECT * FROM multi_extension.print_extension_changes(); --------------------------------------------------------------------- | function citus_internal.commit_management_command_2pc() void | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void - | function citus_internal.mark_object_distributed(oid,text,oid) void + | function citus_internal.mark_object_distributed(oid,text,oid,text) void | function citus_internal.start_management_transaction(xid8) void | function citus_internal_acquire_citus_advisory_object_class_lock(integer,cstring) void | function citus_internal_database_command(text) void diff --git a/src/test/regress/expected/other_databases.out b/src/test/regress/expected/other_databases.out index 1b81af3b7..9e170861e 100644 --- a/src/test/regress/expected/other_databases.out +++ b/src/test/regress/expected/other_databases.out @@ -71,6 +71,7 @@ SELECT 
citus_internal.execute_command_on_remote_nodes_as_user($$SELECT 'dangerou ERROR: operation is not allowed HINT: Run the command with a superuser. \c other_db1 +SET ROLE nonsuperuser; CREATE USER other_db_user9; RESET ROLE; \c regression diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 97e5c0928..9bd542f05 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -59,7 +59,7 @@ ORDER BY 1; function citus_internal.commit_management_command_2pc() function citus_internal.execute_command_on_remote_nodes_as_user(text,text) function citus_internal.find_groupid_for_node(text,integer) - function citus_internal.mark_object_distributed(oid,text,oid) + function citus_internal.mark_object_distributed(oid,text,oid,text) function citus_internal.pg_dist_node_trigger_func() function citus_internal.pg_dist_rebalance_strategy_trigger_func() function citus_internal.pg_dist_shard_placement_trigger_func() diff --git a/src/test/regress/sql/other_databases.sql b/src/test/regress/sql/other_databases.sql index 629f74f45..563793518 100644 --- a/src/test/regress/sql/other_databases.sql +++ b/src/test/regress/sql/other_databases.sql @@ -51,6 +51,7 @@ SET ROLE nonsuperuser; SELECT citus_internal.execute_command_on_remote_nodes_as_user($$SELECT 'dangerous query'$$, 'postgres'); \c other_db1 +SET ROLE nonsuperuser; CREATE USER other_db_user9; RESET ROLE; From 8b48d6ab0298d80ded17a6721e187b627f647c91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20Sedl=C3=A1k?= Date: Wed, 24 Jan 2024 12:24:23 +0100 Subject: [PATCH 09/16] Log username in the failed connection message (#7432) This patch includes the username in the reported error message. This makes debugging easier when certain commands open connections as other users than the user that is executing the command. 
``` monitora_snapshot=# SELECT citus_move_shard_placement(102030, 'monitora.db-dev-worker-a', 6005, 'monitora.db-dev-worker-a', 6017); ERROR: connection to the remote node monitora_user@monitora.db-dev-worker-a:6017 failed with the following error: fe_sendauth: no password supplied Time: 40,198 ms ``` --- .../distributed/connection/remote_commands.c | 9 +- .../regress/expected/detect_conn_close.out | 2 +- .../failure_connection_establishment.out | 2 +- .../regress/expected/failure_copy_on_hash.out | 4 +- ...ure_create_distributed_table_non_empty.out | 12 +-- .../failure_create_index_concurrently.out | 6 +- .../regress/expected/failure_create_table.out | 10 +-- .../regress/expected/failure_cte_subquery.out | 8 +- src/test/regress/expected/failure_ddl.out | 16 ++-- .../expected/failure_distributed_results.out | 2 +- .../failure_failover_to_local_execution.out | 2 +- .../failure_insert_select_pushdown.out | 4 +- .../failure_insert_select_repartition.out | 10 +-- .../regress/expected/failure_multi_dml.out | 6 +- .../expected/failure_multi_row_insert.out | 10 +-- .../failure_multi_shard_update_delete.out | 32 +++---- .../failure_mx_metadata_sync_multi_trans.out | 84 +++++++++---------- .../failure_on_create_subscription.out | 4 +- .../failure_online_move_shard_placement.out | 6 +- .../regress/expected/failure_ref_tables.out | 6 +- .../failure_replicated_partitions.out | 2 +- .../regress/expected/failure_savepoints.out | 2 +- .../regress/expected/failure_single_mod.out | 6 +- .../expected/failure_single_select.out | 10 +-- .../expected/failure_split_cleanup.out | 8 +- .../expected/failure_tenant_isolation.out | 8 +- .../failure_tenant_isolation_nonblocking.out | 6 +- .../regress/expected/failure_truncate.out | 14 ++-- src/test/regress/expected/failure_vacuum.out | 6 +- .../expected/isolation_update_node.out | 2 +- .../expected/local_shard_execution.out | 4 +- .../expected/local_shard_execution_0.out | 4 +- .../regress/expected/multi_citus_tools.out | 2 +- 
src/test/regress/expected/multi_copy.out | 6 +- .../expected/multi_modifying_xacts.out | 20 ++--- .../regress/expected/multi_multiuser_auth.out | 2 +- .../regress/expected/multi_router_planner.out | 4 +- .../regress/expected/node_conninfo_reload.out | 16 ++-- .../regress/expected/shard_rebalancer.out | 6 +- .../sql/failure_on_create_subscription.sql | 4 +- 40 files changed, 184 insertions(+), 183 deletions(-) diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index f694ff390..4b46e96d2 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -246,6 +246,7 @@ ClearResultsIfReady(MultiConnection *connection) void ReportConnectionError(MultiConnection *connection, int elevel) { + char *userName = connection->user; char *nodeName = connection->hostname; int nodePort = connection->port; PGconn *pgConn = connection->pgConn; @@ -264,15 +265,15 @@ ReportConnectionError(MultiConnection *connection, int elevel) if (messageDetail) { ereport(elevel, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("connection to the remote node %s:%d failed with the " - "following error: %s", nodeName, nodePort, + errmsg("connection to the remote node %s@%s:%d failed with the " + "following error: %s", userName, nodeName, nodePort, messageDetail))); } else { ereport(elevel, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("connection to the remote node %s:%d failed", - nodeName, nodePort))); + errmsg("connection to the remote node %s@%s:%d failed", + userName, nodeName, nodePort))); } } diff --git a/src/test/regress/expected/detect_conn_close.out b/src/test/regress/expected/detect_conn_close.out index ad758f32e..60973de76 100644 --- a/src/test/regress/expected/detect_conn_close.out +++ b/src/test/regress/expected/detect_conn_close.out @@ -128,7 +128,7 @@ BEGIN; (1 row) SELECT count(*) FROM socket_test_table; -ERROR: connection to the remote node 
localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open ROLLBACK; -- repartition joins also can recover SET citus.enable_repartition_joins TO on; diff --git a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out index d032755dd..f23f11d2b 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ b/src/test/regress/expected/failure_connection_establishment.out @@ -84,7 +84,7 @@ SELECT citus.mitmproxy('conn.connect_delay(1400)'); ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); WARNING: could not establish connection after 900 ms -ERROR: connection to the remote node localhost:xxxxx failed +ERROR: connection to the remote node postgres@localhost:xxxxx failed RESET citus.node_connection_timeout; SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_copy_on_hash.out b/src/test/regress/expected/failure_copy_on_hash.out index 24350f707..424ab0da8 100644 --- a/src/test/regress/expected/failure_copy_on_hash.out +++ b/src/test/regress/expected/failure_copy_on_hash.out @@ -36,7 +36,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -271,7 +271,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table_2 FROM stdin delimiter ','; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: 
connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out index 0e4b85701..109d3686f 100644 --- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out +++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out @@ -28,7 +28,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -164,7 +164,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -436,7 +436,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following 
error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -519,7 +519,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_comma (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -680,7 +680,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -901,7 +901,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count 
--------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_create_index_concurrently.out b/src/test/regress/expected/failure_create_index_concurrently.out index 784c91aec..947d5711e 100644 --- a/src/test/regress/expected/failure_create_index_concurrently.out +++ b/src/test/regress/expected/failure_create_index_concurrently.out @@ -29,7 +29,7 @@ CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, if applicable, and then re-attempt the original command. -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -63,7 +63,7 @@ CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, if applicable, and then re-attempt the original command. 
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -144,7 +144,7 @@ DROP INDEX CONCURRENTLY IF EXISTS idx_index_test; WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, if applicable, and then re-attempt the original command. -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out index 5d022d678..956bdb2b2 100644 --- a/src/test/regress/expected/failure_create_table.out +++ b/src/test/regress/expected/failure_create_table.out @@ -22,7 +22,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -116,7 +116,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_comman (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with 
the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -147,7 +147,7 @@ BEGIN; (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -215,7 +215,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -484,7 +484,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_cte_subquery.out b/src/test/regress/expected/failure_cte_subquery.out index 19fb11f37..89e3e1489 100644 --- a/src/test/regress/expected/failure_cte_subquery.out +++ b/src/test/regress/expected/failure_cte_subquery.out @@ -86,7 +86,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection to the remote node 
localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- kill at the third copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").kill()'); mitmproxy @@ -117,7 +117,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancel at the first copy (push) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy @@ -254,7 +254,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -365,7 +365,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open END; RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/expected/failure_ddl.out b/src/test/regress/expected/failure_ddl.out index 77b134a72..2f55663a0 100644 --- a/src/test/regress/expected/failure_ddl.out 
+++ b/src/test/regress/expected/failure_ddl.out @@ -36,7 +36,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -99,7 +99,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- show that we've never commited the changes SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg @@ -300,7 +300,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -361,7 +361,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node 
postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -661,7 +661,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -722,7 +722,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -1010,7 +1010,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- kill as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).kill()'); mitmproxy @@ -1019,7 +1019,7 @@ 
SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").aft (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancel as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).cancel(' || pg_backend_pid() || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_distributed_results.out b/src/test/regress/expected/failure_distributed_results.out index a316763e3..5a2461057 100644 --- a/src/test/regress/expected/failure_distributed_results.out +++ b/src/test/regress/expected/failure_distributed_results.out @@ -88,7 +88,7 @@ CREATE TABLE distributed_result_info AS SELECT resultId, nodeport, rowcount, targetShardId, targetShardIndex FROM partition_task_list_results('test', $$ SELECT * FROM source_table $$, 'target_table') NATURAL JOIN pg_dist_node; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM distributed_result_info ORDER BY resultId; resultid | nodeport | rowcount | targetshardid | targetshardindex --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_failover_to_local_execution.out b/src/test/regress/expected/failure_failover_to_local_execution.out index 56518141a..20ad2a6df 100644 --- a/src/test/regress/expected/failure_failover_to_local_execution.out +++ b/src/test/regress/expected/failure_failover_to_local_execution.out @@ -101,7 +101,7 @@ NOTICE: issuing SELECT count(*) AS count FROM failure_failover_to_local_executi DETAIL: on server 
postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT count(*) AS count FROM failure_failover_to_local_execution.failover_to_local_1980003 failover_to_local WHERE true DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open NOTICE: executing the command locally: SELECT count(*) AS count FROM failure_failover_to_local_execution.failover_to_local_1980000 failover_to_local WHERE true NOTICE: executing the command locally: SELECT count(*) AS count FROM failure_failover_to_local_execution.failover_to_local_1980002 failover_to_local WHERE true count diff --git a/src/test/regress/expected/failure_insert_select_pushdown.out b/src/test/regress/expected/failure_insert_select_pushdown.out index ed461d040..570bf22f9 100644 --- a/src/test/regress/expected/failure_insert_select_pushdown.out +++ b/src/test/regress/expected/failure_insert_select_pushdown.out @@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown" (1 row) INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -95,7 +95,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown" (1 row) INSERT INTO events_table SELECT * FROM events_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open 
--verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_insert_select_repartition.out b/src/test/regress/expected/failure_insert_select_repartition.out index d45318208..ca36f7e88 100644 --- a/src/test/regress/expected/failure_insert_select_repartition.out +++ b/src/test/regress/expected/failure_insert_select_repartition.out @@ -55,7 +55,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill (1 row) INSERT INTO target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -68,7 +68,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill (1 row) INSERT INTO target_table SELECT * FROM replicated_source_table; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -138,7 +138,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()') (1 row) INSERT INTO target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -151,7 +151,7 @@ SELECT 
citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()') (1 row) INSERT INTO target_table SELECT * FROM replicated_source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -168,7 +168,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()') (1 row) INSERT INTO replicated_target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM replicated_target_table; a | b --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_multi_dml.out b/src/test/regress/expected/failure_multi_dml.out index bbea2c999..7757f574c 100644 --- a/src/test/regress/expected/failure_multi_dml.out +++ b/src/test/regress/expected/failure_multi_dml.out @@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE").kill()'); BEGIN; DELETE FROM dml_test WHERE id = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open DELETE FROM dml_test WHERE id = 2; ERROR: current transaction is aborted, commands ignored until end of transaction block INSERT INTO dml_test VALUES (5, 'Epsilon'); @@ -93,7 +93,7 @@ BEGIN; DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection 
not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open UPDATE dml_test SET name = 'alpha' WHERE id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block UPDATE dml_test SET name = 'gamma' WHERE id = 3; @@ -148,7 +148,7 @@ DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); UPDATE dml_test SET name = 'alpha' WHERE id = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open UPDATE dml_test SET name = 'gamma' WHERE id = 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; diff --git a/src/test/regress/expected/failure_multi_row_insert.out b/src/test/regress/expected/failure_multi_row_insert.out index f3cd4919a..8feffbaeb 100644 --- a/src/test/regress/expected/failure_multi_row_insert.out +++ b/src/test/regress/expected/failure_multi_row_insert.out @@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- this test is broken, see https://github.com/citusdata/citus/issues/2460 -- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); -- INSERT INTO distributed_table VALUES (1,4), (1,5), (1,6); @@ -55,7 +55,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,7), (5,8); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to 
the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- this test is broken, see https://github.com/citusdata/citus/issues/2460 -- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); -- INSERT INTO distributed_table VALUES (1,9), (5,10); @@ -67,7 +67,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,11), (6,12); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -84,7 +84,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()'); (1 row) INSERT INTO distributed_table VALUES (1,15), (6,16); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -101,7 +101,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (2,19),(1,20); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_multi_shard_update_delete.out 
b/src/test/regress/expected/failure_multi_shard_update_delete.out index 24cb895ea..27284ec38 100644 --- a/src/test/regress/expected/failure_multi_shard_update_delete.out +++ b/src/test/regress/expected/failure_multi_shard_update_delete.out @@ -58,7 +58,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -74,7 +74,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005"). (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -134,7 +134,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -150,7 +150,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill( (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER 
(WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -202,7 +202,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -218,7 +218,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005"). (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -278,7 +278,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -294,7 +294,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill( (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -364,7 +364,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); 
(1 row) DELETE FROM r1 WHERE a = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b2 @@ -379,7 +379,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b2 @@ -459,7 +459,7 @@ UPDATE t3 SET c = q.c FROM ( SELECT b, max(c) as c FROM t2 GROUP BY b) q WHERE t3.b = q.b RETURNING *; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -515,7 +515,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill( (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 @@ -547,7 +547,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node 
postgres@localhost:xxxxx failed with the following error: connection not open END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; @@ -563,7 +563,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO (1 row) UPDATE t3 SET b = 1 WHERE b = 2 RETURNING *; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 @@ -578,7 +578,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 @@ -610,7 +610,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; diff --git a/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out b/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out index 541bce5c5..2c4120dbd 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out +++ 
b/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out @@ -155,7 +155,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_local_group SET group (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop node metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").cancel(' || :pid || ')'); mitmproxy @@ -172,7 +172,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").kill()'); (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to send node metadata SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").cancel(' || :pid || ')'); mitmproxy @@ -189,7 +189,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").kill()'); (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop sequence dependency for all tables SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*FROM pg_dist_partition").cancel(' || :pid || ')'); mitmproxy @@ -206,7 +206,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequen (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following 
error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop shell table SELECT citus.mitmproxy('conn.onQuery(query="CALL pg_catalog.worker_drop_all_shell_tables").cancel(' || :pid || ')'); mitmproxy @@ -223,7 +223,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CALL pg_catalog.worker_drop_all_shel (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_partition metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").cancel(' || :pid || ')'); mitmproxy @@ -240,7 +240,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").kill (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_shard metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_shard").cancel(' || :pid || ')'); mitmproxy @@ -257,7 +257,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_shard").kill()') (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_placement metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_placement").cancel(' || :pid || ')'); mitmproxy @@ -274,7 +274,7 @@ SELECT 
citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_placement").kill (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_object metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_object").cancel(' || :pid || ')'); mitmproxy @@ -291,7 +291,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_objec (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_colocation metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_colocation").cancel(' || :pid || ')'); mitmproxy @@ -308,7 +308,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_coloc (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to alter or create role SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_alter_role").cancel(' || :pid || ')'); mitmproxy @@ -325,7 +325,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_alter_role") (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx 
failed with the following error: connection not open -- Failure to set database owner SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").cancel(' || :pid || ')'); mitmproxy @@ -342,7 +342,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").kill()'); (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create schema SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans AUTHORIZATION").cancel(' || :pid || ')'); mitmproxy @@ -359,7 +359,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metad (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create collation SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*CREATE COLLATION mx_metadata_sync_multi_trans.german_phonebook").cancel(' || :pid || ')'); mitmproxy @@ -376,7 +376,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_obje (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create function SELECT citus.mitmproxy('conn.onQuery(query="CREATE OR REPLACE FUNCTION mx_metadata_sync_multi_trans.one_as_result").cancel(' || :pid || ')'); mitmproxy @@ -393,7 +393,7 @@ SELECT 
citus.mitmproxy('conn.onQuery(query="CREATE OR REPLACE FUNCTION mx_metada (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create text search dictionary SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_german_dict").cancel(' || :pid || ')'); mitmproxy @@ -410,7 +410,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_obje (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create text search config SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_ts_config").cancel(' || :pid || ')'); mitmproxy @@ -427,7 +427,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_obje (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create type SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*pair_type").cancel(' || :pid || ')'); mitmproxy @@ -444,7 +444,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_obje (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node 
postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create publication SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").cancel(' || :pid || ')'); mitmproxy @@ -461,7 +461,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").kill() (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create sequence SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").cancel(' || :pid || ')'); mitmproxy @@ -478,7 +478,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop sequence dependency for distributed table SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')'); mitmproxy @@ -495,7 +495,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequen (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop distributed table if exists SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')'); mitmproxy @@ -512,7 +512,7 @@ SELECT 
citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS mx_metadata_syn (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create distributed table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')'); mitmproxy @@ -529,7 +529,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to record sequence dependency for table SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_record_sequence_dependency").cancel(' || :pid || ')'); mitmproxy @@ -546,7 +546,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_record_sequ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create index for table SELECT citus.mitmproxy('conn.onQuery(query="CREATE INDEX dist1_search_phone_idx ON mx_metadata_sync_multi_trans.dist1 USING gin").cancel(' || :pid || ')'); mitmproxy @@ -563,7 +563,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE INDEX dist1_search_phone_idx (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open 
+ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create reference table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.ref").cancel(' || :pid || ')'); mitmproxy @@ -580,7 +580,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create local table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.loc1").cancel(' || :pid || ')'); mitmproxy @@ -597,7 +597,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create distributed partitioned table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders").cancel(' || :pid || ')'); mitmproxy @@ -614,7 +614,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create distributed partition table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders_p2020_01_05").cancel(' || :pid || ')'); 
mitmproxy @@ -631,7 +631,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to attach partition SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_trans.orders ATTACH PARTITION mx_metadata_sync_multi_trans.orders_p2020_01_05").cancel(' || :pid || ')'); mitmproxy @@ -648,7 +648,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_t (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add partition metadata SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").cancel(' || :pid || ')'); mitmproxy @@ -665,7 +665,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add shard metadata SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')'); mitmproxy @@ -682,7 +682,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_meta (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: 
connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add placement metadata SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')'); mitmproxy @@ -699,7 +699,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add colocation metadata SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add_colocation_metadata").cancel(' || :pid || ')'); mitmproxy @@ -716,7 +716,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add distributed object metadata SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").cancel(' || :pid || ')'); mitmproxy @@ -733,7 +733,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_met (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark function as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*one_as_result").cancel(' || :pid || ')'); 
mitmproxy @@ -750,7 +750,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*one_as (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark collation as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*german_phonebook").cancel(' || :pid || ')'); mitmproxy @@ -767,7 +767,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*german (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark text search dictionary as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_german_dict").cancel(' || :pid || ')'); mitmproxy @@ -784,7 +784,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ger (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark text search configuration as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ts_config").cancel(' || :pid || ')'); mitmproxy @@ -801,7 +801,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ts_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: 
connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark type as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pair_type").cancel(' || :pid || ')'); mitmproxy @@ -818,7 +818,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pair_t (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark sequence as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*seq_owned").cancel(' || :pid || ')'); mitmproxy @@ -835,7 +835,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*seq_ow (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark publication as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pub_all").cancel(' || :pid || ')'); mitmproxy @@ -852,7 +852,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pub_al (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to set isactive to true SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET isactive = TRUE").cancel(' || :pid || ')'); mitmproxy diff --git 
a/src/test/regress/expected/failure_on_create_subscription.out b/src/test/regress/expected/failure_on_create_subscription.out index 19df82d3e..a42df24d2 100644 --- a/src/test/regress/expected/failure_on_create_subscription.out +++ b/src/test/regress/expected/failure_on_create_subscription.out @@ -43,9 +43,9 @@ SELECT * FROM shards_in_workers; -- Failure on creating the subscription -- Failing exactly on CREATE SUBSCRIPTION is causing flaky test where we fail with either: --- 1) ERROR: connection to the remote node localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist +-- 1) ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist -- another command is already in progress --- 2) ERROR: connection to the remote node localhost:xxxxx failed with the following error: another command is already in progress +-- 2) ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: another command is already in progress -- Instead fail on the next step (ALTER SUBSCRIPTION) instead which is also required logically as part of uber CREATE SUBSCRIPTION operation. 
SELECT citus.mitmproxy('conn.onQuery(query="ALTER SUBSCRIPTION").kill()'); mitmproxy diff --git a/src/test/regress/expected/failure_online_move_shard_placement.out b/src/test/regress/expected/failure_online_move_shard_placement.out index cf5890f35..0a881fe42 100644 --- a/src/test/regress/expected/failure_online_move_shard_placement.out +++ b/src/test/regress/expected/failure_online_move_shard_placement.out @@ -407,7 +407,7 @@ SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cleanup leftovers SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -442,7 +442,7 @@ SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- failure on parallel create index ALTER SYSTEM RESET citus.max_adaptive_executor_pool_size; SELECT pg_reload_conf(); @@ -458,7 +458,7 @@ SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Verify that the shard is not moved and the number of rows are still 100k SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff 
--git a/src/test/regress/expected/failure_ref_tables.out b/src/test/regress/expected/failure_ref_tables.out index 4984cc1bf..e9a7e4571 100644 --- a/src/test/regress/expected/failure_ref_tables.out +++ b/src/test/regress/expected/failure_ref_tables.out @@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT").kill()'); (1 row) INSERT INTO ref_table VALUES (5, 6); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM ref_table WHERE key=5; count --------------------------------------------------------------------- @@ -48,7 +48,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE").kill()'); (1 row) UPDATE ref_table SET key=7 RETURNING value; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM ref_table WHERE key=7; count --------------------------------------------------------------------- @@ -65,7 +65,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE").kill()'); BEGIN; DELETE FROM ref_table WHERE key=5; UPDATE ref_table SET key=value; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT COUNT(*) FROM ref_table WHERE key=value; count diff --git a/src/test/regress/expected/failure_replicated_partitions.out b/src/test/regress/expected/failure_replicated_partitions.out index 7294df98b..67dda269c 100644 --- a/src/test/regress/expected/failure_replicated_partitions.out +++ b/src/test/regress/expected/failure_replicated_partitions.out @@ -28,7 +28,7 @@ SELECT 
citus.mitmproxy('conn.onQuery(query="INSERT").kill()'); (1 row) INSERT INTO partitioned_table VALUES (0, 0); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- use both placements SET citus.task_assignment_policy TO "round-robin"; -- the results should be the same diff --git a/src/test/regress/expected/failure_savepoints.out b/src/test/regress/expected/failure_savepoints.out index 9b155e90e..ca5cb91f6 100644 --- a/src/test/regress/expected/failure_savepoints.out +++ b/src/test/regress/expected/failure_savepoints.out @@ -312,7 +312,7 @@ SELECT * FROM ref; ROLLBACK TO SAVEPOINT start; SELECT * FROM ref; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open END; -- clean up RESET client_min_messages; diff --git a/src/test/regress/expected/failure_single_mod.out b/src/test/regress/expected/failure_single_mod.out index 2a6ed2d77..aa6c10e66 100644 --- a/src/test/regress/expected/failure_single_mod.out +++ b/src/test/regress/expected/failure_single_mod.out @@ -27,7 +27,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT").kill()'); (1 row) INSERT INTO mod_test VALUES (2, 6); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM mod_test WHERE key=2; count --------------------------------------------------------------------- @@ -59,7 +59,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE").kill()'); (1 row) UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key; -ERROR: connection to the remote node localhost:xxxxx failed with the 
following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM mod_test WHERE value='ok'; count --------------------------------------------------------------------- @@ -89,7 +89,7 @@ INSERT INTO mod_test VALUES (2, 6); INSERT INTO mod_test VALUES (2, 7); DELETE FROM mod_test WHERE key=2 AND value = '7'; UPDATE mod_test SET value='ok' WHERE key=2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT COUNT(*) FROM mod_test WHERE key=2; count diff --git a/src/test/regress/expected/failure_single_select.out b/src/test/regress/expected/failure_single_select.out index 1b60f3125..586dd4756 100644 --- a/src/test/regress/expected/failure_single_select.out +++ b/src/test/regress/expected/failure_single_select.out @@ -30,14 +30,14 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT.*select_test").kill()'); (1 row) SELECT * FROM select_test WHERE key = 3; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open key | value --------------------------------------------------------------------- 3 | test data (1 row) SELECT * FROM select_test WHERE key = 3; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open key | value --------------------------------------------------------------------- 3 | test data @@ -54,7 +54,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT.*select_test").kill()'); BEGIN; INSERT INTO select_test VALUES (3, 'more data'); 
SELECT * FROM select_test WHERE key = 3; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -142,7 +142,7 @@ SELECT * FROM select_test WHERE key = 3; INSERT INTO select_test VALUES (3, 'even more data'); SELECT * FROM select_test WHERE key = 3; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT citus.mitmproxy('conn.onQuery(query="SELECT.*pg_prepared_xacts").after(2).kill()'); mitmproxy @@ -186,7 +186,7 @@ SELECT * FROM select_test WHERE key = 1; (1 row) SELECT * FROM select_test WHERE key = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- now the same test with query cancellation SELECT citus.mitmproxy('conn.onQuery(query="SELECT.*select_test").after(1).cancel(' || pg_backend_pid() || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_split_cleanup.out b/src/test/regress/expected/failure_split_cleanup.out index f9eacd47c..ba8624725 100644 --- a/src/test/regress/expected/failure_split_cleanup.out +++ b/src/test/regress/expected/failure_split_cleanup.out @@ -627,10 +627,10 @@ WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open -WARNING: 
connection to the remote node localhost:xxxxx failed with the following error: connection not open -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type diff --git a/src/test/regress/expected/failure_tenant_isolation.out b/src/test/regress/expected/failure_tenant_isolation.out index 6be4580be..b406aa2a3 100644 --- a/src/test/regress/expected/failure_tenant_isolation.out +++ b/src/test/regress/expected/failure_tenant_isolation.out @@ -76,7 +76,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on colocated table population SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").cancel(' || :pid || ')'); mitmproxy @@ -94,7 +94,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection 
not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on colocated table constraints SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); mitmproxy @@ -131,7 +131,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on table population SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").cancel(' || :pid || ')'); mitmproxy @@ -149,7 +149,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on table constraints SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_tenant_isolation_nonblocking.out b/src/test/regress/expected/failure_tenant_isolation_nonblocking.out index e40842e2a..aecde71c0 100644 --- a/src/test/regress/expected/failure_tenant_isolation_nonblocking.out +++ b/src/test/regress/expected/failure_tenant_isolation_nonblocking.out @@ -159,7 +159,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SET TRANSACTION SNAPSHOT").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', 
shard_transfer_mode := 'force_logical'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on setting snapshot SELECT citus.mitmproxy('conn.onQuery(query="SET TRANSACTION SNAPSHOT").cancel(' || :pid || ')'); mitmproxy @@ -177,7 +177,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on table population SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").cancel(' || :pid || ')'); mitmproxy @@ -195,7 +195,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on colocated table population SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").cancel(' || :pid || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_truncate.out b/src/test/regress/expected/failure_truncate.out index 4e332252e..253314ee9 100644 --- a/src/test/regress/expected/failure_truncate.out +++ b/src/test/regress/expected/failure_truncate.out @@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following 
error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -152,7 +152,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -414,7 +414,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).kill()'); (1 row) TRUNCATE reference_table CASCADE; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -553,7 +553,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -662,7 +662,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.tes (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following 
error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -922,7 +922,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -1031,7 +1031,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_vacuum.out b/src/test/regress/expected/failure_vacuum.out index 617d40d3a..b438f413b 100644 --- a/src/test/regress/expected/failure_vacuum.out +++ b/src/test/regress/expected/failure_vacuum.out @@ -30,7 +30,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); (1 row) VACUUM vacuum_test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); mitmproxy --------------------------------------------------------------------- @@ -38,7 +38,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); (1 row) ANALYZE vacuum_test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: 
connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy @@ -113,7 +113,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); (1 row) VACUUM vacuum_test, other_vacuum_test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_update_node.out b/src/test/regress/expected/isolation_update_node.out index 53d792e61..703fcc427 100644 --- a/src/test/regress/expected/isolation_update_node.out +++ b/src/test/regress/expected/isolation_update_node.out @@ -250,7 +250,7 @@ count step s1-commit-prepared: COMMIT prepared 'label'; -s2: WARNING: connection to the remote node non-existent:57637 failed with the following error: could not translate host name "non-existent" to address: +s2: WARNING: connection to the remote node postgres@non-existent:57637 failed with the following error: could not translate host name "non-existent" to address: step s2-execute-prepared: EXECUTE foo; diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index f6e4db7ee..58293a2d6 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -3281,9 +3281,9 @@ SELECT pg_sleep(0.1); -- wait to make sure the config has changed before running SET citus.enable_local_execution TO false; -- force a connection to the dummy placements -- run queries that use dummy placements for local 
execution SELECT * FROM event_responses WHERE FALSE; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: WITH cte_1 AS (SELECT * FROM event_responses LIMIT 1) SELECT count(*) FROM cte_1; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: ALTER SYSTEM RESET citus.local_hostname; SELECT pg_reload_conf(); pg_reload_conf diff --git a/src/test/regress/expected/local_shard_execution_0.out b/src/test/regress/expected/local_shard_execution_0.out index 8c4fbfd74..948941aad 100644 --- a/src/test/regress/expected/local_shard_execution_0.out +++ b/src/test/regress/expected/local_shard_execution_0.out @@ -3281,9 +3281,9 @@ SELECT pg_sleep(0.1); -- wait to make sure the config has changed before running SET citus.enable_local_execution TO false; -- force a connection to the dummy placements -- run queries that use dummy placements for local execution SELECT * FROM event_responses WHERE FALSE; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: WITH cte_1 AS (SELECT * FROM event_responses LIMIT 1) SELECT count(*) FROM cte_1; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: ALTER SYSTEM 
RESET citus.local_hostname; SELECT pg_reload_conf(); pg_reload_conf diff --git a/src/test/regress/expected/multi_citus_tools.out b/src/test/regress/expected/multi_citus_tools.out index eef7a98ca..b47763686 100644 --- a/src/test/regress/expected/multi_citus_tools.out +++ b/src/test/regress/expected/multi_citus_tools.out @@ -587,7 +587,7 @@ SET client_min_messages TO DEBUG; -- verify that we can create connections only with users with login privileges. SET ROLE role_without_login; SELECT citus_check_connection_to_node('localhost', :worker_1_port); -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "role_without_login" is not permitted to log in +WARNING: connection to the remote node role_without_login@localhost:xxxxx failed with the following error: FATAL: role "role_without_login" is not permitted to log in citus_check_connection_to_node --------------------------------------------------------------------- f diff --git a/src/test/regress/expected/multi_copy.out b/src/test/regress/expected/multi_copy.out index abd58eb1d..ff4cbdd2c 100644 --- a/src/test/regress/expected/multi_copy.out +++ b/src/test/regress/expected/multi_copy.out @@ -730,7 +730,7 @@ ALTER USER test_user WITH nologin; \c - test_user - :master_port -- reissue copy, and it should fail COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in -- verify shards in the none of the workers as marked invalid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) @@ -749,7 +749,7 @@ SELECT shardid, shardstate, nodename, nodeport -- try to insert into a reference table copy should fail COPY numbers_reference FROM STDIN WITH (FORMAT 
'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in -- verify shards for reference table are still valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) @@ -765,7 +765,7 @@ SELECT shardid, shardstate, nodename, nodeport -- since it can not insert into either copies of a shard. shards are expected to -- stay valid since the operation is rolled back. COPY numbers_hash_other FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in -- verify shards for numbers_hash_other are still valid -- since copy has failed altogether SELECT shardid, shardstate, nodename, nodeport diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out index 7c3344ee5..849a28c73 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -1208,15 +1208,15 @@ set citus.enable_alter_role_propagation=true; SET search_path TO multi_modifying_xacts; -- should fail since the worker doesn't have test_user anymore INSERT INTO reference_failure_test VALUES (1, '1'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist -- the same as the above, but wrapped within a transaction BEGIN; INSERT INTO reference_failure_test 
VALUES (1, '1'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist COMMIT; BEGIN; COPY reference_failure_test FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist COMMIT; -- show that no data go through the table and shard states are good SET client_min_messages to 'ERROR'; @@ -1242,7 +1242,7 @@ ORDER BY s.logicalrelid, sp.shardstate; -- any failure rollbacks the transaction BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist ABORT; -- none of placements are invalid after abort SELECT shardid, shardstate, nodename, nodeport @@ -1263,8 +1263,8 @@ ORDER BY shardid, nodeport; -- verify nothing is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +WARNING: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +WARNING: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist count --------------------------------------------------------------------- 
0 @@ -1290,7 +1290,7 @@ ORDER BY shardid, nodeport; -- all failures roll back the transaction BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist COMMIT; -- expect none of the placements to be market invalid after commit SELECT shardid, shardstate, nodename, nodeport @@ -1311,8 +1311,8 @@ ORDER BY shardid, nodeport; -- verify no data is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +WARNING: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +WARNING: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist count --------------------------------------------------------------------- 0 @@ -1328,7 +1328,7 @@ set citus.enable_alter_role_propagation=true; SET search_path TO multi_modifying_xacts; -- fails on all shard placements INSERT INTO numbers_hash_failure_test VALUES (2,2); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist -- connect back to the master with the proper user to continue the tests \c - :default_user - :master_port SET search_path TO multi_modifying_xacts; diff --git a/src/test/regress/expected/multi_multiuser_auth.out 
b/src/test/regress/expected/multi_multiuser_auth.out index 7a72eeba1..6b0e85b67 100644 --- a/src/test/regress/expected/multi_multiuser_auth.out +++ b/src/test/regress/expected/multi_multiuser_auth.out @@ -72,7 +72,7 @@ GRANT ALL ON TABLE lineitem, orders, lineitem, customer, nation, part, supplier \c :alice_conninfo -- router query (should break because of bad password) INSERT INTO customer VALUES (12345, 'name', NULL, 5, 'phone', 123.45, 'segment', 'comment'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: password authentication failed for user "alice" +ERROR: connection to the remote node alice@localhost:xxxxx failed with the following error: FATAL: password authentication failed for user "alice" -- fix alice's worker1 password ... UPDATE pg_dist_authinfo SET authinfo = ('password=' || :'alice_worker_1_pw') diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index c6d46ccc9..702b115da 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -2703,10 +2703,10 @@ SET search_path TO multi_router_planner; -- still, we never mark placements inactive. 
Instead, fail the transaction BEGIN; INSERT INTO failure_test VALUES (1, 1); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist +ERROR: connection to the remote node router_user@localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist ROLLBACK; INSERT INTO failure_test VALUES (2, 1); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist +ERROR: connection to the remote node router_user@localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard diff --git a/src/test/regress/expected/node_conninfo_reload.out b/src/test/regress/expected/node_conninfo_reload.out index d2e33d950..785e3e1b1 100644 --- a/src/test/regress/expected/node_conninfo_reload.out +++ b/src/test/regress/expected/node_conninfo_reload.out @@ -47,7 +47,7 @@ show citus.node_conninfo; -- Should give a connection error because of bad sslmode select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; select pg_reload_conf(); @@ -118,7 +118,7 @@ select count(*) from test where a = 0; COMMIT; -- Should fail now with connection error, when transaction is finished select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" -- Reset it again ALTER 
SYSTEM RESET citus.node_conninfo; select pg_reload_conf(); @@ -181,7 +181,7 @@ COMMIT; -- Should fail now, when transaction is finished SET client_min_messages TO ERROR; select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" RESET client_min_messages; -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; @@ -235,11 +235,11 @@ show citus.node_conninfo; -- Should fail since a different shard is accessed and thus a new connection -- will to be created. select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" COMMIT; -- Should still fail now, when transaction is finished select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; select pg_reload_conf(); @@ -301,7 +301,7 @@ COMMIT; -- Should fail now, when transaction is finished SET client_min_messages TO ERROR; select count(*) from test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" RESET client_min_messages; -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; @@ -359,7 +359,7 @@ ROLLBACK; -- Should fail now, when transaction is 
finished SET client_min_messages TO ERROR; select count(*) from test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" RESET client_min_messages; -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; @@ -497,7 +497,7 @@ ALTER TABLE test ADD COLUMN c INT; COMMIT; -- Should fail now, when transaction is finished ALTER TABLE test ADD COLUMN d INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; select pg_reload_conf(); diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index 2c399f24a..a7cd6b38c 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -127,7 +127,7 @@ SELECT pg_sleep(.1); -- wait to make sure the config has changed before running (1 row) SELECT master_drain_node('localhost', :master_port); -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: CALL citus_cleanup_orphaned_resources(); ALTER SYSTEM RESET citus.local_hostname; SELECT pg_reload_conf(); @@ -197,7 +197,7 @@ SELECT pg_sleep(.1); -- wait to make sure the config has changed before running (1 row) SELECT replicate_table_shards('dist_table_test_2', max_shard_copies := 4, shard_transfer_mode:='block_writes'); -ERROR: connection to the remote node foobar:57636 failed with the following 
error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: ALTER SYSTEM RESET citus.local_hostname; SELECT pg_reload_conf(); pg_reload_conf @@ -681,7 +681,7 @@ FROM ( FROM pg_dist_shard WHERE logicalrelid = 'rebalance_test_table'::regclass ) T; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: CALL citus_cleanup_orphaned_resources(); ALTER SYSTEM RESET citus.local_hostname; SELECT pg_reload_conf(); diff --git a/src/test/regress/sql/failure_on_create_subscription.sql b/src/test/regress/sql/failure_on_create_subscription.sql index 3a0ae3b5e..60af71e47 100644 --- a/src/test/regress/sql/failure_on_create_subscription.sql +++ b/src/test/regress/sql/failure_on_create_subscription.sql @@ -34,9 +34,9 @@ SELECT * FROM shards_in_workers; -- Failure on creating the subscription -- Failing exactly on CREATE SUBSCRIPTION is causing flaky test where we fail with either: --- 1) ERROR: connection to the remote node localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist +-- 1) ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist -- another command is already in progress --- 2) ERROR: connection to the remote node localhost:xxxxx failed with the following error: another command is already in progress +-- 2) ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: another command is already in progress -- Instead fail on the next step (ALTER SUBSCRIPTION) instead which is also required logically as 
part of uber CREATE SUBSCRIPTION operation. SELECT citus.mitmproxy('conn.onQuery(query="ALTER SUBSCRIPTION").kill()'); From 1d096df7f4da722330ee41e1918f1374781267b9 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Wed, 24 Jan 2024 15:58:55 +0300 Subject: [PATCH 10/16] Not use hardcoded LOCAL_HOST_NAME but citus.local_hostname to distinguish loopback connections (#7436) Fixes a bug that breaks queries from non-maindbs when citus.local_hostname is set to a value different than "localhost". This is a very old bug doesn't cause a problem as long as Citus catalog is available to FindWorkerNode(). And the catalog is always available unless we're in non-main database, which might be the case on main but not on older releases, hence not adding a `DESCRIPTION`. For this reason, I don't see a reason to backport this. Maybe we should totally refrain using LOCAL_HOST_NAME in all code-paths, but not doing that in this PR as the other paths don't seem to be breaking something that is user-facing. ```c char * GetAuthinfo(char *hostname, int32 port, char *user) { char *authinfo = NULL; bool isLoopback = (strncmp(LOCAL_HOST_NAME, hostname, MAX_NODE_LENGTH) == 0 && PostPortNumber == port); if (IsTransactionState()) { int64 nodeId = WILDCARD_NODE_ID; /* -1 is a special value for loopback connections (task tracker) */ if (isLoopback) { nodeId = LOCALHOST_NODE_ID; } else { WorkerNode *worker = FindWorkerNode(hostname, port); if (worker != NULL) { nodeId = worker->nodeId; } } authinfo = GetAuthinfoViaCatalog(user, nodeId); } return (authinfo != NULL) ? 
authinfo : ""; } ``` --- .../connection/connection_configuration.c | 16 +++++++++++++++- .../distributed/metadata/metadata_cache.c | 8 -------- src/test/regress/expected/other_databases.out | 6 ++++++ src/test/regress/sql/other_databases.sql | 7 +++++++ 4 files changed, 28 insertions(+), 9 deletions(-) diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c index c52254f9c..e07692517 100644 --- a/src/backend/distributed/connection/connection_configuration.c +++ b/src/backend/distributed/connection/connection_configuration.c @@ -521,9 +521,23 @@ char * GetAuthinfo(char *hostname, int32 port, char *user) { char *authinfo = NULL; - bool isLoopback = (strncmp(LOCAL_HOST_NAME, hostname, MAX_NODE_LENGTH) == 0 && + bool isLoopback = (strncmp(LocalHostName, hostname, MAX_NODE_LENGTH) == 0 && PostPortNumber == port); + /* + * Citus will not be loaded when we run a global DDL command from a + * Citus non-main database. + */ + if (!CitusHasBeenLoaded()) + { + /* + * We don't expect non-main databases to connect to a node other than + * the local one. + */ + Assert(isLoopback); + return ""; + } + if (IsTransactionState()) { int64 nodeId = WILDCARD_NODE_ID; diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 555365e68..402dedb8a 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -5723,14 +5723,6 @@ GetPoolinfoViaCatalog(int32 nodeId) char * GetAuthinfoViaCatalog(const char *roleName, int64 nodeId) { - /* - * Citus will not be loaded when we run a global DDL command from a - * Citus non-main database. 
- */ - if (!CitusHasBeenLoaded()) - { - return ""; - } char *authinfo = ""; Datum nodeIdDatumArray[2] = { Int32GetDatum(nodeId), diff --git a/src/test/regress/expected/other_databases.out b/src/test/regress/expected/other_databases.out index 9e170861e..15bff66ed 100644 --- a/src/test/regress/expected/other_databases.out +++ b/src/test/regress/expected/other_databases.out @@ -71,9 +71,15 @@ SELECT citus_internal.execute_command_on_remote_nodes_as_user($$SELECT 'dangerou ERROR: operation is not allowed HINT: Run the command with a superuser. \c other_db1 +SET citus.local_hostname TO '127.0.0.1'; SET ROLE nonsuperuser; +-- Make sure that we don't try to access pg_dist_node. +-- Otherwise, we would get the following error: +-- ERROR: cache lookup failed for pg_dist_node, called too early? CREATE USER other_db_user9; RESET ROLE; +RESET citus.local_hostname; +RESET ROLE; \c regression SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; usename diff --git a/src/test/regress/sql/other_databases.sql b/src/test/regress/sql/other_databases.sql index 563793518..8cd54f354 100644 --- a/src/test/regress/sql/other_databases.sql +++ b/src/test/regress/sql/other_databases.sql @@ -51,9 +51,16 @@ SET ROLE nonsuperuser; SELECT citus_internal.execute_command_on_remote_nodes_as_user($$SELECT 'dangerous query'$$, 'postgres'); \c other_db1 +SET citus.local_hostname TO '127.0.0.1'; SET ROLE nonsuperuser; + +-- Make sure that we don't try to access pg_dist_node. +-- Otherwise, we would get the following error: +-- ERROR: cache lookup failed for pg_dist_node, called too early? 
CREATE USER other_db_user9; +RESET ROLE; +RESET citus.local_hostname; RESET ROLE; \c regression SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; From 3de5601bcca3a461ab7901ee6c57d1e1132b2043 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Wed, 24 Jan 2024 16:50:39 +0300 Subject: [PATCH 11/16] Replace LOCAL_HOST_NAME with LocalHostName (#7449) The only usages of LOCAL_HOST_NAME were in functions that are only used during regression tests and in places where it was used incorrectly. --- .../distributed/connection/connection_configuration.c | 2 +- src/backend/distributed/test/metadata_sync.c | 2 +- src/backend/distributed/test/run_from_same_connection.c | 2 +- src/include/distributed/connection_management.h | 8 -------- 4 files changed, 3 insertions(+), 11 deletions(-) diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c index e07692517..ac82d4e09 100644 --- a/src/backend/distributed/connection/connection_configuration.c +++ b/src/backend/distributed/connection/connection_configuration.c @@ -445,7 +445,7 @@ GetEffectiveConnKey(ConnectionHashKey *key) if (!IsTransactionState()) { /* we're in the task tracker, so should only see loopback */ - Assert(strncmp(LOCAL_HOST_NAME, key->hostname, MAX_NODE_LENGTH) == 0 && + Assert(strncmp(LocalHostName, key->hostname, MAX_NODE_LENGTH) == 0 && PostPortNumber == key->port); return key; } diff --git a/src/backend/distributed/test/metadata_sync.c b/src/backend/distributed/test/metadata_sync.c index d6aeb842c..dec20c772 100644 --- a/src/backend/distributed/test/metadata_sync.c +++ b/src/backend/distributed/test/metadata_sync.c @@ -125,7 +125,7 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS) /* First we start listening. 
*/ MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION, - LOCAL_HOST_NAME, PostPortNumber); + LocalHostName, PostPortNumber); ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL); /* diff --git a/src/backend/distributed/test/run_from_same_connection.c b/src/backend/distributed/test/run_from_same_connection.c index 5663a42fa..52b2e0b18 100644 --- a/src/backend/distributed/test/run_from_same_connection.c +++ b/src/backend/distributed/test/run_from_same_connection.c @@ -155,7 +155,7 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS) StringInfo processStringInfo = makeStringInfo(); StringInfo workerProcessStringInfo = makeStringInfo(); - MultiConnection *localConnection = GetNodeConnection(0, LOCAL_HOST_NAME, + MultiConnection *localConnection = GetNodeConnection(0, LocalHostName, PostPortNumber); if (!singleConnection) diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index 9eadbde9d..d93e4483a 100644 --- a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -61,14 +61,6 @@ */ #define LOCAL_NODE_ID UINT32_MAX -/* - * If you want to connect to the current node use `LocalHostName`, which is a GUC, instead - * of the hardcoded loopback hostname. Only if you really need the loopback hostname use - * this define. 
- */ -#define LOCAL_HOST_NAME "localhost" - - /* forward declare, to avoid forcing large headers on everyone */ struct pg_conn; /* target of the PGconn typedef */ struct MemoryContextData; From 542212c3d80c526ec5ae5a9090ba66953829fbe1 Mon Sep 17 00:00:00 2001 From: eaydingol <60466783+eaydingol@users.noreply.github.com> Date: Wed, 24 Jan 2024 17:11:10 +0300 Subject: [PATCH 12/16] Make citus_internal schema public (#7450) DESCRIPTION: Makes citus_internal schema public #7405 --- .../distributed/sql/citus--12.1-1--12.2-1.sql | 13 +++++++++++++ .../sql/downgrades/citus--12.2-1--12.1-1.sql | 1 + src/test/regress/expected/citus_internal_access.out | 10 ++++++++++ src/test/regress/expected/other_databases.out | 3 +-- src/test/regress/multi_schedule | 1 + src/test/regress/sql/citus_internal_access.sql | 10 ++++++++++ 6 files changed, 36 insertions(+), 2 deletions(-) create mode 100644 src/test/regress/expected/citus_internal_access.out create mode 100644 src/test/regress/sql/citus_internal_access.sql diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql index 72ef46e6f..b4520ecb5 100644 --- a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql +++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql @@ -12,3 +12,16 @@ ALTER TABLE pg_catalog.pg_dist_transaction ADD COLUMN outer_xid xid8; #include "udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql" + +GRANT USAGE ON SCHEMA citus_internal TO PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.commit_management_command_2pc FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.execute_command_on_remote_nodes_as_user FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.find_groupid_for_node FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.mark_object_distributed FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.pg_dist_node_trigger_func FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.pg_dist_rebalance_strategy_trigger_func FROM 
PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.pg_dist_shard_placement_trigger_func FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.refresh_isolation_tester_prepared_statement FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.replace_isolation_tester_func FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.restore_isolation_tester_func FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.start_management_transaction FROM PUBLIC; diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql index 20d85444f..b0204919c 100644 --- a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql @@ -21,3 +21,4 @@ DROP FUNCTION citus_internal.mark_object_distributed( DROP FUNCTION citus_internal.commit_management_command_2pc(); ALTER TABLE pg_catalog.pg_dist_transaction DROP COLUMN outer_xid; +REVOKE USAGE ON SCHEMA citus_internal FROM PUBLIC; diff --git a/src/test/regress/expected/citus_internal_access.out b/src/test/regress/expected/citus_internal_access.out new file mode 100644 index 000000000..21464b38f --- /dev/null +++ b/src/test/regress/expected/citus_internal_access.out @@ -0,0 +1,10 @@ +--- Create a non-superuser role and check if it can access citus_internal schema functions +CREATE USER nonsuperuser CREATEROLE; +SET ROLE nonsuperuser; +--- The non-superuser role should not be able to access citus_internal functions +SELECT citus_internal.commit_management_command_2pc(); +ERROR: permission denied for function commit_management_command_2pc +SELECT citus_internal.replace_isolation_tester_func(); +ERROR: permission denied for function replace_isolation_tester_func +RESET ROLE; +DROP USER nonsuperuser; \ No newline at end of file diff --git a/src/test/regress/expected/other_databases.out b/src/test/regress/expected/other_databases.out index 15bff66ed..a15c4bb50 100644 --- 
a/src/test/regress/expected/other_databases.out +++ b/src/test/regress/expected/other_databases.out @@ -68,8 +68,7 @@ CREATE USER nonsuperuser CREATEROLE; GRANT ALL ON SCHEMA citus_internal TO nonsuperuser; SET ROLE nonsuperuser; SELECT citus_internal.execute_command_on_remote_nodes_as_user($$SELECT 'dangerous query'$$, 'postgres'); -ERROR: operation is not allowed -HINT: Run the command with a superuser. +ERROR: permission denied for function execute_command_on_remote_nodes_as_user \c other_db1 SET citus.local_hostname TO '127.0.0.1'; SET ROLE nonsuperuser; diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 5c9d8a45c..f599363a9 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -109,6 +109,7 @@ test: undistribute_table test: run_command_on_all_nodes test: background_task_queue_monitor test: other_databases +test: citus_internal_access # Causal clock test test: clock diff --git a/src/test/regress/sql/citus_internal_access.sql b/src/test/regress/sql/citus_internal_access.sql new file mode 100644 index 000000000..8e97448f3 --- /dev/null +++ b/src/test/regress/sql/citus_internal_access.sql @@ -0,0 +1,10 @@ +--- Create a non-superuser role and check if it can access citus_internal schema functions +CREATE USER nonsuperuser CREATEROLE; + +SET ROLE nonsuperuser; +--- The non-superuser role should not be able to access citus_internal functions +SELECT citus_internal.commit_management_command_2pc(); +SELECT citus_internal.replace_isolation_tester_func(); + +RESET ROLE; +DROP USER nonsuperuser; From 9a0259cf3e8280356e1855db07fe130306570ac4 Mon Sep 17 00:00:00 2001 From: gurkanindibay Date: Wed, 24 Jan 2024 17:47:39 +0300 Subject: [PATCH 13/16] Adds metadata sync tests --- .../regress/expected/metadata_sync_2pc.out | 60 +++++++++++++++++++ src/test/regress/multi_schedule | 1 + src/test/regress/sql/metadata_sync_2pc.sql | 60 +++++++++++++++++++ 3 files changed, 121 insertions(+) create mode 100644 
src/test/regress/expected/metadata_sync_2pc.out create mode 100644 src/test/regress/sql/metadata_sync_2pc.sql diff --git a/src/test/regress/expected/metadata_sync_2pc.out b/src/test/regress/expected/metadata_sync_2pc.out new file mode 100644 index 000000000..36fe4dd92 --- /dev/null +++ b/src/test/regress/expected/metadata_sync_2pc.out @@ -0,0 +1,60 @@ +CREATE SCHEMA metadata_sync_2pc_schema; +SET search_path TO metadata_sync_2pc_schema; +set citus.enable_create_database_propagation to on; +CREATE DATABASE metadata_sync_2pc_db; +\c metadata_sync_2pc_db +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +CREATE USER grant_role2pc_user1; +CREATE USER grant_role2pc_user2; +CREATE USER grant_role2pc_user3; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +\c regression +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION; +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user4,grant_role2pc_user5 granted by grant_role2pc_user3; +\c regression +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user4","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user4","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + 
[{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"grant_role2pc_user2","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"postgres","admin_option":false}] +(3 rows) + +\c metadata_sync_2pc_db +revoke grant_role2pc_user1,grant_role2pc_user2 from grant_role2pc_user4,grant_role2pc_user5 granted by grant_role2pc_user3; +revoke admin option for grant_role2pc_user1,grant_role2pc_user2 from grant_role2pc_user3; +revoke grant_role2pc_user1,grant_role2pc_user2 from grant_role2pc_user3; +\c regression +drop user grant_role2pc_user1,grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5; +drop database metadata_sync_2pc_db; +drop schema metadata_sync_2pc_schema; +reset citus.enable_create_database_propagation; +reset search_path; diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index aac6464ae..4b8a4a654 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -25,6 +25,7 @@ test: multi_insert_select_conflict citus_table_triggers test: multi_row_insert insert_select_into_local_table test: multi_agg_approximate_distinct test: tablespace +test: metadata_sync_2pc # following should not run in parallel because it relies on connection counts to workers test: insert_select_connection_leak diff --git a/src/test/regress/sql/metadata_sync_2pc.sql b/src/test/regress/sql/metadata_sync_2pc.sql new file mode 100644 index 000000000..dd343f35e --- /dev/null +++ b/src/test/regress/sql/metadata_sync_2pc.sql @@ -0,0 +1,60 @@ + +CREATE SCHEMA 
metadata_sync_2pc_schema; + +SET search_path TO metadata_sync_2pc_schema; + +set citus.enable_create_database_propagation to on; + +CREATE DATABASE metadata_sync_2pc_db; + + +\c metadata_sync_2pc_db +SHOW citus.main_db; + +CREATE USER grant_role2pc_user1; +CREATE USER grant_role2pc_user2; +CREATE USER grant_role2pc_user3; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; + +\c regression +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION; +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user4,grant_role2pc_user5 granted by grant_role2pc_user3; + + +\c regression +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + + +\c metadata_sync_2pc_db +revoke grant_role2pc_user1,grant_role2pc_user2 from grant_role2pc_user4,grant_role2pc_user5 granted by grant_role2pc_user3; + +revoke admin option for grant_role2pc_user1,grant_role2pc_user2 from grant_role2pc_user3; + +revoke grant_role2pc_user1,grant_role2pc_user2 from grant_role2pc_user3; + +\c regression + +drop user grant_role2pc_user1,grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5; + +drop database metadata_sync_2pc_db; + +drop schema metadata_sync_2pc_schema; + +reset citus.enable_create_database_propagation; +reset search_path; + From 24188959ed6433c61f61e527d2d25e5e507ff35d Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Thu, 25 Jan 2024 11:22:39 +0300 Subject: [PATCH 14/16] Improve the script that sorts GUCs in alphabetical order (#7452) Soon we 
will have occurrences of "citus.X" in shared_library_init.c that are not part of GUC defs, so we need to use a more precise regular expression. --- ci/check_gucs_are_alphabetically_sorted.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/check_gucs_are_alphabetically_sorted.sh b/ci/check_gucs_are_alphabetically_sorted.sh index 763b5305f..3d368e708 100755 --- a/ci/check_gucs_are_alphabetically_sorted.sh +++ b/ci/check_gucs_are_alphabetically_sorted.sh @@ -4,7 +4,7 @@ set -euo pipefail # shellcheck disable=SC1091 source ci/ci_helpers.sh -# extract citus gucs in the form of "citus.X" -grep -o -E "(\.*\"citus\.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out +# extract citus gucs in the form of "citus.X" +grep -P "^[\t][\t]\"citus\.[a-zA-Z_0-9]+\"" src/backend/distributed/shared_library_init.c > gucs.out sort -c gucs.out rm gucs.out From 016081a49648abfdbec246fe2e4a0f267b64acf2 Mon Sep 17 00:00:00 2001 From: gurkanindibay Date: Wed, 24 Jan 2024 21:18:11 +0300 Subject: [PATCH 15/16] Fixes test error --- src/test/regress/expected/metadata_sync_2pc.out | 1 + src/test/regress/sql/metadata_sync_2pc.sql | 1 + 2 files changed, 2 insertions(+) diff --git a/src/test/regress/expected/metadata_sync_2pc.out b/src/test/regress/expected/metadata_sync_2pc.out index 36fe4dd92..23c1b5b20 100644 --- a/src/test/regress/expected/metadata_sync_2pc.out +++ b/src/test/regress/expected/metadata_sync_2pc.out @@ -54,6 +54,7 @@ revoke admin option for grant_role2pc_user1,grant_role2pc_user2 from grant_role2 revoke grant_role2pc_user1,grant_role2pc_user2 from grant_role2pc_user3; \c regression drop user grant_role2pc_user1,grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5; +set citus.enable_create_database_propagation to on; drop database metadata_sync_2pc_db; drop schema metadata_sync_2pc_schema; reset citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/metadata_sync_2pc.sql 
b/src/test/regress/sql/metadata_sync_2pc.sql index dd343f35e..8d6ee6467 100644 --- a/src/test/regress/sql/metadata_sync_2pc.sql +++ b/src/test/regress/sql/metadata_sync_2pc.sql @@ -51,6 +51,7 @@ revoke grant_role2pc_user1,grant_role2pc_user2 from grant_role2pc_user3; drop user grant_role2pc_user1,grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5; +set citus.enable_create_database_propagation to on; drop database metadata_sync_2pc_db; drop schema metadata_sync_2pc_schema; From eba3553adae099a16d505c6e4be3f29b1bb214f1 Mon Sep 17 00:00:00 2001 From: gurkanindibay Date: Wed, 24 Jan 2024 21:48:11 +0300 Subject: [PATCH 16/16] Changes test order --- src/test/regress/multi_1_schedule | 1 + src/test/regress/multi_schedule | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index cfff00942..02ac8528a 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -40,6 +40,7 @@ test: create_drop_database_propagation_pg15 test: create_drop_database_propagation_pg16 test: comment_on_database test: comment_on_role +test: metadata_sync_2pc # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential test: single_shard_table_udfs test: schema_based_sharding diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index aa97801b3..1dbc6eba4 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -25,7 +25,6 @@ test: multi_insert_select_conflict citus_table_triggers test: multi_row_insert insert_select_into_local_table test: multi_agg_approximate_distinct test: tablespace -test: metadata_sync_2pc # following should not run in parallel because it relies on connection counts to workers test: insert_select_connection_leak