From 1af8ca8f7cf2046e4a6817dba8967ce11e0724d7 Mon Sep 17 00:00:00 2001 From: Hanefi Onaldi Date: Wed, 22 Dec 2021 18:16:11 +0300 Subject: [PATCH 01/17] Fix static analysis findings (#5550) --- src/backend/distributed/commands/alter_table.c | 2 +- src/backend/distributed/metadata/metadata_cache.c | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index cc2ebd33e..223a06318 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -1190,7 +1190,7 @@ CreateDistributedTableLike(TableConversionState *con) * at this moment, but that's going to be the table in pg_dist_partition. */ Oid parentRelationId = PartitionParentOid(originalRelationId); - Var *parentDistKey = DistPartitionKey(parentRelationId); + Var *parentDistKey = DistPartitionKeyOrError(parentRelationId); char *parentDistKeyColumnName = ColumnToColumnName(parentRelationId, nodeToString(parentDistKey)); diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 52c4d258e..ce05f7c28 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -4353,6 +4353,13 @@ GetIntervalTypeInfo(char partitionMethod, Var *partitionColumn, case DISTRIBUTE_BY_APPEND: case DISTRIBUTE_BY_RANGE: { + /* we need a valid partition column Var in this case */ + if (partitionColumn == NULL) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unexpected partition column value: null"), + errdetail("Please report this to the Citus core team."))); + } *intervalTypeId = partitionColumn->vartype; *intervalTypeMod = partitionColumn->vartypmod; break; From 76176caea75141f5fbcb0308ad10df88b8542bd5 Mon Sep 17 00:00:00 2001 From: Hanefi Onaldi Date: Wed, 22 Dec 2021 21:24:58 +0300 Subject: [PATCH 02/17] Fix typo s/exlusive/exclusive/ --- src/backend/columnar/columnar_tableam.c | 4 ++-- .../distributed/executor/distributed_execution_locks.c | 2 +- src/backend/distributed/transaction/transaction_recovery.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 05fe0c536..df1d28584 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -2660,7 +2660,7 @@ upgrade_columnar_storage(PG_FUNCTION_ARGS) * ACCESS EXCLUSIVE LOCK is not required by the low-level routines, so we * can take only an ACCESS SHARE LOCK. But all access to non-current * columnar tables will fail anyway, so it's better to take ACCESS - * EXLUSIVE LOCK now. + * EXCLUSIVE LOCK now. */ Relation rel = table_open(relid, AccessExclusiveLock); if (!IsColumnarTableAmTable(relid)) @@ -2696,7 +2696,7 @@ downgrade_columnar_storage(PG_FUNCTION_ARGS) * ACCESS EXCLUSIVE LOCK is not required by the low-level routines, so we * can take only an ACCESS SHARE LOCK. But all access to non-current * columnar tables will fail anyway, so it's better to take ACCESS - * EXLUSIVE LOCK now. + * EXCLUSIVE LOCK now.
*/ Relation rel = table_open(relid, AccessExclusiveLock); if (!IsColumnarTableAmTable(relid)) diff --git a/src/backend/distributed/executor/distributed_execution_locks.c b/src/backend/distributed/executor/distributed_execution_locks.c index 035c6e511..27c6a961d 100644 --- a/src/backend/distributed/executor/distributed_execution_locks.c +++ b/src/backend/distributed/executor/distributed_execution_locks.c @@ -117,7 +117,7 @@ AcquireExecutorShardLocksForRelationRowLockList(List *relationRowLockList) * We have selected these lock types according to conflict table given in the * Postgres documentation. It is given that FOR UPDATE and FOR NO KEY UPDATE * must be conflict with each other modify command. By getting ExlcusiveLock - * we guarantee that. Note that, getting ExlusiveLock does not mimic the + * we guarantee that. Note that, getting ExclusiveLock does not mimic the * behaviour of Postgres exactly. Getting row lock with FOR NO KEY UPDATE and * FOR KEY SHARE do not conflict in Postgres, yet they block each other in * our implementation. Since FOR SHARE and FOR KEY SHARE does not conflict diff --git a/src/backend/distributed/transaction/transaction_recovery.c b/src/backend/distributed/transaction/transaction_recovery.c index 1f42ab2e6..87809c7b5 100644 --- a/src/backend/distributed/transaction/transaction_recovery.c +++ b/src/backend/distributed/transaction/transaction_recovery.c @@ -183,7 +183,7 @@ RecoverWorkerTransactions(WorkerNode *workerNode) * distributed transactions. * * We could avoid this by temporarily blocking new prepared transactions - * from being created by taking an ExlusiveLock on pg_dist_transaction. + * from being created by taking an ExclusiveLock on pg_dist_transaction. * However, this hurts write performance, so instead we avoid blocking * by consulting the list of active distributed transactions, and follow * a carefully chosen order to avoid race conditions: From e196d238548a208c28c3769b58c2d34030e8319c Mon Sep 17 00:00:00 2001 From: Talha Nisanci Date: Thu, 23 Dec 2021 13:19:02 +0300 Subject: [PATCH 03/17] Refactor AttributeEquivalenceId (#5006) --- .../planner/relation_restriction_equivalence.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index 437bef6b4..d750d09db 100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -36,7 +36,7 @@ #include "optimizer/pathnode.h" -static uint32 attributeEquivalenceId = 1; +static uint32 AttributeEquivalenceId = 1; /* @@ -292,7 +292,7 @@ SafeToPushdownUnionSubquery(Query *originalQuery, palloc0(sizeof(AttributeEquivalenceClass)); ListCell *relationRestrictionCell = NULL; - attributeEquivalence->equivalenceId = attributeEquivalenceId++; + attributeEquivalence->equivalenceId = AttributeEquivalenceId++; /* * Ensure that the partition column is in the same place across all @@ -617,7 +617,7 @@ GenerateAllAttributeEquivalences(PlannerRestrictionContext *plannerRestrictionCo plannerRestrictionContext->joinRestrictionContext; /* reset the equivalence id counter per call to prevent overflows */ - attributeEquivalenceId = 1; + AttributeEquivalenceId = 1; List *relationRestrictionAttributeEquivalenceList = GenerateAttributeEquivalencesForRelationRestrictions(relationRestrictionContext); @@ -801,7 +801,7 @@ AttributeEquivalenceClassForEquivalenceClass(EquivalenceClass 
*plannerEqClass, ListCell *equivilanceMemberCell = NULL; PlannerInfo *plannerInfo = relationRestriction->plannerInfo; - attributeEquivalence->equivalenceId = attributeEquivalenceId++; + attributeEquivalence->equivalenceId = AttributeEquivalenceId++; foreach(equivilanceMemberCell, plannerEqClass->ec_members) { @@ -1183,7 +1183,7 @@ GenerateAttributeEquivalencesForJoinRestrictions(JoinRestrictionContext * AttributeEquivalenceClass *attributeEquivalence = palloc0( sizeof(AttributeEquivalenceClass)); - attributeEquivalence->equivalenceId = attributeEquivalenceId++; + attributeEquivalence->equivalenceId = AttributeEquivalenceId++; AddToAttributeEquivalenceClass(attributeEquivalence, joinRestriction->plannerInfo, leftVar); From 61b5fb1cfc165a1d12584da9ae850c4075e2f108 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Thu, 23 Dec 2021 14:54:12 +0300 Subject: [PATCH 04/17] Run failure_test_helpers in base schedule (#5559) --- src/test/regress/base_schedule | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/regress/base_schedule b/src/test/regress/base_schedule index e3ea4de24..7eca2ab85 100644 --- a/src/test/regress/base_schedule +++ b/src/test/regress/base_schedule @@ -1,8 +1,8 @@ # ---------- # Only run few basic tests to set up a testing environment # ---------- +test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw columnar_test_helpers failure_test_helpers test: multi_cluster_management -test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw columnar_test_helpers test: multi_test_catalog_views test: multi_create_table multi_behavioral_analytics_create_table test: multi_create_table_superuser multi_behavioral_analytics_create_table_superuser From 042d45b263af66d35f707a274c5f02c667f2c36f Mon Sep 17 00:00:00 2001 From: Ahmet Gedemenli Date: Tue, 16 Nov 2021 20:44:21 +0300 Subject: [PATCH 05/17] Propagate foreign server ops --- .../distributed/commands/dependencies.c | 5 + .../commands/distribute_object_ops.c | 65 ++++ src/backend/distributed/commands/extension.c | 2 +- .../distributed/commands/foreign_server.c | 364 ++++++++++++++++++ .../distributed/deparser/citus_ruleutils.c | 3 +- .../deparser/deparse_foreign_server_stmts.c | 277 +++++++++++++ src/backend/distributed/metadata/dependency.c | 5 + .../metadata/pg_get_object_address_12_13_14.c | 8 + src/include/distributed/citus_ruleutils.h | 1 + src/include/distributed/commands.h | 21 + src/include/distributed/deparser.h | 7 + .../expected/distributed_functions.out | 2 +- .../regress/expected/local_table_join.out | 9 + .../expected/propagate_foreign_servers.out | 131 +++++++ src/test/regress/multi_1_schedule | 1 + src/test/regress/multi_schedule | 3 +- src/test/regress/pg_regress_multi.pl | 16 - src/test/regress/sql/local_table_join.sql | 3 + .../regress/sql/propagate_foreign_servers.sql | 83 ++++ 19 files changed, 985 insertions(+), 21 deletions(-) create mode 100644 src/backend/distributed/commands/foreign_server.c create mode 100644 src/backend/distributed/deparser/deparse_foreign_server_stmts.c create mode 100644 src/test/regress/expected/propagate_foreign_servers.out create mode 100644 src/test/regress/sql/propagate_foreign_servers.sql diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index ffb1222e6..a8fa5c118 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -288,6 +288,11 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) return 
CreateExtensionDDLCommand(dependency); } + case OCLASS_FOREIGN_SERVER: + { + return GetForeignServerCreateDDLCommand(dependency->objectId); + } + default: { break; diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 6c4985226..0764ace26 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -89,6 +89,14 @@ static DistributeObjectOps Any_AlterExtensionContents = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps Any_AlterForeignServer = { + .deparse = DeparseAlterForeignServerStmt, + .qualify = NULL, + .preprocess = PreprocessAlterForeignServerStmt, + .postprocess = NULL, + .address = NULL, + .markDistributed = false, +}; static DistributeObjectOps Any_AlterFunction = { .deparse = DeparseAlterFunctionStmt, .qualify = QualifyAlterFunctionStmt, @@ -177,6 +185,14 @@ static DistributeObjectOps Any_CreatePolicy = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps Any_CreateForeignServer = { + .deparse = DeparseCreateForeignServerStmt, + .qualify = NULL, + .preprocess = PreprocessCreateForeignServerStmt, + .postprocess = PostprocessCreateForeignServerStmt, + .address = CreateForeignServerStmtObjectAddress, + .markDistributed = true, +}; static DistributeObjectOps Any_CreateStatistics = { .deparse = DeparseCreateStatisticsStmt, .qualify = QualifyCreateStatisticsStmt, @@ -297,6 +313,30 @@ static DistributeObjectOps Extension_Drop = { .address = NULL, .markDistributed = false, }; +static DistributeObjectOps ForeignServer_Drop = { + .deparse = DeparseDropForeignServerStmt, + .qualify = NULL, + .preprocess = PreprocessDropForeignServerStmt, + .postprocess = NULL, + .address = NULL, + .markDistributed = false, +}; +static DistributeObjectOps ForeignServer_Rename = { + .deparse = DeparseAlterForeignServerRenameStmt, + .qualify = NULL, + .preprocess = PreprocessRenameForeignServerStmt, + .postprocess = NULL, + .address = NULL, + .markDistributed = false, +}; +static DistributeObjectOps ForeignServer_AlterOwner = { + .deparse = DeparseAlterForeignServerOwnerStmt, + .qualify = NULL, + .preprocess = PreprocessAlterForeignServerOwnerStmt, + .postprocess = PostprocessAlterForeignServerOwnerStmt, + .address = AlterForeignServerOwnerStmtObjectAddress, + .markDistributed = false, +}; static DistributeObjectOps ForeignTable_AlterTable = { .deparse = NULL, .qualify = NULL, @@ -675,6 +715,11 @@ GetDistributeObjectOps(Node *node) return &Any_AlterFunction; } + case T_AlterForeignServerStmt: + { + return &Any_AlterForeignServer; + } + case T_AlterObjectDependsStmt: { AlterObjectDependsStmt *stmt = castNode(AlterObjectDependsStmt, node); @@ -789,6 +834,11 @@ GetDistributeObjectOps(Node *node) return &Database_AlterOwner; } + case OBJECT_FOREIGN_SERVER: + { + return &ForeignServer_AlterOwner; + } + case OBJECT_FUNCTION: { return &Function_AlterOwner; @@ -915,6 +965,11 @@ GetDistributeObjectOps(Node *node) return &Any_CreateFunction; } + case T_CreateForeignServerStmt: + { + return &Any_CreateForeignServer; + } + case T_CreatePolicyStmt: { return &Any_CreatePolicy; @@ -977,6 +1032,11 @@ GetDistributeObjectOps(Node *node) return &Function_Drop; } + case OBJECT_FOREIGN_SERVER: + { + return &ForeignServer_Drop; + } + case OBJECT_INDEX: { return &Index_Drop; @@ -1081,6 +1141,11 @@ GetDistributeObjectOps(Node *node) return &Collation_Rename; } + case OBJECT_FOREIGN_SERVER: + { + return &ForeignServer_Rename; + } + 
case OBJECT_FUNCTION: { return &Function_Rename; diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 649234a4f..88150942f 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -797,7 +797,7 @@ CreateExtensionDDLCommand(const ObjectAddress *extensionAddress) /* - * RecreateEnumStmt returns a parsetree for a CREATE EXTENSION statement that would + * RecreateExtensionStmt returns a parsetree for a CREATE EXTENSION statement that would * recreate the given extension on a new node. */ static Node * diff --git a/src/backend/distributed/commands/foreign_server.c b/src/backend/distributed/commands/foreign_server.c new file mode 100644 index 000000000..ad1802ddb --- /dev/null +++ b/src/backend/distributed/commands/foreign_server.c @@ -0,0 +1,364 @@ +/*------------------------------------------------------------------------- + * + * foreign_server.c + * Commands for FOREIGN SERVER statements. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/pg_foreign_server.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" +#include "distributed/worker_transaction.h" +#include "foreign/foreign.h" +#include "nodes/makefuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/primnodes.h" + +static Node * RecreateForeignServerStmt(Oid serverId); +static bool NameListHasDistributedServer(List *serverNames); +static ObjectAddress GetObjectAddressByServerName(char *serverName, bool missing_ok); + + +/* + * PreprocessCreateForeignServerStmt is called during the planning phase for + * CREATE SERVER. + */ +List * +PreprocessCreateForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + if (!ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + + char *sql = DeparseTreeNode(node); + + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessAlterForeignServerStmt is called during the planning phase for + * ALTER SERVER .. OPTIONS .. + */ +List * +PreprocessAlterForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + AlterForeignServerStmt *stmt = castNode(AlterForeignServerStmt, node); + + ObjectAddress address = GetObjectAddressByServerName(stmt->servername, false); + + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + + char *sql = DeparseTreeNode(node); + + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessRenameForeignServerStmt is called during the planning phase for + * ALTER SERVER RENAME. 
+ */ +List * +PreprocessRenameForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_FOREIGN_SERVER); + + ObjectAddress address = GetObjectAddressByServerName(strVal(stmt->object), false); + + /* filter distributed servers */ + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + + char *sql = DeparseTreeNode(node); + + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessAlterForeignServerOwnerStmt is called during the planning phase for + * ALTER SERVER .. OWNER TO. + */ +List * +PreprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_FOREIGN_SERVER); + + ObjectAddress address = GetObjectAddressByServerName(strVal(stmt->object), false); + + /* filter distributed servers */ + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + + char *sql = DeparseTreeNode(node); + + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessDropForeignServerStmt is called during the planning phase for + * DROP SERVER. + */ +List * +PreprocessDropForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + DropStmt *stmt = castNode(DropStmt, node); + Assert(stmt->removeType == OBJECT_FOREIGN_SERVER); + + bool includesDistributedServer = NameListHasDistributedServer(stmt->objects); + + if (!includesDistributedServer) + { + return NIL; + } + + if (list_length(stmt->objects) > 1) + { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot drop distributed server with other servers"), + errhint("Try dropping each object in a separate DROP command"))); + } + + if (!ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + + Assert(list_length(stmt->objects) == 1); + + Value *serverValue = linitial(stmt->objects); + ObjectAddress address = GetObjectAddressByServerName(strVal(serverValue), false); + + /* unmark distributed server */ + UnmarkObjectDistributed(&address); + + const char *deparsedStmt = DeparseTreeNode((Node *) stmt); + + /* + * To prevent recursive propagation in mx architecture, we disable ddl + * propagation before sending the command to workers. + */ + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) deparsedStmt, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PostprocessCreateForeignServerStmt is called after a CREATE SERVER command has + * been executed by standard process utility. + */ +List * +PostprocessCreateForeignServerStmt(Node *node, const char *queryString) +{ + bool missingOk = false; + ObjectAddress address = GetObjectAddressFromParseTree(node, missingOk); + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + +/* + * PostprocessAlterForeignServerOwnerStmt is called after an ALTER SERVER OWNER command + * has been executed by standard process utility.
+ */ +List * +PostprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString) +{ + bool missingOk = false; + ObjectAddress address = GetObjectAddressFromParseTree(node, missingOk); + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + +/* + * CreateForeignServerStmtObjectAddress finds the ObjectAddress for the server + * that is created by given CreateForeignServerStmt. If missingOk is false and if + * the server does not exist, then it errors out. + * + * Never returns NULL, but the objid in the address can be invalid if missingOk + * was set to true. + */ +ObjectAddress +CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok) +{ + CreateForeignServerStmt *stmt = castNode(CreateForeignServerStmt, node); + + return GetObjectAddressByServerName(stmt->servername, missing_ok); +} + + +/* + * AlterForeignServerOwnerStmtObjectAddress finds the ObjectAddress for the server + * given in AlterOwnerStmt. If missingOk is false and if + * the server does not exist, then it errors out. + * + * Never returns NULL, but the objid in the address can be invalid if missingOk + * was set to true. + */ +ObjectAddress +AlterForeignServerOwnerStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + char *serverName = strVal(stmt->object); + + return GetObjectAddressByServerName(serverName, missing_ok); +} + + +/* + * GetForeignServerCreateDDLCommand returns a list that includes the CREATE SERVER + * command that would recreate the given server on a new node. + */ +List * +GetForeignServerCreateDDLCommand(Oid serverId) +{ + /* generate a statement for creation of the server in "if not exists" construct */ + Node *stmt = RecreateForeignServerStmt(serverId); + + /* capture ddl command for the create statement */ + const char *ddlCommand = DeparseTreeNode(stmt); + + List *ddlCommands = list_make1((void *) ddlCommand); + + return ddlCommands; +} + + +/* + * RecreateForeignServerStmt returns a parsetree for a CREATE SERVER statement + * that would recreate the given server on a new node. + */ +static Node * +RecreateForeignServerStmt(Oid serverId) +{ + ForeignServer *server = GetForeignServer(serverId); + + CreateForeignServerStmt *createStmt = makeNode(CreateForeignServerStmt); + + /* set server name and if_not_exists fields */ + createStmt->servername = pstrdup(server->servername); + createStmt->if_not_exists = true; + + /* set foreign data wrapper */ + ForeignDataWrapper *fdw = GetForeignDataWrapper(server->fdwid); + createStmt->fdwname = pstrdup(fdw->fdwname); + + /* set all fields using the existing server */ + if (server->servertype != NULL) + { + createStmt->servertype = pstrdup(server->servertype); + } + + if (server->serverversion != NULL) + { + createStmt->version = pstrdup(server->serverversion); + } + + createStmt->options = NIL; + + int location = -1; + DefElem *option = NULL; + foreach_ptr(option, server->options) + { + DefElem *copyOption = makeDefElem(option->defname, option->arg, location); + createStmt->options = lappend(createStmt->options, copyOption); + } + + return (Node *) createStmt; +} + + +/* + * NameListHasDistributedServer takes a namelist of servers and returns true if at least + * one of them is distributed. Returns false otherwise. 
+ */ +static bool +NameListHasDistributedServer(List *serverNames) +{ + Value *serverValue = NULL; + foreach_ptr(serverValue, serverNames) + { + ObjectAddress address = GetObjectAddressByServerName(strVal(serverValue), false); + + if (IsObjectDistributed(&address)) + { + return true; + } + } + + return false; +} + + +static ObjectAddress +GetObjectAddressByServerName(char *serverName, bool missing_ok) +{ + ForeignServer *server = GetForeignServerByName(serverName, missing_ok); + Oid serverOid = server->serverid; + ObjectAddress address = { 0 }; + ObjectAddressSet(address, ForeignServerRelationId, serverOid); + + return address; +} diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index 07982d029..a2002851d 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -77,7 +77,6 @@ static void deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparseContext); -static void AppendOptionListToString(StringInfo stringData, List *options); static void AppendStorageParametersToString(StringInfo stringBuffer, List *optionList); static void simple_quote_literal(StringInfo buf, const char *val); @@ -1056,7 +1055,7 @@ generate_qualified_relation_name(Oid relid) * AppendOptionListToString converts the option list to its textual format, and * appends this text to the given string buffer. */ -static void +void AppendOptionListToString(StringInfo stringBuffer, List *optionList) { if (optionList != NIL) diff --git a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c new file mode 100644 index 000000000..62c5f98c8 --- /dev/null +++ b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c @@ -0,0 +1,277 @@ +/*------------------------------------------------------------------------- + * + * deparse_foreign_server_stmts.c + * All routines to deparse foreign server statements. + * + * Copyright (c) Citus Data, Inc. 
+ * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "commands/defrem.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/relay_utility.h" +#include "lib/stringinfo.h" +#include "nodes/nodes.h" +#include "utils/builtins.h" + +static void AppendCreateForeignServerStmt(StringInfo buf, CreateForeignServerStmt *stmt); +static void AppendAlterForeignServerStmt(StringInfo buf, AlterForeignServerStmt *stmt); +static void AppendAlterForeignServerOptions(StringInfo buf, AlterForeignServerStmt *stmt); +static void AppendAlterForeignServerRenameStmt(StringInfo buf, RenameStmt *stmt); +static void AppendAlterForeignServerOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt); +static void AppendDropForeignServerStmt(StringInfo buf, DropStmt *stmt); +static void AppendServerNames(StringInfo buf, DropStmt *stmt); +static void AppendBehavior(StringInfo buf, DropStmt *stmt); +static char * GetDefElemActionString(DefElemAction action); + +char * +DeparseCreateForeignServerStmt(Node *node) +{ + CreateForeignServerStmt *stmt = castNode(CreateForeignServerStmt, node); + + StringInfoData str; + initStringInfo(&str); + + AppendCreateForeignServerStmt(&str, stmt); + + return str.data; +} + + +char * +DeparseAlterForeignServerStmt(Node *node) +{ + AlterForeignServerStmt *stmt = castNode(AlterForeignServerStmt, node); + + StringInfoData str; + initStringInfo(&str); + + AppendAlterForeignServerStmt(&str, stmt); + + return str.data; +} + + +char * +DeparseAlterForeignServerRenameStmt(Node *node) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + + Assert(stmt->renameType == OBJECT_FOREIGN_SERVER); + + StringInfoData str; + initStringInfo(&str); + + AppendAlterForeignServerRenameStmt(&str, stmt); + + return str.data; +} + + +char * +DeparseAlterForeignServerOwnerStmt(Node *node) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + + Assert(stmt->objectType == OBJECT_FOREIGN_SERVER); + + StringInfoData str; + initStringInfo(&str); + + AppendAlterForeignServerOwnerStmt(&str, stmt); + + return str.data; +} + + +char * +DeparseDropForeignServerStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + + Assert(stmt->removeType == OBJECT_FOREIGN_SERVER); + + StringInfoData str; + initStringInfo(&str); + + AppendDropForeignServerStmt(&str, stmt); + + return str.data; +} + + +static void +AppendCreateForeignServerStmt(StringInfo buf, CreateForeignServerStmt *stmt) +{ + appendStringInfoString(buf, "CREATE SERVER "); + + if (stmt->if_not_exists) + { + appendStringInfoString(buf, "IF NOT EXISTS "); + } + + appendStringInfo(buf, "%s ", quote_identifier(stmt->servername)); + + if (stmt->servertype) + { + appendStringInfo(buf, "TYPE %s ", quote_literal_cstr(stmt->servertype)); + } + + if (stmt->version) + { + appendStringInfo(buf, "VERSION %s ", quote_literal_cstr(stmt->version)); + } + + appendStringInfo(buf, "FOREIGN DATA WRAPPER %s ", quote_identifier(stmt->fdwname)); + + AppendOptionListToString(buf, stmt->options); +} + + +static void +AppendAlterForeignServerStmt(StringInfo buf, AlterForeignServerStmt *stmt) +{ + appendStringInfo(buf, "ALTER SERVER %s ", quote_identifier(stmt->servername)); + + if (stmt->has_version) + { + appendStringInfo(buf, "VERSION %s ", quote_literal_cstr(stmt->version)); + } + + AppendAlterForeignServerOptions(buf, stmt); +} + + +static void +AppendAlterForeignServerOptions(StringInfo buf, AlterForeignServerStmt *stmt) +{ + if 
(list_length(stmt->options) <= 0) + { + return; + } + + appendStringInfoString(buf, "OPTIONS ("); + + DefElemAction action = DEFELEM_UNSPEC; + DefElem *def = NULL; + foreach_ptr(def, stmt->options) + { + if (def->defaction != DEFELEM_UNSPEC) + { + action = def->defaction; + char *actionString = GetDefElemActionString(action); + appendStringInfo(buf, "%s ", actionString); + } + + appendStringInfo(buf, "%s", quote_identifier(def->defname)); + + if (action != DEFELEM_DROP) + { + const char *value = quote_literal_cstr(defGetString(def)); + appendStringInfo(buf, " %s", value); + } + + if (def != llast(stmt->options)) + { + appendStringInfoString(buf, ", "); + } + } + + appendStringInfoString(buf, ")"); +} + + +static void +AppendAlterForeignServerRenameStmt(StringInfo buf, RenameStmt *stmt) +{ + appendStringInfo(buf, "ALTER SERVER %s RENAME TO %s", + quote_identifier(strVal(stmt->object)), + quote_identifier(stmt->newname)); +} + + +static void +AppendAlterForeignServerOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt) +{ + const char *servername = quote_identifier(strVal(stmt->object)); + appendStringInfo(buf, "ALTER SERVER %s OWNER TO ", servername); + + appendStringInfo(buf, "%s", RoleSpecString(stmt->newowner, true)); +} + + +static void +AppendDropForeignServerStmt(StringInfo buf, DropStmt *stmt) +{ + appendStringInfoString(buf, "DROP SERVER "); + + if (stmt->missing_ok) + { + appendStringInfoString(buf, "IF EXISTS "); + } + + AppendServerNames(buf, stmt); + + AppendBehavior(buf, stmt); +} + + +static void +AppendServerNames(StringInfo buf, DropStmt *stmt) +{ + Value *serverValue = NULL; + foreach_ptr(serverValue, stmt->objects) + { + const char *serverString = quote_identifier(strVal(serverValue)); + appendStringInfo(buf, "%s", serverString); + + if (serverValue != llast(stmt->objects)) + { + appendStringInfoString(buf, ", "); + } + } +} + + +static void +AppendBehavior(StringInfo buf, DropStmt *stmt) +{ + if (stmt->behavior == DROP_CASCADE) + { + appendStringInfoString(buf, " CASCADE"); + } + else if (stmt->behavior == DROP_RESTRICT) + { + appendStringInfoString(buf, " RESTRICT"); + } +} + + +static char * +GetDefElemActionString(DefElemAction action) +{ + switch (action) + { + case DEFELEM_ADD: + { + return "ADD"; + } + + case DEFELEM_SET: + { + return "SET"; + } + + case DEFELEM_DROP: + { + return "DROP"; + } + + default: + return ""; + } +} diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 975fd7d5b..ba3681f3e 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -612,6 +612,11 @@ SupportedDependencyByCitus(const ObjectAddress *address) return true; } + case OCLASS_FOREIGN_SERVER: + { + return true; + } + case OCLASS_ROLE: { /* diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c index 30bbfd08d..4f22f977c 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c +++ b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c @@ -456,6 +456,14 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, break; } + case OBJECT_FOREIGN_SERVER: + { + idToCheck = addr->objectId; + aclMaskResult = pg_foreign_server_aclmask(idToCheck, userId, ACL_USAGE, + ACLMASK_ANY); + break; + } + case OBJECT_SEQUENCE: { idToCheck = addr->objectId; diff --git a/src/include/distributed/citus_ruleutils.h b/src/include/distributed/citus_ruleutils.h 
index f32c234ed..e87f70931 100644 --- a/src/include/distributed/citus_ruleutils.h +++ b/src/include/distributed/citus_ruleutils.h @@ -65,6 +65,7 @@ extern char * generate_relation_name(Oid relid, List *namespaces); extern char * generate_qualified_relation_name(Oid relid); extern char * generate_operator_name(Oid operid, Oid arg1, Oid arg2); extern List * getOwnedSequences_internal(Oid relid, AttrNumber attnum, char deptype); +extern void AppendOptionListToString(StringInfo stringData, List *options); #endif /* CITUS_RULEUTILS_H */ diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 67fbc99c0..1f2540a0c 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -225,6 +225,27 @@ extern Oid GetReferencedTableId(Oid foreignKeyId); extern Oid GetReferencingTableId(Oid foreignKeyId); extern bool RelationInvolvedInAnyNonInheritedForeignKeys(Oid relationId); +/* foreign_server.c - forward declarations */ +extern List * PreprocessCreateForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessAlterForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PreprocessRenameForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessDropForeignServerStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PostprocessCreateForeignServerStmt(Node *node, const char *queryString); +extern List * PostprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString); +extern ObjectAddress CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress AlterForeignServerOwnerStmtObjectAddress(Node *node, bool + missing_ok); +extern List * GetForeignServerCreateDDLCommand(Oid serverId); /* function.c - forward declarations */ extern List * PreprocessCreateFunctionStmt(Node *stmt, const char *queryString, diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 8f9e43311..8934323f0 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -46,6 +46,13 @@ extern void QualifyRenameCollationStmt(Node *stmt); extern void QualifyAlterCollationSchemaStmt(Node *stmt); extern void QualifyAlterCollationOwnerStmt(Node *stmt); +/* forward declarations for deparse_foreign_server_stmts.c */ +extern char * DeparseCreateForeignServerStmt(Node *node); +extern char * DeparseAlterForeignServerStmt(Node *node); +extern char * DeparseAlterForeignServerRenameStmt(Node *node); +extern char * DeparseAlterForeignServerOwnerStmt(Node *node); +extern char * DeparseDropForeignServerStmt(Node *node); + /* forward declarations for deparse_table_stmts.c */ extern char * DeparseAlterTableSchemaStmt(Node *stmt); extern char * DeparseAlterTableStmt(Node *node); diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out index 637441b46..d448cd774 100644 --- a/src/test/regress/expected/distributed_functions.out +++ b/src/test/regress/expected/distributed_functions.out @@ -612,7 +612,7 @@ SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', dist SELECT * FROM (SELECT unnest(master_metadata_snapshot()) as 
metadata_command order by 1) as innerResult WHERE metadata_command like '%distributed_object_data%'; metadata_command --------------------------------------------------------------------- - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('type', ARRAY['public.usage_access_type']::text[], ARRAY[]::text[], -1, 0), ('type', ARRAY['function_tests.dup_result']::text[], ARRAY[]::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func']::text[], ARRAY['public.usage_access_type', 'integer[]']::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func_third']::text[], ARRAY['integer', 'integer[]']::text[], 0, 50), ('function', ARRAY['function_tests', 'notice']::text[], ARRAY['pg_catalog.text']::text[], -1, 0), ('function', ARRAY['function_tests', 'dup']::text[], ARRAY['pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_with_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_mi''xed_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_sfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_invfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_finalfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'my_rank']::text[], ARRAY['pg_catalog."any"']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_sfunc']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_finalfunc']::text[], ARRAY['function_tests.dup_result']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'agg_names']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['schema_colocation']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests2']::text[], ARRAY[]::text[], -1, 0), ('extension', ARRAY['plpgsql']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('type', ARRAY['public.usage_access_type']::text[], ARRAY[]::text[], -1, 0), ('type', ARRAY['function_tests.dup_result']::text[], ARRAY[]::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func']::text[], ARRAY['public.usage_access_type', 'integer[]']::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func_third']::text[], ARRAY['integer', 'integer[]']::text[], 0, 50), ('function', ARRAY['function_tests', 'notice']::text[], 
ARRAY['pg_catalog.text']::text[], -1, 0), ('function', ARRAY['function_tests', 'dup']::text[], ARRAY['pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_with_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_mi''xed_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_sfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_invfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_finalfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'my_rank']::text[], ARRAY['pg_catalog."any"']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_sfunc']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_finalfunc']::text[], ARRAY['function_tests.dup_result']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'agg_names']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('server', ARRAY['fake_fdw_server']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['schema_colocation']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests2']::text[], ARRAY[]::text[], -1, 0), ('extension', ARRAY['plpgsql']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data; (1 row) -- valid distribution with distribution_arg_index diff --git a/src/test/regress/expected/local_table_join.out b/src/test/regress/expected/local_table_join.out index 81490dcda..d71aee3cf 100644 --- a/src/test/regress/expected/local_table_join.out +++ b/src/test/regress/expected/local_table_join.out @@ -62,6 +62,15 @@ RETURNS fdw_handler AS 'citus' LANGUAGE C STRICT; CREATE FOREIGN DATA WRAPPER fake_fdw_1 HANDLER fake_fdw_handler; +SELECT run_command_on_workers($$ + CREATE FOREIGN DATA WRAPPER fake_fdw_1 HANDLER fake_fdw_handler; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE FOREIGN DATA WRAPPER") + (localhost,57638,t,"CREATE FOREIGN DATA WRAPPER") +(2 rows) + CREATE SERVER fake_fdw_server_1 FOREIGN DATA WRAPPER fake_fdw_1; CREATE FOREIGN TABLE foreign_table ( key int, diff --git a/src/test/regress/expected/propagate_foreign_servers.out b/src/test/regress/expected/propagate_foreign_servers.out new file mode 100644 index 000000000..b964965a5 --- /dev/null +++ b/src/test/regress/expected/propagate_foreign_servers.out @@ -0,0 +1,131 @@ +CREATE SCHEMA propagate_foreign_server; +SET search_path TO propagate_foreign_server; +-- 
remove node to add later +SELECT citus_remove_node('localhost', :worker_1_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +-- create schema, extension and foreign server while the worker is removed +SET citus.enable_ddl_propagation TO OFF; +CREATE SCHEMA test_dependent_schema; +CREATE EXTENSION postgres_fdw WITH SCHEMA test_dependent_schema; +SET citus.enable_ddl_propagation TO ON; +CREATE SERVER foreign_server_dependent_schema + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'test'); +SELECT 1 FROM citus_add_node('localhost', :worker_1_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- verify the dependent schema and the foreign server are created on the newly added worker +SELECT run_command_on_workers( + $$SELECT COUNT(*) FROM pg_namespace WHERE nspname = 'test_dependent_schema';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +SELECT run_command_on_workers( + $$SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server_dependent_schema';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,t) + (localhost,57638,t,t) +(2 rows) + +CREATE SERVER foreign_server_to_drop + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'test'); +--should error +DROP SERVER foreign_server_dependent_schema, foreign_server_to_drop; +ERROR: cannot drop distributed server with other servers +HINT: Try dropping each object in a separate DROP command +SET client_min_messages TO ERROR; +DROP SCHEMA test_dependent_schema CASCADE; +RESET client_min_messages; +-- test propagating foreign server creation +CREATE EXTENSION postgres_fdw; +CREATE SERVER foreign_server TYPE 'test_type' VERSION 'v1' + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'testhost', port '5432', dbname 'testdb'); +SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server'; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +-- verify that the server is created on the worker +SELECT run_command_on_workers( + $$SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,t) + (localhost,57638,t,t) +(2 rows) + +ALTER SERVER foreign_server OPTIONS (ADD "fdw_startup_cost" '1000'); +ALTER SERVER foreign_server OPTIONS (ADD passfile 'to_be_dropped'); +ALTER SERVER foreign_server OPTIONS (SET host 'localhost'); +ALTER SERVER foreign_server OPTIONS (DROP port, DROP dbname); +ALTER SERVER foreign_server OPTIONS (ADD port :'master_port', dbname 'regression', DROP passfile); +ALTER SERVER foreign_server RENAME TO "foreign'server_1!"; +-- test alter owner +SELECT rolname FROM pg_roles JOIN pg_foreign_server ON (pg_roles.oid=pg_foreign_server.srvowner) WHERE srvname = 'foreign''server_1!'; + rolname +--------------------------------------------------------------------- + postgres +(1 row) + +ALTER SERVER "foreign'server_1!" 
OWNER TO pg_monitor; +-- verify that the server is renamed on the worker +SELECT run_command_on_workers( + $$SELECT srvoptions FROM pg_foreign_server WHERE srvname = 'foreign''server_1!';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"{host=localhost,fdw_startup_cost=1000,port=57636,dbname=regression}") + (localhost,57638,t,"{host=localhost,fdw_startup_cost=1000,port=57636,dbname=regression}") +(2 rows) + +-- verify the owner is changed +SELECT run_command_on_workers( + $$SELECT rolname FROM pg_roles WHERE oid IN (SELECT srvowner FROM pg_foreign_server WHERE srvname = 'foreign''server_1!');$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,pg_monitor) + (localhost,57638,t,pg_monitor) +(2 rows) + +-- verify the owner is changed on the coordinator +SELECT rolname FROM pg_roles JOIN pg_foreign_server ON (pg_roles.oid=pg_foreign_server.srvowner) WHERE srvname = 'foreign''server_1!'; + rolname +--------------------------------------------------------------------- + pg_monitor +(1 row) + +DROP SERVER IF EXISTS "foreign'server_1!" CASCADE; +-- verify that the server is dropped on the worker +SELECT run_command_on_workers( + $$SELECT COUNT(*)=0 FROM pg_foreign_server WHERE srvname = 'foreign''server_1!';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,t) + (localhost,57638,t,t) +(2 rows) + +\c - - - :worker_1_port +-- not allowed on the worker +ALTER SERVER foreign_server OPTIONS (ADD async_capable 'False'); +ERROR: server "foreign_server" does not exist +CREATE SERVER foreign_server_1 TYPE 'test_type' VERSION 'v1' + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'testhost', port '5432', dbname 'testdb'); +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. 
+\c - - - :master_port +DROP SCHEMA propagate_foreign_server CASCADE; +NOTICE: drop cascades to extension postgres_fdw diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index d7cf50e14..37537e9dd 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -26,6 +26,7 @@ test: multi_cluster_management # below tests are placed right after multi_cluster_management as we do # remove/add node operations and we do not want any preexisting objects test: non_super_user_object_metadata +test: propagate_foreign_servers test: alter_role_propagation test: propagate_extension_commands test: escape_extension_name diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 11b024f70..c31c68df5 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -1,5 +1,6 @@ -test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw +test: multi_test_helpers multi_test_helpers_superuser test: multi_cluster_management +test: multi_create_fdw test: multi_test_catalog_views test: replicated_table_disable_node diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index 30807d9b0..d18b63539 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -924,14 +924,6 @@ if (!$conninfo) '-c', "CREATE FOREIGN DATA WRAPPER $fdw HANDLER $fdws{$fdw};")) == 0 or die "Could not create foreign data wrapper $fdw on worker"; } - - foreach my $fdwServer (keys %fdwServers) - { - system(catfile($bindir, "psql"), - ('-X', '-h', $host, '-p', $port, '-U', $user, "-d", "regression", - '-c', "CREATE SERVER $fdwServer FOREIGN DATA WRAPPER $fdwServers{$fdwServer};")) == 0 - or die "Could not create server $fdwServer on worker"; - } } } else @@ -958,14 +950,6 @@ else '-c', "SELECT run_command_on_workers('CREATE FOREIGN DATA WRAPPER $fdw HANDLER $fdws{$fdw};');")) == 0 or die "Could not create foreign data wrapper $fdw on worker"; } - - foreach my $fdwServer (keys %fdwServers) - { - system(catfile($bindir, "psql"), - ('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", $dbname, - '-c', "SELECT run_command_on_workers('CREATE SERVER $fdwServer FOREIGN DATA WRAPPER $fdwServers{$fdwServer};');")) == 0 - or die "Could not create server $fdwServer on worker"; - } } # Prepare pg_regress arguments diff --git a/src/test/regress/sql/local_table_join.sql b/src/test/regress/sql/local_table_join.sql index b9c5d2db0..f9c05789a 100644 --- a/src/test/regress/sql/local_table_join.sql +++ b/src/test/regress/sql/local_table_join.sql @@ -40,6 +40,9 @@ RETURNS fdw_handler AS 'citus' LANGUAGE C STRICT; CREATE FOREIGN DATA WRAPPER fake_fdw_1 HANDLER fake_fdw_handler; +SELECT run_command_on_workers($$ + CREATE FOREIGN DATA WRAPPER fake_fdw_1 HANDLER fake_fdw_handler; +$$); CREATE SERVER fake_fdw_server_1 FOREIGN DATA WRAPPER fake_fdw_1; CREATE FOREIGN TABLE foreign_table ( diff --git a/src/test/regress/sql/propagate_foreign_servers.sql b/src/test/regress/sql/propagate_foreign_servers.sql new file mode 100644 index 000000000..a4033ba2a --- /dev/null +++ b/src/test/regress/sql/propagate_foreign_servers.sql @@ -0,0 +1,83 @@ +CREATE SCHEMA propagate_foreign_server; +SET search_path TO propagate_foreign_server; + +-- remove node to add later +SELECT citus_remove_node('localhost', :worker_1_port); + +-- create schema, extension and foreign server while the worker is removed +SET citus.enable_ddl_propagation TO OFF; +CREATE SCHEMA test_dependent_schema; +CREATE EXTENSION 
postgres_fdw WITH SCHEMA test_dependent_schema; +SET citus.enable_ddl_propagation TO ON; +CREATE SERVER foreign_server_dependent_schema + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'test'); + +SELECT 1 FROM citus_add_node('localhost', :worker_1_port); + +-- verify the dependent schema and the foreign server are created on the newly added worker +SELECT run_command_on_workers( + $$SELECT COUNT(*) FROM pg_namespace WHERE nspname = 'test_dependent_schema';$$); +SELECT run_command_on_workers( + $$SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server_dependent_schema';$$); + +CREATE SERVER foreign_server_to_drop + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'test'); + +--should error +DROP SERVER foreign_server_dependent_schema, foreign_server_to_drop; + +SET client_min_messages TO ERROR; +DROP SCHEMA test_dependent_schema CASCADE; +RESET client_min_messages; + +-- test propagating foreign server creation +CREATE EXTENSION postgres_fdw; +CREATE SERVER foreign_server TYPE 'test_type' VERSION 'v1' + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'testhost', port '5432', dbname 'testdb'); + +SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server'; + +-- verify that the server is created on the worker +SELECT run_command_on_workers( + $$SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server';$$); + +ALTER SERVER foreign_server OPTIONS (ADD "fdw_startup_cost" '1000'); +ALTER SERVER foreign_server OPTIONS (ADD passfile 'to_be_dropped'); +ALTER SERVER foreign_server OPTIONS (SET host 'localhost'); +ALTER SERVER foreign_server OPTIONS (DROP port, DROP dbname); +ALTER SERVER foreign_server OPTIONS (ADD port :'master_port', dbname 'regression', DROP passfile); +ALTER SERVER foreign_server RENAME TO "foreign'server_1!"; + +-- test alter owner +SELECT rolname FROM pg_roles JOIN pg_foreign_server ON (pg_roles.oid=pg_foreign_server.srvowner) WHERE srvname = 'foreign''server_1!'; +ALTER SERVER "foreign'server_1!" OWNER TO pg_monitor; + +-- verify that the server is renamed on the worker +SELECT run_command_on_workers( + $$SELECT srvoptions FROM pg_foreign_server WHERE srvname = 'foreign''server_1!';$$); + +-- verify the owner is changed +SELECT run_command_on_workers( + $$SELECT rolname FROM pg_roles WHERE oid IN (SELECT srvowner FROM pg_foreign_server WHERE srvname = 'foreign''server_1!');$$); + +-- verify the owner is changed on the coordinator +SELECT rolname FROM pg_roles JOIN pg_foreign_server ON (pg_roles.oid=pg_foreign_server.srvowner) WHERE srvname = 'foreign''server_1!'; +DROP SERVER IF EXISTS "foreign'server_1!" 
CASCADE; + +-- verify that the server is dropped on the worker +SELECT run_command_on_workers( + $$SELECT COUNT(*)=0 FROM pg_foreign_server WHERE srvname = 'foreign''server_1!';$$); + +\c - - - :worker_1_port +-- not allowed on the worker +ALTER SERVER foreign_server OPTIONS (ADD async_capable 'False'); + +CREATE SERVER foreign_server_1 TYPE 'test_type' VERSION 'v1' + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'testhost', port '5432', dbname 'testdb'); + +\c - - - :master_port +DROP SCHEMA propagate_foreign_server CASCADE; From 479b2da74033e136abfe588029a58b6a9c5ade37 Mon Sep 17 00:00:00 2001 From: Hanefi Onaldi Date: Tue, 21 Dec 2021 04:21:14 +0300 Subject: [PATCH 06/17] Fix one flaky failure test --- .../regress/expected/failure_connection_establishment.out | 6 +++--- src/test/regress/sql/failure_connection_establishment.sql | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out index 99b9efcda..6284107d2 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ b/src/test/regress/expected/failure_connection_establishment.out @@ -105,10 +105,10 @@ SELECT name FROM r1 WHERE id = 2; -- verify a connection attempt was made to the intercepted node, this would have cause the -- connection to have been delayed and thus caused a timeout -SELECT citus.dump_network_traffic(); - dump_network_traffic +SELECT * FROM citus.dump_network_traffic() WHERE conn=0; + conn | source | message --------------------------------------------------------------------- - (0,coordinator,"[initial message]") + 0 | coordinator | [initial message] (1 row) SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/sql/failure_connection_establishment.sql b/src/test/regress/sql/failure_connection_establishment.sql index 76e699132..43cb97b86 100644 --- a/src/test/regress/sql/failure_connection_establishment.sql +++ b/src/test/regress/sql/failure_connection_establishment.sql @@ -66,7 +66,7 @@ SELECT name FROM r1 WHERE id = 2; -- verify a connection attempt was made to the intercepted node, this would have cause the -- connection to have been delayed and thus caused a timeout -SELECT citus.dump_network_traffic(); +SELECT * FROM citus.dump_network_traffic() WHERE conn=0; SELECT citus.mitmproxy('conn.allow()'); From b9c06a6762121792942f144d01776b01b649d9d8 Mon Sep 17 00:00:00 2001 From: Halil Ozan Akgul Date: Wed, 22 Dec 2021 10:57:05 +0300 Subject: [PATCH 07/17] Turn metadata sync on in multi_metadata_sync --- .../regress/expected/multi_metadata_sync.out | 31 ++++++++++++++++++- src/test/regress/multi_1_schedule | 3 -- src/test/regress/sql/multi_metadata_sync.sql | 9 ++++++ 3 files changed, 39 insertions(+), 4 deletions(-) diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 970318b7f..6db1c40f2 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -3,6 +3,21 @@ -- -- Tests for metadata snapshot functions, metadata syncing functions and propagation of -- metadata changes to MX tables. 
+-- Turn metadata sync off at first +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +NOTICE: dropping metadata on the node (localhost,57637) + stop_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); +NOTICE: dropping metadata on the node (localhost,57638) + stop_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000; SET citus.replicate_reference_tables_on_activate TO off; SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id @@ -1467,13 +1482,14 @@ WHERE logicalrelid='mx_ref'::regclass; (1 row) \c - - - :master_port +SET client_min_messages TO ERROR; SELECT master_add_node('localhost', :worker_2_port); -NOTICE: Replicating reference table "mx_ref" to the node localhost:xxxxx master_add_node --------------------------------------------------------------------- 7 (1 row) +RESET client_min_messages; SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass @@ -1914,3 +1930,16 @@ ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; +-- Turn metadata sync back on at the end +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 37537e9dd..25b3db268 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -232,10 +232,7 @@ test: multi_drop_extension # multi_metadata_sync tests the propagation of mx-related metadata changes to metadata workers # multi_unsupported_worker_operations tests that unsupported operations error out on metadata workers # ---------- -test: check_mx -test: turn_mx_off test: multi_metadata_sync -test: turn_mx_on test: multi_unsupported_worker_operations # ---------- diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql index 440306ea6..f55bc9c3f 100644 --- a/src/test/regress/sql/multi_metadata_sync.sql +++ b/src/test/regress/sql/multi_metadata_sync.sql @@ -5,6 +5,9 @@ -- Tests for metadata snapshot functions, metadata syncing functions and propagation of -- metadata changes to MX tables. 
+-- Turn metadata sync off at first +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000; SET citus.replicate_reference_tables_on_activate TO off; @@ -674,7 +677,9 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; \c - - - :master_port +SET client_min_messages TO ERROR; SELECT master_add_node('localhost', :worker_2_port); +RESET client_min_messages; SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement @@ -859,3 +864,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; + +-- Turn metadata sync back on at the end +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); From 9ad29e5a9d9426245a5de6fce25afb98a2868826 Mon Sep 17 00:00:00 2001 From: Hanefi Onaldi Date: Wed, 22 Dec 2021 17:47:31 +0300 Subject: [PATCH 08/17] Improve CircleCI configs - Parameterize PG versions - Use single parens for consistency - Fix spacing issues - Remove unsupported attributes - Compact check-style steps - Parameterize PG versions for upgrade tests --- .circleci/config.yml | 171 +++++++++++++++++++------------------------ 1 file changed, 76 insertions(+), 95 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8965658e3..0aa0fd4b8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,10 +6,20 @@ orbs: parameters: image_suffix: type: string - default: "-v2021_10_27" - + default: '-v2021_10_27' + pg12_version: + type: string + default: '12.8' + pg13_version: + type: string + default: '13.4' + pg14_version: + type: string + default: '14.0' + upgrade_pg_versions: + type: string + default: '12.8-13.4-14.0' jobs: - build: description: Build the citus extension parameters: @@ -47,40 +57,22 @@ jobs: command: citus_indent --check - run: name: 'Fix whitespace' - command: ci/editorconfig.sh - - run: - name: 'Check if whitespace fixing changed anything, install editorconfig if it did' - command: git diff --exit-code + command: ci/editorconfig.sh && git diff --exit-code - run: name: 'Remove useless declarations' - command: ci/remove_useless_declarations.sh - - run: - name: 'Check if changed' - command: git diff --cached --exit-code + command: ci/remove_useless_declarations.sh && git diff --cached --exit-code - run: name: 'Normalize test output' - command: ci/normalize_expected.sh - - run: - name: 'Check if changed' - command: git diff --exit-code + command: ci/normalize_expected.sh && git diff --exit-code - run: name: 'Check for C-style comments in migration files' - command: ci/disallow_c_comments_in_migrations.sh + command: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code - run: - name: 'Check if changed' - command: git diff --exit-code - - run: - name: 'Check for comments that start with # character in spec files' - command: ci/disallow_hash_comments_in_spec_files.sh - - run: - name: 'Check if changed' - command: git diff --exit-code + name: 'Check for comment--cached ns that start with # character in spec files' + command: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code - run: name: 'Check for 
gitignore entries .for source files' - command: ci/fix_gitignore.sh - - run: - name: 'Check if changed' - command: git diff --exit-code + command: ci/fix_gitignore.sh && git diff --exit-code - run: name: 'Check for lengths of changelog entries' command: ci/disallow_long_changelog_entries.sh @@ -180,11 +172,9 @@ jobs: - store_artifacts: name: 'Save regressions' path: src/test/regress/regression.diffs - when: on_fail - store_artifacts: name: 'Save core dumps' path: /tmp/core_dumps - when: on_fail - store_artifacts: name: 'Save pg_upgrade logs for newData dir' path: /tmp/pg_upgrade_newData_logs @@ -267,17 +257,16 @@ jobs: name: 'Save core dumps' path: /tmp/core_dumps - store_artifacts: - name: "Save logfiles" + name: 'Save logfiles' path: src/test/regress/tmp_citus_test/logfiles - codecov/upload: flags: 'test_<< parameters.pg_major >>,upgrade' - test-citus-upgrade: description: Runs citus upgrade tests parameters: pg_major: - description: "postgres major version" + description: 'postgres major version' type: integer image: description: 'docker image to use as for the tests' @@ -348,11 +337,9 @@ jobs: - store_artifacts: name: 'Save regressions' path: src/test/regress/regression.diffs - when: on_fail - store_artifacts: name: 'Save core dumps' path: /tmp/core_dumps - when: on_fail - codecov/upload: flags: 'test_<< parameters.pg_major >>,upgrade' @@ -360,7 +347,7 @@ jobs: description: Runs the common tests of citus parameters: pg_major: - description: "postgres major version" + description: 'postgres major version' type: integer image: description: 'docker image to use as for the tests' @@ -370,7 +357,7 @@ jobs: description: 'docker image tag to use' type: string make: - description: "make target" + description: 'make target' type: string docker: - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' @@ -416,18 +403,15 @@ jobs: - store_artifacts: name: 'Save regressions' path: src/test/regress/regression.diffs - when: on_fail - store_artifacts: name: 'Save mitmproxy output (failure test specific)' path: src/test/regress/proxy.output - store_artifacts: name: 'Save results' path: src/test/regress/results/ - when: on_fail - store_artifacts: name: 'Save core dumps' path: /tmp/core_dumps - when: on_fail - codecov/upload: flags: 'test_<< parameters.pg_major >>,<< parameters.make >>' when: always @@ -436,7 +420,7 @@ jobs: description: Runs tap tests for citus parameters: pg_major: - description: "postgres major version" + description: 'postgres major version' type: integer image: description: 'docker image to use as for the tests' @@ -449,7 +433,7 @@ jobs: description: 'name of the tap test suite to run' type: string make: - description: "make target" + description: 'make target' type: string default: installcheck docker: @@ -488,18 +472,16 @@ jobs: - store_artifacts: name: 'Save tap logs' path: /home/circleci/project/src/test/recovery/tmp_check/log - when: on_fail - store_artifacts: name: 'Save core dumps' path: /tmp/core_dumps - when: on_fail - codecov/upload: flags: 'test_<< parameters.pg_major >>,tap_<< parameters.suite >>_<< parameters.make >>' when: always check-merge-to-enterprise: docker: - - image: citus/extbuilder:13.4 + - image: citus/extbuilder:<< pipeline.parameters.pg13_version >> working_directory: /home/circleci/project steps: - checkout @@ -541,7 +523,6 @@ workflows: version: 2 build_and_test: jobs: - - check-merge-to-enterprise: filters: branches: @@ -551,15 +532,15 @@ workflows: - build: name: build-12 pg_major: 12 - image_tag: '12.8' + 
image_tag: '<< pipeline.parameters.pg12_version >>' - build: name: build-13 pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' - build: name: build-14 pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' - check-style - check-sql-snapshots @@ -567,266 +548,266 @@ workflows: - test-citus: name: 'test-12_check-multi' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-multi requires: [build-12] - test-citus: name: 'test-12_check-multi-1' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-multi-1 requires: [build-12] - test-citus: name: 'test-12_check-mx' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-multi-mx requires: [build-12] - test-citus: name: 'test-12_check-vanilla' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-vanilla requires: [build-12] - test-citus: name: 'test-12_check-isolation' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-isolation requires: [build-12] - test-citus: name: 'test-12_check-worker' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-worker requires: [build-12] - test-citus: name: 'test-12_check-operations' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-operations requires: [build-12] - test-citus: name: 'test-12_check-follower-cluster' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-follower-cluster requires: [build-12] - test-citus: name: 'test-12_check-columnar' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-columnar requires: [build-12] - test-citus: name: 'test-12_check-columnar-isolation' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-columnar-isolation requires: [build-12] - tap-test-citus: name: 'test_12_tap-recovery' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' suite: recovery requires: [build-12] - test-citus: name: 'test-12_check-failure' pg_major: 12 image: citus/failtester - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' make: check-failure requires: [build-12] - test-citus: name: 'test-13_check-multi' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-multi requires: [build-13] - test-citus: name: 'test-13_check-multi-1' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-multi-1 requires: [build-13] - test-citus: name: 'test-13_check-mx' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-multi-mx requires: [build-13] - test-citus: name: 'test-13_check-vanilla' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-vanilla requires: [build-13] - test-citus: name: 'test-13_check-isolation' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-isolation requires: [build-13] - test-citus: name: 'test-13_check-worker' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-worker requires: [build-13] - test-citus: name: 'test-13_check-operations' 
pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-operations requires: [build-13] - test-citus: name: 'test-13_check-follower-cluster' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-follower-cluster requires: [build-13] - test-citus: name: 'test-13_check-columnar' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-columnar requires: [build-13] - test-citus: name: 'test-13_check-columnar-isolation' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-columnar-isolation requires: [build-13] - tap-test-citus: name: 'test_13_tap-recovery' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' suite: recovery requires: [build-13] - test-citus: name: 'test-13_check-failure' pg_major: 13 image: citus/failtester - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' make: check-failure requires: [build-13] - test-citus: name: 'test-14_check-multi' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-multi requires: [build-14] - test-citus: name: 'test-14_check-multi-1' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-multi-1 requires: [build-14] - test-citus: name: 'test-14_check-mx' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-multi-mx requires: [build-14] - test-citus: name: 'test-14_check-vanilla' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-vanilla requires: [build-14] - test-citus: name: 'test-14_check-isolation' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-isolation requires: [build-14] - test-citus: name: 'test-14_check-worker' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-worker requires: [build-14] - test-citus: name: 'test-14_check-operations' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-operations requires: [build-14] - test-citus: name: 'test-14_check-follower-cluster' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-follower-cluster requires: [build-14] - test-citus: name: 'test-14_check-columnar' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-columnar requires: [build-14] - test-citus: name: 'test-14_check-columnar-isolation' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-columnar-isolation requires: [build-14] - tap-test-citus: name: 'test_14_tap-recovery' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' suite: recovery requires: [build-14] - test-citus: name: 'test-14_check-failure' pg_major: 14 image: citus/failtester - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' make: check-failure requires: [build-14] - test-arbitrary-configs: name: 'test-12_check-arbitrary-configs' pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' requires: [build-12] - test-arbitrary-configs: name: 'test-13_check-arbitrary-configs' pg_major: 13 - image_tag: '13.4' + image_tag: '<< pipeline.parameters.pg13_version >>' requires: [build-13] - 
test-arbitrary-configs: name: 'test-14_check-arbitrary-configs' pg_major: 14 - image_tag: '14.0' + image_tag: '<< pipeline.parameters.pg14_version >>' requires: [build-14] - test-pg-upgrade: name: 'test-12-13_check-pg-upgrade' old_pg_major: 12 new_pg_major: 13 - image_tag: '12.8-13.4-14.0' - requires: [build-12,build-13] + image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' + requires: [build-12, build-13] - test-pg-upgrade: name: 'test-12-14_check-pg-upgrade' old_pg_major: 12 new_pg_major: 14 - image_tag: '12.8-13.4-14.0' - requires: [build-12,build-14] + image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' + requires: [build-12, build-14] - test-pg-upgrade: name: 'test-13-14_check-pg-upgrade' old_pg_major: 13 new_pg_major: 14 - image_tag: '12.8-13.4-14.0' - requires: [build-13,build-14] + image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' + requires: [build-13, build-14] - test-citus-upgrade: name: test-12_check-citus-upgrade pg_major: 12 - image_tag: '12.8' + image_tag: '<< pipeline.parameters.pg12_version >>' requires: [build-12] - ch_benchmark: From 5c2fb06322866def2ffeba964ac34d202ffc396b Mon Sep 17 00:00:00 2001 From: Halil Ozan Akgul Date: Tue, 21 Dec 2021 11:45:33 +0300 Subject: [PATCH 09/17] Fix metadata sync fails on multi_sequence_default --- .../expected/multi_sequence_default.out | 71 +++++++++++++------ src/test/regress/multi_1_schedule | 2 +- .../regress/sql/multi_sequence_default.sql | 30 +++++--- 3 files changed, 71 insertions(+), 32 deletions(-) diff --git a/src/test/regress/expected/multi_sequence_default.out b/src/test/regress/expected/multi_sequence_default.out index 903feaeeb..e00310b0e 100644 --- a/src/test/regress/expected/multi_sequence_default.out +++ b/src/test/regress/expected/multi_sequence_default.out @@ -10,6 +10,7 @@ CREATE SCHEMA sequence_default; SET search_path = sequence_default, public; -- test both distributed and citus local tables SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0); +NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata ?column? --------------------------------------------------------------------- 1 @@ -64,13 +65,9 @@ ERROR: cannot add a column involving DEFAULT nextval('..') because the table is HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..') ALTER TABLE seq_test_0 ADD COLUMN z serial; -ERROR: Cannot add a column involving serial pseudotypes because the table is not empty -HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint -Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..') +ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers ALTER TABLE seq_test_0_local_table ADD COLUMN z serial; -ERROR: Cannot add a column involving serial pseudotypes because the table is not empty -HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint -Then set the default by ALTER TABLE .. ALTER COLUMN .. 
SET DEFAULT nextval('..') +ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers -- follow hint ALTER TABLE seq_test_0 ADD COLUMN z int; ALTER TABLE seq_test_0 ALTER COLUMN z SET DEFAULT nextval('seq_0'); @@ -127,30 +124,63 @@ SELECT * FROM seq_test_0_local_table ORDER BY 1, 2 LIMIT 5; --------------------------------------------------------------------- integer | 1 | 1 | 2147483647 | 1 | no | 1 --- cannot change the type of a sequence used in a distributed table --- even if metadata is not synced to workers +-- cannot alter a sequence used in a distributed table +-- since the metadata is synced to workers ALTER SEQUENCE seq_0 AS bigint; -ERROR: Altering a sequence used in a distributed table is currently not supported. +ERROR: Altering a distributed sequence is currently not supported. ALTER SEQUENCE seq_0_local_table AS bigint; -ERROR: Altering a sequence used in a local table that is added to metadata is currently not supported. +ERROR: Altering a distributed sequence is currently not supported. -- we can change other things like increment -- if metadata is not synced to workers -ALTER SEQUENCE seq_0 INCREMENT BY 2; -ALTER SEQUENCE seq_0_local_table INCREMENT BY 2; -\d seq_0 - Sequence "sequence_default.seq_0" +BEGIN; +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +NOTICE: dropping metadata on the node (localhost,57637) + stop_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); +NOTICE: dropping metadata on the node (localhost,57638) + stop_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +CREATE SEQUENCE seq_13; +CREATE SEQUENCE seq_13_local_table; +CREATE TABLE seq_test_13 (x int, y int); +CREATE TABLE seq_test_13_local_table (x int, y int); +SELECT create_distributed_table('seq_test_13','x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT citus_add_local_table_to_metadata('seq_test_13_local_table'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +ALTER TABLE seq_test_13 ADD COLUMN z int DEFAULT nextval('seq_13'); +ALTER TABLE seq_test_13_local_table ADD COLUMN z int DEFAULT nextval('seq_13_local_table'); +ALTER SEQUENCE seq_13 INCREMENT BY 2; +ALTER SEQUENCE seq_13_local_table INCREMENT BY 2; +\d seq_13 + Sequence "sequence_default.seq_13" Type | Start | Minimum | Maximum | Increment | Cycles? | Cache --------------------------------------------------------------------- integer | 1 | 1 | 2147483647 | 2 | no | 1 -\d seq_0_local_table - Sequence "sequence_default.seq_0_local_table" +\d seq_13_local_table + Sequence "sequence_default.seq_13_local_table" Type | Start | Minimum | Maximum | Increment | Cycles? 
| Cache --------------------------------------------------------------------- integer | 1 | 1 | 2147483647 | 2 | no | 1 -- check that we can add serial pseudo-type columns --- when metadata is not yet synced to workers +-- when metadata is not synced to workers TRUNCATE seq_test_0; ALTER TABLE seq_test_0 ADD COLUMN w00 smallserial; ALTER TABLE seq_test_0 ADD COLUMN w01 serial2; @@ -165,6 +195,7 @@ ALTER TABLE seq_test_0_local_table ADD COLUMN w10 serial; ALTER TABLE seq_test_0_local_table ADD COLUMN w11 serial4; ALTER TABLE seq_test_0_local_table ADD COLUMN w20 bigserial; ALTER TABLE seq_test_0_local_table ADD COLUMN w21 serial8; +ROLLBACK; -- check alter column type precaution ALTER TABLE seq_test_0 ALTER COLUMN z TYPE bigint; ERROR: cannot execute ALTER COLUMN TYPE .. command because the column involves a default coming from a sequence @@ -962,12 +993,6 @@ SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default CASCADE'); (localhost,57638,t,"DROP SCHEMA") (2 rows) -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - SELECT master_remove_node('localhost', :master_port); master_remove_node --------------------------------------------------------------------- diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 25b3db268..6f181b608 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -35,9 +35,9 @@ test: alter_database_owner test: multi_test_catalog_views test: multi_table_ddl +test: multi_sequence_default test: check_mx test: turn_mx_off -test: multi_sequence_default test: multi_name_lengths test: turn_mx_on test: multi_name_resolution diff --git a/src/test/regress/sql/multi_sequence_default.sql b/src/test/regress/sql/multi_sequence_default.sql index 4b0ac1ca3..67663accb 100644 --- a/src/test/regress/sql/multi_sequence_default.sql +++ b/src/test/regress/sql/multi_sequence_default.sql @@ -46,20 +46,33 @@ SELECT * FROM seq_test_0_local_table ORDER BY 1, 2 LIMIT 5; -- in this case column z is of type int \d seq_0 \d seq_0_local_table --- cannot change the type of a sequence used in a distributed table --- even if metadata is not synced to workers +-- cannot alter a sequence used in a distributed table +-- since the metadata is synced to workers ALTER SEQUENCE seq_0 AS bigint; ALTER SEQUENCE seq_0_local_table AS bigint; + -- we can change other things like increment -- if metadata is not synced to workers -ALTER SEQUENCE seq_0 INCREMENT BY 2; -ALTER SEQUENCE seq_0_local_table INCREMENT BY 2; -\d seq_0 -\d seq_0_local_table +BEGIN; +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); +CREATE SEQUENCE seq_13; +CREATE SEQUENCE seq_13_local_table; +CREATE TABLE seq_test_13 (x int, y int); +CREATE TABLE seq_test_13_local_table (x int, y int); +SELECT create_distributed_table('seq_test_13','x'); +SELECT citus_add_local_table_to_metadata('seq_test_13_local_table'); +ALTER TABLE seq_test_13 ADD COLUMN z int DEFAULT nextval('seq_13'); +ALTER TABLE seq_test_13_local_table ADD COLUMN z int DEFAULT nextval('seq_13_local_table'); + +ALTER SEQUENCE seq_13 INCREMENT BY 2; +ALTER SEQUENCE seq_13_local_table INCREMENT BY 2; +\d seq_13 +\d seq_13_local_table -- check that we can add serial pseudo-type columns --- when metadata is not yet synced to workers +-- when metadata is not synced to workers TRUNCATE seq_test_0; ALTER 
TABLE seq_test_0 ADD COLUMN w00 smallserial; ALTER TABLE seq_test_0 ADD COLUMN w01 serial2; @@ -76,6 +89,8 @@ ALTER TABLE seq_test_0_local_table ADD COLUMN w11 serial4; ALTER TABLE seq_test_0_local_table ADD COLUMN w20 bigserial; ALTER TABLE seq_test_0_local_table ADD COLUMN w21 serial8; +ROLLBACK; + -- check alter column type precaution ALTER TABLE seq_test_0 ALTER COLUMN z TYPE bigint; ALTER TABLE seq_test_0 ALTER COLUMN z TYPE smallint; @@ -468,6 +483,5 @@ DROP TABLE sequence_default.seq_test_7_par; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA sequence_default CASCADE; SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default CASCADE'); -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); SELECT master_remove_node('localhost', :master_port); SET search_path TO public; From 70e68d5312e1af0abcd5211a5bcfe1c530acd3f8 Mon Sep 17 00:00:00 2001 From: Halil Ozan Akgul Date: Tue, 21 Dec 2021 14:19:10 +0300 Subject: [PATCH 10/17] Fix metadata sync fails on multi_name_lengths --- src/test/regress/expected/multi_name_lengths.out | 13 +++++++++---- src/test/regress/multi_1_schedule | 3 --- src/test/regress/sql/multi_name_lengths.sql | 13 +++++++++---- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/test/regress/expected/multi_name_lengths.out b/src/test/regress/expected/multi_name_lengths.out index 5ef151042..e100ba7d8 100644 --- a/src/test/regress/expected/multi_name_lengths.out +++ b/src/test/regress/expected/multi_name_lengths.out @@ -219,8 +219,9 @@ NOTICE: identifier "append_zero_shard_table_12345678901234567890123456789012345 -- Verify that CREATE INDEX on already distributed table has proper shard names. CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_lengths(col2); \c - - :public_worker_1_host :worker_1_port +SET citus.override_table_visibility TO FALSE; SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE - relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; + relname SIMILAR TO 'tmp_idx_%\_\d{6}' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; relname | Column | Type | Definition --------------------------------------------------------------------- tmp_idx_123456789012345678901234567890123456789_5e470afa_225003 | col2 | integer | col2 @@ -237,8 +238,9 @@ ALTER INDEX tmp_idx_123456789012345678901234567890123456789012345678901234567890 NOTICE: identifier "tmp_idx_123456789012345678901234567890123456789012345678901234567890" will be truncated to "tmp_idx_1234567890123456789012345678901234567890123456789012345" NOTICE: identifier "tmp_idx_newname_123456789012345678901234567890123456789012345678901234567890" will be truncated to "tmp_idx_newname_12345678901234567890123456789012345678901234567" \c - - :public_worker_1_host :worker_1_port +SET citus.override_table_visibility TO FALSE; SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE - relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; + relname SIMILAR TO 'tmp_idx_%\_\d{6}' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; relname | Column | Type | Definition --------------------------------------------------------------------- tmp_idx_newname_1234567890123456789012345678901_c54e849b_225003 | col2 | integer | col2 @@ -337,6 +339,7 @@ SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); (1 row) \c - - :public_worker_1_host :worker_1_port +SET citus.override_table_visibility TO FALSE; SELECT c1.relname AS unique_index_name FROM pg_class c1 JOIN pg_index i ON 
i.indexrelid = c1.oid @@ -369,6 +372,7 @@ SELECT create_distributed_table('too_long_12345678901234567890123456789012345678 (1 row) \c - - :public_worker_1_host :worker_1_port +SET citus.override_table_visibility TO FALSE; \dt *225000000000* List of relations Schema | Name | Type | Owner @@ -401,7 +405,8 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0 (1 row) \c - - :public_worker_1_host :worker_1_port -\dt public.elephant_* +SET citus.override_table_visibility TO FALSE; +\dt public.elephant_*[0-9]+ List of relations Schema | Name | Type | Owner --------------------------------------------------------------------- @@ -409,7 +414,7 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0 public | elephant_слонслонслонсло_c8b737c2_2250000000003 | table | postgres (2 rows) -\di public.elephant_* +\di public.elephant_*[0-9]+ List of relations Schema | Name | Type | Owner | Table --------------------------------------------------------------------- diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 6f181b608..1f00ca06d 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -36,10 +36,7 @@ test: alter_database_owner test: multi_test_catalog_views test: multi_table_ddl test: multi_sequence_default -test: check_mx -test: turn_mx_off test: multi_name_lengths -test: turn_mx_on test: multi_name_resolution test: multi_metadata_access test: multi_metadata_attributes diff --git a/src/test/regress/sql/multi_name_lengths.sql b/src/test/regress/sql/multi_name_lengths.sql index 3328572d4..df0a68f33 100644 --- a/src/test/regress/sql/multi_name_lengths.sql +++ b/src/test/regress/sql/multi_name_lengths.sql @@ -163,8 +163,9 @@ CREATE INDEX append_zero_shard_table_idx_123456789012345678901234567890123456789 CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_lengths(col2); \c - - :public_worker_1_host :worker_1_port +SET citus.override_table_visibility TO FALSE; SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE - relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; + relname SIMILAR TO 'tmp_idx_%\_\d{6}' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; \c - - :master_host :master_port -- Verify that a new index name > 63 characters is auto-truncated @@ -175,8 +176,9 @@ CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890123456789 ALTER INDEX tmp_idx_123456789012345678901234567890123456789012345678901234567890 RENAME TO tmp_idx_newname_123456789012345678901234567890123456789012345678901234567890; \c - - :public_worker_1_host :worker_1_port +SET citus.override_table_visibility TO FALSE; SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE - relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; + relname SIMILAR TO 'tmp_idx_%\_\d{6}' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC; \c - - :master_host :master_port SET citus.shard_count TO 2; @@ -236,6 +238,7 @@ CREATE TABLE sneaky_name_lengths ( SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); \c - - :public_worker_1_host :worker_1_port +SET citus.override_table_visibility TO FALSE; SELECT c1.relname AS unique_index_name FROM pg_class c1 @@ -263,6 +266,7 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); \c - - :public_worker_1_host :worker_1_port +SET 
citus.override_table_visibility TO FALSE; \dt *225000000000* \c - - :master_host :master_port @@ -283,8 +287,9 @@ FROM pg_dist_shard WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass; \c - - :public_worker_1_host :worker_1_port -\dt public.elephant_* -\di public.elephant_* +SET citus.override_table_visibility TO FALSE; +\dt public.elephant_*[0-9]+ +\di public.elephant_*[0-9]+ \c - - :master_host :master_port SET citus.shard_count TO 2; From bb636e6a29271b68af0c40fb594704ddd4cfda75 Mon Sep 17 00:00:00 2001 From: Halil Ozan Akgul Date: Fri, 24 Dec 2021 19:21:22 +0300 Subject: [PATCH 11/17] Fix metadata sync fails on multi_function_evaluation --- src/test/regress/expected/master_copy_shard_placement.out | 1 + src/test/regress/multi_1_schedule | 4 ++-- src/test/regress/sql/master_copy_shard_placement.sql | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/test/regress/expected/master_copy_shard_placement.out b/src/test/regress/expected/master_copy_shard_placement.out index 4b5ee8ae7..09fad3aa7 100644 --- a/src/test/regress/expected/master_copy_shard_placement.out +++ b/src/test/regress/expected/master_copy_shard_placement.out @@ -131,4 +131,5 @@ SELECT master_copy_shard_placement( transfer_mode := 'block_writes'); ERROR: Table 'mx_table' is streaming replicated. Shards of streaming replicated tables cannot be copied SET client_min_messages TO ERROR; +DROP TABLE mcsp.history; DROP SCHEMA mcsp CASCADE; diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 1f00ca06d..33dfa1efc 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -242,13 +242,13 @@ test: multi_schema_support # ---------- # multi_function_evaluation tests edge-cases in master-side function pre-evaluation # ---------- -test: check_mx -test: turn_mx_off test: multi_function_evaluation # ---------- # multi_truncate tests truncate functionality for distributed tables # ---------- +test: check_mx +test: turn_mx_off test: multi_truncate # ---------- diff --git a/src/test/regress/sql/master_copy_shard_placement.sql b/src/test/regress/sql/master_copy_shard_placement.sql index 9f6949e37..f4b70fdb2 100644 --- a/src/test/regress/sql/master_copy_shard_placement.sql +++ b/src/test/regress/sql/master_copy_shard_placement.sql @@ -106,4 +106,5 @@ SELECT master_copy_shard_placement( transfer_mode := 'block_writes'); SET client_min_messages TO ERROR; +DROP TABLE mcsp.history; DROP SCHEMA mcsp CASCADE; From c9127f921f7aaa12a1a6cd22f6bbf44071752c01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=96nder=20Kalac=C4=B1?= Date: Mon, 27 Dec 2021 10:29:37 +0100 Subject: [PATCH 12/17] Avoid round trips while fixing index names (#5549) With this commit, fix_partition_shard_index_names() works significantly faster. For example, 32 shards, 365 partitions, 5 indexes drop from ~120 seconds to ~44 seconds 32 shards, 1095 partitions, 5 indexes drop from ~600 seconds to ~265 seconds `queryStringList` can be really long, because it may contain #partitions * #indexes entries. Before this change, we were actually going through the executor where each command in the query string triggers 1 round trip per entry in queryStringList. The aim of this commit is to avoid the round-trips by creating a single query string. I first simply tried sending `q1;q2;..;qn` . 
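For illustration, here is the shape of that first attempt: two entries of
queryStringList joined with ';' into a single query string. The two
worker_fix_partition_shard_index_names() calls are borrowed from the
regression output further below, so only the joining is new here:

    SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_idx3_915001');SELECT worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_pkey_915001')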
However, the executor is designed to handle `q1;q2;..;qn`-style query
executions via the infrastructure mentioned above (e.g., by tracking the
query indexes in the list and doing 1 statement per round trip). Another
option could have been to change the executor such that it tracks the query
index only when `queryStringList` is provided, not when a single queryString
includes multiple `;`s. That is (a) more work, (b) could cause weird edge
cases with failure handling, and (c) felt like coding a special case into
the executor.
---
 .../distributed/sql/citus--10.2-4--11.0-1.sql |  1 +
 .../sql/downgrades/citus--11.0-1--10.2-4.sql  |  1 +
 .../udfs/citus_run_local_command/11.0-1.sql   |  8 ++++++++
 .../udfs/citus_run_local_command/latest.sql   |  8 ++++++++
 .../utils/multi_partitioning_utils.c          | 18 +++++++++++++++++-
 .../expected/citus_local_table_triggers.out   |  2 +-
 .../regress/expected/citus_run_command.out    | 13 +++++++++++++
 .../expected/coordinator_shouldhaveshards.out |  4 ++--
 src/test/regress/expected/multi_extension.out |  3 ++-
 .../multi_fix_partition_shard_index_names.out | 18 ++++--------------
 .../expected/upgrade_list_citus_objects.out   |  3 ++-
 src/test/regress/sql/citus_run_command.sql    |  4 ++++
 src/test/regress/sql_schedule                 |  2 +-
 13 files changed, 64 insertions(+), 21 deletions(-)
 create mode 100644 src/backend/distributed/sql/udfs/citus_run_local_command/11.0-1.sql
 create mode 100644 src/backend/distributed/sql/udfs/citus_run_local_command/latest.sql
 create mode 100644 src/test/regress/expected/citus_run_command.out
 create mode 100644 src/test/regress/sql/citus_run_command.sql

diff --git a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql
index c5f333152..f4ba74584 100644
--- a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql
+++ b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql
@@ -7,6 +7,7 @@
 #include "udfs/citus_check_cluster_node_health/11.0-1.sql"
 #include "udfs/citus_internal_add_object_metadata/11.0-1.sql"
+#include "udfs/citus_run_local_command/11.0-1.sql"

 DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text);
 DROP FUNCTION pg_catalog.master_get_table_metadata(text);
diff --git a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql
index e5a7cb2bd..8651d945e 100644
--- a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql
+++ b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql
@@ -44,3 +44,4 @@ DROP FUNCTION pg_catalog.citus_check_connection_to_node (text, integer);
 DROP FUNCTION pg_catalog.citus_check_cluster_node_health ();
 DROP FUNCTION pg_catalog.citus_internal_add_object_metadata(text, text[], text[], integer, integer);
+DROP FUNCTION pg_catalog.citus_run_local_command(text);
diff --git a/src/backend/distributed/sql/udfs/citus_run_local_command/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_run_local_command/11.0-1.sql
new file mode 100644
index 000000000..41a064ef4
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_run_local_command/11.0-1.sql
@@ -0,0 +1,8 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_run_local_command(command text)
+RETURNS void AS $$
+BEGIN
+    EXECUTE $1;
+END;
+$$ LANGUAGE PLPGSQL;
+COMMENT ON FUNCTION pg_catalog.citus_run_local_command(text)
+    IS 'citus_run_local_command executes the input command';
diff --git a/src/backend/distributed/sql/udfs/citus_run_local_command/latest.sql b/src/backend/distributed/sql/udfs/citus_run_local_command/latest.sql
new file mode 
100644 index 000000000..41a064ef4 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_run_local_command/latest.sql @@ -0,0 +1,8 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_run_local_command(command text) +RETURNS void AS $$ +BEGIN + EXECUTE $1; +END; +$$ LANGUAGE PLPGSQL; +COMMENT ON FUNCTION pg_catalog.citus_run_local_command(text) + IS 'citus_run_local_command executes the input command'; diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index c0751a49b..a7477e5e5 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -569,7 +569,23 @@ CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId, Oid partitionRel task->taskId = taskId++; task->taskType = DDL_TASK; - SetTaskQueryStringList(task, queryStringList); + + /* + * There could be O(#partitions * #indexes) queries in + * the queryStringList. + * + * In order to avoid round-trips per query in queryStringList, + * we join the string and send as a single command via the UDF. + * Otherwise, the executor sends each command with one + * round-trip. + */ + char *string = StringJoin(queryStringList, ';'); + StringInfo commandToRun = makeStringInfo(); + + appendStringInfo(commandToRun, + "SELECT pg_catalog.citus_run_local_command($$%s$$)", string); + SetTaskQueryString(task, commandToRun->data); + task->dependentTaskList = NULL; task->replicationModel = REPLICATION_MODEL_INVALID; task->anchorShardId = parentShardId; diff --git a/src/test/regress/expected/citus_local_table_triggers.out b/src/test/regress/expected/citus_local_table_triggers.out index 8d9491933..ac6906282 100644 --- a/src/test/regress/expected/citus_local_table_triggers.out +++ b/src/test/regress/expected/citus_local_table_triggers.out @@ -457,7 +457,7 @@ ALTER TABLE par_another_citus_local_table ADD CONSTRAINT fkey_self FOREIGN KEY(v ALTER TABLE par_citus_local_table ADD CONSTRAINT fkey_c_to_c FOREIGN KEY(val) REFERENCES par_another_citus_local_table(val) ON UPDATE CASCADE; SELECT citus_add_local_table_to_metadata('par_another_citus_local_table', cascade_via_foreign_keys=>true); NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1507011, 'citus_local_table_triggers', 1507012, 'citus_local_table_triggers', 'ALTER TABLE citus_local_table_triggers.par_another_citus_local_table ATTACH PARTITION citus_local_table_triggers.par_another_citus_local_table_1 FOR VALUES FROM (1) TO (10000);') -NOTICE: executing the command locally: SELECT worker_fix_partition_shard_index_names('citus_local_table_triggers.par_another_citus_local_table_val_key_1507011'::regclass, 'citus_local_table_triggers.par_another_citus_local_table_1_1507012', 'par_another_citus_local_table_1_val_key_1507012') +NOTICE: executing the command locally: SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('citus_local_table_triggers.par_another_citus_local_table_val_key_1507011'::regclass, 'citus_local_table_triggers.par_another_citus_local_table_1_1507012', 'par_another_citus_local_table_1_val_key_1507012')$$) NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1507013, 'citus_local_table_triggers', 1507014, 'citus_local_table_triggers', 'ALTER TABLE citus_local_table_triggers.par_citus_local_table ATTACH PARTITION citus_local_table_triggers.par_citus_local_table_1 FOR VALUES FROM (1) TO (10000);') NOTICE: executing the command locally: SELECT 
worker_apply_inter_shard_ddl_command (1507011, 'citus_local_table_triggers', 1507011, 'citus_local_table_triggers', 'ALTER TABLE citus_local_table_triggers.par_another_citus_local_table ADD CONSTRAINT fkey_self FOREIGN KEY (val) REFERENCES citus_local_table_triggers.par_another_citus_local_table(val)') NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1507013, 'citus_local_table_triggers', 1507011, 'citus_local_table_triggers', 'ALTER TABLE citus_local_table_triggers.par_citus_local_table ADD CONSTRAINT fkey_c_to_c FOREIGN KEY (val) REFERENCES citus_local_table_triggers.par_another_citus_local_table(val) ON UPDATE CASCADE') diff --git a/src/test/regress/expected/citus_run_command.out b/src/test/regress/expected/citus_run_command.out new file mode 100644 index 000000000..006872b9f --- /dev/null +++ b/src/test/regress/expected/citus_run_command.out @@ -0,0 +1,13 @@ +SELECT citus_run_local_command($$SELECT 1; SELECT 1$$); + citus_run_local_command +--------------------------------------------------------------------- + +(1 row) + +SELECT citus_run_local_command($$SELECT 1; SELECT 1/0$$); +ERROR: division by zero +CONTEXT: SQL statement "SELECT 1; SELECT 1/0" +PL/pgSQL function citus_run_local_command(text) line XX at EXECUTE +SELECT citus_run_local_command(NULL); +ERROR: query string argument of EXECUTE is null +CONTEXT: PL/pgSQL function citus_run_local_command(text) line XX at EXECUTE diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out index e11bda9c6..2cd9f2f20 100644 --- a/src/test/regress/expected/coordinator_shouldhaveshards.out +++ b/src/test/regress/expected/coordinator_shouldhaveshards.out @@ -627,8 +627,8 @@ CREATE INDEX ix_test_index_creation5 ON test_index_creation1 INCLUDE (field1) WHERE (tenant_id = 100); NOTICE: executing the command locally: CREATE INDEX ix_test_index_creation5_1503042 ON coordinator_shouldhaveshards.test_index_creation1_1503042 USING btree (tenant_id ,timeperiod ) INCLUDE (field1 ) WHERE (tenant_id = 100) NOTICE: executing the command locally: CREATE INDEX ix_test_index_creation5_1503045 ON coordinator_shouldhaveshards.test_index_creation1_1503045 USING btree (tenant_id ,timeperiod ) INCLUDE (field1 ) WHERE (tenant_id = 100) -NOTICE: executing the command locally: SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503048', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503048');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503049', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503049');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503050', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503050');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503051', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503051');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 
'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503052', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503052');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503053', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503053');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503054', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503054');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503055', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503055');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503056', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503056');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503057', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503057');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503058', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503058');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503059', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503059') -NOTICE: executing the command locally: SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503048', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503048');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503049', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503049');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503050', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503050');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503051', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503051');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503052', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503052');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503053', 
'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503053');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503054', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503054');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503055', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503055');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503056', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503056');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503057', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503057');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503058', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503058');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503059', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503059') +NOTICE: executing the command locally: SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503048', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503048');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503049', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503049');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503050', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503050');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503051', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503051');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503052', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503052');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503053', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503053');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503054', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503054');SELECT 
worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503055', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503055');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503056', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503056');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503057', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503057');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503058', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503058');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503059', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503059')$$) +NOTICE: executing the command locally: SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503048', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503048');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503049', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503049');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503050', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503050');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503051', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503051');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503052', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503052');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503053', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503053');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503054', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503054');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503055', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503055');SELECT 
worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503056', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503056');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503057', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503057');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503058', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503058');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503059', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503059')$$) -- test if indexes are created SELECT 1 AS created WHERE EXISTS(SELECT * FROM pg_indexes WHERE indexname LIKE '%test_index_creation%'); created diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index fe48f175d..3643c8c75 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -977,7 +977,8 @@ SELECT * FROM multi_extension.print_extension_changes(); | function citus_check_connection_to_node(text,integer) boolean | function citus_disable_node(text,integer,boolean) void | function citus_internal_add_object_metadata(text,text[],text[],integer,integer) void -(8 rows) + | function citus_run_local_command(text) void +(9 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index f22226d7f..49ed3d1fc 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -564,7 +564,7 @@ NOTICE: issuing CREATE INDEX i4 ON parent_table(dist_col); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing CREATE INDEX i4_915000 ON fix_idx_names.parent_table_915000 USING btree (dist_col ) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_idx3_915001') +NOTICE: issuing SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_idx3_915001')$$) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -594,7 +594,7 @@ NOTICE: issuing ALTER TABLE parent_table ADD CONSTRAINT pkey_cst PRIMARY KEY (d DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_shard_ddl_command (915000, 'fix_idx_names', 'ALTER TABLE parent_table ADD CONSTRAINT pkey_cst PRIMARY KEY (dist_col, partition_col);') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT 
worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_pkey_915001') +NOTICE: issuing SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_pkey_915001')$$) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -623,7 +623,7 @@ NOTICE: issuing ALTER TABLE parent_table ADD CONSTRAINT unique_cst UNIQUE (dist DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_shard_ddl_command (915000, 'fix_idx_names', 'ALTER TABLE parent_table ADD CONSTRAINT unique_cst UNIQUE (dist_col, partition_col);') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.unique_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_partition_col_key_915001') +NOTICE: issuing SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('fix_idx_names.unique_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_partition_col_key_915001')$$) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -700,17 +700,7 @@ NOTICE: issuing ALTER TABLE parent_table ATTACH PARTITION p2 FOR VALUES FROM (' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_inter_shard_ddl_command (915000, 'fix_idx_names', 915002, 'fix_idx_names', 'ALTER TABLE parent_table ATTACH PARTITION p2 FOR VALUES FROM (''2019-01-01'') TO (''2020-01-01'');') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i1_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx_915002') -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i2_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx1_915002') -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i3_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx2_915002') -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_renamed_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx3_915002') -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_pkey_915002') -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.unique_cst_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_partition_col_key_915002') +NOTICE: issuing SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('fix_idx_names.i1_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.i2_915000'::regclass, 'fix_idx_names.p2_915002', 
'p2_dist_col_idx1_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.i3_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx2_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_renamed_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx3_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_pkey_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.unique_cst_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_partition_col_key_915002')$$) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 9ffef1f70..c51838495 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -89,6 +89,7 @@ ORDER BY 1; function citus_relation_size(regclass) function citus_remote_connection_stats() function citus_remove_node(text,integer) + function citus_run_local_command(text) function citus_server_id() function citus_set_coordinator_host(text,integer,noderole,name) function citus_set_default_rebalance_strategy(text) @@ -262,5 +263,5 @@ ORDER BY 1; view citus_worker_stat_activity view pg_dist_shard_placement view time_partitions -(246 rows) +(247 rows) diff --git a/src/test/regress/sql/citus_run_command.sql b/src/test/regress/sql/citus_run_command.sql new file mode 100644 index 000000000..7ca17335b --- /dev/null +++ b/src/test/regress/sql/citus_run_command.sql @@ -0,0 +1,4 @@ +SELECT citus_run_local_command($$SELECT 1; SELECT 1$$); +SELECT citus_run_local_command($$SELECT 1; SELECT 1/0$$); +SELECT citus_run_local_command(NULL); + diff --git a/src/test/regress/sql_schedule b/src/test/regress/sql_schedule index a8073825a..924143bec 100644 --- a/src/test/regress/sql_schedule +++ b/src/test/regress/sql_schedule @@ -5,4 +5,4 @@ test: ch_benchmarks_4 ch_benchmarks_5 ch_benchmarks_6 test: intermediate_result_pruning_queries_1 intermediate_result_pruning_queries_2 test: dropped_columns_1 distributed_planning test: local_dist_join -test: connectivity_checks +test: connectivity_checks citus_run_command From 0c292a74f52ba04136b6ab1de752a6f3fcbf2b5c Mon Sep 17 00:00:00 2001 From: Halil Ozan Akgul Date: Mon, 27 Dec 2021 11:27:32 +0300 Subject: [PATCH 13/17] Fix metadata sync fails on multi_truncate --- src/test/regress/expected/multi_truncate.out | 3 +++ src/test/regress/multi_1_schedule | 4 ++-- src/test/regress/sql/multi_truncate.sql | 3 +++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/test/regress/expected/multi_truncate.out b/src/test/regress/expected/multi_truncate.out index c21a65fee..7ac388661 100644 --- a/src/test/regress/expected/multi_truncate.out +++ b/src/test/regress/expected/multi_truncate.out @@ -333,6 +333,7 @@ SELECT citus_drop_all_shards('test_local_truncate', 'public', 'test_local_trunca 4 (1 row) +CREATE TABLE temp_pg_dist_partition_row AS SELECT * FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is truncated SELECT * FROM test_local_truncate; @@ -340,7 +341,9 @@ SELECT * FROM test_local_truncate; 
--------------------------------------------------------------------- (0 rows) +INSERT INTO pg_dist_partition SELECT * FROM temp_pg_dist_partition_row; DROP TABLE test_local_truncate; +DROP TABLE temp_pg_dist_partition_row; -- Truncate local data, but roll back CREATE TABLE test_local_truncate (x int, y int); INSERT INTO test_local_truncate VALUES (1,2); diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 33dfa1efc..840cd599f 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -247,14 +247,14 @@ test: multi_function_evaluation # ---------- # multi_truncate tests truncate functionality for distributed tables # ---------- -test: check_mx -test: turn_mx_off test: multi_truncate # ---------- # multi_colocation_utils tests utility functions written for co-location feature & internal API # multi_colocated_shard_transfer tests master_copy_shard_placement with colocated tables. # ---------- +test: check_mx +test: turn_mx_off test: multi_colocation_utils test: turn_mx_on test: multi_colocated_shard_transfer diff --git a/src/test/regress/sql/multi_truncate.sql b/src/test/regress/sql/multi_truncate.sql index 25b582633..83b4da202 100644 --- a/src/test/regress/sql/multi_truncate.sql +++ b/src/test/regress/sql/multi_truncate.sql @@ -206,12 +206,15 @@ SELECT * FROM test_local_truncate; -- Undistribute table SELECT citus_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate'); +CREATE TABLE temp_pg_dist_partition_row AS SELECT * FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is truncated SELECT * FROM test_local_truncate; +INSERT INTO pg_dist_partition SELECT * FROM temp_pg_dist_partition_row; DROP TABLE test_local_truncate; +DROP TABLE temp_pg_dist_partition_row; -- Truncate local data, but roll back CREATE TABLE test_local_truncate (x int, y int); From d33650d1c12280371f2c0beff373fb1c48ddb75b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=96nder=20Kalac=C4=B1?= Date: Mon, 27 Dec 2021 12:33:34 +0100 Subject: [PATCH 14/17] Record if any partitioned Citus tables exist during upgrade (#5555) With Citus 11, the default behavior is to sync the metadata. However, partitioned tables created pre-Citus 11 might have index names that are not compatible with metadata syncing. See https://github.com/citusdata/citus/issues/4962 for the details. With this commit, we record the existence of partitioned tables such that we can fix them later if any exist. --- .../distributed/sql/citus--10.2-4--11.0-1.sql | 14 ++ src/test/regress/expected/multi_extension.out | 133 +++++++++++------- src/test/regress/sql/multi_extension.sql | 22 +++ 3 files changed, 116 insertions(+), 53 deletions(-) diff --git a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql index f4ba74584..1419ce1b2 100644 --- a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql +++ b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql @@ -30,3 +30,17 @@ BEGIN END IF; END; $$; + +-- Here we keep track of partitioned tables that exist before Citus 11 +-- where we need to call fix_all_partition_shard_index_names() before +-- metadata is synced.
Note that after citus-11, we automatically +-- adjust the indexes so we only need to fix existing indexes +DO LANGUAGE plpgsql +$$ +DECLARE + partitioned_table_exists bool :=false; +BEGIN + SELECT count(*) > 0 INTO partitioned_table_exists FROM pg_dist_partition p JOIN pg_class c ON p.logicalrelid = c.oid WHERE c.relkind = 'p'; + UPDATE pg_dist_node_metadata SET metadata=jsonb_set(metadata, '{partitioned_citus_table_exists_pre_11}', to_jsonb(partitioned_table_exists), true); +END; +$$; diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 3643c8c75..ec3b806cc 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -425,20 +425,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE ALTER EXTENSION citus UPDATE TO '9.4-2'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -466,20 +466,20 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '9.4-1'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -573,20 +573,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE ALTER EXTENSION citus UPDATE TO '9.5-2'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -614,20 +614,20 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO 
'9.5-1'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -955,8 +955,35 @@ ERROR: cstore_fdw tables are deprecated as of Citus 11.0 HINT: Install Citus 10.2 and convert your cstore_fdw tables to the columnar access method before upgrading further CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE DELETE FROM pg_dist_shard WHERE shardid = 1; +-- partitioned table count is tracked on Citus 11 upgrade +CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id); +CREATE TABLE orders_2020_07_01 +PARTITION OF e_transactions FOR VALUES IN (1,2,3); +INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', NULL, 7, 's'); +SELECT + (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11, + (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null +FROM + pg_dist_node_metadata; + partitioned_citus_table_exists_pre_11 | is_null +--------------------------------------------------------------------- + | t +(1 row) + -- Test downgrade to 10.2-4 from 11.0-1 ALTER EXTENSION citus UPDATE TO '11.0-1'; +SELECT + (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11, + (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null +FROM + pg_dist_node_metadata; + partitioned_citus_table_exists_pre_11 | is_null +--------------------------------------------------------------------- + t | f +(1 row) + +DELETE FROM pg_dist_partition WHERE logicalrelid = 'e_transactions'::regclass; +DROP TABLE e_transactions; ALTER EXTENSION citus UPDATE TO '10.2-4'; -- Should be empty result since upgrade+downgrade should be a no-op SELECT * FROM multi_extension.print_extension_changes(); @@ -967,7 +994,7 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 11.0-1 ALTER EXTENSION citus UPDATE TO '11.0-1'; SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object + previous_object | current_object --------------------------------------------------------------------- function citus_disable_node(text,integer) void | function master_append_table_to_shard(bigint,text,text,integer) real | diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index d237bf5a1..a5150d2c2 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -420,8 +420,30 @@ INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage) VALUES ('pg_dist ALTER EXTENSION citus UPDATE TO '11.0-1'; DELETE FROM pg_dist_shard WHERE shardid = 1; +-- partitioned table count is tracked on Citus 11 upgrade +CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id); +CREATE TABLE 
orders_2020_07_01 +PARTITION OF e_transactions FOR VALUES IN (1,2,3); +INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', NULL, 7, 's'); + +SELECT + (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11, + (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null +FROM + pg_dist_node_metadata; + -- Test downgrade to 10.2-4 from 11.0-1 ALTER EXTENSION citus UPDATE TO '11.0-1'; + +SELECT + (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11, + (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null +FROM + pg_dist_node_metadata; + +DELETE FROM pg_dist_partition WHERE logicalrelid = 'e_transactions'::regclass; +DROP TABLE e_transactions; + ALTER EXTENSION citus UPDATE TO '10.2-4'; -- Should be empty result since upgrade+downgrade should be a no-op SELECT * FROM multi_extension.print_extension_changes(); From aef2d83c7de98ecfc060b0b848e07e6d9ce97ec3 Mon Sep 17 00:00:00 2001 From: Halil Ozan Akgul Date: Mon, 27 Dec 2021 16:42:04 +0300 Subject: [PATCH 15/17] Fix metadata sync fails on multi_transaction_recovery --- src/test/regress/expected/multi_transaction_recovery.out | 8 +++++++- src/test/regress/multi_1_schedule | 2 +- src/test/regress/sql/multi_transaction_recovery.sql | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/test/regress/expected/multi_transaction_recovery.out b/src/test/regress/expected/multi_transaction_recovery.out index c5b8347f8..938847576 100644 --- a/src/test/regress/expected/multi_transaction_recovery.out +++ b/src/test/regress/expected/multi_transaction_recovery.out @@ -371,7 +371,7 @@ COMMIT; SELECT COUNT(*) FROM pg_dist_transaction; count --------------------------------------------------------------------- - 1 + 2 (1 row) SELECT recover_prepared_transactions(); @@ -393,6 +393,12 @@ SELECT count(DISTINCT nodeport) FROM pg_dist_shard_placement WHERE shardid IN (g 2 (1 row) +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + -- only two of the connections will perform a write (INSERT) SET citus.force_max_query_parallelization TO ON; BEGIN; diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 840cd599f..0044ae00e 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -191,8 +191,8 @@ test: check_mx test: turn_mx_off test: multi_generate_ddl_commands multi_repair_shards test: multi_create_shards -test: multi_transaction_recovery test: turn_mx_on +test: multi_transaction_recovery test: local_dist_join_modifications test: local_table_join diff --git a/src/test/regress/sql/multi_transaction_recovery.sql b/src/test/regress/sql/multi_transaction_recovery.sql index c29273a82..39b90ce3d 100644 --- a/src/test/regress/sql/multi_transaction_recovery.sql +++ b/src/test/regress/sql/multi_transaction_recovery.sql @@ -207,6 +207,7 @@ SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', : -- for the following test, ensure that 6 and 7 go to different shards on different workers SELECT count(DISTINCT nodeport) FROM pg_dist_shard_placement WHERE shardid IN (get_shard_id_for_distribution_column('test_2pcskip', 6),get_shard_id_for_distribution_column('test_2pcskip', 7)); +SELECT recover_prepared_transactions(); -- only two of the connections will perform a write (INSERT) SET citus.force_max_query_parallelization TO ON; 
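The extra recover_prepared_transactions() call added above is needed because multi_transaction_recovery now runs after turn_mx_on: with metadata syncing enabled, the sync itself appears to go through two-phase commit, which is why the expected pg_dist_transaction count in the .out hunk above changed from 1 to 2. Draining that state keeps the later counts deterministic. A minimal sketch of what is being drained, assuming a coordinator session; pg_prepared_xacts is the stock PostgreSQL view, and the other two names come from the patch itself:

-- Prepared transactions still open on this server (standard PostgreSQL view)
SELECT gid FROM pg_prepared_xacts;
-- Citus' bookkeeping of two-phase commits it initiated on workers
SELECT count(*) FROM pg_dist_transaction;
-- Resolve any orphaned prepared transactions and clear the matching records
SELECT recover_prepared_transactions();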
BEGIN; From 9547228e8d26d86da3582736bb105ebf880a8798 Mon Sep 17 00:00:00 2001 From: Halil Ozan Akgul Date: Thu, 30 Dec 2021 14:53:50 +0300 Subject: [PATCH 16/17] Add isolation_check_mx test --- .../regress/expected/isolation_check_mx.out | 17 +++++++++++++++++ src/test/regress/isolation_schedule | 1 + src/test/regress/spec/isolation_check_mx.spec | 10 ++++++++++ 3 files changed, 28 insertions(+) create mode 100644 src/test/regress/expected/isolation_check_mx.out create mode 100644 src/test/regress/spec/isolation_check_mx.spec diff --git a/src/test/regress/expected/isolation_check_mx.out b/src/test/regress/expected/isolation_check_mx.out new file mode 100644 index 000000000..c6d9f58ea --- /dev/null +++ b/src/test/regress/expected/isolation_check_mx.out @@ -0,0 +1,17 @@ +Parsed test spec with 1 sessions + +starting permutation: check_mx +step check_mx: + SHOW citus.enable_metadata_sync_by_default; + SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary'; + +citus.enable_metadata_sync_by_default +--------------------------------------------------------------------- +on +(1 row) + +bool_and +--------------------------------------------------------------------- +t +(1 row) + diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index c839cf3e7..bd57faa84 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -91,6 +91,7 @@ test: isolation_metadata_sync_deadlock test: isolation_replicated_dist_on_mx # MXless tests +test: isolation_check_mx test: isolation_turn_mx_off test: isolation_replicate_reference_tables_to_coordinator test: isolation_reference_copy_vs_all diff --git a/src/test/regress/spec/isolation_check_mx.spec b/src/test/regress/spec/isolation_check_mx.spec new file mode 100644 index 000000000..8958b92e8 --- /dev/null +++ b/src/test/regress/spec/isolation_check_mx.spec @@ -0,0 +1,10 @@ +session "s1" + +step "check_mx" +{ + SHOW citus.enable_metadata_sync_by_default; + + SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary'; +} + +permutation "check_mx" From 29dd7dfe052dac3283af309b4f6f6daed18a5a63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?= Date: Tue, 4 Jan 2022 10:42:18 +0300 Subject: [PATCH 17/17] Add Stack Overflow badge to README.md (#5589) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index ebb0e3b66..86f7b3e0f 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Slack Status](http://slack.citusdata.com/badge.svg)](https://slack.citusdata.com) [![Latest Docs](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://docs.citusdata.com/) [![Code Coverage](https://codecov.io/gh/citusdata/citus/branch/master/graph/badge.svg)](https://app.codecov.io/gh/citusdata/citus) +[![Stack Overflow](https://img.shields.io/badge/Stack%20Overflow-%20-545353?logo=Stack%20Overflow)](https://stackoverflow.com/questions/tagged/citus) ## What is Citus?
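A closing note on the flag introduced in patch 14 above: the 11.0-1 script only records whether pre-11 partitioned Citus tables exist, and leaves acting on it for later. A hedged sketch of that follow-up step, assuming a coordinator session; the jsonb key is the one the script writes, and fix_all_partition_shard_index_names() is the function named in the migration comment (its argument-free invocation is an assumption here, not something this series adds):

-- Check the flag written by citus--10.2-4--11.0-1.sql
SELECT (metadata->>'partitioned_citus_table_exists_pre_11')::boolean AS needs_fix
FROM pg_dist_node_metadata;
-- If true, rename partition shard indexes to the Citus 11 scheme before
-- metadata is synced (function name taken from the migration comment)
SELECT fix_all_partition_shard_index_names();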