From 95d8d27c4fad73cce6ba8c32233a2b39f1afe201 Mon Sep 17 00:00:00 2001 From: Jason Petersen Date: Tue, 21 Mar 2017 16:47:53 -0600 Subject: [PATCH 1/8] Change IndexStmt to generate worker DDL on master Because we can't execute CREATE INDEX CONCURRENTLY during transactions, worker_apply_shard_ddl_command is insufficient. --- .../distributed/executor/multi_utility.c | 57 ++++++++++- .../distributed/utils/citus_ruleutils.c | 98 +++++++++++++++++++ src/include/distributed/citus_ruleutils.h | 2 + 3 files changed, 156 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/executor/multi_utility.c b/src/backend/distributed/executor/multi_utility.c index 745b440ef..c1f9a8027 100644 --- a/src/backend/distributed/executor/multi_utility.c +++ b/src/backend/distributed/executor/multi_utility.c @@ -134,6 +134,7 @@ static bool IsAlterTableRenameStmt(RenameStmt *renameStatement); static void ExecuteDistributedDDLJob(DDLJob *ddlJob); static void ShowNoticeIfNotUsing2PC(void); static List * DDLTaskList(Oid relationId, const char *commandString); +static List * IndexTaskList(Oid relationId, IndexStmt *indexStmt); static List * ForeignKeyTaskList(Oid leftRelationId, Oid rightRelationId, const char *commandString); static void RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, @@ -678,7 +679,7 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand) DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = relationId; ddlJob->commandString = createIndexCommand; - ddlJob->taskList = DDLTaskList(relationId, createIndexCommand); + ddlJob->taskList = IndexTaskList(relationId, createIndexStatement); ddlJobs = list_make1(ddlJob); } @@ -2071,6 +2072,60 @@ DDLTaskList(Oid relationId, const char *commandString) } +/* + * DDLTaskList builds a list of tasks to execute a DDL command on a + * given list of shards. + */ +static List * +IndexTaskList(Oid relationId, IndexStmt *indexStmt) +{ + List *taskList = NIL; + List *shardIntervalList = LoadShardIntervalList(relationId); + ListCell *shardIntervalCell = NULL; + Oid schemaId = get_rel_namespace(relationId); + char *schemaName = get_namespace_name(schemaId); + StringInfoData ddlString; + uint64 jobId = INVALID_JOB_ID; + int taskId = 1; + + initStringInfo(&ddlString); + + /* set statement's schema name if it is not set already */ + if (indexStmt->relation->schemaname == NULL) + { + indexStmt->relation->schemaname = schemaName; + } + + /* lock metadata before getting placement lists */ + LockShardListMetadata(shardIntervalList, ShareLock); + + foreach(shardIntervalCell, shardIntervalList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); + uint64 shardId = shardInterval->shardId; + Task *task = NULL; + + deparse_shard_index_statement(indexStmt, relationId, shardId, &ddlString); + + task = CitusMakeNode(Task); + task->jobId = jobId; + task->taskId = taskId++; + task->taskType = DDL_TASK; + task->queryString = pstrdup(ddlString.data); + task->replicationModel = REPLICATION_MODEL_INVALID; + task->dependedTaskList = NULL; + task->anchorShardId = shardId; + task->taskPlacementList = FinalizedShardPlacementList(shardId); + + taskList = lappend(taskList, task); + + resetStringInfo(&ddlString); + } + + return taskList; +} + + /* * ForeignKeyTaskList builds a list of tasks to execute a foreign key command on a * shards of given list of distributed table. 
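Note: the citus_ruleutils.c change below adds deparse_shard_index_statement, which builds the per-shard command text on the coordinator instead of relying on worker_apply_shard_ddl_command. As a rough sketch of the rewrite it performs (the shard suffix 102089 matches the shard used in the regression tests later in this series; the exact whitespace and quoting emitted by the deparse code may differ slightly):

    -- statement issued on the coordinator:
    CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);

    -- roughly what is deparsed and sent for each shard placement:
    CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON public.index_test_hash_102089 USING btree (b);
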
diff --git a/src/backend/distributed/utils/citus_ruleutils.c b/src/backend/distributed/utils/citus_ruleutils.c index 3342d3394..df2ac1929 100644 --- a/src/backend/distributed/utils/citus_ruleutils.c +++ b/src/backend/distributed/utils/citus_ruleutils.c @@ -24,6 +24,7 @@ #include "access/tupdesc.h" #include "catalog/dependency.h" #include "catalog/indexing.h" +#include "catalog/namespace.h" #include "catalog/pg_attribute.h" #include "catalog/pg_authid.h" #include "catalog/pg_class.h" @@ -33,12 +34,14 @@ #include "commands/defrem.h" #include "commands/extension.h" #include "distributed/citus_ruleutils.h" +#include "distributed/relay_utility.h" #include "foreign/foreign.h" #include "lib/stringinfo.h" #include "nodes/nodes.h" #include "nodes/nodeFuncs.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" +#include "parser/parse_utilcmd.h" #include "storage/lock.h" #include "utils/acl.h" #include "utils/array.h" @@ -587,6 +590,101 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) } +char * +deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid, + StringInfo buffer) +{ + IndexStmt *indexStmt = copyObject(origStmt); /* copy to avoid modifications */ + char *relationName = indexStmt->relation->relname; + char *indexName = indexStmt->idxname; + ListCell *indexParameterCell = NULL; + List *deparseContext = NULL; + + /* extend relation and index name using shard identifier */ + AppendShardIdToName(&relationName, shardid); + AppendShardIdToName(&indexName, shardid); + + /* use extended shard name and transformed stmt for deparsing */ + deparseContext = deparse_context_for(relationName, distrelid); + indexStmt = transformIndexStmt(distrelid, indexStmt, NULL); + + appendStringInfo(buffer, "CREATE %s INDEX %s %s %s ON %s USING %s ", + (indexStmt->unique ? "UNIQUE" : ""), + (indexStmt->concurrent ? "CONCURRENTLY" : ""), + (indexStmt->if_not_exists ? "IF NOT EXISTS" : ""), + quote_identifier(indexName), + quote_qualified_identifier(indexStmt->relation->schemaname, + relationName), + indexStmt->accessMethod); + + /* index column or expression list begins here */ + appendStringInfoChar(buffer, '('); + + foreach(indexParameterCell, indexStmt->indexParams) + { + IndexElem *indexElement = (IndexElem *) lfirst(indexParameterCell); + + /* use commas to separate subsequent elements */ + if (indexParameterCell != list_head(indexStmt->indexParams)) + { + appendStringInfoChar(buffer, ','); + } + + if (indexElement->name) + { + appendStringInfo(buffer, "%s ", quote_identifier(indexElement->name)); + } + else if (indexElement->expr) + { + appendStringInfo(buffer, "(%s)", deparse_expression(indexElement->expr, + deparseContext, false, + false)); + } + + if (indexElement->collation != NIL) + { + appendStringInfo(buffer, "COLLATE %s ", + NameListToQuotedString(indexElement->collation)); + } + + if (indexElement->opclass != NIL) + { + appendStringInfo(buffer, "%s ", + NameListToQuotedString(indexElement->opclass)); + } + + if (indexElement->ordering != SORTBY_DEFAULT) + { + bool sortAsc = (indexElement->ordering == SORTBY_ASC); + appendStringInfo(buffer, "%s ", (sortAsc ? "ASC" : "DESC")); + } + + if (indexElement->nulls_ordering != SORTBY_NULLS_DEFAULT) + { + bool nullsFirst = (indexElement->nulls_ordering == SORTBY_NULLS_FIRST); + appendStringInfo(buffer, "NULLS %s ", (nullsFirst ? 
"FIRST" : "LAST")); + } + } + + appendStringInfoString(buffer, ") "); + + if (indexStmt->options != NIL) + { + appendStringInfoString(buffer, "WITH "); + AppendOptionListToString(buffer, indexStmt->options); + } + + if (indexStmt->whereClause != NULL) + { + appendStringInfo(buffer, "WHERE %s", deparse_expression(indexStmt->whereClause, + deparseContext, false, + false)); + } + + return buffer->data; +} + + /* * pg_get_indexclusterdef_string returns the definition of a cluster statement * for given index. The function returns null if the table is not clustered on diff --git a/src/include/distributed/citus_ruleutils.h b/src/include/distributed/citus_ruleutils.h index b64e1a0e8..a6f7d2b6d 100644 --- a/src/include/distributed/citus_ruleutils.h +++ b/src/include/distributed/citus_ruleutils.h @@ -32,6 +32,8 @@ extern char * pg_get_sequencedef_string(Oid sequenceRelid); extern Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId); extern char * pg_get_tableschemadef_string(Oid tableRelationId, bool forShardCreation); extern char * pg_get_tablecolumnoptionsdef_string(Oid tableRelationId); +extern char * deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 + shardid, StringInfo buffer); extern char * pg_get_indexclusterdef_string(Oid indexRelationId); extern List * pg_get_table_grants(Oid relationId); From 0b6c4e756e21587328b5f62da69f166307e033cf Mon Sep 17 00:00:00 2001 From: Jason Petersen Date: Tue, 21 Mar 2017 17:38:16 -0600 Subject: [PATCH 2/8] Change DropStmt to generate worker DDL on master Because we can't execute DROP INDEX CONCURRENTLY during transactions, worker_apply_shard_ddl_command is insufficient. --- .../distributed/executor/multi_utility.c | 60 ++++++++++++++++++- 1 file changed, 59 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/executor/multi_utility.c b/src/backend/distributed/executor/multi_utility.c index c1f9a8027..bb51cc480 100644 --- a/src/backend/distributed/executor/multi_utility.c +++ b/src/backend/distributed/executor/multi_utility.c @@ -135,6 +135,7 @@ static void ExecuteDistributedDDLJob(DDLJob *ddlJob); static void ShowNoticeIfNotUsing2PC(void); static List * DDLTaskList(Oid relationId, const char *commandString); static List * IndexTaskList(Oid relationId, IndexStmt *indexStmt); +static List * DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt); static List * ForeignKeyTaskList(Oid leftRelationId, Oid rightRelationId, const char *commandString); static void RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, @@ -772,7 +773,8 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand) ddlJob->targetRelationId = distributedRelationId; ddlJob->commandString = dropIndexCommand; - ddlJob->taskList = DDLTaskList(distributedRelationId, dropIndexCommand); + ddlJob->taskList = DropIndexTaskList(distributedRelationId, distributedIndexId, + dropIndexStatement); ddlJobs = list_make1(ddlJob); } @@ -2126,6 +2128,62 @@ IndexTaskList(Oid relationId, IndexStmt *indexStmt) } +/* + * DDLTaskList builds a list of tasks to execute a DDL command on a + * given list of shards. 
+ */ +static List * +DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt) +{ + List *taskList = NIL; + List *shardIntervalList = LoadShardIntervalList(relationId); + ListCell *shardIntervalCell = NULL; + char *indexName = get_rel_name(indexId); + Oid schemaId = get_rel_namespace(indexId); + char *schemaName = get_namespace_name(schemaId); + StringInfoData ddlString; + uint64 jobId = INVALID_JOB_ID; + int taskId = 1; + + initStringInfo(&ddlString); + + /* lock metadata before getting placement lists */ + LockShardListMetadata(shardIntervalList, ShareLock); + + foreach(shardIntervalCell, shardIntervalList) + { + ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); + uint64 shardId = shardInterval->shardId; + char *shardIndexName = pstrdup(indexName); + Task *task = NULL; + + AppendShardIdToName(&shardIndexName, shardId); + + appendStringInfo(&ddlString, "DROP INDEX %s %s %s %s", + (dropStmt->concurrent ? "CONCURRENTLY" : ""), + (dropStmt->missing_ok ? "IF EXISTS" : ""), + quote_qualified_identifier(schemaName, shardIndexName), + (dropStmt->behavior == DROP_RESTRICT ? "RESTRICT" : "CASCADE")); + + task = CitusMakeNode(Task); + task->jobId = jobId; + task->taskId = taskId++; + task->taskType = DDL_TASK; + task->queryString = pstrdup(ddlString.data); + task->replicationModel = REPLICATION_MODEL_INVALID; + task->dependedTaskList = NULL; + task->anchorShardId = shardId; + task->taskPlacementList = FinalizedShardPlacementList(shardId); + + taskList = lappend(taskList, task); + + resetStringInfo(&ddlString); + } + + return taskList; +} + + /* * ForeignKeyTaskList builds a list of tasks to execute a foreign key command on a * shards of given list of distributed table. From dea6c44f75a769ac88d6207ea7e11c7922c73edc Mon Sep 17 00:00:00 2001 From: Jason Petersen Date: Tue, 21 Mar 2017 22:46:51 -0600 Subject: [PATCH 3/8] Remove CONCURRENTLY checks, fix tests Still pending failure testing, which broke with my recent changes. --- .../executor/multi_router_executor.c | 17 +++++++++ .../distributed/executor/multi_utility.c | 35 +++++++++++-------- .../distributed/multi_router_executor.h | 1 + src/include/distributed/multi_utility.h | 1 + .../expected/multi_index_statements.out | 17 ++++----- .../regress/sql/multi_index_statements.sql | 10 +++--- 6 files changed, 54 insertions(+), 27 deletions(-) diff --git a/src/backend/distributed/executor/multi_router_executor.c b/src/backend/distributed/executor/multi_router_executor.c index 9a26090f8..4dae254c8 100644 --- a/src/backend/distributed/executor/multi_router_executor.c +++ b/src/backend/distributed/executor/multi_router_executor.c @@ -869,6 +869,23 @@ ExecuteModifyTasksWithoutResults(List *taskList) } +int64 +ExecuteSequentialTasksWithoutResults(List *taskList) +{ + ListCell *taskCell = NULL; + + foreach(taskCell, taskList) + { + Task *task = (Task *) lfirst(taskCell); + List *singleTask = list_make1(task); + + ExecuteModifyTasks(singleTask, false, NULL, NULL); + } + + return 0; +} + + /* * ExecuteModifyTasks executes a list of tasks on remote nodes, and * optionally retrieves the results and stores them in a tuple store. 
diff --git a/src/backend/distributed/executor/multi_utility.c b/src/backend/distributed/executor/multi_utility.c index bb51cc480..95b0be922 100644 --- a/src/backend/distributed/executor/multi_utility.c +++ b/src/backend/distributed/executor/multi_utility.c @@ -679,6 +679,7 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand) { DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = relationId; + ddlJob->preventTransaction = createIndexStatement->concurrent; ddlJob->commandString = createIndexCommand; ddlJob->taskList = IndexTaskList(relationId, createIndexStatement); @@ -772,6 +773,7 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand) ErrorIfUnsupportedDropIndexStmt(dropIndexStatement); ddlJob->targetRelationId = distributedRelationId; + ddlJob->preventTransaction = dropIndexStatement->concurrent; ddlJob->commandString = dropIndexCommand; ddlJob->taskList = DropIndexTaskList(distributedRelationId, distributedIndexId, dropIndexStatement); @@ -866,6 +868,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = leftRelationId; + ddlJob->preventTransaction = false; ddlJob->commandString = alterTableCommand; if (rightRelationId) @@ -1271,13 +1274,6 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement) "currently unsupported"))); } - if (createIndexStatement->concurrent) - { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("creating indexes concurrently on distributed tables is " - "currently unsupported"))); - } - if (createIndexStatement->unique) { RangeVar *relation = createIndexStatement->relation; @@ -1355,13 +1351,6 @@ ErrorIfUnsupportedDropIndexStmt(DropStmt *dropIndexStatement) errhint("Try dropping each object in a separate DROP " "command."))); } - - if (dropIndexStatement->concurrent) - { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("dropping indexes concurrently on distributed tables is " - "currently unsupported"))); - } } @@ -1994,13 +1983,28 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) EnsureCoordinator(); ShowNoticeIfNotUsing2PC(); + if (ddlJob->preventTransaction) + { + /* save old commit protocol to restore at xact end */ + Assert(SavedMultiShardCommitProtocol == COMMIT_PROTOCOL_BARE); + SavedMultiShardCommitProtocol = MultiShardCommitProtocol; + MultiShardCommitProtocol = COMMIT_PROTOCOL_BARE; + } + if (shouldSyncMetadata) { SendCommandToWorkers(WORKERS_WITH_METADATA, DISABLE_DDL_PROPAGATION); SendCommandToWorkers(WORKERS_WITH_METADATA, (char *) ddlJob->commandString); } - ExecuteModifyTasksWithoutResults(ddlJob->taskList); + if (ddlJob->preventTransaction) + { + ExecuteSequentialTasksWithoutResults(ddlJob->taskList); + } + else + { + ExecuteModifyTasksWithoutResults(ddlJob->taskList); + } } @@ -2608,6 +2612,7 @@ PlanGrantStmt(GrantStmt *grantStmt) ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = relOid; + ddlJob->preventTransaction = false; ddlJob->commandString = pstrdup(ddlString.data); ddlJob->taskList = DDLTaskList(relOid, ddlString.data); diff --git a/src/include/distributed/multi_router_executor.h b/src/include/distributed/multi_router_executor.h index 8c9eafb7d..389e868c2 100644 --- a/src/include/distributed/multi_router_executor.h +++ b/src/include/distributed/multi_router_executor.h @@ -41,6 +41,7 @@ extern TupleTableSlot * RouterSelectExecScan(CustomScanState *node); extern TupleTableSlot * RouterMultiModifyExecScan(CustomScanState *node); extern 
int64 ExecuteModifyTasksWithoutResults(List *taskList); +extern int64 ExecuteSequentialTasksWithoutResults(List *taskList); #endif /* MULTI_ROUTER_EXECUTOR_H_ */ diff --git a/src/include/distributed/multi_utility.h b/src/include/distributed/multi_utility.h index 143df5d65..00ebc1062 100644 --- a/src/include/distributed/multi_utility.h +++ b/src/include/distributed/multi_utility.h @@ -23,6 +23,7 @@ extern bool EnableDDLPropagation; typedef struct DDLJob { Oid targetRelationId; /* oid of the target distributed relation */ + bool preventTransaction; const char *commandString; /* initial (coordinator) DDL command string */ List *taskList; /* worker DDL tasks to execute */ } DDLJob; diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index d8c1f58ed..dadd7d52c 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -91,6 +91,8 @@ CREATE INDEX lineitem_orderkey_index on index_test_hash(a); ERROR: relation "lineitem_orderkey_index" already exists CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on index_test_hash(a); NOTICE: relation "lineitem_orderkey_index" already exists, skipping +-- Verify that we can create indexes concurrently +CREATE INDEX CONCURRENTLY lineitem_concurrently_index ON lineitem (l_orderkey); -- Verify that all indexes got created on the master node and one of the workers SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; schemaname | tablename | indexname | tablespace | indexdef @@ -102,6 +104,7 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range USING btree (a, b) public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range USING btree (a, b) WHERE (c IS NOT NULL) public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON lineitem USING btree (record_ne(lineitem.*, NULL::record)) + public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON lineitem USING hash (l_partkey) public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON lineitem USING btree (l_orderkey) @@ -109,13 +112,13 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON lineitem USING btree (l_partkey DESC) public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON lineitem USING btree (l_orderkey, l_linenumber) public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON lineitem USING btree (l_shipdate) -(14 rows) +(15 rows) \c - - - :worker_1_port SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1); count ------- - 8 + 9 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%'; @@ -138,8 +141,6 @@ SELECT count(*) FROM pg_indexes WHERE tablename LIKE 
'index_test_append%'; \c - - - :master_port -- Verify that we error out on unsupported statement types -CREATE INDEX CONCURRENTLY try_index ON lineitem (l_orderkey); -ERROR: creating indexes concurrently on distributed tables is currently unsupported CREATE UNIQUE INDEX try_index ON lineitem (l_orderkey); ERROR: creating unique indexes on append-partitioned tables is currently unsupported CREATE INDEX try_index ON lineitem (l_orderkey) TABLESPACE newtablespace; @@ -180,6 +181,7 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range USING btree (a, b) public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range USING btree (a, b) WHERE (c IS NOT NULL) public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON lineitem USING btree (record_ne(lineitem.*, NULL::record)) + public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON lineitem USING hash (l_partkey) public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON lineitem USING btree (l_orderkey) @@ -187,7 +189,7 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON lineitem USING btree (l_partkey DESC) public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON lineitem USING btree (l_orderkey, l_linenumber) public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON lineitem USING btree (l_shipdate) -(14 rows) +(15 rows) -- -- DROP INDEX @@ -196,9 +198,6 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t DROP INDEX lineitem_orderkey_index, lineitem_partial_index; ERROR: cannot drop multiple distributed objects in a single command HINT: Try dropping each object in a separate DROP command. --- Verify that we error out on the CONCURRENTLY clause -DROP INDEX CONCURRENTLY lineitem_orderkey_index; -ERROR: dropping indexes concurrently on distributed tables is currently unsupported -- Verify that we can succesfully drop indexes DROP INDEX lineitem_orderkey_index; NOTICE: using one-phase commit for distributed DDL commands @@ -221,6 +220,8 @@ DROP INDEX index_test_range_index_a_b_partial; DROP INDEX index_test_hash_index_a; DROP INDEX index_test_hash_index_a_b; DROP INDEX index_test_hash_index_a_b_partial; +-- Verify that we can drop indexes concurrently +DROP INDEX CONCURRENTLY lineitem_concurrently_index; -- Verify that all the indexes are dropped from the master and one worker node. -- As there's a primary key, so exclude those from this check. 
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; diff --git a/src/test/regress/sql/multi_index_statements.sql b/src/test/regress/sql/multi_index_statements.sql index 58e52a20f..178ab92cd 100644 --- a/src/test/regress/sql/multi_index_statements.sql +++ b/src/test/regress/sql/multi_index_statements.sql @@ -64,6 +64,9 @@ CREATE INDEX IF NOT EXISTS lineitem_orderkey_index_new on lineitem(l_orderkey); CREATE INDEX lineitem_orderkey_index on index_test_hash(a); CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on index_test_hash(a); +-- Verify that we can create indexes concurrently +CREATE INDEX CONCURRENTLY lineitem_concurrently_index ON lineitem (l_orderkey); + -- Verify that all indexes got created on the master node and one of the workers SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; \c - - - :worker_1_port @@ -75,7 +78,6 @@ SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%'; -- Verify that we error out on unsupported statement types -CREATE INDEX CONCURRENTLY try_index ON lineitem (l_orderkey); CREATE UNIQUE INDEX try_index ON lineitem (l_orderkey); CREATE INDEX try_index ON lineitem (l_orderkey) TABLESPACE newtablespace; @@ -105,9 +107,6 @@ SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_t -- Verify that we can't drop multiple indexes in a single command DROP INDEX lineitem_orderkey_index, lineitem_partial_index; --- Verify that we error out on the CONCURRENTLY clause -DROP INDEX CONCURRENTLY lineitem_orderkey_index; - -- Verify that we can succesfully drop indexes DROP INDEX lineitem_orderkey_index; DROP INDEX lineitem_orderkey_index_new; @@ -130,6 +129,9 @@ DROP INDEX index_test_hash_index_a; DROP INDEX index_test_hash_index_a_b; DROP INDEX index_test_hash_index_a_b_partial; +-- Verify that we can drop indexes concurrently +DROP INDEX CONCURRENTLY lineitem_concurrently_index; + -- Verify that all the indexes are dropped from the master and one worker node. -- As there's a primary key, so exclude those from this check. SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; From 32886e97a32ef27b307e91765a3f47b36ef7eeb6 Mon Sep 17 00:00:00 2001 From: Jason Petersen Date: Wed, 29 Mar 2017 23:29:14 -0600 Subject: [PATCH 4/8] Add code to set index validity on failure Coordinator code marks index as invalid as a base, set it as valid in a transactional layer atop that base, then proceeds with worker commands. If a worker command has problems, the rollback results in an index with isvalid = false. If everything succeeds, the user sees a valid index. 
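In user-visible terms, the invalid-base-plus-transactional-valid approach described above is intended to behave as sketched below (based on the regression test added in this patch; the shard-level index name comes from that test):

    -- a conflicting shard index on a worker makes the distributed build fail:
    CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);
    -- ERROR:  relation "ith_b_idx_102089" already exists

    -- the transactional "valid" update rolls back, leaving an INVALID index:
    SELECT indisvalid FROM pg_index WHERE indexrelid = 'ith_b_idx'::regclass;
    -- f

    -- clean up and retry once the conflict is resolved:
    DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx;
    CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);
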
--- .../distributed/executor/multi_utility.c | 87 ++++++++++++++++++- .../expected/multi_index_statements.out | 38 ++++++++ .../regress/sql/multi_index_statements.sql | 29 +++++++ 3 files changed, 153 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/executor/multi_utility.c b/src/backend/distributed/executor/multi_utility.c index 95b0be922..4b0aad2ec 100644 --- a/src/backend/distributed/executor/multi_utility.c +++ b/src/backend/distributed/executor/multi_utility.c @@ -24,6 +24,7 @@ #include "catalog/catalog.h" #include "catalog/dependency.h" #include "catalog/index.h" +#include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_attribute.h" #include "catalog/pg_class.h" @@ -76,6 +77,7 @@ #include "utils/palloc.h" #include "utils/rel.h" #include "utils/relcache.h" +#include "utils/snapmgr.h" #include "utils/syscache.h" @@ -142,6 +144,7 @@ static void RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid ol void *arg); static void CheckCopyPermissions(CopyStmt *copyStatement); static List * CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist); +static void PostProcessUtility(Node *parsetree); static bool warnedUserAbout2PC = false; @@ -369,6 +372,8 @@ multi_ProcessUtility(Node *parsetree, standard_ProcessUtility(parsetree, queryString, context, params, dest, completionTag); + PostProcessUtility(parsetree); + if (commandMustRunAsOwner) { SetUserIdAndSecContext(savedUserId, savedSecurityContext); @@ -1981,7 +1986,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) } EnsureCoordinator(); - ShowNoticeIfNotUsing2PC(); if (ddlJob->preventTransaction) { @@ -1990,6 +1994,10 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) SavedMultiShardCommitProtocol = MultiShardCommitProtocol; MultiShardCommitProtocol = COMMIT_PROTOCOL_BARE; } + else + { + ShowNoticeIfNotUsing2PC(); + } if (shouldSyncMetadata) { @@ -2474,6 +2482,83 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) } +/* + * PostProcessUtility performs additional tasks after a utility's local portion + * has been completed. Right now, the sole use is marking new indexes invalid + * if they were created using the CONCURRENTLY flag. This (non-transactional) + * change provides the fallback state if an error is raised, otherwise a sub- + * sequent change to valid will be committed. 
+ */ +static void +PostProcessUtility(Node *parsetree) +{ + IndexStmt *indexStmt = NULL; + Relation relation = NULL; + Oid indexRelationId = InvalidOid; + Relation indexRelation = NULL; + Relation pg_index = NULL; + HeapTuple indexTuple = NULL; + Form_pg_index indexForm = NULL; + + /* only IndexStmts are processed */ + if (!IsA(parsetree, IndexStmt)) + { + return; + } + + /* and even then only if they're CONCURRENT */ + indexStmt = (IndexStmt *) parsetree; + if (!indexStmt->concurrent) + { + return; + } + + /* finally, this logic only applies to the coordinator */ + if (!IsCoordinator()) + { + return; + } + + /* commit the current transaction and start anew */ + CommitTransactionCommand(); + StartTransactionCommand(); + + /* get the affected relation and index */ + relation = heap_openrv(indexStmt->relation, ShareUpdateExclusiveLock); + indexRelationId = get_relname_relid(indexStmt->idxname, + RelationGetNamespace(relation)); + indexRelation = index_open(indexRelationId, RowExclusiveLock); + + /* close relations but retain locks */ + heap_close(relation, NoLock); + index_close(indexRelation, NoLock); + + /* mark index as invalid, in-place (cannot be rolled back) */ + index_set_state_flags(indexRelationId, INDEX_DROP_CLEAR_VALID); + + /* re-open a transaction command from here on out */ + CommitTransactionCommand(); + StartTransactionCommand(); + + /* now, update index's validity in a way that can roll back */ + pg_index = heap_open(IndexRelationId, RowExclusiveLock); + + indexTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(indexRelationId)); + Assert(HeapTupleIsValid(indexTuple)); /* better be present, we have lock! */ + + /* mark as valid, save, and update pg_index indexes */ + indexForm = (Form_pg_index) GETSTRUCT(indexTuple); + indexForm->indisvalid = true; + + simple_heap_update(pg_index, &indexTuple->t_self, indexTuple); + CatalogUpdateIndexes(pg_index, indexTuple); + + /* clean up; index now marked valid, but ROLLBACK will mark invalid */ + heap_freetuple(indexTuple); + heap_close(pg_index, RowExclusiveLock); +} + + /* * PlanGrantStmt determines whether a given GRANT/REVOKE statement involves * a distributed table. If so, it creates DDLJobs to encapsulate information diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index dadd7d52c..8e2be889e 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -245,7 +245,45 @@ SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; ------------+-----------+-----------+------------+---------- (0 rows) +-- create index that will conflict with master operations +CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b); \c - - - :master_port +-- should fail because worker index already exists +CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); +ERROR: relation "ith_b_idx_102089" already exists +CONTEXT: while executing command on localhost:57637 +-- the failure results in an INVALID index +SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; + Index Valid? +-------------- + f +(1 row) + +-- we can clean it up and recreate with an DROP IF EXISTS +DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; +CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); +SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; + Index Valid? 
+-------------- + t +(1 row) + +\c - - - :worker_1_port +-- now drop shard index to test partial master DROP failure +DROP INDEX CONCURRENTLY ith_b_idx_102089; +\c - - - :master_port +DROP INDEX CONCURRENTLY ith_b_idx; +ERROR: index "ith_b_idx_102089" does not exist +CONTEXT: while executing command on localhost:57637 +-- the failure results in an INVALID index +SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; + Index Valid? +-------------- + f +(1 row) + +-- final clean up +DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; -- Drop created tables DROP TABLE index_test_range; DROP TABLE index_test_hash; diff --git a/src/test/regress/sql/multi_index_statements.sql b/src/test/regress/sql/multi_index_statements.sql index 178ab92cd..784a7a988 100644 --- a/src/test/regress/sql/multi_index_statements.sql +++ b/src/test/regress/sql/multi_index_statements.sql @@ -139,8 +139,37 @@ SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; \c - - - :worker_1_port SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; + +-- create index that will conflict with master operations +CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b); + \c - - - :master_port +-- should fail because worker index already exists +CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); + +-- the failure results in an INVALID index +SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; + +-- we can clean it up and recreate with an DROP IF EXISTS +DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; +CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); +SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; + +\c - - - :worker_1_port + +-- now drop shard index to test partial master DROP failure +DROP INDEX CONCURRENTLY ith_b_idx_102089; + +\c - - - :master_port +DROP INDEX CONCURRENTLY ith_b_idx; + +-- the failure results in an INVALID index +SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; + +-- final clean up +DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; + -- Drop created tables DROP TABLE index_test_range; DROP TABLE index_test_hash; From d904e96c596892a96f36722b3d9d61cd7d07066e Mon Sep 17 00:00:00 2001 From: Jason Petersen Date: Thu, 30 Mar 2017 00:38:30 -0600 Subject: [PATCH 5/8] Address MX CONCURRENTLY problems Adds a non-transactional multi-command method to propagate DDLs to all MX/metadata-synced nodes. 
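For metadata-synced (MX) workers the original command can no longer ride along in the 2PC-managed transaction, so it is sent over a fresh autocommit connection instead. Each metadata worker receives roughly the command pair below (a sketch only; it assumes DISABLE_DDL_PROPAGATION expands to the enable_ddl_propagation SET shown here, and uses the index from the MX regression test updated in this patch):

    SET citus.enable_ddl_propagation TO 'off';
    CREATE INDEX CONCURRENTLY ddl_test_concurrent_index ON mx_ddl_table(value);
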
--- .../distributed/executor/multi_utility.c | 27 +++++----- .../transaction/worker_transaction.c | 45 ++++++++++++++++ src/include/distributed/worker_transaction.h | 2 + src/test/regress/expected/multi_mx_ddl.out | 51 +++++++++++++++++-- src/test/regress/sql/multi_mx_ddl.sql | 12 +++-- 5 files changed, 117 insertions(+), 20 deletions(-) diff --git a/src/backend/distributed/executor/multi_utility.c b/src/backend/distributed/executor/multi_utility.c index 4b0aad2ec..70e1e939f 100644 --- a/src/backend/distributed/executor/multi_utility.c +++ b/src/backend/distributed/executor/multi_utility.c @@ -1993,24 +1993,27 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) Assert(SavedMultiShardCommitProtocol == COMMIT_PROTOCOL_BARE); SavedMultiShardCommitProtocol = MultiShardCommitProtocol; MultiShardCommitProtocol = COMMIT_PROTOCOL_BARE; - } - else - { - ShowNoticeIfNotUsing2PC(); - } - if (shouldSyncMetadata) - { - SendCommandToWorkers(WORKERS_WITH_METADATA, DISABLE_DDL_PROPAGATION); - SendCommandToWorkers(WORKERS_WITH_METADATA, (char *) ddlJob->commandString); - } + if (shouldSyncMetadata) + { + List *commandList = list_make2(DISABLE_DDL_PROPAGATION, + (char *) ddlJob->commandString); + + SendBareCommandListToWorkers(WORKERS_WITH_METADATA, commandList); + } - if (ddlJob->preventTransaction) - { ExecuteSequentialTasksWithoutResults(ddlJob->taskList); } else { + ShowNoticeIfNotUsing2PC(); + + if (shouldSyncMetadata) + { + SendCommandToWorkers(WORKERS_WITH_METADATA, DISABLE_DDL_PROPAGATION); + SendCommandToWorkers(WORKERS_WITH_METADATA, (char *) ddlJob->commandString); + } + ExecuteModifyTasksWithoutResults(ddlJob->taskList); } } diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 64f3f6460..cd010523e 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -68,6 +68,51 @@ SendCommandToWorkers(TargetWorkerSet targetWorkerSet, char *command) } +void +SendBareCommandListToWorkers(TargetWorkerSet targetWorkerSet, List *commandList) +{ + List *workerNodeList = WorkerNodeList(); + ListCell *workerNodeCell = NULL; + char *nodeUser = CitusExtensionOwnerName(); + ListCell *commandCell = NULL; + + if (XactModificationLevel > XACT_MODIFICATION_NONE) + { + ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("cannot open new connections after the first modification " + "command within a transaction"))); + } + + /* run commands serially */ + foreach(workerNodeCell, workerNodeList) + { + MultiConnection *workerConnection = NULL; + WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); + char *nodeName = workerNode->workerName; + int nodePort = workerNode->workerPort; + int connectionFlags = FORCE_NEW_CONNECTION; + + if (targetWorkerSet == WORKERS_WITH_METADATA && !workerNode->hasMetadata) + { + continue; + } + + workerConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, + nodePort, nodeUser, NULL); + + /* iterate over the commands and execute them in the same connection */ + foreach(commandCell, commandList) + { + char *commandString = lfirst(commandCell); + + ExecuteCriticalRemoteCommand(workerConnection, commandString); + } + + CloseConnection(workerConnection); + } +} + + /* * SendCommandToWorkersParams sends a command to all workers in parallel. * Commands are committed on the workers when the local transaction commits. 
The diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h index 871c1ac4c..a63f12a78 100644 --- a/src/include/distributed/worker_transaction.h +++ b/src/include/distributed/worker_transaction.h @@ -30,6 +30,8 @@ typedef enum TargetWorkerSet extern List * GetWorkerTransactions(void); extern void SendCommandToWorker(char *nodeName, int32 nodePort, char *command); extern void SendCommandToWorkers(TargetWorkerSet targetWorkerSet, char *command); +extern void SendBareCommandListToWorkers(TargetWorkerSet targetWorkerSet, + List *commandList); extern void SendCommandToWorkersParams(TargetWorkerSet targetWorkerSet, char *command, int parameterCount, const Oid *parameterTypes, const char *const *parameterValues); diff --git a/src/test/regress/expected/multi_mx_ddl.out b/src/test/regress/expected/multi_mx_ddl.out index df9d775ae..2572e532f 100644 --- a/src/test/regress/expected/multi_mx_ddl.out +++ b/src/test/regress/expected/multi_mx_ddl.out @@ -18,6 +18,7 @@ SELECT * FROM mx_ddl_table ORDER BY key; CREATE INDEX ddl_test_index ON mx_ddl_table(value); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' +CREATE INDEX CONCURRENTLY ddl_test_concurrent_index ON mx_ddl_table(value); -- ADD COLUMN ALTER TABLE mx_ddl_table ADD COLUMN version INTEGER; -- SET DEFAULT @@ -40,6 +41,7 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL; version | integer | not null default 1 Indexes: "mx_ddl_table_pkey" PRIMARY KEY, btree (key) + "ddl_test_concurrent_index" btree (value) "ddl_test_index" btree (value) \c - - - :worker_1_port @@ -52,9 +54,21 @@ Indexes: version | integer | not null default 1 Indexes: "mx_ddl_table_pkey" PRIMARY KEY, btree (key) + "ddl_test_concurrent_index" btree (value) "ddl_test_index" btree (value) -\d mx_ddl_table_1600000 +\d mx_ddl_table_1220088 + Table "public.mx_ddl_table_1220088" + Column | Type | Modifiers +---------+---------+-------------------- + key | integer | not null + value | integer | + version | integer | not null default 1 +Indexes: + "mx_ddl_table_pkey_1220088" PRIMARY KEY, btree (key) + "ddl_test_concurrent_index_1220088" btree (value) + "ddl_test_index_1220088" btree (value) + \c - - - :worker_2_port \d mx_ddl_table Table "public.mx_ddl_table" @@ -65,9 +79,21 @@ Indexes: version | integer | not null default 1 Indexes: "mx_ddl_table_pkey" PRIMARY KEY, btree (key) + "ddl_test_concurrent_index" btree (value) "ddl_test_index" btree (value) -\d mx_ddl_table_1600001 +\d mx_ddl_table_1220089 + Table "public.mx_ddl_table_1220089" + Column | Type | Modifiers +---------+---------+-------------------- + key | integer | not null + value | integer | + version | integer | not null default 1 +Indexes: + "mx_ddl_table_pkey_1220089" PRIMARY KEY, btree (key) + "ddl_test_concurrent_index_1220089" btree (value) + "ddl_test_index_1220089" btree (value) + INSERT INTO mx_ddl_table VALUES (37, 78, 2); INSERT INTO mx_ddl_table VALUES (38, 78); -- Switch to the coordinator @@ -100,6 +126,7 @@ SELECT * FROM mx_ddl_table ORDER BY key; DROP INDEX ddl_test_index; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' +DROP INDEX CONCURRENTLY ddl_test_concurrent_index; -- DROP DEFAULT ALTER TABLE mx_ddl_table ALTER COLUMN version DROP DEFAULT; -- DROP NOT NULL @@ -126,7 +153,15 @@ Indexes: Indexes: "mx_ddl_table_pkey" 
PRIMARY KEY, btree (key) -\d mx_ddl_table_1600000 +\d mx_ddl_table_1220088 +Table "public.mx_ddl_table_1220088" + Column | Type | Modifiers +--------+---------+----------- + key | integer | not null + value | integer | +Indexes: + "mx_ddl_table_pkey_1220088" PRIMARY KEY, btree (key) + \c - - - :worker_2_port \d mx_ddl_table Table "public.mx_ddl_table" @@ -137,7 +172,15 @@ Indexes: Indexes: "mx_ddl_table_pkey" PRIMARY KEY, btree (key) -\d mx_ddl_table_1600001 +\d mx_ddl_table_1220089 +Table "public.mx_ddl_table_1220089" + Column | Type | Modifiers +--------+---------+----------- + key | integer | not null + value | integer | +Indexes: + "mx_ddl_table_pkey_1220089" PRIMARY KEY, btree (key) + -- Show that DDL commands are done within a two-phase commit transaction \c - - - :master_port SET client_min_messages TO debug2; diff --git a/src/test/regress/sql/multi_mx_ddl.sql b/src/test/regress/sql/multi_mx_ddl.sql index 0afa624fd..f0dd5cacf 100644 --- a/src/test/regress/sql/multi_mx_ddl.sql +++ b/src/test/regress/sql/multi_mx_ddl.sql @@ -8,6 +8,8 @@ SELECT * FROM mx_ddl_table ORDER BY key; -- CREATE INDEX CREATE INDEX ddl_test_index ON mx_ddl_table(value); +CREATE INDEX CONCURRENTLY ddl_test_concurrent_index ON mx_ddl_table(value); + -- ADD COLUMN ALTER TABLE mx_ddl_table ADD COLUMN version INTEGER; @@ -27,13 +29,13 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL; \d mx_ddl_table -\d mx_ddl_table_1600000 +\d mx_ddl_table_1220088 \c - - - :worker_2_port \d mx_ddl_table -\d mx_ddl_table_1600001 +\d mx_ddl_table_1220089 INSERT INTO mx_ddl_table VALUES (37, 78, 2); INSERT INTO mx_ddl_table VALUES (38, 78); @@ -56,6 +58,8 @@ SELECT * FROM mx_ddl_table ORDER BY key; -- DROP INDEX DROP INDEX ddl_test_index; +DROP INDEX CONCURRENTLY ddl_test_concurrent_index; + -- DROP DEFAULT ALTER TABLE mx_ddl_table ALTER COLUMN version DROP DEFAULT; @@ -73,13 +77,13 @@ ALTER TABLE mx_ddl_table DROP COLUMN version; \d mx_ddl_table -\d mx_ddl_table_1600000 +\d mx_ddl_table_1220088 \c - - - :worker_2_port \d mx_ddl_table -\d mx_ddl_table_1600001 +\d mx_ddl_table_1220089 -- Show that DDL commands are done within a two-phase commit transaction \c - - - :master_port From dd9365433e524bc97df57d77131593fef2d25f44 Mon Sep 17 00:00:00 2001 From: Jason Petersen Date: Thu, 30 Mar 2017 00:54:49 -0600 Subject: [PATCH 6/8] Update documentation Ensure all functions have comments, etc. --- src/backend/distributed/executor/multi_router_executor.c | 6 ++++++ src/backend/distributed/executor/multi_utility.c | 9 +++++---- src/backend/distributed/transaction/worker_transaction.c | 7 +++++++ src/backend/distributed/utils/citus_ruleutils.c | 9 ++++++--- src/include/distributed/citus_ruleutils.h | 8 ++++---- src/include/distributed/multi_utility.h | 2 +- 6 files changed, 29 insertions(+), 12 deletions(-) diff --git a/src/backend/distributed/executor/multi_router_executor.c b/src/backend/distributed/executor/multi_router_executor.c index 4dae254c8..b377d9ba6 100644 --- a/src/backend/distributed/executor/multi_router_executor.c +++ b/src/backend/distributed/executor/multi_router_executor.c @@ -869,6 +869,12 @@ ExecuteModifyTasksWithoutResults(List *taskList) } +/* + * ExecuteSequentialTasksWithoutResults basically calls ExecuteModifyTasks in a + * loop in order to simulate sequential execution of a list of tasks. Useful in + * cases where issuing commands in parallel before waiting for results could + * result in deadlocks (such as CREATE INDEX CONCURRENTLY). 
+ */ int64 ExecuteSequentialTasksWithoutResults(List *taskList) { diff --git a/src/backend/distributed/executor/multi_utility.c b/src/backend/distributed/executor/multi_utility.c index 70e1e939f..eece32ed2 100644 --- a/src/backend/distributed/executor/multi_utility.c +++ b/src/backend/distributed/executor/multi_utility.c @@ -2090,8 +2090,8 @@ DDLTaskList(Oid relationId, const char *commandString) /* - * DDLTaskList builds a list of tasks to execute a DDL command on a - * given list of shards. + * IndexTaskList builds a list of tasks to execute a CREATE INDEX command + * against a specified distributed table. */ static List * IndexTaskList(Oid relationId, IndexStmt *indexStmt) @@ -2144,8 +2144,8 @@ IndexTaskList(Oid relationId, IndexStmt *indexStmt) /* - * DDLTaskList builds a list of tasks to execute a DDL command on a - * given list of shards. + * DropIndexTaskList builds a list of tasks to execute a DROP INDEX command + * against a specified distributed table. */ static List * DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt) @@ -2174,6 +2174,7 @@ DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt) AppendShardIdToName(&shardIndexName, shardId); + /* deparse shard-specific DROP INDEX command */ appendStringInfo(&ddlString, "DROP INDEX %s %s %s %s", (dropStmt->concurrent ? "CONCURRENTLY" : ""), (dropStmt->missing_ok ? "IF EXISTS" : ""), diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index cd010523e..c792c6f9c 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -68,6 +68,13 @@ SendCommandToWorkers(TargetWorkerSet targetWorkerSet, char *command) } +/* + * SendBareCommandListToWorkers sends a list of commands to a set of target + * workers in serial. Commands are committed immediately: new connections are + * always used and no transaction block is used (hence "bare"). The connections + * are made as the extension owner to ensure write access to the Citus metadata + * tables. Primarly useful for INDEX commands using CONCURRENTLY. + */ void SendBareCommandListToWorkers(TargetWorkerSet targetWorkerSet, List *commandList) { diff --git a/src/backend/distributed/utils/citus_ruleutils.c b/src/backend/distributed/utils/citus_ruleutils.c index df2ac1929..0da28da53 100644 --- a/src/backend/distributed/utils/citus_ruleutils.c +++ b/src/backend/distributed/utils/citus_ruleutils.c @@ -590,7 +590,12 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) } -char * +/* + * deparse_shard_index_statement uses the provided CREATE INDEX node, dist. + * relation, and shard identifier to populate a provided buffer with a string + * representation of a shard-extended version of that command. 
+ */ +void deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid, StringInfo buffer) { @@ -680,8 +685,6 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid, deparseContext, false, false)); } - - return buffer->data; } diff --git a/src/include/distributed/citus_ruleutils.h b/src/include/distributed/citus_ruleutils.h index a6f7d2b6d..e7eaf9204 100644 --- a/src/include/distributed/citus_ruleutils.h +++ b/src/include/distributed/citus_ruleutils.h @@ -32,15 +32,15 @@ extern char * pg_get_sequencedef_string(Oid sequenceRelid); extern Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId); extern char * pg_get_tableschemadef_string(Oid tableRelationId, bool forShardCreation); extern char * pg_get_tablecolumnoptionsdef_string(Oid tableRelationId); -extern char * deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 - shardid, StringInfo buffer); +extern void deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, + int64 shardid, StringInfo buffer); extern char * pg_get_indexclusterdef_string(Oid indexRelationId); extern List * pg_get_table_grants(Oid relationId); /* Function declarations for version dependent PostgreSQL ruleutils functions */ extern void pg_get_query_def(Query *query, StringInfo buffer); -extern void deparse_shard_query(Query *query, Oid distrelid, int64 shardid, StringInfo - buffer); +extern void deparse_shard_query(Query *query, Oid distrelid, int64 shardid, + StringInfo buffer); extern char * generate_relation_name(Oid relid, List *namespaces); extern char * generate_qualified_relation_name(Oid relid); diff --git a/src/include/distributed/multi_utility.h b/src/include/distributed/multi_utility.h index 00ebc1062..e6a4dfacb 100644 --- a/src/include/distributed/multi_utility.h +++ b/src/include/distributed/multi_utility.h @@ -23,7 +23,7 @@ extern bool EnableDDLPropagation; typedef struct DDLJob { Oid targetRelationId; /* oid of the target distributed relation */ - bool preventTransaction; + bool preventTransaction; /* prevent use of worker transactions? */ const char *commandString; /* initial (coordinator) DDL command string */ List *taskList; /* worker DDL tasks to execute */ } DDLJob; From cf775c47735b0ec19de3d0a4bad33c1222d9f437 Mon Sep 17 00:00:00 2001 From: Jason Petersen Date: Thu, 30 Mar 2017 01:13:39 -0600 Subject: [PATCH 7/8] Improve CONCURRENTLY-related error messages Thought this looked slightly nicer than the default behavior. Changed preventTransaction to concurrent to be clearer that this code path presently affects CONCURRENTLY code only. 
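The effect on the error a user sees is easiest to show with the regression expectations this patch updates; previously the raw remote error surfaced, whereas now a single Citus-level error is raised:

    CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);

    -- previously reported:
    --   ERROR:  relation "ith_b_idx_102089" already exists
    --   CONTEXT:  while executing command on localhost:57637

    -- with this patch:
    --   ERROR:  CONCURRENTLY-enabled index command failed
    --   DETAIL:  CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
    --   HINT:  Use DROP INDEX IF EXISTS to remove the invalid index, then retry the original command.
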
---
 .../distributed/executor/multi_utility.c | 57 ++++++++++++-------
 src/include/distributed/multi_utility.h  |  2 +-
 .../expected/multi_index_statements.out  | 10 ++--
 3 files changed, 42 insertions(+), 27 deletions(-)

diff --git a/src/backend/distributed/executor/multi_utility.c b/src/backend/distributed/executor/multi_utility.c
index eece32ed2..485ff2da7 100644
--- a/src/backend/distributed/executor/multi_utility.c
+++ b/src/backend/distributed/executor/multi_utility.c
@@ -684,7 +684,7 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand)
 	{
 		DDLJob *ddlJob = palloc0(sizeof(DDLJob));
 		ddlJob->targetRelationId = relationId;
-		ddlJob->preventTransaction = createIndexStatement->concurrent;
+		ddlJob->concurrent = createIndexStatement->concurrent;
 		ddlJob->commandString = createIndexCommand;
 		ddlJob->taskList = IndexTaskList(relationId, createIndexStatement);
 
@@ -778,7 +778,7 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand)
 		ErrorIfUnsupportedDropIndexStmt(dropIndexStatement);
 
 		ddlJob->targetRelationId = distributedRelationId;
-		ddlJob->preventTransaction = dropIndexStatement->concurrent;
+		ddlJob->concurrent = dropIndexStatement->concurrent;
 		ddlJob->commandString = dropIndexCommand;
 		ddlJob->taskList = DropIndexTaskList(distributedRelationId, distributedIndexId,
 											 dropIndexStatement);
@@ -873,7 +873,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo
 
 	ddlJob = palloc0(sizeof(DDLJob));
 	ddlJob->targetRelationId = leftRelationId;
-	ddlJob->preventTransaction = false;
+	ddlJob->concurrent = false;
 	ddlJob->commandString = alterTableCommand;
 
 	if (rightRelationId)
@@ -1987,24 +1987,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 
 	EnsureCoordinator();
 
-	if (ddlJob->preventTransaction)
-	{
-		/* save old commit protocol to restore at xact end */
-		Assert(SavedMultiShardCommitProtocol == COMMIT_PROTOCOL_BARE);
-		SavedMultiShardCommitProtocol = MultiShardCommitProtocol;
-		MultiShardCommitProtocol = COMMIT_PROTOCOL_BARE;
-
-		if (shouldSyncMetadata)
-		{
-			List *commandList = list_make2(DISABLE_DDL_PROPAGATION,
-										   (char *) ddlJob->commandString);
-
-			SendBareCommandListToWorkers(WORKERS_WITH_METADATA, commandList);
-		}
-
-		ExecuteSequentialTasksWithoutResults(ddlJob->taskList);
-	}
-	else
+	if (!ddlJob->concurrent)
 	{
 		ShowNoticeIfNotUsing2PC();
 
@@ -2016,6 +1999,36 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 
 		ExecuteModifyTasksWithoutResults(ddlJob->taskList);
 	}
+	else
+	{
+		/* save old commit protocol to restore at xact end */
+		Assert(SavedMultiShardCommitProtocol == COMMIT_PROTOCOL_BARE);
+		SavedMultiShardCommitProtocol = MultiShardCommitProtocol;
+		MultiShardCommitProtocol = COMMIT_PROTOCOL_BARE;
+
+		PG_TRY();
+		{
+			if (shouldSyncMetadata)
+			{
+				List *commandList = list_make2(DISABLE_DDL_PROPAGATION,
+											   (char *) ddlJob->commandString);
+
+				SendBareCommandListToWorkers(WORKERS_WITH_METADATA, commandList);
+			}
+
+			ExecuteSequentialTasksWithoutResults(ddlJob->taskList);
+		}
+		PG_CATCH();
+		{
+			ereport(ERROR,
+					(errmsg("CONCURRENTLY-enabled index command failed"),
+					 errdetail("CONCURRENTLY-enabled index commands can fail partially, "
+							   "leaving behind an INVALID index."),
+					 errhint("Use DROP INDEX IF EXISTS to remove the invalid index, then "
+							 "retry the original command.")));
+		}
+		PG_END_TRY();
+	}
 }
 
@@ -2701,7 +2714,7 @@ PlanGrantStmt(GrantStmt *grantStmt)
 
 		ddlJob = palloc0(sizeof(DDLJob));
 		ddlJob->targetRelationId = relOid;
-		ddlJob->preventTransaction = false;
+		ddlJob->concurrent = false;
 		ddlJob->commandString = pstrdup(ddlString.data);
 		ddlJob->taskList = DDLTaskList(relOid, ddlString.data);
 
diff --git a/src/include/distributed/multi_utility.h b/src/include/distributed/multi_utility.h
index e6a4dfacb..8afad5d54 100644
--- a/src/include/distributed/multi_utility.h
+++ b/src/include/distributed/multi_utility.h
@@ -23,7 +23,7 @@ extern bool EnableDDLPropagation;
 typedef struct DDLJob
 {
 	Oid targetRelationId;      /* oid of the target distributed relation */
-	bool preventTransaction;   /* prevent use of worker transactions? */
+	bool concurrent;           /* related to a CONCURRENTLY index command? */
 	const char *commandString; /* initial (coordinator) DDL command string */
 	List *taskList;            /* worker DDL tasks to execute */
 } DDLJob;
diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out
index 8e2be889e..3cce9ad30 100644
--- a/src/test/regress/expected/multi_index_statements.out
+++ b/src/test/regress/expected/multi_index_statements.out
@@ -250,8 +250,9 @@ CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b);
 \c - - - :master_port
 -- should fail because worker index already exists
 CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);
-ERROR:  relation "ith_b_idx_102089" already exists
-CONTEXT:  while executing command on localhost:57637
+ERROR:  CONCURRENTLY-enabled index command failed
+DETAIL:  CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
+HINT:  Use DROP INDEX IF EXISTS to remove the invalid index, then retry the original command.
 -- the failure results in an INVALID index
 SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
  Index Valid?
@@ -273,8 +274,9 @@ SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::
 DROP INDEX CONCURRENTLY ith_b_idx_102089;
 \c - - - :master_port
 DROP INDEX CONCURRENTLY ith_b_idx;
-ERROR:  index "ith_b_idx_102089" does not exist
-CONTEXT:  while executing command on localhost:57637
+ERROR:  CONCURRENTLY-enabled index command failed
+DETAIL:  CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
+HINT:  Use DROP INDEX IF EXISTS to remove the invalid index, then retry the original command.
 -- the failure results in an INVALID index
 SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
  Index Valid?

From 4cdfc3a10f52e77a6b214a28062a6d7e19ebad36 Mon Sep 17 00:00:00 2001
From: Jason Petersen
Date: Mon, 3 Apr 2017 11:42:57 -0600
Subject: [PATCH 8/8] Address review feedback

Should just about do it.
---
 .../executor/multi_router_executor.c     | 14 +++++-----
 .../distributed/executor/multi_utility.c | 27 +++++++++----------
 .../transaction/worker_transaction.c     |  7 -----
 .../distributed/multi_router_executor.h  |  2 +-
 src/include/distributed/multi_utility.h  |  2 +-
 .../expected/multi_index_statements.out  |  4 +--
 6 files changed, 23 insertions(+), 33 deletions(-)

diff --git a/src/backend/distributed/executor/multi_router_executor.c b/src/backend/distributed/executor/multi_router_executor.c
index b377d9ba6..e546c3311 100644
--- a/src/backend/distributed/executor/multi_router_executor.c
+++ b/src/backend/distributed/executor/multi_router_executor.c
@@ -870,13 +870,13 @@ ExecuteModifyTasksWithoutResults(List *taskList)
 
 
 /*
- * ExecuteSequentialTasksWithoutResults basically calls ExecuteModifyTasks in a
- * loop in order to simulate sequential execution of a list of tasks. Useful in
- * cases where issuing commands in parallel before waiting for results could
+ * ExecuteTasksSequentiallyWithoutResults basically calls ExecuteModifyTasks in
+ * a loop in order to simulate sequential execution of a list of tasks. Useful
+ * in cases where issuing commands in parallel before waiting for results could
 * result in deadlocks (such as CREATE INDEX CONCURRENTLY).
 */
-int64
-ExecuteSequentialTasksWithoutResults(List *taskList)
+void
+ExecuteTasksSequentiallyWithoutResults(List *taskList)
 {
 	ListCell *taskCell = NULL;
 
@@ -885,10 +885,8 @@ ExecuteSequentialTasksWithoutResults(List *taskList)
 		Task *task = (Task *) lfirst(taskCell);
 		List *singleTask = list_make1(task);
 
-		ExecuteModifyTasks(singleTask, false, NULL, NULL);
+		ExecuteModifyTasksWithoutResults(singleTask);
 	}
-
-	return 0;
 }
diff --git a/src/backend/distributed/executor/multi_utility.c b/src/backend/distributed/executor/multi_utility.c
index 485ff2da7..d70fd1930 100644
--- a/src/backend/distributed/executor/multi_utility.c
+++ b/src/backend/distributed/executor/multi_utility.c
@@ -77,7 +77,6 @@
 #include "utils/palloc.h"
 #include "utils/rel.h"
 #include "utils/relcache.h"
-#include "utils/snapmgr.h"
 #include "utils/syscache.h"
 
 
@@ -136,7 +135,7 @@ static bool IsAlterTableRenameStmt(RenameStmt *renameStatement);
 static void ExecuteDistributedDDLJob(DDLJob *ddlJob);
 static void ShowNoticeIfNotUsing2PC(void);
 static List * DDLTaskList(Oid relationId, const char *commandString);
-static List * IndexTaskList(Oid relationId, IndexStmt *indexStmt);
+static List * CreateIndexTaskList(Oid relationId, IndexStmt *indexStmt);
 static List * DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt);
 static List * ForeignKeyTaskList(Oid leftRelationId, Oid rightRelationId,
 								 const char *commandString);
@@ -684,9 +683,9 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand)
 	{
 		DDLJob *ddlJob = palloc0(sizeof(DDLJob));
 		ddlJob->targetRelationId = relationId;
-		ddlJob->concurrent = createIndexStatement->concurrent;
+		ddlJob->concurrentIndexCmd = createIndexStatement->concurrent;
 		ddlJob->commandString = createIndexCommand;
-		ddlJob->taskList = IndexTaskList(relationId, createIndexStatement);
+		ddlJob->taskList = CreateIndexTaskList(relationId, createIndexStatement);
 
 		ddlJobs = list_make1(ddlJob);
 	}
@@ -778,7 +777,7 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand)
 		ErrorIfUnsupportedDropIndexStmt(dropIndexStatement);
 
 		ddlJob->targetRelationId = distributedRelationId;
-		ddlJob->concurrent = dropIndexStatement->concurrent;
+		ddlJob->concurrentIndexCmd = dropIndexStatement->concurrent;
 		ddlJob->commandString = dropIndexCommand;
 		ddlJob->taskList = DropIndexTaskList(distributedRelationId, distributedIndexId,
 											 dropIndexStatement);
@@ -873,7 +872,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo
 
 	ddlJob = palloc0(sizeof(DDLJob));
 	ddlJob->targetRelationId = leftRelationId;
-	ddlJob->concurrent = false;
+	ddlJob->concurrentIndexCmd = false;
 	ddlJob->commandString = alterTableCommand;
 
 	if (rightRelationId)
@@ -1987,7 +1986,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 
 	EnsureCoordinator();
 
-	if (!ddlJob->concurrent)
+	if (!ddlJob->concurrentIndexCmd)
 	{
 		ShowNoticeIfNotUsing2PC();
 
@@ -2008,6 +2007,8 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 
 		PG_TRY();
 		{
+			ExecuteTasksSequentiallyWithoutResults(ddlJob->taskList);
+
 			if (shouldSyncMetadata)
 			{
 				List *commandList = list_make2(DISABLE_DDL_PROPAGATION,
 											   (char *) ddlJob->commandString);
 
 				SendBareCommandListToWorkers(WORKERS_WITH_METADATA, commandList);
 			}
-
-			ExecuteSequentialTasksWithoutResults(ddlJob->taskList);
 		}
 		PG_CATCH();
 		{
@@ -2024,8 +2023,8 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 					(errmsg("CONCURRENTLY-enabled index command failed"),
 					 errdetail("CONCURRENTLY-enabled index commands can fail partially, "
 							   "leaving behind an INVALID index."),
-					 errhint("Use DROP INDEX IF EXISTS to remove the invalid index, then "
-							 "retry the original command.")));
+					 errhint("Use DROP INDEX CONCURRENTLY IF EXISTS to remove the "
+							 "invalid index, then retry the original command.")));
 		}
 		PG_END_TRY();
 	}
@@ -2103,11 +2102,11 @@ DDLTaskList(Oid relationId, const char *commandString)
 
 
 /*
- * IndexTaskList builds a list of tasks to execute a CREATE INDEX command
+ * CreateIndexTaskList builds a list of tasks to execute a CREATE INDEX command
 * against a specified distributed table.
 */
 static List *
-IndexTaskList(Oid relationId, IndexStmt *indexStmt)
+CreateIndexTaskList(Oid relationId, IndexStmt *indexStmt)
 {
 	List *taskList = NIL;
 	List *shardIntervalList = LoadShardIntervalList(relationId);
@@ -2714,7 +2713,7 @@ PlanGrantStmt(GrantStmt *grantStmt)
 
 		ddlJob = palloc0(sizeof(DDLJob));
 		ddlJob->targetRelationId = relOid;
-		ddlJob->concurrent = false;
+		ddlJob->concurrentIndexCmd = false;
 		ddlJob->commandString = pstrdup(ddlString.data);
 		ddlJob->taskList = DDLTaskList(relOid, ddlString.data);
 
diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c
index c792c6f9c..6f54de4c5 100644
--- a/src/backend/distributed/transaction/worker_transaction.c
+++ b/src/backend/distributed/transaction/worker_transaction.c
@@ -83,13 +83,6 @@ SendBareCommandListToWorkers(TargetWorkerSet targetWorkerSet, List *commandList)
 	char *nodeUser = CitusExtensionOwnerName();
 	ListCell *commandCell = NULL;
 
-	if (XactModificationLevel > XACT_MODIFICATION_NONE)
-	{
-		ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
-						errmsg("cannot open new connections after the first modification "
-							   "command within a transaction")));
-	}
-
 	/* run commands serially */
 	foreach(workerNodeCell, workerNodeList)
 	{
diff --git a/src/include/distributed/multi_router_executor.h b/src/include/distributed/multi_router_executor.h
index 389e868c2..b5db9ddf9 100644
--- a/src/include/distributed/multi_router_executor.h
+++ b/src/include/distributed/multi_router_executor.h
@@ -41,7 +41,7 @@ extern TupleTableSlot * RouterSelectExecScan(CustomScanState *node);
 extern TupleTableSlot * RouterMultiModifyExecScan(CustomScanState *node);
 
 extern int64 ExecuteModifyTasksWithoutResults(List *taskList);
-extern int64 ExecuteSequentialTasksWithoutResults(List *taskList);
+extern void ExecuteTasksSequentiallyWithoutResults(List *taskList);
 
 #endif /* MULTI_ROUTER_EXECUTOR_H_ */
diff --git a/src/include/distributed/multi_utility.h b/src/include/distributed/multi_utility.h
index 8afad5d54..7444c10a1 100644
--- a/src/include/distributed/multi_utility.h
+++ b/src/include/distributed/multi_utility.h
@@ -23,7 +23,7 @@ extern bool EnableDDLPropagation;
 typedef struct DDLJob
 {
 	Oid targetRelationId;      /* oid of the target distributed relation */
-	bool concurrent;           /* related to a CONCURRENTLY index command? */
+	bool concurrentIndexCmd;   /* related to a CONCURRENTLY index command? */
 	const char *commandString; /* initial (coordinator) DDL command string */
 	List *taskList;            /* worker DDL tasks to execute */
 } DDLJob;
diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out
index 3cce9ad30..b92999c8b 100644
--- a/src/test/regress/expected/multi_index_statements.out
+++ b/src/test/regress/expected/multi_index_statements.out
@@ -252,7 +252,7 @@ CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b);
 CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);
 ERROR:  CONCURRENTLY-enabled index command failed
 DETAIL:  CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
-HINT:  Use DROP INDEX IF EXISTS to remove the invalid index, then retry the original command.
+HINT:  Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
 -- the failure results in an INVALID index
 SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
  Index Valid?
@@ -276,7 +276,7 @@ DROP INDEX CONCURRENTLY ith_b_idx_102089;
 DROP INDEX CONCURRENTLY ith_b_idx;
 ERROR:  CONCURRENTLY-enabled index command failed
 DETAIL:  CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
-HINT:  Use DROP INDEX IF EXISTS to remove the invalid index, then retry the original command.
+HINT:  Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
 -- the failure results in an INVALID index
 SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
  Index Valid?