author    Benety Goh <benety@mongodb.com>  2019-10-09 12:09:55 +0000
committer evergreen <evergreen@mongodb.com>  2019-10-09 12:09:55 +0000
commit    9471cc8ad18cc5f1da025c291e03adfde434ab8d (patch)
tree      30d4876bd7adba0a70f0d4e748e78b51375f870d
parent    342d72abd4b40d5325e7a252ca1c71bada457a9b (diff)
download  mongo-9471cc8ad18cc5f1da025c291e03adfde434ab8d.tar.gz
SERVER-39002 write abortIndexBuild oplog entry in same WUOW as index build cleanup
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp     | 43
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp | 85
2 files changed, 109 insertions(+), 19 deletions(-)
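
The diff below moves the abortIndexBuild oplog write into the cleanup callback that
_indexBuildsManager.tearDownIndexBuild() invokes, so the oplog entry commits in the same
WriteUnitOfWork as the catalog cleanup and the two can no longer be timestamped separately.
The following is a minimal standalone sketch of that callback-in-one-unit-of-work pattern;
WriteUnitOfWork, tearDownIndexBuild, and OnCleanUpFn here are simplified stand-ins for
illustration, not the real MongoDB types:

#include <functional>
#include <iostream>

// Simplified stand-in for MongoDB's WriteUnitOfWork: RAII scope in which all
// writes either commit together or roll back together.
struct WriteUnitOfWork {
    bool committed = false;
    void commit() {
        committed = true;
        std::cout << "WUOW committed: cleanup + oplog entry are atomic\n";
    }
    ~WriteUnitOfWork() {
        if (!committed)
            std::cout << "WUOW rolled back: cleanup and oplog entry undone together\n";
    }
};

using OnCleanUpFn = std::function<void()>;

// Models tearDownIndexBuild(): the cleanup callback fires inside the same
// unit of work that removes the unfinished index from the catalog.
void tearDownIndexBuild(const OnCleanUpFn& onCleanUp) {
    WriteUnitOfWork wuow;
    std::cout << "removing unfinished index from the durable catalog\n";
    onCleanUp();  // e.g. onAbortIndexBuild(): write the abortIndexBuild oplog entry
    wuow.commit();
}

int main() {
    tearDownIndexBuild([] { std::cout << "writing abortIndexBuild oplog entry\n"; });
    return 0;
}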
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 6ebb98a11ab..855ff679b8b 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -933,12 +933,30 @@ void IndexBuildsCoordinator::_runIndexBuildInner(OperationContext* opCtx,
 
             Lock::DBLock dbLock(opCtx, nss.db(), MODE_IX);
 
-            unlockRSTLForIndexCleanup(opCtx);
-
-            Lock::CollectionLock collLock(opCtx, nss, MODE_X);
-
-            _indexBuildsManager.tearDownIndexBuild(
-                opCtx, collection, replState->buildUUID, MultiIndexBlock::kNoopOnCleanUpFn);
+            if (!replSetAndNotPrimary) {
+                auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+                if (replCoord->getSettings().usingReplSets() &&
+                    replCoord->canAcceptWritesFor(opCtx, nss)) {
+                    // We are currently a primary node. Notify downstream nodes to abort their index
+                    // builds with the same build UUID.
+                    Lock::CollectionLock collLock(opCtx, nss, MODE_X);
+                    auto onCleanUpFn = [&] { onAbortIndexBuild(opCtx, nss, *replState, status); };
+                    _indexBuildsManager.tearDownIndexBuild(
+                        opCtx, collection, replState->buildUUID, onCleanUpFn);
+                } else {
+                    // This index build was aborted because we are stepping down from primary.
+                    unlockRSTLForIndexCleanup(opCtx);
+                    Lock::CollectionLock collLock(opCtx, nss, MODE_X);
+                    _indexBuildsManager.tearDownIndexBuild(
+                        opCtx, collection, replState->buildUUID, MultiIndexBlock::kNoopOnCleanUpFn);
+                }
+            } else {
+                // We started this index build during oplog application as a secondary node.
+                unlockRSTLForIndexCleanup(opCtx);
+                Lock::CollectionLock collLock(opCtx, nss, MODE_X);
+                _indexBuildsManager.tearDownIndexBuild(
+                    opCtx, collection, replState->buildUUID, MultiIndexBlock::kNoopOnCleanUpFn);
+            }
         } else {
             _indexBuildsManager.tearDownIndexBuild(
                 opCtx, collection, replState->buildUUID, MultiIndexBlock::kNoopOnCleanUpFn);
@@ -959,19 +977,6 @@ void IndexBuildsCoordinator::_runIndexBuildInner(OperationContext* opCtx,
                               << "; Database: " << replState->dbName));
     }
 
-    UninterruptibleLockGuard noInterrupt(opCtx->lockState());
-    Lock::GlobalLock lock(opCtx, MODE_IX);
-
-    auto replCoord = repl::ReplicationCoordinator::get(opCtx);
-    if (replCoord->getSettings().usingReplSets() && replCoord->canAcceptWritesFor(opCtx, nss)) {
-        writeConflictRetry(
-            opCtx, "onAbortIndexBuild", NamespaceString::kRsOplogNamespace.ns(), [&] {
-                WriteUnitOfWork wuow(opCtx);
-                onAbortIndexBuild(opCtx, nss, *replState, status);
-                wuow.commit();
-            });
-    }
-
     uassertStatusOK(status);
     MONGO_UNREACHABLE;
 }
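
The block deleted above shows the old approach: after cleanup had already run, a separate
writeConflictRetry loop opened its own WriteUnitOfWork just to write the abortIndexBuild entry,
so the cleanup and the oplog write could land at different timestamps. Below is a rough,
hypothetical model of that retry helper, assuming only that it reruns the work whenever a
storage-level write conflict is thrown (the real helper also takes an OperationContext and a
namespace string):

#include <functional>
#include <iostream>
#include <stdexcept>

// Hypothetical stand-in for MongoDB's WriteConflictException.
struct WriteConflictException : std::runtime_error {
    WriteConflictException() : std::runtime_error("write conflict") {}
};

// Rough model of the writeConflictRetry() helper the deleted code used:
// rerun the work until it completes without a write conflict.
void writeConflictRetry(const char* opName, const std::function<void()>& work) {
    for (int attempt = 1;; ++attempt) {
        try {
            work();
            return;
        } catch (const WriteConflictException&) {
            std::cout << opName << ": retry after write conflict, attempt " << attempt << "\n";
        }
    }
}

int main() {
    int conflictsLeft = 2;  // simulate two conflicts before the write succeeds
    writeConflictRetry("onAbortIndexBuild", [&] {
        if (conflictsLeft-- > 0)
            throw WriteConflictException();
        std::cout << "abortIndexBuild oplog entry written\n";
    });
    return 0;
}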
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 92f5764f00b..7981f2abdf5 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -2298,6 +2298,90 @@ public:
     }
 };
 
+/**
+ * This test asserts that the catalog updates representing the beginning and end of an aborted
+ * index build are timestamped. The oplog should contain two entries, startIndexBuild and
+ * abortIndexBuild, and we inspect the catalog at the timestamp corresponding to each of these
+ * oplog entries.
+ */
+class TimestampAbortIndexBuild : public StorageTimestampTest {
+public:
+    void run() {
+        auto storageEngine = _opCtx->getServiceContext()->getStorageEngine();
+        auto durableCatalog = storageEngine->getCatalog();
+        NamespaceString nss("unittests.timestampAbortIndexBuild");
+        reset(nss);
+        std::vector<std::string> origIdents;
+        {
+            AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
+            auto insertTimestamp1 = _clock->reserveTicks(1);
+            auto insertTimestamp2 = _clock->reserveTicks(1);
+            // Insert two documents with the same value for field 'a' so that
+            // we will fail to create a unique index.
+            WriteUnitOfWork wuow(_opCtx);
+            insertDocument(autoColl.getCollection(),
+                           InsertStatement(BSON("_id" << 0 << "a" << 1),
+                                           insertTimestamp1.asTimestamp(),
+                                           presentTerm));
+            insertDocument(autoColl.getCollection(),
+                           InsertStatement(BSON("_id" << 1 << "a" << 1),
+                                           insertTimestamp2.asTimestamp(),
+                                           presentTerm));
+            wuow.commit();
+            ASSERT_EQ(2, itCount(autoColl.getCollection()));
+            // Save the pre-state idents so we can capture the specific ident related to index
+            // creation.
+            origIdents = durableCatalog->getAllIdents(_opCtx);
+        }
+        {
+            DBDirectClient client(_opCtx);
+            IndexSpec index1;
+            // Name this index for easier querying.
+            index1.addKeys(BSON("a" << 1)).name("a_1").unique();
+            std::vector<const IndexSpec*> indexes;
+            indexes.push_back(&index1);
+            ASSERT_THROWS_CODE(
+                client.createIndexes(nss.ns(), indexes), DBException, ErrorCodes::DuplicateKey);
+        }
+        // Confirm that startIndexBuild and abortIndexBuild oplog entries have been written to the
+        // oplog.
+        auto indexStartDocument =
+            queryOplog(BSON("ns" << nss.db() + ".$cmd"
+                                 << "o.startIndexBuild" << nss.coll() << "o.indexes.0.name"
+                                 << "a_1"));
+        auto indexStartTs = indexStartDocument["ts"].timestamp();
+        auto indexAbortDocument =
+            queryOplog(BSON("ns" << nss.db() + ".$cmd"
+                                 << "o.abortIndexBuild" << nss.coll() << "o.indexes.0.name"
+                                 << "a_1"));
+        auto indexAbortTs = indexAbortDocument["ts"].timestamp();
+        // Check the index state in the catalog at the oplog entry times for both startIndexBuild
+        // and abortIndexBuild.
+        AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
+        // We expect one new index ident during this index build.
+        assertRenamedCollectionIdentsAtTimestamp(
+            durableCatalog, origIdents, /*expectedNewIndexIdents*/ 1, indexStartTs);
+        ASSERT_FALSE(
+            getIndexMetaData(getMetaDataAtTime(durableCatalog, nss, indexStartTs), "a_1").ready);
+        // We expect all new idents to be removed after the index build has aborted.
+        assertRenamedCollectionIdentsAtTimestamp(
+            durableCatalog, origIdents, /*expectedNewIndexIdents*/ 0, indexAbortTs);
+        assertIndexMetaDataMissing(getMetaDataAtTime(durableCatalog, nss, indexAbortTs), "a_1");
+    }
+};
+
 class TimestampIndexDrops : public StorageTimestampTest {
 public:
     void run() {
@@ -3480,6 +3564,7 @@ public:
         // addIf<TimestampIndexBuildDrain<true>>();
         addIf<TimestampMultiIndexBuilds>();
         addIf<TimestampMultiIndexBuildsDuringRename>();
+        addIf<TimestampAbortIndexBuild>();
         addIf<TimestampIndexDrops>();
         addIf<TimestampIndexBuilderOnPrimary>();
         addIf<SecondaryReadsDuringBatchApplicationAreAllowed>();