summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGregory Noma <gregory.noma@gmail.com>2020-08-05 13:45:52 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-08-05 17:59:52 +0000
commitc3421443d50acfb81ee299bf5eff6e14879591f3 (patch)
tree7d28ca9c606c4d81b8a16103076170ffb474e1da
parentfd109f08f97117237dae3a9428c3d2d66f14e221 (diff)
downloadmongo-c3421443d50acfb81ee299bf5eff6e14879591f3.tar.gz
SERVER-49448 Interrupt index builds for shutdown during the expected phase in resumable index build tests
-rw-r--r--jstests/noPassthrough/libs/index_build.js73
-rw-r--r--jstests/noPassthrough/resumable_index_build_bulk_load_phase.js26
-rw-r--r--jstests/noPassthrough/resumable_index_build_collection_scan_phase.js30
-rw-r--r--jstests/noPassthrough/resumable_index_build_drain_writes_phase.js39
-rw-r--r--jstests/replsets/libs/rollback_resumable_index_build.js20
-rw-r--r--src/mongo/db/catalog/index_builds_manager.cpp6
-rw-r--r--src/mongo/db/catalog/index_builds_manager.h3
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp70
-rw-r--r--src/mongo/db/index/index_access_method.cpp4
-rw-r--r--src/mongo/db/index/index_build_interceptor.cpp17
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp6
-rw-r--r--src/mongo/db/mongod_main.cpp6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp5
14 files changed, 164 insertions, 147 deletions
diff --git a/jstests/noPassthrough/libs/index_build.js b/jstests/noPassthrough/libs/index_build.js
index 3f6a19dfa37..b647e715c5d 100644
--- a/jstests/noPassthrough/libs/index_build.js
+++ b/jstests/noPassthrough/libs/index_build.js
@@ -239,29 +239,6 @@ const ResumableIndexBuildTest = class {
}
/**
- * Waits for the specified index build to be interrupted and then disables the given failpoint.
- */
- static disableFailPointAfterInterruption(conn, failPointName, buildUUID) {
- return startParallelShell(
- funWithArgs(function(failPointName, buildUUID) {
- // Wait for the index build to be interrupted.
- checkLog.containsJson(db.getMongo(), 20449, {
- buildUUID: function(uuid) {
- return uuid["uuid"]["$uuid"] === buildUUID;
- },
- error: function(error) {
- return error.code === ErrorCodes.InterruptedDueToReplStateChange;
- }
- });
-
- // Once the index build has been interrupted, disable the failpoint so that shutdown
- // or stepdown can proceed.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- }, failPointName, buildUUID), conn.port);
- }
-
- /**
* Inserts the given documents once an index build reaches the end of the bulk load phase so
* that the documents are inserted into the side writes table for that index build.
*/
@@ -299,7 +276,7 @@ const ResumableIndexBuildTest = class {
* Restarts the given node, ensuring that the index build with name indexName has its state
* written to disk upon shutdown and is completed upon startup.
*/
- static restart(rst, conn, coll, indexName, failPointName) {
+ static restart(rst, conn, coll, indexName, failPointName, failPointData) {
clearRawMongoProgramOutput();
const buildUUID = extractUUIDFromObject(
@@ -307,11 +284,42 @@ const ResumableIndexBuildTest = class {
.assertIndexes(coll, 2, ["_id_"], [indexName], {includeBuildUUIDs: true})[indexName]
.buildUUID);
- const awaitDisableFailPoint = ResumableIndexBuildTest.disableFailPointAfterInterruption(
- conn, failPointName, buildUUID);
+ // Don't interrupt the index build for shutdown until it is at the desired point.
+ const shutdownFpTimesEntered =
+ assert
+ .commandWorked(
+ conn.adminCommand({configureFailPoint: "hangBeforeShutdown", mode: "alwaysOn"}))
+ .count;
+
+ const awaitContinueShutdown = startParallelShell(
+ funWithArgs(function(failPointName, failPointData, shutdownFpTimesEntered) {
+ load("jstests/libs/fail_point_util.js");
+
+ // Wait until we hang before shutdown to ensure that we do not move the index build
+ // forward before the step down process is complete.
+ assert.commandWorked(db.adminCommand({
+ waitForFailPoint: "hangBeforeShutdown",
+ timesEntered: shutdownFpTimesEntered + 1,
+ maxTimeMS: kDefaultWaitForFailPointTimeout
+ }));
+
+ // Move the index build forward to the point that we want it to be interrupted for
+ // shutdown at.
+ const fp = configureFailPoint(db.getMongo(), failPointName, failPointData);
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "hangAfterSettingUpIndexBuildUnlocked", mode: "off"}));
+ fp.wait();
+
+ // Disabling this failpoint will allow shutdown to continue and cause the operation
+ // context to be killed. This will cause the failpoint specified by failPointName
+ // to be interrupted and allow the index build to be interrupted for shutdown at
+ // its current location.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangBeforeShutdown", mode: "off"}));
+ }, failPointName, failPointData, shutdownFpTimesEntered), conn.port);
rst.stop(conn);
- awaitDisableFailPoint();
+ awaitContinueShutdown();
// Ensure that the resumable index build state was written to disk upon clean shutdown.
assert(RegExp("4841502.*" + buildUUID).test(rawMongoProgramOutput()));
@@ -337,10 +345,16 @@ const ResumableIndexBuildTest = class {
insertIntoSideWritesTable,
postIndexBuildInserts = {}) {
const primary = rst.getPrimary();
+
+ if (!ResumableIndexBuildTest.resumableIndexBuildsEnabled(primary)) {
+ jsTestLog("Skipping test because resumable index builds are not enabled");
+ return;
+ }
+
const coll = primary.getDB(dbName).getCollection(collName);
const indexName = "resumable_index_build";
- const fp = configureFailPoint(primary, failPointName, failPointData);
+ const fp = configureFailPoint(primary, "hangAfterSettingUpIndexBuildUnlocked");
const awaitInsertIntoSideWritesTable = ResumableIndexBuildTest.insertIntoSideWritesTable(
primary, collName, insertIntoSideWritesTable);
@@ -350,7 +364,8 @@ const ResumableIndexBuildTest = class {
fp.wait();
- ResumableIndexBuildTest.restart(rst, primary, coll, indexName, failPointName);
+ ResumableIndexBuildTest.restart(
+ rst, primary, coll, indexName, failPointName, failPointData);
awaitInsertIntoSideWritesTable();
awaitCreateIndex();
diff --git a/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js b/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js
index cbf78d0ebc0..c5807105737 100644
--- a/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js
+++ b/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js
@@ -3,7 +3,10 @@
* build is in the bulk load phase, and that the index build is subsequently completed when the
* node is started back up.
*
- * @tags: [requires_persistence, requires_replication]
+ * @tags: [
+ * requires_persistence,
+ * requires_replication,
+ * ]
*/
(function() {
"use strict";
@@ -11,28 +14,17 @@
load("jstests/noPassthrough/libs/index_build.js");
const dbName = "test";
-const collName = "resumable_index_build_bulk_load_phase";
+const failPointName = "hangIndexBuildDuringBulkLoadPhase";
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
-const primary = rst.getPrimary();
-const coll = primary.getDB(dbName).getCollection(collName);
+const coll = rst.getPrimary().getDB(dbName).getCollection(jsTestName());
+assert.commandWorked(coll.insert([{a: 1}, {a: 2}]));
-if (!ResumableIndexBuildTest.resumableIndexBuildsEnabled(primary)) {
- jsTestLog("Skipping test because resumable index builds are not enabled");
- rst.stopSet();
- return;
-}
-
-assert.commandWorked(coll.insert({a: 1}));
-assert.commandWorked(coll.insert({a: 2}));
-
-ResumableIndexBuildTest.run(
- rst, dbName, collName, {a: 1}, "hangIndexBuildDuringBulkLoadPhase", {iteration: 0});
-ResumableIndexBuildTest.run(
- rst, dbName, collName, {a: 1}, "hangIndexBuildDuringBulkLoadPhase", {iteration: 1});
+ResumableIndexBuildTest.run(rst, dbName, coll.getName(), {a: 1}, failPointName, {iteration: 0});
+ResumableIndexBuildTest.run(rst, dbName, coll.getName(), {a: 1}, failPointName, {iteration: 1});
rst.stopSet();
})(); \ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js b/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js
index f4982f0ca08..6aa18a03341 100644
--- a/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js
+++ b/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js
@@ -3,7 +3,10 @@
* build is in the collection scan phase, and that the index build is subsequently completed when
* the node is started back up.
*
- * @tags: [requires_persistence, requires_replication]
+ * @tags: [
+ * requires_persistence,
+ * requires_replication,
+ * ]
*/
(function() {
"use strict";
@@ -11,32 +14,17 @@
load("jstests/noPassthrough/libs/index_build.js");
const dbName = "test";
-const collName = "resumable_index_build_collection_scan_phase";
+const failPointName = "hangIndexBuildDuringCollectionScanPhaseBeforeInsertion";
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
-const primary = rst.getPrimary();
-const coll = primary.getDB(dbName).getCollection(collName);
+const coll = rst.getPrimary().getDB(dbName).getCollection(jsTestName());
+assert.commandWorked(coll.insert([{a: 1}, {a: 2}]));
-if (!ResumableIndexBuildTest.resumableIndexBuildsEnabled(primary)) {
- jsTestLog("Skipping test because resumable index builds are not enabled");
- rst.stopSet();
- return;
-}
-
-assert.commandWorked(coll.insert({a: 1}));
-assert.commandWorked(coll.insert({a: 2}));
-
-ResumableIndexBuildTest.run(
- rst, dbName, collName, {a: 1}, "hangIndexBuildDuringCollectionScanPhaseBeforeInsertion", {
- fieldsToMatch: {a: 1}
- });
-ResumableIndexBuildTest.run(
- rst, dbName, collName, {a: 1}, "hangIndexBuildDuringCollectionScanPhaseBeforeInsertion", {
- fieldsToMatch: {a: 2}
- });
+ResumableIndexBuildTest.run(rst, dbName, coll.getName(), {a: 1}, failPointName, {iteration: 0});
+ResumableIndexBuildTest.run(rst, dbName, coll.getName(), {a: 1}, failPointName, {iteration: 1});
rst.stopSet();
})(); \ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js
index c6f78592a0a..fc4f0cd776b 100644
--- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js
+++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js
@@ -3,7 +3,10 @@
* build is in the drain writes phase, and that the index build is subsequently completed when the
* node is started back up.
*
- * @tags: [requires_persistence, requires_replication]
+ * @tags: [
+ * requires_persistence,
+ * requires_replication,
+ * ]
*/
(function() {
"use strict";
@@ -11,38 +14,22 @@
load("jstests/noPassthrough/libs/index_build.js");
const dbName = "test";
-const collName = "resumable_index_build_drain_writes_phase";
+const failPointName = "hangIndexBuildDuringDrainWritesPhase";
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
-const primary = rst.getPrimary();
-const coll = primary.getDB(dbName).getCollection(collName);
-
-if (!ResumableIndexBuildTest.resumableIndexBuildsEnabled(primary)) {
- jsTestLog("Skipping test because resumable index builds are not enabled");
- rst.stopSet();
- return;
-}
-
+const coll = rst.getPrimary().getDB(dbName).getCollection(jsTestName());
assert.commandWorked(coll.insert({a: 1}));
-ResumableIndexBuildTest.run(rst,
- dbName,
- collName,
- {a: 1},
- "hangIndexBuildDuringDrainWritesPhase",
- {iteration: 0},
- [{a: 2}, {a: 3}]);
-ResumableIndexBuildTest.run(rst,
- dbName,
- collName,
- {a: 1},
- "hangIndexBuildDuringDrainWritesPhase",
- {iteration: 1},
- [{a: 4}, {a: 5}],
- [{a: 6}, {a: 7}]);
+ResumableIndexBuildTest.run(
+ rst, dbName, coll.getName(), {a: 1}, failPointName, {iteration: 0}, [{a: 2}, {a: 3}]);
+ResumableIndexBuildTest.run(
+ rst, dbName, coll.getName(), {a: 1}, failPointName, {iteration: 1}, [{a: 4}, {a: 5}], [
+ {a: 6},
+ {a: 7}
+ ]);
rst.stopSet();
})(); \ No newline at end of file
diff --git a/jstests/replsets/libs/rollback_resumable_index_build.js b/jstests/replsets/libs/rollback_resumable_index_build.js
index 8e2fb92901c..206acab18f3 100644
--- a/jstests/replsets/libs/rollback_resumable_index_build.js
+++ b/jstests/replsets/libs/rollback_resumable_index_build.js
@@ -33,7 +33,7 @@ const RollbackResumableIndexBuildTest = class {
const coll = originalPrimary.getDB(dbName).getCollection(collName);
const indexName = "rollback_resumable_index_build";
- let rollbackEndFp =
+ const rollbackEndFp =
configureFailPoint(originalPrimary, rollbackEndFailPointName, rollbackEndFailPointData);
const rollbackStartFp = configureFailPoint(
originalPrimary, rollbackStartFailPointName, rollbackStartFailPointData);
@@ -60,13 +60,23 @@ const RollbackResumableIndexBuildTest = class {
// Disable the failpoint in a parallel shell so that the primary can step down when the
// rollback test is transitioning to sync source operations before rollback.
- const awaitDisableFailPointAfterInterruption =
- ResumableIndexBuildTest.disableFailPointAfterInterruption(
- originalPrimary, rollbackEndFp.failPointName, buildUUID);
+ const awaitDisableFailPointAfterContinuingInBackground = startParallelShell(
+ funWithArgs(function(failPointName, buildUUID) {
+                // Wait for the index build to continue in the background.
+ checkLog.containsJson(db.getMongo(), 20442, {
+ buildUUID: function(uuid) {
+ return uuid["uuid"]["$uuid"] === buildUUID;
+ }
+ });
+
+ // Disable the failpoint so that stepdown can proceed.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+ }, rollbackEndFp.failPointName, buildUUID), originalPrimary.port);
rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- awaitDisableFailPointAfterInterruption();
+ awaitDisableFailPointAfterContinuingInBackground();
// The index creation will report as having failed due to InterruptedDueToReplStateChange,
// but it is still building in the background.
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 55b6a0c027e..87967ef7fcd 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -327,8 +327,7 @@ bool IndexBuildsManager::abortIndexBuild(OperationContext* opCtx,
bool IndexBuildsManager::abortIndexBuildWithoutCleanupForRollback(OperationContext* opCtx,
Collection* collection,
- const UUID& buildUUID,
- const std::string& reason) {
+ const UUID& buildUUID) {
auto builder = _getBuilder(buildUUID);
if (!builder.isOK()) {
return false;
@@ -351,7 +350,8 @@ bool IndexBuildsManager::abortIndexBuildWithoutCleanupForShutdown(OperationConte
return false;
}
- LOGV2(4841500, "Index build aborted without cleanup for shutdown", logAttrs(buildUUID));
+ LOGV2(
+ 4841500, "Index build aborted without cleanup for shutdown", "buildUUID"_attr = buildUUID);
builder.getValue()->abortWithoutCleanupForShutdown(opCtx);
return true;
diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h
index c476e91368d..8dc909e7dad 100644
--- a/src/mongo/db/catalog/index_builds_manager.h
+++ b/src/mongo/db/catalog/index_builds_manager.h
@@ -161,8 +161,7 @@ public:
*/
bool abortIndexBuildWithoutCleanupForRollback(OperationContext* opCtx,
Collection* collection,
- const UUID& buildUUID,
- const std::string& reason);
+ const UUID& buildUUID);
/**
* The same as abortIndexBuildWithoutCleanupForRollback above, but additionally writes the
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index c9ba79ec06e..4bf146c8b7e 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -72,6 +72,38 @@ MONGO_FAIL_POINT_DEFINE(hangIndexBuildDuringCollectionScanPhaseBeforeInsertion);
MONGO_FAIL_POINT_DEFINE(hangIndexBuildDuringCollectionScanPhaseAfterInsertion);
MONGO_FAIL_POINT_DEFINE(leaveIndexBuildUnfinishedForShutdown);
+namespace {
+
+void failPointHangDuringBuild(OperationContext* opCtx,
+ FailPoint* fp,
+ StringData where,
+ const BSONObj& doc,
+ unsigned long long iteration) {
+ fp->executeIf(
+ [=, &doc](const BSONObj& data) {
+ LOGV2(20386,
+ "Hanging index build during collection scan phase insertion",
+ "where"_attr = where,
+ "doc"_attr = doc);
+
+ fp->pauseWhileSet(opCtx);
+ },
+ [&doc, iteration](const BSONObj& data) {
+ if (data.hasField("iteration")) {
+ return iteration == static_cast<unsigned long long>(data["iteration"].numberLong());
+ }
+
+ auto fieldsToMatch = data.getObjectField("fieldsToMatch");
+ return std::all_of(
+ fieldsToMatch.begin(), fieldsToMatch.end(), [&doc](const auto& elem) {
+ return SimpleBSONElementComparator::kInstance.evaluate(elem ==
+ doc[elem.fieldName()]);
+ });
+ });
+}
+
+} // namespace
+
MultiIndexBlock::~MultiIndexBlock() {
invariant(_buildIsCleanedUp);
}
@@ -172,7 +204,9 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
invariant(_indexes.empty());
- if (resumeInfo) {
+ // TODO (SERVER-49409): Resume from the collection scan phase.
+ // TODO (SERVER-49408): Resume from the bulk load phase.
+ if (resumeInfo && resumeInfo->getPhase() == IndexBuildPhaseEnum::kDrainWrites) {
_phase = resumeInfo->getPhase();
}
@@ -338,26 +372,6 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
}
}
-void failPointHangDuringBuild(FailPoint* fp, StringData where, const BSONObj& doc) {
- fp->executeIf(
- [&](const BSONObj& data) {
- LOGV2(20386,
- "Hanging index build during collection scan phase insertion",
- "where"_attr = where,
- "doc"_attr = doc);
-
- fp->pauseWhileSet();
- },
- [&doc](const BSONObj& data) {
- auto fieldsToMatch = data.getObjectField("fieldsToMatch");
- return std::all_of(
- fieldsToMatch.begin(), fieldsToMatch.end(), [&doc](const auto& elem) {
- return SimpleBSONElementComparator::kInstance.evaluate(elem ==
- doc[elem.fieldName()]);
- });
- });
-}
-
Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
Collection* collection) {
invariant(!_buildIsCleanedUp);
@@ -445,8 +459,11 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
progress->setTotalWhileRunning(collection->numRecords(opCtx));
- failPointHangDuringBuild(
- &hangIndexBuildDuringCollectionScanPhaseBeforeInsertion, "before", objToIndex);
+ failPointHangDuringBuild(opCtx,
+ &hangIndexBuildDuringCollectionScanPhaseBeforeInsertion,
+ "before",
+ objToIndex,
+ n);
// The external sorter is not part of the storage engine and therefore does not need a
// WriteUnitOfWork to write keys.
@@ -455,8 +472,11 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
return ret;
}
- failPointHangDuringBuild(
- &hangIndexBuildDuringCollectionScanPhaseAfterInsertion, "after", objToIndex);
+ failPointHangDuringBuild(opCtx,
+ &hangIndexBuildDuringCollectionScanPhaseAfterInsertion,
+ "after",
+ objToIndex,
+ n);
// Go to the next document.
progress->hit();
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index bf61ed855cc..e7e0bb694ca 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -652,13 +652,13 @@ Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
}
hangIndexBuildDuringBulkLoadPhase.executeIf(
- [i](const BSONObj& data) {
+ [opCtx, i](const BSONObj& data) {
LOGV2(4924400,
"Hanging index build during bulk load phase due to "
"'hangIndexBuildDuringBulkLoadPhase' failpoint",
"iteration"_attr = i);
- hangIndexBuildDuringBulkLoadPhase.pauseWhileSet();
+ hangIndexBuildDuringBulkLoadPhase.pauseWhileSet(opCtx);
},
[i](const BSONObj& data) { return i == data["iteration"].numberLong(); });
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index f569fc00e46..12f339894f6 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -53,14 +53,14 @@
namespace mongo {
namespace {
-void checkDrainPhaseFailPoint(FailPoint* fp, long long iteration) {
+void checkDrainPhaseFailPoint(OperationContext* opCtx, FailPoint* fp, long long iteration) {
fp->executeIf(
[=](const BSONObj& data) {
LOGV2(4841800,
"Hanging index build during drain writes phase",
"iteration"_attr = iteration);
- fp->pauseWhileSet();
+ fp->pauseWhileSet(opCtx);
},
[iteration](const BSONObj& data) { return iteration == data["iteration"].numberLong(); });
}
@@ -115,12 +115,13 @@ IndexBuildInterceptor::IndexBuildInterceptor(OperationContext* opCtx,
boost::optional<StringData> duplicateKeyTrackerIdent,
boost::optional<StringData> skippedRecordTrackerIdent)
: _indexCatalogEntry(entry),
+ _sideWritesTable(
+ opCtx->getServiceContext()->getStorageEngine()->makeTemporaryRecordStoreFromExistingIdent(
+ opCtx, sideWritesIdent)),
_skippedRecordTracker(opCtx, entry, skippedRecordTrackerIdent),
- _sideWritesCounter(std::make_shared<AtomicWord<long long>>()) {
+ _sideWritesCounter(
+ std::make_shared<AtomicWord<long long>>(_sideWritesTable->rs()->numRecords(opCtx))) {
- _sideWritesTable =
- opCtx->getServiceContext()->getStorageEngine()->makeTemporaryRecordStoreFromExistingIdent(
- opCtx, sideWritesIdent);
auto finalizeTableOnFailure = makeGuard([&] {
_sideWritesTable->finalizeTemporaryTable(opCtx,
TemporaryRecordStore::FinalizationAction::kDelete);
@@ -241,8 +242,8 @@ Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx,
}
const long long iteration = _numApplied + batchSize;
- checkDrainPhaseFailPoint(&hangIndexBuildDuringDrainWritesPhase, iteration);
- checkDrainPhaseFailPoint(&hangIndexBuildDuringDrainWritesPhaseSecond, iteration);
+ checkDrainPhaseFailPoint(opCtx, &hangIndexBuildDuringDrainWritesPhase, iteration);
+ checkDrainPhaseFailPoint(opCtx, &hangIndexBuildDuringDrainWritesPhaseSecond, iteration);
batchSize += 1;
batchSizeBytes += objSize;
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index b0e1daf3208..b9479f378cb 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -1278,7 +1278,7 @@ void IndexBuildsCoordinator::_completeAbort(OperationContext* opCtx,
invariant(replState->protocol == IndexBuildProtocol::kTwoPhase);
invariant(replCoord->getMemberState().rollback());
_indexBuildsManager.abortIndexBuildWithoutCleanupForRollback(
- opCtx, coll, replState->buildUUID, reason.reason());
+ opCtx, coll, replState->buildUUID);
break;
}
case IndexBuildAction::kNoAction:
@@ -2231,7 +2231,9 @@ void IndexBuildsCoordinator::_runIndexBuildInner(
hangAfterInitializingIndexBuild.pauseWhileSet(opCtx);
}
- if (resumeInfo) {
+ // TODO (SERVER-49409): Resume from the collection scan phase.
+ // TODO (SERVER-49408): Resume from the bulk load phase.
+ if (resumeInfo && resumeInfo->getPhase() == IndexBuildPhaseEnum::kDrainWrites) {
_resumeIndexBuildFromPhase(opCtx, replState, indexBuildOptions, resumeInfo.get());
} else {
_buildIndex(opCtx, replState, indexBuildOptions);
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 8ad2de63145..775e13d2021 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -218,6 +218,7 @@ namespace {
MONGO_FAIL_POINT_DEFINE(hangDuringQuiesceMode);
MONGO_FAIL_POINT_DEFINE(pauseWhileKillingOperationsAtShutdown);
+MONGO_FAIL_POINT_DEFINE(hangBeforeShutdown);
const NamespaceString startupLogCollectionName("local.startup_log");
@@ -1025,6 +1026,11 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
shutdownTimeout = Milliseconds(repl::shutdownTimeoutMillisForSignaledShutdown.load());
}
+ if (MONGO_unlikely(hangBeforeShutdown.shouldFail())) {
+ LOGV2(4944800, "Hanging before shutdown due to hangBeforeShutdown failpoint");
+ hangBeforeShutdown.pauseWhileSet();
+ }
+
// If we don't have shutdownArgs, we're shutting down from a signal, or other clean shutdown
// path.
//
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 5e688b96d51..8c2435f5d04 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1754,10 +1754,8 @@ std::unique_ptr<RecordStore> WiredTigerKVEngine::makeTemporaryRecordStore(Operat
params.isCapped = false;
params.isEphemeral = _ephemeral;
params.cappedCallback = nullptr;
- // Temporary collections do not need to persist size information to the size storer.
- params.sizeStorer = nullptr;
- // Temporary collections do not need to reconcile collection size/counts.
- params.tracksSizeAdjustments = false;
+ params.sizeStorer = _sizeStorer.get();
+ params.tracksSizeAdjustments = true;
params.isReadOnly = false;
params.cappedMaxSize = -1;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 26aa8e6e7a0..a83bd10d82b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -857,9 +857,8 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
}
// If no SizeStorer is in use, start counting at zero. In practice, this will only ever be
- // the case for temporary RecordStores (those not associated with any collection) and in unit
- // tests. Persistent size information is not required in either case. If a RecordStore needs
- // persistent size information, we require it to use a SizeStorer.
+ // the case in unit tests. Persistent size information is not required in this case. If a
+ // RecordStore needs persistent size information, we require it to use a SizeStorer.
_sizeInfo = _sizeStorer ? _sizeStorer->load(_uri)
: std::make_shared<WiredTigerSizeStorer::SizeInfo>(0, 0);
}