summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Mihai Andrei <mihai.andrei@mongodb.com> 2019-09-26 15:55:39 +0000
committer: evergreen <evergreen@mongodb.com> 2019-09-26 15:55:39 +0000
commit: 91be8f3008241107539ab26cd085be923588bc9a (patch)
tree: 9bb9c459db32cb8d0b8bf430adb15ae2b1816d7f
parent: e1370d816a25a14f32eeb5990302c640eb872730 (diff)
download: mongo-91be8f3008241107539ab26cd085be923588bc9a.tar.gz
SERVER-41918 CollectionBulkLoader does not anticipate exceptions from MultiIndexBlock
(cherry picked from commit 6dc4461c0db2954da0a43d3934bb6c97ac02fd8e)
-rw-r--r-- src/mongo/db/catalog/index_create_impl.cpp | 228
-rw-r--r-- src/mongo/dbtests/validate_tests.cpp | 9
2 files changed, 134 insertions, 103 deletions
diff --git a/src/mongo/db/catalog/index_create_impl.cpp b/src/mongo/db/catalog/index_create_impl.cpp
index b574ab0c994..1f21757c7fc 100644
--- a/src/mongo/db/catalog/index_create_impl.cpp
+++ b/src/mongo/db/catalog/index_create_impl.cpp
@@ -218,115 +218,134 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlockImpl::init(const std::vector<BSO
WriteUnitOfWork wunit(_opCtx);
invariant(_indexes.empty());
- _opCtx->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
-
- const string& ns = _collection->ns().ns();
-
- const auto idxCat = _collection->getIndexCatalog();
- invariant(idxCat);
- invariant(idxCat->ok());
- Status status = idxCat->checkUnfinished();
- if (!status.isOK())
- return status;
-
- for (size_t i = 0; i < indexSpecs.size(); i++) {
- BSONObj info = indexSpecs[i];
-
- string pluginName = IndexNames::findPluginName(info["key"].Obj());
- if (pluginName.size()) {
- Status s = _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(
- _opCtx, pluginName);
- if (!s.isOK())
- return s;
- }
-
- // Any foreground indexes make all indexes be built in the foreground.
- _buildInBackground = (_buildInBackground && info["background"].trueValue());
- }
+ // Guarantees that exceptions cannot be returned from index builder initialization except for
+ // WriteConflictExceptions, which should be dealt with by the caller.
+ try {
+ _opCtx->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
- std::vector<BSONObj> indexInfoObjs;
- indexInfoObjs.reserve(indexSpecs.size());
- std::size_t eachIndexBuildMaxMemoryUsageBytes = 0;
- if (!indexSpecs.empty()) {
- eachIndexBuildMaxMemoryUsageBytes =
- static_cast<std::size_t>(maxIndexBuildMemoryUsageMegabytes.load()) * 1024 * 1024 /
- indexSpecs.size();
- }
+ const string& ns = _collection->ns().ns();
- for (size_t i = 0; i < indexSpecs.size(); i++) {
- BSONObj info = indexSpecs[i];
- StatusWith<BSONObj> statusWithInfo =
- _collection->getIndexCatalog()->prepareSpecForCreate(_opCtx, info);
- Status status = statusWithInfo.getStatus();
+ const auto idxCat = _collection->getIndexCatalog();
+ invariant(idxCat);
+ invariant(idxCat->ok());
+ Status status = idxCat->checkUnfinished();
if (!status.isOK())
return status;
- info = statusWithInfo.getValue();
- indexInfoObjs.push_back(info);
- IndexToBuild index;
- index.block.reset(new IndexCatalogImpl::IndexBuildBlock(_opCtx, _collection, info));
- status = index.block->init();
- if (!status.isOK())
- return status;
+ for (size_t i = 0; i < indexSpecs.size(); i++) {
+ BSONObj info = indexSpecs[i];
- index.real = index.block->getEntry()->accessMethod();
- status = index.real->initializeAsEmpty(_opCtx);
- if (!status.isOK())
- return status;
+ string pluginName = IndexNames::findPluginName(info["key"].Obj());
+ if (pluginName.size()) {
+ Status s = _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(
+ _opCtx, pluginName);
+ if (!s.isOK())
+ return s;
+ }
- if (!_buildInBackground) {
- // Bulk build process requires foreground building as it assumes nothing is changing
- // under it.
- index.bulk = index.real->initiateBulk(eachIndexBuildMaxMemoryUsageBytes);
+ // Any foreground indexes make all indexes be built in the foreground.
+ _buildInBackground = (_buildInBackground && info["background"].trueValue());
}
- const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
-
- IndexCatalog::prepareInsertDeleteOptions(_opCtx, descriptor, &index.options);
- index.options.dupsAllowed = index.options.dupsAllowed || _ignoreUnique;
- if (_ignoreUnique) {
- index.options.getKeysMode = IndexAccessMethod::GetKeysMode::kRelaxConstraints;
+ std::vector<BSONObj> indexInfoObjs;
+ indexInfoObjs.reserve(indexSpecs.size());
+ std::size_t eachIndexBuildMaxMemoryUsageBytes = 0;
+ if (!indexSpecs.empty()) {
+ eachIndexBuildMaxMemoryUsageBytes =
+ static_cast<std::size_t>(maxIndexBuildMemoryUsageMegabytes.load()) * 1024 * 1024 /
+ indexSpecs.size();
}
- log() << "build index on: " << ns << " properties: " << descriptor->toString();
- if (index.bulk)
- log() << "\t building index using bulk method; build may temporarily use up to "
- << eachIndexBuildMaxMemoryUsageBytes / 1024 / 1024 << " megabytes of RAM";
+ for (size_t i = 0; i < indexSpecs.size(); i++) {
+ BSONObj info = indexSpecs[i];
+ StatusWith<BSONObj> statusWithInfo =
+ _collection->getIndexCatalog()->prepareSpecForCreate(_opCtx, info);
+ Status status = statusWithInfo.getStatus();
+ if (!status.isOK())
+ return status;
+ info = statusWithInfo.getValue();
+ indexInfoObjs.push_back(info);
+
+ IndexToBuild index;
+ index.block.reset(new IndexCatalogImpl::IndexBuildBlock(_opCtx, _collection, info));
+ status = index.block->init();
+ if (!status.isOK())
+ return status;
+
+ index.real = index.block->getEntry()->accessMethod();
+ status = index.real->initializeAsEmpty(_opCtx);
+ if (!status.isOK())
+ return status;
+
+ if (!_buildInBackground) {
+ // Bulk build process requires foreground building as it assumes nothing is changing
+ // under it.
+ index.bulk = index.real->initiateBulk(eachIndexBuildMaxMemoryUsageBytes);
+ }
- index.filterExpression = index.block->getEntry()->getFilterExpression();
+ const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
- // TODO SERVER-14888 Suppress this in cases we don't want to audit.
- audit::logCreateIndex(_opCtx->getClient(), &info, descriptor->indexName(), ns);
+ IndexCatalog::prepareInsertDeleteOptions(_opCtx, descriptor, &index.options);
+ index.options.dupsAllowed = index.options.dupsAllowed || _ignoreUnique;
+ if (_ignoreUnique) {
+ index.options.getKeysMode = IndexAccessMethod::GetKeysMode::kRelaxConstraints;
+ }
- _indexes.push_back(std::move(index));
- }
+ log() << "build index on: " << ns << " properties: " << descriptor->toString();
+ if (index.bulk)
+ log() << "\t building index using bulk method; build may temporarily use up to "
+ << eachIndexBuildMaxMemoryUsageBytes / 1024 / 1024 << " megabytes of RAM";
- if (_buildInBackground)
- _backgroundOperation.reset(new BackgroundOperation(ns));
-
- auto replCoord = repl::ReplicationCoordinator::get(_opCtx);
- if (_opCtx->recoveryUnit()->getCommitTimestamp().isNull() &&
- replCoord->canAcceptWritesForDatabase(_opCtx, "admin")) {
- // Only primaries must timestamp this write. Secondaries run this from within a
- // `TimestampBlock`. Primaries performing an index build via `applyOps` may have a
- // wrapping commit timestamp that will be used instead.
- _opCtx->getServiceContext()->getOpObserver()->onOpMessage(
- _opCtx, BSON("msg" << std::string(str::stream() << "Creating indexes. Coll: " << ns)));
- }
+ index.filterExpression = index.block->getEntry()->getFilterExpression();
- wunit.commit();
+ // TODO SERVER-14888 Suppress this in cases we don't want to audit.
+ audit::logCreateIndex(_opCtx->getClient(), &info, descriptor->indexName(), ns);
- if (MONGO_FAIL_POINT(crashAfterStartingIndexBuild)) {
- log() << "Index build interrupted due to 'crashAfterStartingIndexBuild' failpoint. Exiting "
- "after waiting for changes to become durable.";
- Locker::LockSnapshot lockInfo;
- invariant(_opCtx->lockState()->saveLockStateAndUnlock(&lockInfo));
- if (_opCtx->recoveryUnit()->waitUntilDurable()) {
- quickExit(EXIT_TEST);
+ _indexes.push_back(std::move(index));
+ }
+
+ if (_buildInBackground)
+ _backgroundOperation.reset(new BackgroundOperation(ns));
+
+ auto replCoord = repl::ReplicationCoordinator::get(_opCtx);
+ if (_opCtx->recoveryUnit()->getCommitTimestamp().isNull() &&
+ replCoord->canAcceptWritesForDatabase(_opCtx, "admin")) {
+ // Only primaries must timestamp this write. Secondaries run this from within a
+ // `TimestampBlock`. Primaries performing an index build via `applyOps` may have a
+ // wrapping commit timestamp that will be used instead.
+ _opCtx->getServiceContext()->getOpObserver()->onOpMessage(
+ _opCtx,
+ BSON("msg" << std::string(str::stream() << "Creating indexes. Coll: " << ns)));
+ }
+
+ wunit.commit();
+
+ if (MONGO_FAIL_POINT(crashAfterStartingIndexBuild)) {
+ log() << "Index build interrupted due to 'crashAfterStartingIndexBuild' failpoint. "
+ "Exiting "
+ "after waiting for changes to become durable.";
+ Locker::LockSnapshot lockInfo;
+ invariant(_opCtx->lockState()->saveLockStateAndUnlock(&lockInfo));
+ if (_opCtx->recoveryUnit()->waitUntilDurable()) {
+ quickExit(EXIT_TEST);
+ }
}
- }
- return indexInfoObjs;
+ return indexInfoObjs;
+ // Avoid converting WCE to Status
+ } catch (const WriteConflictException&) {
+ throw;
+ } catch (...) {
+ return {exceptionToStatus().code(),
+ str::stream() << "Caught exception during index builder initialization "
+ << _collection->ns().toString()
+ << " ("
+ << _collection->uuid()
+ << "): "
+ << indexSpecs.size()
+ << " provided. First index spec: "
+ << (indexSpecs.empty() ? BSONObj() : indexSpecs[0])};
+ }
}
void failPointHangDuringBuild(FailPoint* fp, StringData where, const BSONObj& doc) {
@@ -494,7 +513,14 @@ Status MultiIndexBlockImpl::insert(const BSONObj& doc, const RecordId& loc) {
int64_t unused;
Status idxStatus(ErrorCodes::InternalError, "");
if (_indexes[i].bulk) {
- idxStatus = _indexes[i].bulk->insert(_opCtx, doc, loc, _indexes[i].options, &unused);
+ // When calling insert, BulkBuilderImpl's Sorter performs file I/O that may result in an
+ // exception.
+ try {
+ idxStatus =
+ _indexes[i].bulk->insert(_opCtx, doc, loc, _indexes[i].options, &unused);
+ } catch (...) {
+ return exceptionToStatus();
+ }
} else {
idxStatus = _indexes[i].real->insert(_opCtx, doc, loc, _indexes[i].options, &unused);
}
@@ -512,13 +538,19 @@ Status MultiIndexBlockImpl::doneInserting(std::set<RecordId>* dupsOut) {
continue;
LOG(1) << "\t bulk commit starting for index: "
<< _indexes[i].block->getEntry()->descriptor()->indexName();
- Status status = _indexes[i].real->commitBulk(_opCtx,
- _indexes[i].bulk.get(),
- _allowInterruption,
- _indexes[i].options.dupsAllowed,
- dupsOut);
- if (!status.isOK()) {
- return status;
+ // SERVER-41918 This call to commitBulk() results in file I/O that may result in an
+ // exception.
+ try {
+ Status status = _indexes[i].real->commitBulk(_opCtx,
+ _indexes[i].bulk.get(),
+ _allowInterruption,
+ _indexes[i].options.dupsAllowed,
+ dupsOut);
+ if (!status.isOK()) {
+ return status;
+ }
+ } catch (...) {
+ return exceptionToStatus();
}
}
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 2c6b20253aa..6410e60dca1 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -691,8 +691,9 @@ public:
wunit.commit();
}
- // Create a partial geo index that indexes the document. This should throw an error.
- ASSERT_THROWS(dbtests::createIndexFromSpec(&_opCtx,
+ // Create a partial geo index that indexes the document. This should return a non-ok
+ // status.
+ ASSERT_NOT_OK(dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "partial_index"
@@ -706,9 +707,7 @@ public:
<< "background"
<< false
<< "partialFilterExpression"
- << BSON("a" << BSON("$eq" << 2))))
- .transitional_ignore(),
- AssertionException);
+ << BSON("a" << BSON("$eq" << 2)))));
// Create a partial geo index that does not index the document.
auto status = dbtests::createIndexFromSpec(&_opCtx,