diff options
author | Daniel Ernst <daniel.ernst@mongodb.com> | 2020-01-30 16:40:42 +0000 |
---|---|---|
committer | evergreen <evergreen@mongodb.com> | 2020-01-30 16:40:42 +0000 |
commit | a6d3529b264b8b2331faea6a0e645fcf9def8f7f (patch) | |
tree | ee6277d3bf15a97cee213db95fc2c145c93ede48 | |
parent | 4bea6f38267b33731f4f95e4fb10396bc6f9c8b5 (diff) | |
download | mongo-a6d3529b264b8b2331faea6a0e645fcf9def8f7f.tar.gz |
SERVER-41965 Change repair to only rebuild indexes on necessary collections
25 files changed, 615 insertions, 203 deletions
diff --git a/jstests/disk/wt_repair_corrupt_files.js b/jstests/disk/wt_repair_corrupt_files.js index 839dda32ab7..a863292bdca 100644 --- a/jstests/disk/wt_repair_corrupt_files.js +++ b/jstests/disk/wt_repair_corrupt_files.js @@ -107,6 +107,52 @@ let runTest = function(mongodOptions) { assert.eq(orphanColl.count(), 1); MongoRunner.stopMongod(mongod); + + /** + * Test 4. Create two collections, and an index on each. Corrupt one collection's .wt file + * in an unrecoverable way, leave the other as is, then run repair. + * Verify that repair rebuilds the index on the corrupted collection but does not rebuild + * the index on the unaffected collection. + */ + + let createIndexedColl = function(collName) { + let coll = mongod.getDB(baseName)[collName]; + assert.commandWorked(coll.insert(doc)); + assert.commandWorked(coll.createIndex({a: 1}, {name: indexName})); + assertQueryUsesIndex(coll, doc, indexName); + return coll; + }; + + const corruptedCollName = "corrupted_coll"; + const healthyCollName = "healthy_coll"; + + mongod = startMongodOnExistingPath(dbpath, mongodOptions); + + let corruptedColl = createIndexedColl(corruptedCollName); + let corruptedOriginalIndexUri = getUriForIndex(corruptedColl, indexName); + let corruptedCollUri = getUriForColl(corruptedColl); + + let healthyColl = createIndexedColl(healthyCollName); + let healthyIndexUri = getUriForIndex(healthyColl, indexName); + let healthyCollUri = getUriForColl(healthyColl); + + let corruptedCollFile = dbpath + corruptedCollUri + ".wt"; + + MongoRunner.stopMongod(mongod); + + jsTestLog("corrupting collection file: " + corruptedCollFile); + corruptFile(corruptedCollFile); + + assertRepairSucceeds(dbpath, mongod.port, mongodOptions); + mongod = startMongodOnExistingPath(dbpath, mongodOptions); + + corruptedColl = mongod.getDB(baseName)[corruptedCollName]; + healthyColl = mongod.getDB(baseName)[healthyCollName]; + + assert.neq(corruptedOriginalIndexUri, getUriForIndex(corruptedColl, indexName)); + 
assert.eq(healthyIndexUri, getUriForIndex(healthyColl, indexName)); + + MongoRunner.stopMongod(mongod); }; runTest({}); diff --git a/jstests/disk/wt_repair_inconsistent_index.js b/jstests/disk/wt_repair_inconsistent_index.js new file mode 100644 index 00000000000..d93656c807e --- /dev/null +++ b/jstests/disk/wt_repair_inconsistent_index.js @@ -0,0 +1,71 @@ +/** + * Tests that --repair on WiredTiger correctly and gracefully handles inconsistent indexes. + * + * @tags: [requires_wiredtiger, requires_fcv_44] + */ + +(function() { + +load('jstests/disk/libs/wt_file_helper.js'); + +const baseName = "wt_repair_inconsistent_index"; +const collName = "test"; +const dbpath = MongoRunner.dataPath + baseName + "/"; + +const forceCheckpoint = () => { + assert.commandWorked(db.fsyncLock()); + assert.commandWorked(db.fsyncUnlock()); +}; + +/** + * Run the test by supplying additional paramters to MongoRunner.runMongod with 'mongodOptions'. + */ +let runTest = function(mongodOptions) { + resetDbpath(dbpath); + jsTestLog("Running test with args: " + tojson(mongodOptions)); + + /** + * Test 1. Configure the skipIndexNewRecords failpoint, then insert documents into + * testColl, which will result in an index inconsistency. Run repair and verify + * that the index is rebuilt. 
+ */ + + let mongod = startMongodOnExistingPath(dbpath, mongodOptions); + let testColl = mongod.getDB(baseName)[collName]; + + const doc = {a: 1}; + assert.commandWorked(testColl.insert(doc)); + + const indexName = "a_1"; + assert.commandWorked(testColl.createIndex({a: 1}, {name: indexName})); + assertQueryUsesIndex(testColl, doc, indexName); + + let testCollUri = getUriForColl(testColl); + let indexUri = getUriForIndex(testColl, indexName); + + let db = mongod.getDB(baseName); + assert.commandWorked( + db.adminCommand({configureFailPoint: 'skipIndexNewRecords', mode: 'alwaysOn'})); + assert.commandWorked(testColl.insert({a: 2})); + + // Disable validation because it is expected to not pass due to index inconsistencies. + MongoRunner.stopMongod(mongod, null, {skipValidation: true}); + + assertRepairSucceeds(dbpath, mongod.port, mongodOptions); + mongod = startMongodOnExistingPath(dbpath, mongodOptions); + testColl = mongod.getDB(baseName)[collName]; + + // Repair creates new idents. + assert.neq(indexUri, getUriForIndex(testColl, indexName)); + + assertQueryUsesIndex(testColl, doc, indexName); + assert.eq(testCollUri, getUriForColl(testColl)); + assert.eq(testColl.count(), 2); + + MongoRunner.stopMongod(mongod); +}; + +runTest({}); +runTest({directoryperdb: ""}); +runTest({wiredTigerDirectoryForIndexes: ""}); +})(); diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript index 45f957ebef9..a5cd7c1390d 100644 --- a/src/mongo/db/SConscript +++ b/src/mongo/db/SConscript @@ -923,6 +923,20 @@ env.Library( ) env.Library( + target="rebuild_indexes", + source=[ + "rebuild_indexes.cpp", + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/db/catalog/collection', + ], + LIBDEPS_PRIVATE=[ + 'index_builds_coordinator_interface', + 'catalog/index_key_validate', + ], +) + +env.Library( target="repair_database", source=[ "repair_database.cpp", @@ -935,6 +949,8 @@ env.Library( ], LIBDEPS_PRIVATE=[ 'index_builds_coordinator_interface', + 'rebuild_indexes', + 'catalog/collection_validation', 
'catalog/database_holder', 'catalog/document_validation', 'catalog/index_key_validate', @@ -1084,6 +1100,7 @@ env.Library( "repair_database_and_check_version.cpp", ], LIBDEPS_PRIVATE=[ + 'rebuild_indexes', 'catalog/catalog_helpers', 'catalog/database_holder', 'commands/mongod_fcv', diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript index e5c6db5bf68..ab0b9549256 100644 --- a/src/mongo/db/catalog/SConscript +++ b/src/mongo/db/catalog/SConscript @@ -299,7 +299,7 @@ env.Library( 'collection_catalog', 'database_holder', '$BUILD_DIR/mongo/db/index_builds_coordinator_interface', - '$BUILD_DIR/mongo/db/repair_database', + '$BUILD_DIR/mongo/db/rebuild_indexes', '$BUILD_DIR/mongo/db/service_context', ], ) diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp index 3ba439ae0b2..3df128c471f 100644 --- a/src/mongo/db/catalog/catalog_control.cpp +++ b/src/mongo/db/catalog/catalog_control.cpp @@ -42,7 +42,7 @@ #include "mongo/db/ftdc/ftdc_mongod.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/repair_database.h" +#include "mongo/db/rebuild_indexes.h" #include "mongo/util/log.h" namespace mongo { diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp index 7899df3e356..553af36eaf5 100644 --- a/src/mongo/db/catalog/collection_impl.cpp +++ b/src/mongo/db/catalog/collection_impl.cpp @@ -273,7 +273,6 @@ void CollectionImpl::init(OperationContext* opCtx) { } _validationAction = uassertStatusOK(_parseValidationAction(collectionOptions.validationAction)); _validationLevel = uassertStatusOK(_parseValidationLevel(collectionOptions.validationLevel)); - getIndexCatalog()->init(opCtx).transitional_ignore(); _initialized = true; } diff --git a/src/mongo/db/catalog/collection_validation.cpp b/src/mongo/db/catalog/collection_validation.cpp index 83feee3965c..df087f66649 100644 --- 
a/src/mongo/db/catalog/collection_validation.cpp +++ b/src/mongo/db/catalog/collection_validation.cpp @@ -78,7 +78,7 @@ std::map<std::string, int64_t> _validateIndexesInternalStructure( ValidateResults* results) { std::map<std::string, int64_t> numIndexKeysPerIndex; // Need to use the IndexCatalog here because the 'validateState->indexes' object hasn't been - // constructed yet. + // constructed yet. It must be initialized to ensure we're validating all indexes. const IndexCatalog* indexCatalog = validateState->getCollection()->getIndexCatalog(); const std::unique_ptr<IndexCatalog::IndexIterator> it = indexCatalog->getIndexIterator(opCtx, false); @@ -95,11 +95,16 @@ std::map<std::string, int64_t> _validateIndexesInternalStructure( log(LogComponent::kIndex) << "validating the internal structure of index " << descriptor->indexName() << " on collection " << descriptor->parentNS(); + ValidateResults& curIndexResults = (*indexNsResultsMap)[descriptor->indexName()]; int64_t numValidated; iam->validate(opCtx, &numValidated, &curIndexResults); + if (!curIndexResults.valid) { + results->valid = false; + } + numIndexKeysPerIndex[descriptor->indexName()] = numValidated; } return numIndexKeysPerIndex; @@ -131,11 +136,11 @@ void _validateIndexes(OperationContext* opCtx, int64_t numTraversedKeys; indexValidator->traverseIndex(opCtx, index.get(), &numTraversedKeys, &curIndexResults); - // If we are performing a full validation, we have information on the number of index keys - // validated in _validateIndexesInternalStructure (when we validated the internal structure - // of the index). Check if this is consistent with 'numTraversedKeys' from traverseIndex - // above. - if (validateState->isFullValidate()) { + // If we are performing a full index validation, we have information on the number of index + // keys validated in _validateIndexesInternalStructure (when we validated the internal + // structure of the index). 
Check if this is consistent with 'numTraversedKeys' from + // traverseIndex above. + if (validateState->isFullIndexValidation()) { invariant(opCtx->lockState()->isCollectionLockedForMode(validateState->nss(), MODE_X)); // Ensure that this index was validated in _validateIndexesInternalStructure. @@ -239,7 +244,7 @@ void _reportValidationResults(OperationContext* opCtx, BSONObjBuilder* output) { std::unique_ptr<BSONObjBuilder> indexDetails; - if (validateState->isFullValidate()) { + if (validateState->isFullIndexValidation()) { invariant(opCtx->lockState()->isCollectionLockedForMode(validateState->nss(), MODE_X)); indexDetails = std::make_unique<BSONObjBuilder>(); } @@ -282,6 +287,19 @@ void _reportValidationResults(OperationContext* opCtx, } } +void _reportInvalidResults(OperationContext* opCtx, + ValidateState* validateState, + ValidateResultsMap* indexNsResultsMap, + BSONObjBuilder* keysPerIndex, + ValidateResults* results, + BSONObjBuilder* output, + const string uuidString) { + _reportValidationResults( + opCtx, validateState, indexNsResultsMap, keysPerIndex, results, output); + log(LogComponent::kIndex) << "Validation complete for collection " << validateState->nss() + << uuidString << ". Corruption found."; +} + template <typename T> void addErrorIfUnequal(T stored, T cached, StringData name, ValidateResults* results) { if (stored != cached) { @@ -388,16 +406,17 @@ void _validateCatalogEntry(OperationContext* opCtx, Status validate(OperationContext* opCtx, const NamespaceString& nss, - const bool fullValidate, + ValidateOptions options, bool background, ValidateResults* results, BSONObjBuilder* output) { - invariant(!opCtx->lockState()->isLocked()); - invariant(!(background && fullValidate)); + invariant(!opCtx->lockState()->isLocked() || storageGlobalParams.repair); + // Background validation does not support any type of full validation. 
+ invariant(!(background && (options != ValidateOptions::kNoFullValidation))); // This is deliberately outside of the try-catch block, so that any errors thrown in the // constructor fail the cmd, as opposed to returning OK with valid:false. - ValidateState validateState(opCtx, nss, background, fullValidate); + ValidateState validateState(opCtx, nss, background, options); const auto replCoord = repl::ReplicationCoordinator::get(opCtx); // Check whether we are allowed to read from this node after acquiring our locks. If we are @@ -410,19 +429,37 @@ Status validate(OperationContext* opCtx, ValidateResultsMap indexNsResultsMap; BSONObjBuilder keysPerIndex; // not using subObjStart to be exception safe. - // Full validation code is executed before we open cursors because it may close + // Full record store validation code is executed before we open cursors because it may close // and/or invalidate all open cursors. - if (fullValidate) { + if (options & ValidateOptions::kFullRecordStoreValidation) { invariant(opCtx->lockState()->isCollectionLockedForMode(validateState.nss(), MODE_X)); - // For full validation we use the storage engine's validation functionality. + // For full record store validation we use the storage engine's validation + // functionality. validateState.getCollection()->getRecordStore()->validate(opCtx, results, output); - // For full validation, we validate the internal structure of each index and save the - // number of keys in the index to compare against _validateIndexes()'s count results. + } + if (options & ValidateOptions::kFullIndexValidation) { + invariant(opCtx->lockState()->isCollectionLockedForMode(validateState.nss(), MODE_X)); + // For full index validation, we validate the internal structure of each index and save + // the number of keys in the index to compare against _validateIndexes()'s count + // results. 
numIndexKeysPerIndex = _validateIndexesInternalStructure( opCtx, &validateState, &indexNsResultsMap, results); } + const string uuidString = str::stream() << " (UUID: " << validateState.uuid() << ")"; + + if (!results->valid) { + _reportInvalidResults(opCtx, + &validateState, + &indexNsResultsMap, + &keysPerIndex, + results, + output, + uuidString); + return Status::OK(); + } + // Validate in-memory catalog information with persisted info prior to setting the read // source to kCheckpoint otherwise we'd use a checkpointed MDB catalog file. _validateCatalogEntry(opCtx, &validateState, results); @@ -431,8 +468,6 @@ Status validate(OperationContext* opCtx, // validation during background validation use the same view of the data. validateState.initializeCursors(opCtx); - const string uuidString = str::stream() << " (UUID: " << validateState.uuid() << ")"; - // Validate the record store. log(LogComponent::kIndex) << "validating collection " << validateState.nss() << uuidString; @@ -462,46 +497,70 @@ Status validate(OperationContext* opCtx, _validationIsPausedForTest.store(false); } + if (!results->valid) { + _reportInvalidResults(opCtx, + &validateState, + &indexNsResultsMap, + &keysPerIndex, + results, + output, + uuidString); + return Status::OK(); + } + // Validate indexes and check for mismatches. - if (results->valid) { - _validateIndexes(opCtx, - &validateState, - &keysPerIndex, - &indexValidator, - numIndexKeysPerIndex, - &indexNsResultsMap, - results); - - if (indexConsistency.haveEntryMismatch()) { - log(LogComponent::kIndex) - << "Index inconsistencies were detected on collection " << validateState.nss() - << ". 
Starting the second phase of index validation to gather concise errors."; - _gatherIndexEntryErrors(opCtx, - &validateState, - &indexConsistency, - &indexValidator, - &indexNsResultsMap, - results); - } + _validateIndexes(opCtx, + &validateState, + &keysPerIndex, + &indexValidator, + numIndexKeysPerIndex, + &indexNsResultsMap, + results); + + if (indexConsistency.haveEntryMismatch()) { + log(LogComponent::kIndex) + << "Index inconsistencies were detected on collection " << validateState.nss() + << ". Starting the second phase of index validation to gather concise errors."; + _gatherIndexEntryErrors(opCtx, + &validateState, + &indexConsistency, + &indexValidator, + &indexNsResultsMap, + results); + } + + if (!results->valid) { + _reportInvalidResults(opCtx, + &validateState, + &indexNsResultsMap, + &keysPerIndex, + results, + output, + uuidString); + return Status::OK(); } // Validate index key count. - if (results->valid) { - _validateIndexKeyCount(opCtx, &validateState, &indexValidator, &indexNsResultsMap); + _validateIndexKeyCount(opCtx, &validateState, &indexValidator, &indexNsResultsMap); + + if (!results->valid) { + _reportInvalidResults(opCtx, + &validateState, + &indexNsResultsMap, + &keysPerIndex, + results, + output, + uuidString); + return Status::OK(); } + // At this point, validation is complete and successful. // Report the validation results for the user to see. _reportValidationResults( opCtx, &validateState, &indexNsResultsMap, &keysPerIndex, results, output); - if (!results->valid) { - log(LogComponent::kIndex) << "Validation complete for collection " - << validateState.nss() << uuidString << ". Corruption found."; - } else { - log(LogComponent::kIndex) - << "Validation complete for collection " << validateState.nss() << uuidString - << ". No corruption found."; - } + log(LogComponent::kIndex) << "Validation complete for collection " << validateState.nss() + << uuidString << ". 
No corruption found."; output->append("ns", validateState.nss().ns()); } catch (ExceptionFor<ErrorCodes::CursorNotFound>&) { diff --git a/src/mongo/db/catalog/collection_validation.h b/src/mongo/db/catalog/collection_validation.h index a13fd37d756..8809337edd8 100644 --- a/src/mongo/db/catalog/collection_validation.h +++ b/src/mongo/db/catalog/collection_validation.h @@ -41,11 +41,30 @@ class Status; namespace CollectionValidation { +enum class ValidateOptions { + kNoFullValidation = 0, + + // If the FullRecordStoreValidation option is set, validate() will do a full validation of the + // underlying record store using the storage engine's validation functionality. For WiredTiger + // this results in a call to verify(). + kFullRecordStoreValidation = 1 << 0, + // If set, validate() will validate the internal structure of each index, and checks consistency + // of the number of keys in the index compared to the internal structure. + kFullIndexValidation = 1 << 1, + // Includes all of the full validations above. + kFullValidation = kFullRecordStoreValidation | kFullIndexValidation, +}; + +inline bool operator&(ValidateOptions lhs, ValidateOptions rhs) { + return (static_cast<int>(lhs) & static_cast<int>(rhs)) != 0; +} + /** * Expects the caller to hold no locks. * - * Background validation does not support full validation and so the combination of level = - * 'kValidateTrue' and background = 'True' is prohibited. + * Background validation does not support any type of full validation above. + * The combination of background = true and options of anything other than kNoFullValidation is + * prohibited. 
* * @return OK if the validate run successfully * OK will be returned even if corruption is found @@ -53,7 +72,7 @@ namespace CollectionValidation { */ Status validate(OperationContext* opCtx, const NamespaceString& nss, - const bool fullValidate, + ValidateOptions options, bool background, ValidateResults* results, BSONObjBuilder* output); diff --git a/src/mongo/db/catalog/collection_validation_test.cpp b/src/mongo/db/catalog/collection_validation_test.cpp index 85a648b0f51..4aeeae2b22c 100644 --- a/src/mongo/db/catalog/collection_validation_test.cpp +++ b/src/mongo/db/catalog/collection_validation_test.cpp @@ -93,12 +93,16 @@ public: */ void foregroundValidate( OperationContext* opCtx, bool valid, int numRecords, int numInvalidDocuments, int numErrors) { - std::vector<bool> levels = {false, true}; - for (auto level : levels) { + std::vector<CollectionValidation::ValidateOptions> optionsList = { + CollectionValidation::ValidateOptions::kNoFullValidation, + CollectionValidation::ValidateOptions::kFullRecordStoreValidation, + CollectionValidation::ValidateOptions::kFullIndexValidation, + CollectionValidation::ValidateOptions::kFullValidation}; + for (auto options : optionsList) { ValidateResults validateResults; BSONObjBuilder output; ASSERT_OK(CollectionValidation::validate( - opCtx, kNss, level, /*background*/ false, &validateResults, &output)); + opCtx, kNss, options, /*background*/ false, &validateResults, &output)); ASSERT_EQ(validateResults.valid, valid); ASSERT_EQ(validateResults.errors.size(), static_cast<long unsigned int>(numErrors)); @@ -128,12 +132,13 @@ void backgroundValidate(OperationContext* opCtx, ValidateResults validateResults; BSONObjBuilder output; - ASSERT_OK(CollectionValidation::validate(opCtx, - kNss, - /*fullValidate*/ false, - /*background*/ true, - &validateResults, - &output)); + ASSERT_OK( + CollectionValidation::validate(opCtx, + kNss, + CollectionValidation::ValidateOptions::kNoFullValidation, + /*background*/ true, + 
&validateResults, + &output)); BSONObj obj = output.obj(); ASSERT_EQ(validateResults.valid, valid); diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h index 2c4d61bce4a..ad89d608d5b 100644 --- a/src/mongo/db/catalog/index_builds_manager.h +++ b/src/mongo/db/catalog/index_builds_manager.h @@ -36,7 +36,7 @@ #include "mongo/db/catalog/multi_index_block.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/repair_database.h" +#include "mongo/db/rebuild_indexes.h" #include "mongo/db/repl_index_build_state.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp index 3d8e16e77c8..eafde9b1b86 100644 --- a/src/mongo/db/catalog/index_catalog_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_impl.cpp @@ -73,6 +73,7 @@ #include "mongo/db/storage/storage_engine_init.h" #include "mongo/db/ttl_collection_cache.h" #include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" #include "mongo/util/log.h" #include "mongo/util/represent_as.h" #include "mongo/util/str.h" @@ -80,6 +81,7 @@ namespace mongo { MONGO_FAIL_POINT_DEFINE(skipUnindexingDocumentWhenDeleted); +MONGO_FAIL_POINT_DEFINE(skipIndexNewRecords); using std::endl; using std::string; @@ -122,7 +124,6 @@ Status IndexCatalogImpl::init(OperationContext* opCtx) { } CollectionQueryInfo::get(_collection).init(opCtx); - return Status::OK(); } @@ -1345,6 +1346,10 @@ Status IndexCatalogImpl::_indexRecords(OperationContext* opCtx, IndexCatalogEntry* index, const std::vector<BsonRecord>& bsonRecords, int64_t* keysInsertedOut) { + if (MONGO_unlikely(skipIndexNewRecords.shouldFail())) { + return Status::OK(); + } + const MatchExpression* filter = index->getFilterExpression(); if (!filter) return _indexFilteredRecords(opCtx, index, bsonRecords, keysInsertedOut); diff --git a/src/mongo/db/catalog/validate_adaptor.cpp b/src/mongo/db/catalog/validate_adaptor.cpp index 
3249d53092f..78c4e00239b 100644 --- a/src/mongo/db/catalog/validate_adaptor.cpp +++ b/src/mongo/db/catalog/validate_adaptor.cpp @@ -323,7 +323,7 @@ void ValidateAdaptor::validateIndexKeyCount(const IndexDescriptor* idx, Validate // Do not fail on finding too few index entries compared to collection entries when full:false. bool hasTooFewKeys = false; - bool noErrorOnTooFewKeys = !_validateState->isFullValidate(); + bool noErrorOnTooFewKeys = !_validateState->isFullIndexValidation(); if (idx->isIdIndex() && numTotalKeys != _numRecords) { hasTooFewKeys = (numTotalKeys < _numRecords); @@ -367,7 +367,7 @@ void ValidateAdaptor::validateIndexKeyCount(const IndexDescriptor* idx, Validate } } - if (!_validateState->isFullValidate() && hasTooFewKeys) { + if (!_validateState->isFullIndexValidation() && hasTooFewKeys) { std::string warning = str::stream() << "index " << idx->indexName() << " has fewer keys than records." << " Please re-run the validate command with {full: true}"; diff --git a/src/mongo/db/catalog/validate_state.cpp b/src/mongo/db/catalog/validate_state.cpp index f1924d254f9..c8462c1bc86 100644 --- a/src/mongo/db/catalog/validate_state.cpp +++ b/src/mongo/db/catalog/validate_state.cpp @@ -54,8 +54,8 @@ namespace CollectionValidation { ValidateState::ValidateState(OperationContext* opCtx, const NamespaceString& nss, bool background, - bool fullValidate) - : _nss(nss), _background(background), _fullValidate(fullValidate), _dataThrottle(opCtx) { + ValidateOptions options) + : _nss(nss), _background(background), _options(options), _dataThrottle(opCtx) { // Subsequent re-locks will use the UUID when 'background' is true. 
if (_background) { diff --git a/src/mongo/db/catalog/validate_state.h b/src/mongo/db/catalog/validate_state.h index b05055333a8..d4c642f2c0c 100644 --- a/src/mongo/db/catalog/validate_state.h +++ b/src/mongo/db/catalog/validate_state.h @@ -30,6 +30,7 @@ #pragma once #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_validation.h" #include "mongo/db/catalog/throttle_cursor.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/d_concurrency.h" @@ -57,7 +58,7 @@ public: ValidateState(OperationContext* opCtx, const NamespaceString& nss, bool background, - bool fullValidate); + ValidateOptions options); const NamespaceString& nss() const { return _nss; @@ -67,8 +68,12 @@ public: return _background; } - bool isFullValidate() const { - return _fullValidate; + bool isFullCollectionValidation() const { + return (_options & ValidateOptions::kFullRecordStoreValidation); + } + + bool isFullIndexValidation() const { + return (_options & ValidateOptions::kFullIndexValidation); } const UUID uuid() const { @@ -164,7 +169,7 @@ private: NamespaceString _nss; bool _background; - bool _fullValidate; + ValidateOptions _options; OptionalCollectionUUID _uuid; boost::optional<AutoGetDb> _databaseLock; diff --git a/src/mongo/db/catalog/validate_state_test.cpp b/src/mongo/db/catalog/validate_state_test.cpp index 0ec7a5a5cbb..29ae065c147 100644 --- a/src/mongo/db/catalog/validate_state_test.cpp +++ b/src/mongo/db/catalog/validate_state_test.cpp @@ -108,12 +108,18 @@ TEST_F(ValidateStateTest, NonExistentCollectionShouldThrowNamespaceNotFoundError auto opCtx = operationContext(); ASSERT_THROWS_CODE(CollectionValidation::ValidateState( - opCtx, kNss, /*background*/ false, /*fullValidate*/ false), + opCtx, + kNss, + /*background*/ false, + CollectionValidation::ValidateOptions::kNoFullValidation), AssertionException, ErrorCodes::NamespaceNotFound); ASSERT_THROWS_CODE(CollectionValidation::ValidateState( - opCtx, kNss, /*background*/ true, 
/*fullValidate*/ false), + opCtx, + kNss, + /*background*/ true, + CollectionValidation::ValidateOptions::kNoFullValidation), AssertionException, ErrorCodes::NamespaceNotFound); } @@ -132,7 +138,7 @@ TEST_F(ValidateStateTest, UncheckpointedCollectionShouldThrowCursorNotFoundError // error is thrown when attempting to open cursors. createCollectionAndPopulateIt(opCtx, kNss); CollectionValidation::ValidateState validateState( - opCtx, kNss, /*background*/ true, /*fullValidate*/ false); + opCtx, kNss, /*background*/ true, CollectionValidation::ValidateOptions::kNoFullValidation); ASSERT_THROWS_CODE( validateState.initializeCursors(opCtx), AssertionException, ErrorCodes::CursorNotFound); } @@ -154,7 +160,10 @@ TEST_F(ValidateStateTest, OpenCursorsOnAllIndexes) { { // Open the cursors. CollectionValidation::ValidateState validateState( - opCtx, kNss, /*background*/ false, /*fullValidate*/ false); + opCtx, + kNss, + /*background*/ false, + CollectionValidation::ValidateOptions::kNoFullValidation); validateState.initializeCursors(opCtx); // Make sure all of the indexes were found and cursors opened against them. Including the @@ -169,7 +178,10 @@ TEST_F(ValidateStateTest, OpenCursorsOnAllIndexes) { // Check that foreground validation behaves just the same with checkpoint'ed data. CollectionValidation::ValidateState validateState( - opCtx, kNss, /*background*/ false, /*fullValidate*/ false); + opCtx, + kNss, + /*background*/ false, + CollectionValidation::ValidateOptions::kNoFullValidation); validateState.initializeCursors(opCtx); ASSERT_EQ(validateState.getIndexes().size(), 5); } @@ -193,7 +205,7 @@ TEST_F(ValidateStateTest, OpenCursorsOnCheckpointedIndexes) { // Open the cursors. 
CollectionValidation::ValidateState validateState( - opCtx, kNss, /*background*/ true, /*fullValidate*/ false); + opCtx, kNss, /*background*/ true, CollectionValidation::ValidateOptions::kNoFullValidation); validateState.initializeCursors(opCtx); // Make sure the uncheckpoint'ed indexes are not found. @@ -238,7 +250,7 @@ TEST_F(ValidateStateTest, OpenCursorsOnConsistentlyCheckpointedIndexes) { // The two inconsistent indexes should not be found. // (Note the _id index was create with collection creation, so we have 3 indexes.) CollectionValidation::ValidateState validateState( - opCtx, kNss, /*background*/ true, /*fullValidate*/ false); + opCtx, kNss, /*background*/ true, CollectionValidation::ValidateOptions::kNoFullValidation); validateState.initializeCursors(opCtx); ASSERT_EQ(validateState.getIndexes().size(), 3); } @@ -269,7 +281,10 @@ TEST_F(ValidateStateTest, CursorsAreNotOpenedAgainstCheckpointedIndexesThatWereL // (Note the _id index was create with collection creation, so we have 3 indexes.) { CollectionValidation::ValidateState validateState( - opCtx, kNss, /*background*/ true, /*fullValidate*/ false); + opCtx, + kNss, + /*background*/ true, + CollectionValidation::ValidateOptions::kNoFullValidation); validateState.initializeCursors(opCtx); ASSERT_EQ(validateState.getIndexes().size(), 3); } @@ -279,7 +294,7 @@ TEST_F(ValidateStateTest, CursorsAreNotOpenedAgainstCheckpointedIndexesThatWereL opCtx->recoveryUnit()->waitUntilUnjournaledWritesDurable(opCtx); // provokes a checkpoint. 
CollectionValidation::ValidateState validateState( - opCtx, kNss, /*background*/ true, /*fullValidate*/ false); + opCtx, kNss, /*background*/ true, CollectionValidation::ValidateOptions::kNoFullValidation); validateState.initializeCursors(opCtx); ASSERT_EQ(validateState.getIndexes().size(), 3); } diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp index 09594aa207e..3cdf9353749 100644 --- a/src/mongo/db/commands/validate.cpp +++ b/src/mongo/db/commands/validate.cpp @@ -170,9 +170,12 @@ public: _validationNotifier.notify_all(); }); + auto options = (fullValidate) ? CollectionValidation::ValidateOptions::kFullValidation + : CollectionValidation::ValidateOptions::kNoFullValidation; + ValidateResults validateResults; Status status = CollectionValidation::validate( - opCtx, nss, fullValidate, background, &validateResults, &result); + opCtx, nss, options, background, &validateResults, &result); if (!status.isOK()) { return CommandHelpers::appendCommandStatusNoThrow(result, status); } diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h index a1af0886797..ced2c6c233f 100644 --- a/src/mongo/db/index_builds_coordinator.h +++ b/src/mongo/db/index_builds_coordinator.h @@ -43,7 +43,7 @@ #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/database_index_builds_tracker.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/repair_database.h" +#include "mongo/db/rebuild_indexes.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl_index_build_state.h" #include "mongo/db/storage/durable_catalog.h" diff --git a/src/mongo/db/rebuild_indexes.cpp b/src/mongo/db/rebuild_indexes.cpp new file mode 100644 index 00000000000..8a266c1fde1 --- /dev/null +++ b/src/mongo/db/rebuild_indexes.cpp @@ -0,0 +1,123 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage + +#include "mongo/platform/basic.h" + +#include <algorithm> + +#include "mongo/db/rebuild_indexes.h" + +#include "mongo/base/status.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_key_validate.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/storage/durable_catalog.h" + +namespace mongo { + +StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx, + RecordId catalogId, + std::function<bool(const std::string&)> filter) { + IndexNameObjs ret; + std::vector<std::string>& indexNames = ret.first; + std::vector<BSONObj>& indexSpecs = ret.second; + auto durableCatalog = DurableCatalog::get(opCtx); + { + // Fetch all indexes + durableCatalog->getAllIndexes(opCtx, catalogId, &indexNames); + auto newEnd = + std::remove_if(indexNames.begin(), + indexNames.end(), + [&filter](const std::string& indexName) { return !filter(indexName); }); + indexNames.erase(newEnd, indexNames.end()); + + indexSpecs.reserve(indexNames.size()); + + + for (const auto& name : indexNames) { + BSONObj spec = durableCatalog->getIndexSpec(opCtx, catalogId, name); + using IndexVersion = IndexDescriptor::IndexVersion; + IndexVersion indexVersion = IndexVersion::kV1; + if (auto indexVersionElem = spec[IndexDescriptor::kIndexVersionFieldName]) { + auto indexVersionNum = indexVersionElem.numberInt(); + invariant(indexVersionNum == static_cast<int>(IndexVersion::kV1) || + indexVersionNum == static_cast<int>(IndexVersion::kV2)); + indexVersion = static_cast<IndexVersion>(indexVersionNum); + } + invariant(spec.isOwned()); + indexSpecs.push_back(spec); + + const BSONObj key = spec.getObjectField("key"); + const Status keyStatus = index_key_validate::validateKeyPattern(key, indexVersion); + if (!keyStatus.isOK()) { + return Status( + ErrorCodes::CannotCreateIndex, + str::stream() + << "Cannot rebuild index " << spec << ": 
" << keyStatus.reason() + << " For more info see http://dochub.mongodb.org/core/index-validation"); + } + } + } + + return ret; +} + +Status rebuildIndexesOnCollection(OperationContext* opCtx, + Collection* collection, + const std::vector<BSONObj>& indexSpecs, + RepairData repair) { + // Skip the rest if there are no indexes to rebuild. + if (indexSpecs.empty()) + return Status::OK(); + + // Rebuild the indexes provided by 'indexSpecs'. + IndexBuildsCoordinator* indexBuildsCoord = IndexBuildsCoordinator::get(opCtx); + UUID buildUUID = UUID::gen(); + auto swRebuild = indexBuildsCoord->rebuildIndexesForRecovery( + opCtx, collection->ns(), indexSpecs, buildUUID, repair); + if (!swRebuild.isOK()) { + return swRebuild.getStatus(); + } + + auto [numRecords, dataSize] = swRebuild.getValue(); + + auto rs = collection->getRecordStore(); + + // Update the record store stats after finishing and committing the index builds. + WriteUnitOfWork wuow(opCtx); + rs->updateStatsAfterRepair(opCtx, numRecords, dataSize); + wuow.commit(); + + return Status::OK(); +} + +} // namespace mongo diff --git a/src/mongo/db/rebuild_indexes.h b/src/mongo/db/rebuild_indexes.h new file mode 100644 index 00000000000..1654e359882 --- /dev/null +++ b/src/mongo/db/rebuild_indexes.h @@ -0,0 +1,76 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include <functional> +#include <string> + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/record_id.h" + +namespace mongo { +class Collection; +class OperationContext; + +typedef std::pair<std::vector<std::string>, std::vector<BSONObj>> IndexNameObjs; + +/** + * Returns a pair of parallel vectors. The first item is the index name. The second is the + * `BSONObj` "index spec" with an index name matching the `filter`. + * + * @param filter is a predicate that is passed in an index name, returning true if the index + * should be included in the result. + */ +StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx, + RecordId catalogId, + std::function<bool(const std::string&)> filter = + [](const std::string& indexName) { return true; }); + +/** + * Rebuilds the indexes provided by the 'indexSpecs' on the given collection. + * One example usage is when a 'dropIndex' command is rolled back. The dropped index must be remade. + * When 'repair' is set to kYes, this function will delete corrupt records when found, rather than + * crashing. 
+ */ +enum class RepairData { kYes, kNo }; +Status rebuildIndexesOnCollection(OperationContext* opCtx, + Collection* collection, + const std::vector<BSONObj>& indexSpecs, + RepairData repair); + +/** + * Rebuilds the indexes provided by the 'indexSpecs' on the given collection. + * One example usage is when a 'dropIndex' command is rolled back. The dropped index must be remade. + */ +Status rebuildIndexesOnCollection(OperationContext* opCtx, + Collection* collection, + const std::vector<BSONObj>& indexSpecs); + +} // namespace mongo diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp index 47283e8719b..fa04464064f 100644 --- a/src/mongo/db/repair_database.cpp +++ b/src/mongo/db/repair_database.cpp @@ -42,17 +42,20 @@ #include "mongo/db/background.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_validation.h" #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/catalog/multi_index_block.h" #include "mongo/db/concurrency/write_conflict_exception.h" +#include "mongo/db/db_raii.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/logical_clock.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/rebuild_indexes.h" #include "mongo/db/storage/durable_catalog.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/util/log.h" @@ -60,79 +63,21 @@ namespace mongo { -StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx, - RecordId catalogId, - std::function<bool(const std::string&)> filter) { - IndexNameObjs ret; - std::vector<std::string>& indexNames = ret.first; - std::vector<BSONObj>& indexSpecs = ret.second; - auto durableCatalog = DurableCatalog::get(opCtx); - { - // Fetch all 
indexes - durableCatalog->getAllIndexes(opCtx, catalogId, &indexNames); - auto newEnd = - std::remove_if(indexNames.begin(), - indexNames.end(), - [&filter](const std::string& indexName) { return !filter(indexName); }); - indexNames.erase(newEnd, indexNames.end()); - - indexSpecs.reserve(indexNames.size()); - - - for (const auto& name : indexNames) { - BSONObj spec = durableCatalog->getIndexSpec(opCtx, catalogId, name); - using IndexVersion = IndexDescriptor::IndexVersion; - IndexVersion indexVersion = IndexVersion::kV1; - if (auto indexVersionElem = spec[IndexDescriptor::kIndexVersionFieldName]) { - auto indexVersionNum = indexVersionElem.numberInt(); - invariant(indexVersionNum == static_cast<int>(IndexVersion::kV1) || - indexVersionNum == static_cast<int>(IndexVersion::kV2)); - indexVersion = static_cast<IndexVersion>(indexVersionNum); - } - invariant(spec.isOwned()); - indexSpecs.push_back(spec); - - const BSONObj key = spec.getObjectField("key"); - const Status keyStatus = index_key_validate::validateKeyPattern(key, indexVersion); - if (!keyStatus.isOK()) { - return Status( - ErrorCodes::CannotCreateIndex, - str::stream() - << "Cannot rebuild index " << spec << ": " << keyStatus.reason() - << " For more info see http://dochub.mongodb.org/core/index-validation"); - } - } - } - - return ret; -} - -Status rebuildIndexesOnCollection(OperationContext* opCtx, - Collection* collection, - const std::vector<BSONObj>& indexSpecs, - RepairData repair) { - // Skip the rest if there are no indexes to rebuild. - if (indexSpecs.empty()) - return Status::OK(); - - // Rebuild the indexes provided by 'indexSpecs'. 
- IndexBuildsCoordinator* indexBuildsCoord = IndexBuildsCoordinator::get(opCtx); - UUID buildUUID = UUID::gen(); - auto swRebuild = indexBuildsCoord->rebuildIndexesForRecovery( - opCtx, collection->ns(), indexSpecs, buildUUID, repair); - if (!swRebuild.isOK()) { - return swRebuild.getStatus(); - } - - auto [numRecords, dataSize] = swRebuild.getValue(); - - auto rs = collection->getRecordStore(); +Status rebuildIndexesForNamespace(OperationContext* opCtx, + const NamespaceString& nss, + StorageEngine* engine) { + opCtx->checkForInterrupt(); + auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss); + auto swIndexNameObjs = getIndexNameObjs(opCtx, collection->getCatalogId()); + if (!swIndexNameObjs.isOK()) + return swIndexNameObjs.getStatus(); - // Update the record store stats after finishing and committing the index builds. - WriteUnitOfWork wuow(opCtx); - rs->updateStatsAfterRepair(opCtx, numRecords, dataSize); - wuow.commit(); + std::vector<BSONObj> indexSpecs = swIndexNameObjs.getValue().second; + Status status = rebuildIndexesOnCollection(opCtx, collection, indexSpecs, RepairData::kYes); + if (!status.isOK()) + return status; + engine->flushAllFiles(opCtx, true); return Status::OK(); } @@ -150,23 +95,48 @@ Status repairCollections(OperationContext* opCtx, auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss); Status status = engine->repairRecordStore(opCtx, collection->getCatalogId(), nss); - if (!status.isOK()) + + // If data was modified during repairRecordStore, we know to rebuild indexes without needing + // to run an expensive collection validation. 
+        if (status.code() == ErrorCodes::DataModifiedByRepair) {
+            Status status = rebuildIndexesForNamespace(opCtx, nss, engine);
+            if (!status.isOK()) {
+                return status;
+            }
+            continue;
+        } else if (!status.isOK()) {
             return status;
-        }
+        }
 
-    for (const auto& nss : colls) {
-        opCtx->checkForInterrupt();
-        auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
-        auto swIndexNameObjs = getIndexNameObjs(opCtx, collection->getCatalogId());
-        if (!swIndexNameObjs.isOK())
-            return swIndexNameObjs.getStatus();
+        // Run collection validation to avoid unnecessarily rebuilding indexes on valid collections
+        // with consistent indexes. Initialize the collection prior to validation. Need to lookup
+        // from catalog again because the old collection object was invalidated by
+        // repairRecordStore.
+        collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
+        collection->init(opCtx);
+
+        ValidateResults validateResults;
+        BSONObjBuilder output;
+
+        // Set options to exclude FullRecordStoreValidation because we have already validated the
+        // underlying record store in the call to repairRecordStore above.
+ auto options = CollectionValidation::ValidateOptions::kFullIndexValidation; - std::vector<BSONObj> indexSpecs = swIndexNameObjs.getValue().second; - Status status = rebuildIndexesOnCollection(opCtx, collection, indexSpecs, RepairData::kYes); - if (!status.isOK()) + const bool background = false; + status = CollectionValidation::validate( + opCtx, nss, options, background, &validateResults, &output); + if (!status.isOK()) { return status; + } + + log() << "Collection validation results: " << output.done(); - engine->flushAllFiles(opCtx, true); + if (!validateResults.valid) { + status = rebuildIndexesForNamespace(opCtx, nss, engine); + if (!status.isOK()) { + return status; + } + } } return Status::OK(); } @@ -189,6 +159,9 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const std: auto databaseHolder = DatabaseHolder::get(opCtx); databaseHolder->close(opCtx, dbName); + // Reopening db is necessary for repairCollections. + auto db = databaseHolder->openDb(opCtx, dbName); + auto status = repairCollections(opCtx, engine, dbName); if (!status.isOK()) { severe() << "Failed to repair database " << dbName << ": " << status.reason(); @@ -198,9 +171,6 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const std: // Ensure that we don't trigger an exception when attempting to take locks. UninterruptibleLockGuard noInterrupt(opCtx->lockState()); - // Open the db after everything finishes. - auto db = databaseHolder->openDb(opCtx, dbName); - // Set the minimum snapshot for all Collections in this db. This ensures that readers // using majority readConcern level can only use the collections after their repaired // versions are in the committed view. 
diff --git a/src/mongo/db/repair_database.h b/src/mongo/db/repair_database.h index 4f3ae143bdd..db4e110bcf1 100644 --- a/src/mongo/db/repair_database.h +++ b/src/mongo/db/repair_database.h @@ -43,32 +43,6 @@ class OperationContext; class Status; class StringData; -typedef std::pair<std::vector<std::string>, std::vector<BSONObj>> IndexNameObjs; - -/** - * Returns a pair of parallel vectors. The first item is the index name. The second is the - * `BSONObj` "index spec" with an index name matching the `filter`. - * - * @param filter is a predicate that is passed in an index name, returning true if the index - * should be included in the result. - */ -StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx, - RecordId catalogId, - std::function<bool(const std::string&)> filter = - [](const std::string& indexName) { return true; }); - -/** - * Rebuilds the indexes provided by the 'indexSpecs' on the given collection. - * One example usage is when a 'dropIndex' command is rolled back. The dropped index must be remade. - * When 'repair' is set to kYes, this function will delete corrupt records when found, rather than - * crashing. - */ -enum class RepairData { kYes, kNo }; -Status rebuildIndexesOnCollection(OperationContext* opCtx, - Collection* collection, - const std::vector<BSONObj>& indexSpecs, - RepairData repair); - /** * Repairs a database using a storage engine-specific, best-effort process. 
* Some data may be lost or modified in the process but the output will diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp index 699b2d2fcc5..3897bd82cd4 100644 --- a/src/mongo/db/repair_database_and_check_version.cpp +++ b/src/mongo/db/repair_database_and_check_version.cpp @@ -49,6 +49,7 @@ #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/rebuild_indexes.h" #include "mongo/db/repair_database.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_process.h" diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp index 2d398f1a817..c6ea408d821 100644 --- a/src/mongo/db/repl/idempotency_test_fixture.cpp +++ b/src/mongo/db/repl/idempotency_test_fixture.cpp @@ -598,8 +598,13 @@ CollectionState IdempotencyTest::validate(const NamespaceString& nss) { ValidateResults validateResults; BSONObjBuilder bob; - ASSERT_OK(CollectionValidation::validate( - _opCtx.get(), nss, /*fullValidate=*/true, false, &validateResults, &bob)); + ASSERT_OK( + CollectionValidation::validate(_opCtx.get(), + nss, + CollectionValidation::ValidateOptions::kFullValidation, + false, + &validateResults, + &bob)); ASSERT_TRUE(validateResults.valid); } diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp index a7072f74a45..71cdd7fd74b 100644 --- a/src/mongo/db/storage/storage_engine_impl.cpp +++ b/src/mongo/db/storage/storage_engine_impl.cpp @@ -722,7 +722,8 @@ Status StorageEngineImpl::repairRecordStore(OperationContext* opCtx, auto uuid = collectionCatalog.lookupUUIDByNSS(opCtx, nss).get(); collectionCatalog.deregisterCollection(uuid); _initCollection(opCtx, catalogId, nss, false); - return Status::OK(); + + return status; } std::unique_ptr<TemporaryRecordStore> StorageEngineImpl::makeTemporaryRecordStore( 
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp index 74923ceb7e7..2a5332b1181 100644 --- a/src/mongo/dbtests/validate_tests.cpp +++ b/src/mongo/dbtests/validate_tests.cpp @@ -95,11 +95,14 @@ protected: // checkpoint and see all the new data. _opCtx.recoveryUnit()->waitUntilUnjournaledWritesDurable(&_opCtx); + auto options = (_full) ? CollectionValidation::ValidateOptions::kFullValidation + : CollectionValidation::ValidateOptions::kNoFullValidation; + ValidateResults results; BSONObjBuilder output; ASSERT_OK( - CollectionValidation::validate(&_opCtx, _nss, _full, _background, &results, &output)); + CollectionValidation::validate(&_opCtx, _nss, options, _background, &results, &output)); // Check if errors are reported if and only if valid is set to false. ASSERT_EQ(results.valid, results.errors.empty()); @@ -1207,7 +1210,12 @@ public: BSONObjBuilder output; ASSERT_OK(CollectionValidation::validate( - &_opCtx, _nss, /*fullValidate=*/true, _background, &results, &output)); + &_opCtx, + _nss, + CollectionValidation::ValidateOptions::kFullValidation, + _background, + &results, + &output)); ASSERT_EQ(false, results.valid); ASSERT_EQ(static_cast<size_t>(1), results.errors.size()); @@ -1314,7 +1322,12 @@ public: BSONObjBuilder output; ASSERT_OK(CollectionValidation::validate( - &_opCtx, _nss, /*fullValidate=*/true, _background, &results, &output)); + &_opCtx, + _nss, + CollectionValidation::ValidateOptions::kFullValidation, + _background, + &results, + &output)); ASSERT_EQ(false, results.valid); ASSERT_EQ(static_cast<size_t>(1), results.errors.size()); @@ -1397,7 +1410,12 @@ public: BSONObjBuilder output; ASSERT_OK(CollectionValidation::validate( - &_opCtx, _nss, /*fullValidate=*/true, _background, &results, &output)); + &_opCtx, + _nss, + CollectionValidation::ValidateOptions::kFullValidation, + _background, + &results, + &output)); ASSERT_EQ(false, results.valid); ASSERT_EQ(static_cast<size_t>(2), results.errors.size()); |