summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGregory Wlodarek <gregory.wlodarek@mongodb.com>2017-08-09 14:44:24 -0400
committerGregory Wlodarek <gregory.wlodarek@mongodb.com>2017-08-09 14:44:33 -0400
commit336e2bd92d020f4e2f273fa02c388ff1305f89f2 (patch)
treebd8996b49a33eaf9565b633ddd6f93e6419f064d
parent60ca5426e3903775654c12f874828baf24e05b13 (diff)
downloadmongo-336e2bd92d020f4e2f273fa02c388ff1305f89f2.tar.gz
SERVER-30352 Add a new class that will keep track of the index inconsistencies during validation
-rw-r--r--src/mongo/base/error_codes.err1
-rw-r--r--src/mongo/db/catalog/SConscript8
-rw-r--r--src/mongo/db/catalog/collection.h7
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp17
-rw-r--r--src/mongo/db/catalog/collection_impl.h2
-rw-r--r--src/mongo/db/catalog/collection_mock.h2
-rw-r--r--src/mongo/db/catalog/index_consistency.cpp579
-rw-r--r--src/mongo/db/catalog/index_consistency.h378
-rw-r--r--src/mongo/db/catalog/private/record_store_validate_adaptor.cpp51
-rw-r--r--src/mongo/db/catalog/private/record_store_validate_adaptor.h37
-rw-r--r--src/mongo/db/commands/validate.cpp5
-rw-r--r--src/mongo/db/repl/idempotency_test_fixture.cpp5
-rw-r--r--src/mongo/dbtests/validate_tests.cpp297
13 files changed, 1236 insertions, 153 deletions
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err
index fa980506671..41858174c1d 100644
--- a/src/mongo/base/error_codes.err
+++ b/src/mongo/base/error_codes.err
@@ -218,6 +218,7 @@ error_code("IncompleteTransactionHistory", 217);
error_code("UpdateOperationFailed", 218)
error_code("FTDCPathNotSet", 219)
error_code("FTDCPathAlreadySet", 220)
+error_code("IndexModified", 221)
# Error codes 4000-8999 are reserved.
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index d42ba033db3..0b14758f9cc 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -193,12 +193,13 @@ env.CppUnitTest(
target='uuid_catalog_test',
source=[
'uuid_catalog_test.cpp',
- ],
+ ],
LIBDEPS=[
'uuid_catalog',
+ '$BUILD_DIR/mongo/db/concurrency/lock_manager',
'$BUILD_DIR/mongo/db/service_context',
- ]
- )
+ ],
+)
env.Library(
target='catalog',
@@ -210,6 +211,7 @@ env.Library(
"database_holder_impl.cpp",
"index_catalog_impl.cpp",
"index_catalog_entry_impl.cpp",
+ "index_consistency.cpp",
"index_create_impl.cpp",
"private/record_store_validate_adaptor.cpp",
],
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index d1f08edc0e8..4f87e98b7d4 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -40,6 +40,7 @@
#include "mongo/db/catalog/collection_info_cache.h"
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/cursor_manager.h"
#include "mongo/db/exec/collection_scan_common.h"
#include "mongo/db/logical_session_id.h"
@@ -294,6 +295,8 @@ public:
virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
+ bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) = 0;
@@ -587,9 +590,11 @@ public:
*/
inline Status validate(OperationContext* const opCtx,
const ValidateCmdLevel level,
+ bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* const results,
BSONObjBuilder* const output) {
- return this->_impl().validate(opCtx, level, results, output);
+ return this->_impl().validate(opCtx, level, background, std::move(collLk), results, output);
}
/**
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index b4780bd66a7..a834a6eae22 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/catalog/document_validation.h"
+#include "mongo/db/catalog/index_consistency.h"
#include "mongo/db/catalog/index_create.h"
#include "mongo/db/catalog/namespace_uuid_cache.h"
#include "mongo/db/catalog/uuid_catalog.h"
@@ -1157,6 +1158,8 @@ void _reportValidationResults(OperationContext* opCtx,
Status CollectionImpl::validate(OperationContext* opCtx,
ValidateCmdLevel level,
+ bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) {
dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
@@ -1164,14 +1167,16 @@ Status CollectionImpl::validate(OperationContext* opCtx,
try {
ValidateResultsMap indexNsResultsMap;
BSONObjBuilder keysPerIndex; // not using subObjStart to be exception safe
- RecordStoreValidateAdaptor indexValidator =
- RecordStoreValidateAdaptor(opCtx, level, &_indexCatalog, &indexNsResultsMap);
+ IndexConsistency indexConsistency(
+ opCtx, _this, ns(), _recordStore, std::move(collLk), background);
+ RecordStoreValidateAdaptor indexValidator = RecordStoreValidateAdaptor(
+ opCtx, &indexConsistency, level, &_indexCatalog, &indexNsResultsMap);
// Validate the record store
log(LogComponent::kIndex) << "validating collection " << ns().toString() << endl;
_validateRecordStore(
- opCtx, _recordStore, level, /*somgarg*/ false, &indexValidator, results, output);
+ opCtx, _recordStore, level, background, &indexValidator, results, output);
// Validate indexes and check for mismatches.
if (results->valid) {
@@ -1183,11 +1188,7 @@ Status CollectionImpl::validate(OperationContext* opCtx,
&indexNsResultsMap,
results);
- if (indexValidator.tooFewIndexEntries()) {
- string msg = "one or more indexes contain invalid index entries.";
- results->errors.push_back(msg);
- results->valid = false;
- } else if (indexValidator.tooManyIndexEntries()) {
+ if (indexConsistency.haveEntryMismatch()) {
_markIndexEntriesInvalid(&indexNsResultsMap, results);
}
}
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index 92ba2076dec..4a766faa9aa 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -247,6 +247,8 @@ public:
*/
Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
+ bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) final;
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index e92c81924e2..6c912439b19 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -200,6 +200,8 @@ public:
Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
+ bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
ValidateResults* results,
BSONObjBuilder* output) {
std::abort();
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
new file mode 100644
index 00000000000..6fe74485296
--- /dev/null
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -0,0 +1,579 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include <third_party/murmurhash3/MurmurHash3.h>
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/collection_catalog_entry.h"
+#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/catalog/index_consistency.h"
+#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/query/query_yield.h"
+#include "mongo/db/storage/key_string.h"
+#include "mongo/db/storage/sorted_data_interface.h"
+#include "mongo/util/elapsed_tracker.h"
+
+namespace mongo {
+
+namespace {
+// The number of items we can scan before we must yield.
+static const int kScanLimit = 1000;
+} // namespace
+
+IndexConsistency::IndexConsistency(OperationContext* opCtx,
+ Collection* collection,
+ NamespaceString nss,
+ RecordStore* recordStore,
+ std::unique_ptr<Lock::CollectionLock> collLk,
+ const bool background)
+ : _opCtx(opCtx),
+ _collection(collection),
+ _nss(nss),
+ _recordStore(recordStore),
+ _collLk(std::move(collLk)),
+ _isBackground(background),
+ _tracker(opCtx->getServiceContext()->getFastClockSource(),
+ internalQueryExecYieldIterations.load(),
+ Milliseconds(internalQueryExecYieldPeriodMS.load())) {
+
+ IndexCatalog* indexCatalog = _collection->getIndexCatalog();
+ IndexCatalog::IndexIterator indexIterator = indexCatalog->getIndexIterator(_opCtx, false);
+
+ int indexNumber = 0;
+ while (indexIterator.more()) {
+
+ const IndexDescriptor* descriptor = indexIterator.next();
+ std::string indexNs = descriptor->indexNamespace();
+
+ _indexNumber[descriptor->indexNamespace()] = indexNumber;
+
+ IndexInfo indexInfo;
+
+ indexInfo.isReady =
+ _collection->getCatalogEntry()->isIndexReady(opCtx, descriptor->indexName());
+
+ uint32_t indexNsHash;
+ MurmurHash3_x86_32(indexNs.c_str(), indexNs.size(), 0, &indexNsHash);
+ indexInfo.indexNsHash = indexNsHash;
+ indexInfo.indexScanFinished = false;
+
+ indexInfo.numKeys = 0;
+ indexInfo.numLongKeys = 0;
+ indexInfo.numRecords = 0;
+ indexInfo.numExtraIndexKeys = 0;
+
+ _indexesInfo[indexNumber] = indexInfo;
+
+ indexNumber++;
+ }
+}
+
+void IndexConsistency::addDocKey(const KeyString& ks, int indexNumber) {
+
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return;
+ }
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ _addDocKey_inlock(ks, indexNumber);
+}
+
+void IndexConsistency::removeDocKey(const KeyString& ks, int indexNumber) {
+
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return;
+ }
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ _removeDocKey_inlock(ks, indexNumber);
+}
+
+void IndexConsistency::addIndexKey(const KeyString& ks, int indexNumber) {
+
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return;
+ }
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ _addIndexKey_inlock(ks, indexNumber);
+}
+
+void IndexConsistency::removeIndexKey(const KeyString& ks, int indexNumber) {
+
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return;
+ }
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ _removeIndexKey_inlock(ks, indexNumber);
+}
+
+void IndexConsistency::addLongIndexKey(int indexNumber) {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return;
+ }
+
+ _indexesInfo[indexNumber].numRecords++;
+ _indexesInfo[indexNumber].numLongKeys++;
+}
+
+int64_t IndexConsistency::getNumKeys(int indexNumber) const {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return 0;
+ }
+
+ return _indexesInfo.at(indexNumber).numKeys;
+}
+
+int64_t IndexConsistency::getNumLongKeys(int indexNumber) const {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return 0;
+ }
+
+ return _indexesInfo.at(indexNumber).numLongKeys;
+}
+
+int64_t IndexConsistency::getNumRecords(int indexNumber) const {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return 0;
+ }
+
+ return _indexesInfo.at(indexNumber).numRecords;
+}
+
+bool IndexConsistency::haveEntryMismatch() const {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ for (auto iterator = _indexKeyCount.begin(); iterator != _indexKeyCount.end(); iterator++) {
+ if (iterator->second != 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int64_t IndexConsistency::getNumExtraIndexKeys(int indexNumber) const {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return 0;
+ }
+
+ return _indexesInfo.at(indexNumber).numExtraIndexKeys;
+}
+
+void IndexConsistency::applyChange(const IndexDescriptor* descriptor,
+ const boost::optional<IndexKeyEntry>& indexEntry,
+ ValidationOperation operation) {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+
+ const std::string& indexNs = descriptor->indexNamespace();
+ int indexNumber = getIndexNumber(indexNs);
+ if (indexNumber == -1) {
+ return;
+ }
+
+ // Ignore indexes that weren't ready before we started validation.
+ if (!_indexesInfo.at(indexNumber).isReady) {
+ return;
+ }
+
+ const auto& key = descriptor->keyPattern();
+ const Ordering ord = Ordering::make(key);
+ KeyString::Version version = KeyString::kLatestVersion;
+
+ KeyString ks(version, indexEntry->key, ord, indexEntry->loc);
+
+ if (_stage == ValidationStage::DOCUMENT) {
+ _setYieldAtRecord_inlock(indexEntry->loc);
+ if (_isBeforeLastProcessedRecordId_inlock(indexEntry->loc)) {
+ if (operation == ValidationOperation::INSERT) {
+ if (indexEntry->key.objsize() >=
+ static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
+ // Index keys >= 1024 bytes are not indexed but are stored in the document key
+ // set.
+ _indexesInfo[indexNumber].numRecords++;
+ _indexesInfo[indexNumber].numLongKeys++;
+ } else {
+ _addDocKey_inlock(ks, indexNumber);
+ }
+ } else if (operation == ValidationOperation::REMOVE) {
+ if (indexEntry->key.objsize() >=
+ static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
+ _indexesInfo[indexNumber].numRecords--;
+ _indexesInfo[indexNumber].numLongKeys--;
+ } else {
+ _removeDocKey_inlock(ks, indexNumber);
+ }
+ }
+ }
+ } else if (_stage == ValidationStage::INDEX) {
+
+ // Index entries with key sizes >= 1024 bytes are not indexed.
+ if (indexEntry->key.objsize() >= static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
+ return;
+ }
+
+ if (_isIndexScanning_inlock(indexNumber)) {
+ _setYieldAtIndexEntry_inlock(ks);
+ }
+
+ const bool wasIndexScanStarted =
+ _isIndexFinished_inlock(indexNumber) || _isIndexScanning_inlock(indexNumber);
+ const bool isUpcomingChangeToCurrentIndex =
+ _isIndexScanning_inlock(indexNumber) && !_isBeforeLastProcessedIndexEntry_inlock(ks);
+
+ if (!wasIndexScanStarted || isUpcomingChangeToCurrentIndex) {
+
+            // We haven't started scanning this index namespace yet, so everything
+            // happens after the cursor, OR we are scanning this index namespace
+            // and an event occurred after our cursor
+ if (operation == ValidationOperation::INSERT) {
+ _removeIndexKey_inlock(ks, indexNumber);
+ _indexesInfo.at(indexNumber).numExtraIndexKeys++;
+ } else if (operation == ValidationOperation::REMOVE) {
+ _addIndexKey_inlock(ks, indexNumber);
+ _indexesInfo.at(indexNumber).numExtraIndexKeys--;
+ }
+ }
+ }
+}
+
+
+void IndexConsistency::nextStage() {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (_stage == ValidationStage::DOCUMENT) {
+ _stage = ValidationStage::INDEX;
+ } else if (_stage == ValidationStage::INDEX) {
+ _stage = ValidationStage::NONE;
+ }
+}
+
+const ValidationStage IndexConsistency::getStage() {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ return _stage;
+}
+
+void IndexConsistency::setLastProcessedRecordId(RecordId recordId) {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (!recordId.isNormal()) {
+ _lastProcessedRecordId = boost::none;
+ } else {
+ _lastProcessedRecordId = recordId;
+ }
+}
+
+void IndexConsistency::setLastProcessedIndexEntry(
+ const IndexDescriptor& descriptor, const boost::optional<IndexKeyEntry>& indexEntry) {
+
+ const auto& key = descriptor.keyPattern();
+ const Ordering ord = Ordering::make(key);
+ KeyString::Version version = KeyString::kLatestVersion;
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (!indexEntry) {
+ _lastProcessedIndexEntry.reset();
+ } else {
+ _lastProcessedIndexEntry.reset(
+ new KeyString(version, indexEntry->key, ord, indexEntry->loc));
+ }
+}
+
+void IndexConsistency::notifyStartIndex(int indexNumber) {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return;
+ }
+
+ _lastProcessedIndexEntry.reset(nullptr);
+ _currentIndex = indexNumber;
+}
+
+void IndexConsistency::notifyDoneIndex(int indexNumber) {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (indexNumber < 0 || indexNumber >= static_cast<int>(_indexesInfo.size())) {
+ return;
+ }
+
+ _lastProcessedIndexEntry.reset(nullptr);
+ _currentIndex = -1;
+ _indexesInfo.at(indexNumber).indexScanFinished = true;
+}
+
+int IndexConsistency::getIndexNumber(const std::string& indexNs) {
+
+ auto search = _indexNumber.find(indexNs);
+ if (search != _indexNumber.end()) {
+ return search->second;
+ }
+
+ return -1;
+}
+
+bool IndexConsistency::shouldGetNewSnapshot(const RecordId recordId) const {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (!_yieldAtRecordId) {
+ return false;
+ }
+
+ return _yieldAtRecordId <= recordId;
+}
+
+bool IndexConsistency::shouldGetNewSnapshot(const KeyString& keyString) const {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+ if (!_yieldAtIndexEntry) {
+ return false;
+ }
+
+ return *_yieldAtIndexEntry <= keyString;
+}
+
+void IndexConsistency::relockCollectionWithMode(LockMode mode) {
+ // Release the lock and grab the provided lock mode.
+ _collLk.reset();
+ _collLk.reset(new Lock::CollectionLock(_opCtx->lockState(), _nss.toString(), mode));
+ invariant(_opCtx->lockState()->isCollectionLockedForMode(_nss.toString(), mode));
+
+ // Check if the operation was killed.
+ _opCtx->checkForInterrupt();
+
+ // Ensure it is safe to continue.
+ uassertStatusOK(_throwExceptionIfError());
+}
+
+const bool IndexConsistency::scanLimitHit() {
+
+ stdx::lock_guard<stdx::mutex> lock(_classMutex);
+
+ // We have to yield every so many scans while doing background validation only.
+ return _isBackground && _tracker.intervalHasElapsed();
+}
+
+void IndexConsistency::yield() {
+
+ stdx::unique_lock<stdx::mutex> lock(_classMutex);
+
+ // Reset the yield tracker
+ _tracker.resetLastTime();
+
+ lock.unlock();
+ QueryYield::yieldAllLocks(_opCtx, nullptr, _nss);
+ lock.lock();
+
+ // Check if the operation was killed.
+ _opCtx->checkForInterrupt();
+
+ _yieldAtRecordId = boost::none;
+ _yieldAtIndexEntry.reset();
+
+ // Ensure it is safe to continue.
+ uassertStatusOK(_throwExceptionIfError());
+}
+
+void IndexConsistency::_addDocKey_inlock(const KeyString& ks, int indexNumber) {
+
+ // Ignore indexes that weren't ready before we started validation.
+ if (!_indexesInfo.at(indexNumber).isReady) {
+ return;
+ }
+
+ const uint32_t hash = _hashKeyString(ks, indexNumber);
+ _indexKeyCount[hash]++;
+ _indexesInfo.at(indexNumber).numRecords++;
+}
+
+void IndexConsistency::_removeDocKey_inlock(const KeyString& ks, int indexNumber) {
+
+ // Ignore indexes that weren't ready before we started validation.
+ if (!_indexesInfo.at(indexNumber).isReady) {
+ return;
+ }
+
+ const uint32_t hash = _hashKeyString(ks, indexNumber);
+ _indexKeyCount[hash]--;
+ _indexesInfo.at(indexNumber).numRecords--;
+}
+
+void IndexConsistency::_addIndexKey_inlock(const KeyString& ks, int indexNumber) {
+
+ // Ignore indexes that weren't ready before we started validation.
+ if (!_indexesInfo.at(indexNumber).isReady) {
+ return;
+ }
+
+ const uint32_t hash = _hashKeyString(ks, indexNumber);
+ _indexKeyCount[hash]--;
+ _indexesInfo.at(indexNumber).numKeys++;
+}
+
+void IndexConsistency::_removeIndexKey_inlock(const KeyString& ks, int indexNumber) {
+
+ // Ignore indexes that weren't ready before we started validation.
+ if (!_indexesInfo.at(indexNumber).isReady) {
+ return;
+ }
+
+ const uint32_t hash = _hashKeyString(ks, indexNumber);
+ _indexKeyCount[hash]++;
+ _indexesInfo.at(indexNumber).numKeys--;
+}
+
+bool IndexConsistency::_isIndexFinished_inlock(int indexNumber) const {
+
+ return _indexesInfo.at(indexNumber).indexScanFinished;
+}
+
+bool IndexConsistency::_isIndexScanning_inlock(int indexNumber) const {
+
+ return indexNumber == _currentIndex;
+}
+
+void IndexConsistency::_setYieldAtRecord_inlock(const RecordId recordId) {
+
+ if (_isBeforeLastProcessedRecordId_inlock(recordId)) {
+ return;
+ }
+
+ if (!_yieldAtRecordId || recordId <= _yieldAtRecordId) {
+ _yieldAtRecordId = recordId;
+ }
+}
+
+void IndexConsistency::_setYieldAtIndexEntry_inlock(const KeyString& keyString) {
+
+ if (_isBeforeLastProcessedIndexEntry_inlock(keyString)) {
+ return;
+ }
+
+ if (!_yieldAtIndexEntry || keyString <= *_yieldAtIndexEntry) {
+ KeyString::Version version = KeyString::kLatestVersion;
+ _yieldAtIndexEntry.reset(new KeyString(version));
+ _yieldAtIndexEntry->resetFromBuffer(keyString.getBuffer(), keyString.getSize());
+ }
+}
+
+bool IndexConsistency::_isBeforeLastProcessedRecordId_inlock(RecordId recordId) const {
+
+ if (_lastProcessedRecordId && recordId <= _lastProcessedRecordId) {
+ return true;
+ }
+
+ return false;
+}
+
+bool IndexConsistency::_isBeforeLastProcessedIndexEntry_inlock(const KeyString& keyString) const {
+
+ if (_lastProcessedIndexEntry && keyString <= *_lastProcessedIndexEntry) {
+ return true;
+ }
+
+ return false;
+}
+
+uint32_t IndexConsistency::_hashKeyString(const KeyString& ks, int indexNumber) const {
+
+ uint32_t indexNsHash = _indexesInfo.at(indexNumber).indexNsHash;
+ MurmurHash3_x86_32(
+ ks.getTypeBits().getBuffer(), ks.getTypeBits().getSize(), indexNsHash, &indexNsHash);
+ MurmurHash3_x86_32(ks.getBuffer(), ks.getSize(), indexNsHash, &indexNsHash);
+ return indexNsHash % (1U << 22);
+}
+
+Status IndexConsistency::_throwExceptionIfError() {
+
+ Database* database = dbHolder().get(_opCtx, _nss.db());
+
+ // Ensure the database still exists.
+ if (!database) {
+ return Status(ErrorCodes::NamespaceNotFound,
+ "The database was dropped during background validation");
+ }
+
+ Collection* collection = database->getCollection(_opCtx, _nss);
+
+ // Ensure the collection still exists.
+ if (!collection) {
+ return Status(ErrorCodes::NamespaceNotFound,
+ "The collection was dropped during background validation");
+ }
+
+ // Ensure no indexes were removed or added.
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
+ IndexCatalog::IndexIterator indexIterator = indexCatalog->getIndexIterator(_opCtx, false);
+ int numRelevantIndexes = 0;
+
+ while (indexIterator.more()) {
+ const IndexDescriptor* descriptor = indexIterator.next();
+ int indexNumber = getIndexNumber(descriptor->indexNamespace());
+ if (indexNumber == -1) {
+ // Allow the collection scan to finish to verify that all the records are valid BSON.
+ if (_stage != ValidationStage::DOCUMENT) {
+ // An index was added.
+ return Status(ErrorCodes::IndexModified,
+ "An index was added during background validation");
+ }
+ } else {
+ // Ignore indexes that weren't ready
+ if (_indexesInfo.at(indexNumber).isReady) {
+ numRelevantIndexes++;
+ }
+ }
+ }
+
+ if (numRelevantIndexes != static_cast<int>(_indexesInfo.size())) {
+ // Allow the collection scan to finish to verify that all the records are valid BSON.
+ if (_stage != ValidationStage::DOCUMENT) {
+ // An index was dropped.
+ return Status(ErrorCodes::IndexModified,
+ "An index was dropped during background validation");
+ }
+ }
+
+ return Status::OK();
+}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/index_consistency.h b/src/mongo/db/catalog/index_consistency.h
new file mode 100644
index 00000000000..09e9745543c
--- /dev/null
+++ b/src/mongo/db/catalog/index_consistency.h
@@ -0,0 +1,378 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/storage/key_string.h"
+#include "mongo/db/storage/sorted_data_interface.h"
+#include "mongo/util/elapsed_tracker.h"
+
+namespace mongo {
+
+/**
+ * The ValidationStage allows the IndexConsistency class to perform
+ * the correct operations that depend on where we are in the validation.
+ */
+enum class ValidationStage { DOCUMENT, INDEX, NONE };
+
+/**
+ * The ValidationOperation is used by classes using the IndexObserver to let us know what operation
+ * was associated with it.
+ * The `UPDATE` operation can be seen as two independent operations (`REMOVE` operation followed
+ * by an `INSERT` operation).
+ */
+enum class ValidationOperation { INSERT, REMOVE };
+
+/**
+ * The IndexConsistency class is used to keep track of the index consistency.
+ * It does this by using the index keys from index entries and index keys generated from the
+ * document to ensure there is a one-to-one mapping for each key.
+ * In addition, an IndexObserver class can be hooked into the IndexAccessMethod to inform
+ * this class about changes to the indexes during a validation and compensate for them.
+ */
+
+/**
+ * Contains all the index information and stats throughout the validation.
+ */
+struct IndexInfo {
+ // Informs us if the index was ready or not for consumption during the start of validation.
+ bool isReady;
+    // Contains the pre-computed hash of the index namespace.
+ uint32_t indexNsHash;
+ // True if the index has finished scanning from the index scan stage, otherwise false.
+ bool indexScanFinished;
+ // The number of index entries belonging to the index.
+ int64_t numKeys;
+ // The number of long keys that are not indexed for the index.
+ int64_t numLongKeys;
+    // The number of records that have a key in their document that referenced back to
+    // this index.
+ int64_t numRecords;
+    // Keeps track of how many index entries were removed (-1) and added (+1) after the
+ // point of validity was set for this index.
+ int64_t numExtraIndexKeys;
+};
+
+class IndexConsistency final {
+public:
+ IndexConsistency(OperationContext* opCtx,
+ Collection* collection,
+ NamespaceString nss,
+ RecordStore* recordStore,
+ std::unique_ptr<Lock::CollectionLock> collLk,
+ const bool background);
+
+ /**
+ * Helper functions for `_addDocKey`, `_removeDocKey`, `_addIndexKey`,
+ * and `_removeIndexKey` for concurrency control.
+ */
+ void addDocKey(const KeyString& ks, int indexNumber);
+ void removeDocKey(const KeyString& ks, int indexNumber);
+ void addIndexKey(const KeyString& ks, int indexNumber);
+ void removeIndexKey(const KeyString& ks, int indexNumber);
+
+ /**
+     * Add one to the `numLongKeys` count of the IndexInfo for the given `indexNumber`.
+ * This is required because index keys > `KeyString::kMaxKeyBytes` are not indexed.
+ */
+ void addLongIndexKey(int indexNumber);
+
+ /**
+ * Returns the number of index entries for the given `indexNs`.
+ */
+ int64_t getNumKeys(int indexNumber) const;
+
+ /**
+ * Returns the number of long keys that were not indexed for the given `indexNs`.
+ */
+ int64_t getNumLongKeys(int indexNumber) const;
+
+ /**
+ * Return the number of records with keys for the given `indexNs`.
+ */
+ int64_t getNumRecords(int indexNumber) const;
+
+ /**
+ * Returns true if any value in the `_indexKeyCount` map is not equal to 0, otherwise
+ * return false.
+ */
+ bool haveEntryMismatch() const;
+
+ /**
+ * Index entries may be added or removed by concurrent writes during the index scan phase,
+ * after establishing the point of validity. We need to account for these additions and
+ * removals so that when we validate the index key count, we also have a pre-image of the
+ * index counts and won't get incorrect results because of the extra index entries we may or
+ * may not have scanned.
+ */
+ int64_t getNumExtraIndexKeys(int indexNumber) const;
+
+ /**
+ * This is the entry point for the IndexObserver to apply its observed changes
+ * while it is listening for changes in the IndexAccessMethod.
+ *
+ * This method ensures that during the collection scan stage, inserted, removed and
+ * updated documents are reflected in the index key counts.
+ * It does this by:
+ * 1) Setting the yield point for the collection scan to inform us when we should
+ * get a new snapshot so we won't scan stale records.
+ * 2) Calling the appropriate `addDocKey` and `removeDocKey` functions if the
+ * record comes before or equal to our last processed RecordId.
+ *
+ * The IndexObserver will call this method while it is observing changes during
+ * the index scan stage of the collection validation. It ensures we maintain
+ * a pre-image of the indexes since we established the point of validity, which
+ * was determined when the collection scan stage completed.
+ * It does this by:
+ * 1) Setting the yield point for the index scan to inform us when we should get
+ * a new snapshot so we won't scan stale index entries. The difference between
+ * this and the collection scan is that it will only set the yield point for the
+ * index that is currently being scanned, since when we start the next index, we
+ * will yield before we begin and we would have the latest snapshot.
+ * 2) Calling the appropriate `addIndexKey` and `removeIndexKey` functions for indexes
+ * that haven't started scanning and are not finished, or they are scanning the
+ * index and the index changes are after the last processed index entry.
+ * 3) In addition, we maintain the number of external index changes here so that
+ * after we finish the index scan, we can remove the extra number of operations
+ * that happened after the point of validity.
+ */
+ void applyChange(const IndexDescriptor* descriptor,
+ const boost::optional<IndexKeyEntry>& indexEntry,
+ ValidationOperation operation);
+
+ /**
+ * Moves the `_stage` variable to the next corresponding stage in the following order:
+ * `DOCUMENT` -> `INDEX`
+ * `INDEX` -> `NONE`
+ * `NONE` -> `NONE`
+ */
+ void nextStage();
+
+ /**
+ * Returns the `_stage` that the validation is on.
+ */
+ const ValidationStage getStage();
+
+ /**
+ * Sets `_lastProcessedRecordId` to `recordId`.
+ */
+ void setLastProcessedRecordId(RecordId recordId);
+
+ /**
+ * Sets `_lastProcessedIndexEntry` to the KeyString of `indexEntry`.
+ */
+ void setLastProcessedIndexEntry(const IndexDescriptor& descriptor,
+ const boost::optional<IndexKeyEntry>& indexEntry);
+
+ /**
+ * Informs the IndexConsistency instance that the index scan is beginning to scan the index
+ * with namespace `indexNs`. This gives us a chance to clean up after the previous index and
+ * setup for the new index.
+ */
+ void notifyStartIndex(int indexNumber);
+
+ /**
+ * Informs the IndexConsistency instance that the index scan has finished scanning the index
+ * with namespace `indexNs`. This allows us to clean up just like in `notifyStartIndex` and to
+ * set the index to a finished state so that the hooks are prevented from affecting it.
+ */
+ void notifyDoneIndex(int indexNumber);
+
+ /**
+     * Returns the index number for the corresponding index namespace.
+ */
+ int getIndexNumber(const std::string& indexNs);
+
+ /**
+     * Returns true if a new snapshot should be acquired.
+ * If the `recordId` is equal to or greater than `_yieldAtRecordId` then we must get
+ * a new snapshot otherwise we will use stale data.
+ * Otherwise we do not need a new snapshot and can continue with the collection scan.
+ */
+ bool shouldGetNewSnapshot(const RecordId recordId) const;
+
+ /**
+     * Returns true if a new snapshot should be acquired.
+ * If the `keyString` is equal to or greater than `_yieldAtIndexEntry` then we must get
+ * a new snapshot otherwise we will use stale data.
+ * Otherwise we do not need a new snapshot and can continue with the index scan.
+ */
+ bool shouldGetNewSnapshot(const KeyString& keyString) const;
+
+ /**
+     * Gives up the lock that the collection is currently held in and requests the
+     * collection again in LockMode `mode`.
+ */
+ void relockCollectionWithMode(LockMode mode);
+
+ /**
+     * Returns true if the ElapsedTracker says it's time to yield during background validation.
+ */
+ const bool scanLimitHit();
+
+ /**
+ * Yields the locks temporarily to allow Global/DB/Collection operations
+ * to perform their work that they couldn't while we held our lock.
+ */
+ void yield();
+
+private:
+ OperationContext* _opCtx;
+ Collection* _collection;
+ const NamespaceString _nss;
+ const RecordStore* _recordStore;
+ std::unique_ptr<Lock::CollectionLock> _collLk;
+ const bool _isBackground;
+ ElapsedTracker _tracker;
+
+    // We map the hashed KeyString values to a bucket which contains the count of how many
+ // index keys and document keys we've seen in each bucket.
+ // Count rules:
+ // - If the count is 0 in the bucket, we have index consistency for
+ // KeyStrings that mapped to it
+ // - If the count is > 0 in the bucket at the end of the validation pass, then there
+ // are too few index entries.
+ // - If the count is < 0 in the bucket at the end of the validation pass, then there
+ // are too many index entries.
+ std::map<uint32_t, uint32_t> _indexKeyCount;
+
+ // Contains the corresponding index number for each index namespace
+ std::map<std::string, int> _indexNumber;
+
+ // A mapping of index numbers to IndexInfo
+ std::map<int, IndexInfo> _indexesInfo;
+
+ // RecordId of the last processed document during the collection scan.
+ boost::optional<RecordId> _lastProcessedRecordId = boost::none;
+
+ // KeyString of the last processed index entry during the index scan.
+ std::unique_ptr<KeyString> _lastProcessedIndexEntry = nullptr;
+
+ // The current index namespace being scanned in the index scan phase.
+ int _currentIndex = -1;
+
+ // The stage that the validation is currently on.
+ ValidationStage _stage = ValidationStage::DOCUMENT;
+
+ // Contains the RecordId of when we should yield collection scan.
+ boost::optional<RecordId> _yieldAtRecordId = boost::none;
+
+ // Contains the KeyString of when we should yield during the index scan.
+ std::unique_ptr<KeyString> _yieldAtIndexEntry = nullptr;
+
+ // Threshold for the number of errors to record before returning "There are too many errors".
+ static const int _kErrorThreshold = 100;
+
+ // The current number of errors that are recorded.
+ int _numErrorsRecorded = 0;
+
+ // Only one thread can use the class at a time
+ mutable stdx::mutex _classMutex;
+
+ /**
+ * Given the document's key KeyString, increment the corresponding `_indexKeyCount`
+ * by hashing it.
+ */
+ void _addDocKey_inlock(const KeyString& ks, int indexNumber);
+
+ /**
+ * Given the document's key KeyString, decrement the corresponding `_indexKeyCount`
+ * by hashing it.
+ */
+ void _removeDocKey_inlock(const KeyString& ks, int indexNumber);
+
+ /**
+ * Given the index entry's KeyString, decrement the corresponding `_indexKeyCount`
+ * by hashing it.
+ */
+ void _addIndexKey_inlock(const KeyString& ks, int indexNumber);
+
+ /**
+ * Given the index entry's KeyString, increment the corresponding `_indexKeyCount`
+ * by hashing it.
+ */
+ void _removeIndexKey_inlock(const KeyString& ks, int indexNumber);
+
+ /**
+ * Returns true if the index for the given `indexNs` has finished being scanned by
+ * the validation, otherwise it returns false.
+ */
+ bool _isIndexFinished_inlock(int indexNumber) const;
+
+ /**
+ * Returns true if this is the current `indexNs` being scanned
+ * by validation, otherwise it returns false.
+ */
+ bool _isIndexScanning_inlock(int indexNumber) const;
+
+ /**
+ * Allows the IndexObserver to set a yield point at `recordId` so that during the collection
+ * scan we must yield before processing the record. This is a preventive measure so the
+ * collection scan doesn't scan stale records.
+ */
+ void _setYieldAtRecord_inlock(const RecordId recordId);
+
+ /**
+ * Allows the IndexObserver to set a yield point at the KeyString of `indexEntry` so that
+ * during the index scan we must yield before processing the index entry.
+ * This is a preventive measure so the index scan doesn't scan stale index entries.
+ */
+ void _setYieldAtIndexEntry_inlock(const KeyString& keyString);
+
+ /**
+ * Returns true if the `recordId` is before or equal to the last processed
+ * RecordId.
+ */
+ bool _isBeforeLastProcessedRecordId_inlock(RecordId recordId) const;
+
+ /**
+ * Returns true if the `keyString` is before or equal to the last processed
+ * index entry.
+ */
+ bool _isBeforeLastProcessedIndexEntry_inlock(const KeyString& keyString) const;
+
+ /**
+ * Returns a hashed value from the given KeyString and index namespace.
+ */
+ uint32_t _hashKeyString(const KeyString& ks, int indexNumbers) const;
+
+ /**
+ * Used alongside `yield()` and `relockCollectionWithMode()` to ensure that after the execution
+ * of them it is safe to continue validating.
+ * Validation can be stopped for a number of reasons including:
+ * 1) The database was dropped.
+ * 2) The collection was dropped.
+ * 3) An index was added or removed in the collection being validated.
+ * 4) The operation was killed.
+ */
+ Status _throwExceptionIfError();
+
+}; // IndexConsistency
+} // namespace mongo
diff --git a/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp b/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
index 8b978820b9b..d7d8ca2c1ba 100644
--- a/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
+++ b/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
@@ -32,10 +32,9 @@
#include "mongo/db/catalog/private/record_store_validate_adaptor.h"
-#include <third_party/murmurhash3/MurmurHash3.h>
-
#include "mongo/bson/bsonobj.h"
#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog/index_consistency.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/operation_context.h"
@@ -46,14 +45,6 @@
namespace mongo {
-namespace {
-uint32_t hashIndexEntry(KeyString& ks, uint32_t hash) {
- MurmurHash3_x86_32(ks.getTypeBits().getBuffer(), ks.getTypeBits().getSize(), hash, &hash);
- MurmurHash3_x86_32(ks.getBuffer(), ks.getSize(), hash, &hash);
- return hash % kKeyCountTableSize;
-}
-}
-
Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
const RecordData& record,
size_t* dataSize) {
@@ -76,6 +67,7 @@ Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
while (i.more()) {
const IndexDescriptor* descriptor = i.next();
const std::string indexNs = descriptor->indexNamespace();
+ int indexNumber = _indexConsistency->getIndexNumber(indexNs);
ValidateResults curRecordResults;
const IndexAccessMethod* iam = _indexCatalog->getIndex(descriptor);
@@ -105,26 +97,19 @@ Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
curRecordResults.valid = false;
}
- uint32_t indexNsHash;
const auto& pattern = descriptor->keyPattern();
const Ordering ord = Ordering::make(pattern);
- MurmurHash3_x86_32(indexNs.c_str(), indexNs.size(), 0, &indexNsHash);
for (const auto& key : documentKeySet) {
- if (key.objsize() >= IndexKeyMaxSize) {
+ if (key.objsize() >= static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
// Index keys >= 1024 bytes are not indexed.
- _longKeys[indexNs]++;
+ _indexConsistency->addLongIndexKey(indexNumber);
continue;
}
// We want to use the latest version of KeyString here.
KeyString ks(KeyString::kLatestVersion, key, ord, recordId);
- uint32_t indexEntryHash = hashIndexEntry(ks, indexNsHash);
-
- if ((*_ikc)[indexEntryHash] == 0) {
- _indexKeyCountTableNumEntries++;
- }
- (*_ikc)[indexEntryHash]++;
+ _indexConsistency->addDocKey(ks, indexNumber);
}
(*_indexNsResultsMap)[indexNs] = curRecordResults;
}
@@ -136,11 +121,9 @@ void RecordStoreValidateAdaptor::traverseIndex(const IndexAccessMethod* iam,
ValidateResults* results,
int64_t* numTraversedKeys) {
auto indexNs = descriptor->indexNamespace();
+ int indexNumber = _indexConsistency->getIndexNumber(indexNs);
int64_t numKeys = 0;
- uint32_t indexNsHash;
- MurmurHash3_x86_32(indexNs.c_str(), indexNs.size(), 0, &indexNsHash);
-
const auto& key = descriptor->keyPattern();
const Ordering ord = Ordering::make(key);
KeyString::Version version = KeyString::kLatestVersion;
@@ -164,26 +147,13 @@ void RecordStoreValidateAdaptor::traverseIndex(const IndexAccessMethod* iam,
results->valid = false;
}
- // Cache the index keys to cross-validate with documents later.
- uint32_t keyHash = hashIndexEntry(*indexKeyString, indexNsHash);
- uint64_t& indexEntryCount = (*_ikc)[keyHash];
- if (indexEntryCount != 0) {
- indexEntryCount--;
- dassert(indexEntryCount >= 0);
- if (indexEntryCount == 0) {
- _indexKeyCountTableNumEntries--;
- }
- } else {
- _hasDocWithoutIndexEntry = true;
- results->valid = false;
- }
- numKeys++;
+ _indexConsistency->addIndexKey(*indexKeyString, indexNumber);
+ numKeys++;
isFirstEntry = false;
prevIndexKeyString.swap(indexKeyString);
}
- _keyCounts[indexNs] = numKeys;
*numTraversedKeys = numKeys;
}
@@ -245,8 +215,9 @@ void RecordStoreValidateAdaptor::validateIndexKeyCount(IndexDescriptor* idx,
int64_t numRecs,
ValidateResults& results) {
const std::string indexNs = idx->indexNamespace();
- int64_t numIndexedKeys = _keyCounts[indexNs];
- int64_t numLongKeys = _longKeys[indexNs];
+ int indexNumber = _indexConsistency->getIndexNumber(indexNs);
+ int64_t numIndexedKeys = _indexConsistency->getNumKeys(indexNumber);
+ int64_t numLongKeys = _indexConsistency->getNumLongKeys(indexNumber);
auto totalKeys = numLongKeys + numIndexedKeys;
bool hasTooFewKeys = false;
diff --git a/src/mongo/db/catalog/private/record_store_validate_adaptor.h b/src/mongo/db/catalog/private/record_store_validate_adaptor.h
index 2cdb21ad429..d404e5e11be 100644
--- a/src/mongo/db/catalog/private/record_store_validate_adaptor.h
+++ b/src/mongo/db/catalog/private/record_store_validate_adaptor.h
@@ -36,10 +36,10 @@
namespace mongo {
+class IndexConsistency;
+
namespace {
-const uint32_t kKeyCountTableSize = 1U << 22;
-using IndexKeyCountTable = std::array<uint64_t, kKeyCountTableSize>;
using ValidateResultsMap = std::map<std::string, ValidateResults>;
}
@@ -50,12 +50,13 @@ using ValidateResultsMap = std::map<std::string, ValidateResults>;
class RecordStoreValidateAdaptor : public ValidateAdaptor {
public:
RecordStoreValidateAdaptor(OperationContext* opCtx,
+ IndexConsistency* indexConsistency,
ValidateCmdLevel level,
IndexCatalog* ic,
ValidateResultsMap* irm)
- : _ikc(stdx::make_unique<IndexKeyCountTable>()),
- _opCtx(opCtx),
+ : _opCtx(opCtx),
+ _indexConsistency(indexConsistency),
_level(level),
_indexCatalog(ic),
_indexNsResultsMap(irm) {}
@@ -89,33 +90,9 @@ public:
*/
void validateIndexKeyCount(IndexDescriptor* idx, int64_t numRecs, ValidateResults& results);
- /**
- * Returns true if there are too many index entries, otherwise return false.
- */
- bool tooManyIndexEntries() const {
- return _indexKeyCountTableNumEntries != 0;
- }
-
- /**
- * Returns true if there are too few index entries, which happens when a document doesn't have
- * and index entry, otherwise return false.
- */
- bool tooFewIndexEntries() const {
- return _hasDocWithoutIndexEntry;
- }
-
-
private:
- std::map<std::string, int64_t> _longKeys;
- std::map<std::string, int64_t> _keyCounts;
- std::unique_ptr<IndexKeyCountTable> _ikc;
-
- uint32_t _indexKeyCountTableNumEntries = 0;
- bool _hasDocWithoutIndexEntry = false;
-
- const int IndexKeyMaxSize = 1024; // this goes away with SERVER-3372
-
- OperationContext* _opCtx; // Not owned.
+ OperationContext* _opCtx; // Not owned.
+ IndexConsistency* _indexConsistency; // Not owned.
ValidateCmdLevel _level;
IndexCatalog* _indexCatalog; // Not owned.
ValidateResultsMap* _indexNsResultsMap; // Not owned.
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 76a77abfaf4..90789bd51c1 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -125,7 +125,7 @@ public:
}
AutoGetDb ctx(opCtx, nss.db(), MODE_IX);
- Lock::CollectionLock collLk(opCtx->lockState(), nss.ns(), MODE_X);
+ auto collLk = stdx::make_unique<Lock::CollectionLock>(opCtx->lockState(), nss.ns(), MODE_X);
Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(opCtx, nss) : NULL;
if (!collection) {
if (ctx.getDb() && ctx.getDb()->getViewCatalog()->lookup(opCtx, nss.ns())) {
@@ -192,7 +192,8 @@ public:
});
ValidateResults results;
- Status status = collection->validate(opCtx, level, &results, &result);
+ Status status =
+ collection->validate(opCtx, level, background, std::move(collLk), &results, &result);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp
index d4848b36ba2..579d00487d5 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.cpp
+++ b/src/mongo/db/repl/idempotency_test_fixture.cpp
@@ -282,7 +282,10 @@ CollectionState IdempotencyTest::validate() {
ValidateResults validateResults;
BSONObjBuilder bob;
- ASSERT_OK(collection->validate(_opCtx.get(), kValidateFull, &validateResults, &bob));
+ Lock::DBLock lk(_opCtx.get(), nss.db(), MODE_IX);
+ auto lock = stdx::make_unique<Lock::CollectionLock>(_opCtx->lockState(), nss.ns(), MODE_X);
+ ASSERT_OK(collection->validate(
+ _opCtx.get(), kValidateFull, false, std::move(lock), &validateResults, &bob));
ASSERT_TRUE(validateResults.valid);
IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(_opCtx.get());
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 0337435a98e..189ff9e788d 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -58,23 +58,45 @@ static const char* const _ns = "unittests.validate_tests";
*/
class ValidateBase {
public:
- explicit ValidateBase(bool full) : _ctx(&_opCtx, _ns), _client(&_opCtx), _full(full) {
+ explicit ValidateBase(bool full, bool background)
+ : _client(&_opCtx),
+ _full(full),
+ _background(background),
+ _nss(_ns),
+ _autoDb(nullptr),
+ _db(nullptr) {
_client.createCollection(_ns);
+ {
+ AutoGetCollection autoGetCollection(&_opCtx, _nss, MODE_X);
+ _isInRecordIdOrder =
+ autoGetCollection.getCollection()->getRecordStore()->isInRecordIdOrder();
+ }
}
+
~ValidateBase() {
_client.dropCollection(_ns);
getGlobalServiceContext()->unsetKillAllOperations();
}
- Collection* collection() {
- return _ctx.getCollection();
- }
protected:
bool checkValid() {
ValidateResults results;
BSONObjBuilder output;
- ASSERT_OK(collection()->validate(
- &_opCtx, _full ? kValidateFull : kValidateIndex, &results, &output));
+
+ lockDb(MODE_IX);
+ invariant(_opCtx.lockState()->isDbLockedForMode(_nss.db(), MODE_IX));
+ std::unique_ptr<Lock::CollectionLock> lock =
+ stdx::make_unique<Lock::CollectionLock>(_opCtx.lockState(), _nss.ns(), MODE_X);
+ invariant(_opCtx.lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
+
+ Database* db = _autoDb.get()->getDb();
+ ASSERT_OK(db->getCollection(&_opCtx, _nss)
+ ->validate(&_opCtx,
+ _full ? kValidateFull : kValidateIndex,
+ _background,
+ std::move(lock),
+ &results,
+ &output));
// Check if errors are reported if and only if valid is set to false.
ASSERT_EQ(results.valid, results.errors.empty());
@@ -92,28 +114,52 @@ protected:
return results.valid;
}
+ void lockDb(LockMode mode) {
+ _autoDb.reset();
+ invariant(_opCtx.lockState()->isDbLockedForMode(_nss.db(), MODE_NONE));
+ _autoDb.reset(new AutoGetDb(&_opCtx, _nss.db().toString(), mode));
+ invariant(_opCtx.lockState()->isDbLockedForMode(_nss.db(), mode));
+ _db = _autoDb.get()->getDb();
+ }
+
+ void releaseDb() {
+ _autoDb.reset();
+ _db = nullptr;
+ invariant(_opCtx.lockState()->isDbLockedForMode(_nss.db(), MODE_NONE));
+ }
+
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
- OldClientWriteContext _ctx;
DBDirectClient _client;
bool _full;
+ bool _background;
+ const NamespaceString _nss;
+ unique_ptr<AutoGetDb> _autoDb;
+ Database* _db;
+ bool _isInRecordIdOrder;
};
-template <bool full>
+template <bool full, bool background>
class ValidateIdIndexCount : public ValidateBase {
public:
- ValidateIdIndexCount() : ValidateBase(full) {}
+ ValidateIdIndexCount() : ValidateBase(full, background) {}
void run() {
+
+        // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert records {_id: 1} and {_id: 2} and check it's valid.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
Collection* coll;
RecordId id1;
{
OpDebug* const nullOpDebug = nullptr;
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1)), nullOpDebug, true));
@@ -125,6 +171,7 @@ public:
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
RecordStore* rs = coll->getRecordStore();
// Remove {_id: 1} from the record store, so we get more _id entries than records.
@@ -136,6 +183,8 @@ public:
ASSERT_FALSE(checkValid());
+ lockDb(MODE_X);
+
// Insert records {_id: 0} and {_id: 1} , so we get too few _id entries, and verify
// validate fails.
{
@@ -149,23 +198,30 @@ public:
}
ASSERT_FALSE(checkValid());
+ releaseDb();
}
};
-template <bool full>
+template <bool full, bool background>
class ValidateSecondaryIndexCount : public ValidateBase {
public:
- ValidateSecondaryIndexCount() : ValidateBase(full) {}
+ ValidateSecondaryIndexCount() : ValidateBase(full, background) {}
void run() {
+
+        // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert two documents.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
Collection* coll;
RecordId id1;
{
OpDebug* const nullOpDebug = nullptr;
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
id1 = coll->getCursor(&_opCtx)->next()->id;
@@ -190,6 +246,7 @@ public:
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
RecordStore* rs = coll->getRecordStore();
// Remove a record, so we get more _id entries than records, and verify validate fails.
@@ -201,6 +258,8 @@ public:
ASSERT_FALSE(checkValid());
+ lockDb(MODE_X);
+
// Insert two more records, so we get too few entries for a non-sparse index, and
// verify validate fails.
{
@@ -214,22 +273,30 @@ public:
}
ASSERT_FALSE(checkValid());
+ releaseDb();
}
};
+template <bool full, bool background>
class ValidateSecondaryIndex : public ValidateBase {
public:
- ValidateSecondaryIndex() : ValidateBase(true) {}
+ ValidateSecondaryIndex() : ValidateBase(full, background) {}
void run() {
+
+        // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert three records.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
Collection* coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
id1 = coll->getCursor(&_opCtx)->next()->id;
@@ -256,6 +323,7 @@ public:
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
RecordStore* rs = coll->getRecordStore();
// Update {a: 1} to {a: 9} without updating the index, so we get inconsistent values
@@ -271,23 +339,31 @@ public:
}
ASSERT_FALSE(checkValid());
+ releaseDb();
}
};
+template <bool full, bool background>
class ValidateIdIndex : public ValidateBase {
public:
- ValidateIdIndex() : ValidateBase(true) {}
+ ValidateIdIndex() : ValidateBase(full, background) {}
void run() {
+
+        // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert records {_id: 1} and {_id: 2} and check it's valid.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
Collection* coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1)), nullOpDebug, true));
@@ -299,6 +375,7 @@ public:
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
RecordStore* rs = coll->getRecordStore();
// Update {_id: 1} to {_id: 9} without updating the index, so we get inconsistent values
@@ -314,6 +391,8 @@ public:
ASSERT_FALSE(checkValid());
+ lockDb(MODE_X);
+
// Revert {_id: 9} to {_id: 1} and verify that validate succeeds.
{
WriteUnitOfWork wunit(&_opCtx);
@@ -326,6 +405,8 @@ public:
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
+
// Remove the {_id: 1} document and insert a new document without an index entry, so there
// will still be the same number of index entries and documents, but one document will not
// have an index entry.
@@ -340,16 +421,24 @@ public:
}
ASSERT_FALSE(checkValid());
+ releaseDb();
}
};
+template <bool full, bool background>
class ValidateMultiKeyIndex : public ValidateBase {
public:
- ValidateMultiKeyIndex() : ValidateBase(true) {}
+ ValidateMultiKeyIndex() : ValidateBase(full, background) {}
void run() {
+
+        // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert three records and check it's valid.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
Collection* coll;
RecordId id1;
@@ -364,8 +453,8 @@ public:
auto doc3 = BSON("_id" << 3 << "a" << BSON_ARRAY(BSON("c" << 1)));
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(doc1), nullOpDebug, true));
@@ -377,6 +466,8 @@ public:
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
+
// Create multi-key index.
auto status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
@@ -394,6 +485,7 @@ public:
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
RecordStore* rs = coll->getRecordStore();
// Update a document's indexed field without updating the index.
@@ -407,6 +499,8 @@ public:
ASSERT_FALSE(checkValid());
+ lockDb(MODE_X);
+
// Update a document's non-indexed field without updating the index.
// Index validation should still be valid.
{
@@ -418,23 +512,31 @@ public:
}
ASSERT_TRUE(checkValid());
+ releaseDb();
}
};
+template <bool full, bool background>
class ValidateSparseIndex : public ValidateBase {
public:
- ValidateSparseIndex() : ValidateBase(true) {}
+ ValidateSparseIndex() : ValidateBase(full, background) {}
void run() {
+
+ // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert three records and check it's valid.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
Collection* coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
@@ -465,6 +567,7 @@ public:
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
RecordStore* rs = coll->getRecordStore();
// Update a document's indexed field without updating the index.
@@ -478,23 +581,31 @@ public:
}
ASSERT_FALSE(checkValid());
+ releaseDb();
}
};
+template <bool full, bool background>
class ValidatePartialIndex : public ValidateBase {
public:
- ValidatePartialIndex() : ValidateBase(true) {}
+ ValidatePartialIndex() : ValidateBase(full, background) {}
void run() {
+
+ // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert three records and check it's valid.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
Collection* coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
@@ -530,6 +641,7 @@ public:
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
RecordStore* rs = coll->getRecordStore();
// Update an unindexed document without updating the index.
@@ -543,24 +655,32 @@ public:
}
ASSERT_TRUE(checkValid());
+ releaseDb();
}
};
+template <bool full, bool background>
class ValidatePartialIndexOnCollectionWithNonIndexableFields : public ValidateBase {
public:
- ValidatePartialIndexOnCollectionWithNonIndexableFields() : ValidateBase(true) {}
+ ValidatePartialIndexOnCollectionWithNonIndexableFields() : ValidateBase(full, background) {}
void run() {
+
+ // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection and insert a record that has a non-indexable value on the indexed
// field.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
Collection* coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(
coll->insertDocument(&_opCtx,
InsertStatement(BSON("_id" << 1 << "x" << 1 << "a" << 2)),
@@ -606,23 +726,31 @@ public:
<< BSON("a" << BSON("$eq" << 1))));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
+ releaseDb();
}
};
+template <bool full, bool background>
class ValidateCompoundIndex : public ValidateBase {
public:
- ValidateCompoundIndex() : ValidateBase(true) {}
+ ValidateCompoundIndex() : ValidateBase(full, background) {}
void run() {
+
+ // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert five records and check it's valid.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
Collection* coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(
coll->insertDocument(&_opCtx,
@@ -676,6 +804,7 @@ public:
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
RecordStore* rs = coll->getRecordStore();
// Update a document's indexed field without updating the index.
@@ -689,23 +818,31 @@ public:
}
ASSERT_FALSE(checkValid());
+ releaseDb();
}
};
+template <bool full, bool background>
class ValidateIndexEntry : public ValidateBase {
public:
- ValidateIndexEntry() : ValidateBase(true) {}
+ ValidateIndexEntry() : ValidateBase(full, background) {}
void run() {
+
+ // Can't do it in background is the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert three records and check it's valid.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
Collection* coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
@@ -729,6 +866,8 @@ public:
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
+
// Replace a correct index entry with a bad one and check it's invalid.
IndexCatalog* indexCatalog = coll->getIndexCatalog();
IndexDescriptor* descriptor = indexCatalog->findIndexByName(&_opCtx, indexName);
@@ -754,23 +893,31 @@ public:
}
ASSERT_FALSE(checkValid());
+ releaseDb();
}
};
+template <bool full, bool background>
class ValidateIndexOrdering : public ValidateBase {
public:
- ValidateIndexOrdering() : ValidateBase(true) {}
+ ValidateIndexOrdering() : ValidateBase(full, background) {}
void run() {
+
+ // Can't do it in the background if the RecordStore is not in RecordId order.
+ if (_background && !_isInRecordIdOrder) {
+ return;
+ }
+
// Create a new collection, insert three records and check it's valid.
- Database* db = _ctx.db();
+ lockDb(MODE_X);
OpDebug* const nullOpDebug = nullptr;
Collection* coll;
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
+ coll = _db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
@@ -794,6 +941,8 @@ public:
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
+ lockDb(MODE_X);
+
// Change the IndexDescriptor's keyPattern to descending so the index ordering
// appears wrong.
IndexCatalog* indexCatalog = coll->getIndexCatalog();
@@ -801,6 +950,7 @@ public:
descriptor->setKeyPatternForTest(BSON("a" << -1));
ASSERT_FALSE(checkValid());
+ releaseDb();
}
};
@@ -810,23 +960,34 @@ public:
void setupTests() {
// Add tests for both full validate and non-full validate.
- add<ValidateIdIndexCount<true>>();
- add<ValidateIdIndexCount<false>>();
- add<ValidateSecondaryIndexCount<true>>();
- add<ValidateSecondaryIndexCount<false>>();
-
- // These tests are only needed for full validate.
- add<ValidateIdIndex>();
- add<ValidateSecondaryIndex>();
- add<ValidateMultiKeyIndex>();
- add<ValidateSparseIndex>();
- add<ValidateCompoundIndex>();
- add<ValidatePartialIndex>();
- add<ValidatePartialIndexOnCollectionWithNonIndexableFields>();
+ add<ValidateIdIndexCount<true, false>>();
+ add<ValidateIdIndexCount<false, false>>();
+ add<ValidateIdIndexCount<false, true>>();
+ add<ValidateSecondaryIndexCount<true, false>>();
+ add<ValidateSecondaryIndexCount<false, false>>();
+ add<ValidateSecondaryIndexCount<false, true>>();
+
+ // These tests are only needed for non-full validate.
+ add<ValidateIdIndex<false, false>>();
+ add<ValidateIdIndex<false, true>>();
+ add<ValidateSecondaryIndex<false, false>>();
+ add<ValidateSecondaryIndex<false, true>>();
+ add<ValidateMultiKeyIndex<false, false>>();
+ add<ValidateMultiKeyIndex<false, true>>();
+ add<ValidateSparseIndex<false, false>>();
+ add<ValidateSparseIndex<false, true>>();
+ add<ValidateCompoundIndex<false, false>>();
+ add<ValidateCompoundIndex<false, true>>();
+ add<ValidatePartialIndex<false, false>>();
+ add<ValidatePartialIndex<false, true>>();
+ add<ValidatePartialIndexOnCollectionWithNonIndexableFields<false, false>>();
+ add<ValidatePartialIndexOnCollectionWithNonIndexableFields<false, true>>();
// Tests for index validation.
- add<ValidateIndexEntry>();
- add<ValidateIndexOrdering>();
+ add<ValidateIndexEntry<false, false>>();
+ add<ValidateIndexEntry<false, true>>();
+ add<ValidateIndexOrdering<false, false>>();
+ add<ValidateIndexOrdering<false, true>>();
}
} validateTests;
} // namespace ValidateTests