author    Dan Larkin-York <dan.larkin-york@mongodb.com>    2023-02-03 23:45:23 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com> 2023-02-11 13:19:08 +0000
commit    71715290cbe0caeac6911176b9721d664444d19a
tree      3adfe3616cdb1109e32a89799ceae86e1c36dea6
parent    d74542dd8333db36eea8ad173357b4bbeea705de
download  mongo-71715290cbe0caeac6911176b9721d664444d19a.tar.gz

SERVER-72512 SERVER-73636 Ensure validate surfaces index key inconsistencies if they exist

(cherry picked from commit c0e1438cad31e00372a9f9edaee80f9db5a4e8ed)
(cherry picked from commit 2d90115a227572cc0f46b309ceb47dda6b740da3)
-rw-r--r--  jstests/noPassthrough/validate_memory_limit.js    41
-rw-r--r--  src/mongo/db/catalog/index_consistency.cpp       195
2 files changed, 150 insertions(+), 86 deletions(-)
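
Both files implement the same idea: sort the recorded inconsistencies by size, report them in increasing order, but always emit the first (smallest) one even when it alone exceeds the budget, so a corrupt index can never validate cleanly in silence. A minimal standalone sketch of that reporting pattern (plain strings and a made-up budget, nothing from the MongoDB tree):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        // Three "inconsistencies"; kBudgetBytes stands in for kErrorSizeBytes.
        std::vector<std::string> entries = {std::string(400, 'a'), "tiny", std::string(90, 'b')};
        const std::size_t kBudgetBytes = 100;

        std::sort(entries.begin(), entries.end(), [](const std::string& a, const std::string& b) {
            return a.size() < b.size();
        });

        std::size_t used = 0;
        bool first = true;
        std::vector<std::string> reported;
        for (const auto& e : entries) {
            used += e.size();
            if (first || used <= kBudgetBytes) {  // the first entry is reported unconditionally
                reported.push_back(e);
                first = false;
            }
        }
        std::cout << "reported " << reported.size() << " of " << entries.size() << "\n";  // 2 of 3
    }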
diff --git a/jstests/noPassthrough/validate_memory_limit.js b/jstests/noPassthrough/validate_memory_limit.js
index 841b247ddaa..f229d2848c8 100644
--- a/jstests/noPassthrough/validate_memory_limit.js
+++ b/jstests/noPassthrough/validate_memory_limit.js
@@ -22,31 +22,40 @@ function corruptIndex() {
coll = conn.getDB("test").getCollection("corrupt");
}
-function checkValidate(errorPrefix, numMissingIndexEntries) {
- conn.getDB("test").adminCommand({setParameter: 1, maxValidateMemoryUsageMB: 1});
+function checkValidate(maxMemoryUsage, {minMissingKeys, maxMissingKeys}) {
+ conn.getDB("test").adminCommand({setParameter: 1, maxValidateMemoryUsageMB: maxMemoryUsage});
const res = coll.validate();
assert.commandWorked(res);
- assert(!res.valid);
- assert.containsPrefix(errorPrefix, res.errors);
- assert.eq(res.missingIndexEntries.length, numMissingIndexEntries);
+ assert(!res.valid, tojson(res));
+ const notAllReportedPrefix =
+ "Not all index entry inconsistencies are reported due to memory limitations.";
+ assert.containsPrefix(notAllReportedPrefix, res.errors, tojson(res));
+ assert.gte(res.missingIndexEntries.length, minMissingKeys, tojson(res));
+ assert.lte(res.missingIndexEntries.length, maxMissingKeys, tojson(res));
}
-const noneReportedPrefix =
- "Unable to report index entry inconsistencies due to memory limitations.";
-const notAllReportedPrefix =
- "Not all index entry inconsistencies are reported due to memory limitations.";
-
-// Insert a document with a key larger than maxValidateMemoryUsageMB so that validate does not
-// report any missing index entries.
+// Insert a document with a key larger than maxValidateMemoryUsageMB and test that we still report
+// at least one inconsistency.
const indexKey = "a".repeat(kIndexKeyLength);
assert.commandWorked(coll.insert({_id: indexKey}));
corruptIndex();
-checkValidate(noneReportedPrefix, 0);
+checkValidate(1, {minMissingKeys: 1, maxMissingKeys: 1});
+
+// Clear collection between tests.
+coll.drop();
+
+// Test that if we have keys distributed across many buckets and would exceed
+// maxValidateMemoryUsageMB, we report as many inconsistencies as we can.
+for (let i = 0; i < 10; ++i) {
+ const indexKey = i.toString().repeat(kIndexKeyLength / 5);
+ assert.commandWorked(coll.insert({_id: indexKey}));
+}
-// Insert a document with a small key so that validate reports one missing index entry.
-assert.commandWorked(coll.insert({_id: 1}));
corruptIndex();
-checkValidate(notAllReportedPrefix, 1);
+// If each key is maxMem/5, then we can keep 4 of them (the 5th would put us at the limit).
+// However, each key is counted twice, so realistically we only expect to track 2 of them, and
+// there's a small chance hash collisions could lead to us reporting only 1.
+checkValidate(1, {minMissingKeys: 1, maxMissingKeys: 2});
MongoRunner.stopMongod(conn, null, {skipValidation: true});
})();
\ No newline at end of file
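
The bounds asserted above follow from the comment's arithmetic: every key is charged to two hash buckets, and buckets are kept only while the running total stays within the limit. A toy model of that accounting (the constants and the both-buckets-must-survive simplification are assumptions for illustration, not the real hashing):

    #include <cstdint>
    #include <iostream>

    int main() {
        const std::uint64_t maxMemBytes = 1 << 20;       // maxValidateMemoryUsageMB = 1
        const std::uint64_t keyBytes = maxMemBytes / 5;  // each test key is ~maxMem/5

        // 10 keys, two bucket charges each -> 20 charges of keyBytes (ignoring collisions).
        std::uint64_t used = 0;
        int bucketsKept = 0;
        for (int charge = 0; charge < 20; ++charge) {
            if (used + keyBytes > maxMemBytes)
                break;
            used += keyBytes;
            ++bucketsKept;
        }

        // With exact maxMem/5 buckets this keeps 5; real keys carry BSON overhead, so the
        // test comment expects only ~4 buckets, i.e. about 2 fully tracked keys.
        std::cout << "buckets kept: " << bucketsKept << ", keys trackable: " << bucketsKept / 2
                  << "\n";
    }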
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
index fde68c2c949..d38230ea2d1 100644
--- a/src/mongo/db/catalog/index_consistency.cpp
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -139,15 +139,31 @@ void IndexConsistency::addIndexEntryErrors(ValidateResultsMap* indexNsResultsMap
numExtraIndexEntryErrors += item.second.size();
}
+ // Sort missing index entries by size so we can process them in order of increasing size and
+ // return as many as possible within the size limits.
+ using MissingIt = decltype(_missingIndexEntries)::const_iterator;
+ std::vector<MissingIt> missingIndexEntriesBySize;
+ missingIndexEntriesBySize.reserve(_missingIndexEntries.size());
+ for (auto it = _missingIndexEntries.begin(); it != _missingIndexEntries.end(); ++it) {
+ missingIndexEntriesBySize.push_back(it);
+ }
+ std::sort(missingIndexEntriesBySize.begin(),
+ missingIndexEntriesBySize.end(),
+ [](const MissingIt& a, const MissingIt& b) {
+ return a->second.objsize() < b->second.objsize();
+ });
+
// Inform which indexes have inconsistencies and add the BSON objects of the inconsistent index
// entries to the results vector.
bool missingIndexEntrySizeLimitWarning = false;
- for (const auto& missingIndexEntry : _missingIndexEntries) {
- const BSONObj& entry = missingIndexEntry.second;
+ bool first = true;
+ for (const auto& it : missingIndexEntriesBySize) {
+ const auto& entry = it->second;
numMissingIndexEntriesSizeBytes += entry.objsize();
- if (numMissingIndexEntriesSizeBytes <= kErrorSizeBytes) {
+ if (first || numMissingIndexEntriesSizeBytes <= kErrorSizeBytes) {
results->missingIndexEntries.push_back(entry);
+ first = false;
} else if (!missingIndexEntrySizeLimitWarning) {
StringBuilder ss;
ss << "Not all missing index entry inconsistencies are listed due to size limitations.";
@@ -168,33 +184,58 @@ void IndexConsistency::addIndexEntryErrors(ValidateResultsMap* indexNsResultsMap
indexNsResultsMap->at(indexName).valid = false;
}
- bool extraIndexEntrySizeLimitWarning = false;
+ // Sort extra index entries by size so we can process them in order of increasing size and
+ // return as many as possible within the size limits.
+ using ExtraIt = SimpleBSONObjSet::const_iterator;
+ std::vector<ExtraIt> extraIndexEntriesBySize;
+ // Since the extra entries are stored in a map of sets, we have to iterate the entries in the
+ // map and sum the size of the sets in order to get the total number. Given that we can have at
+ // most 64 indexes per collection, and the total number of entries could potentially be in the
+ // millions, we expect that iterating the map will be much less costly than the additional
+ // allocations and copies that could result from not calling 'reserve' on the vector.
+ size_t totalExtraIndexEntriesCount =
+ std::accumulate(_extraIndexEntries.begin(),
+ _extraIndexEntries.end(),
+ 0,
+ [](size_t total, const std::pair<IndexKey, SimpleBSONObjSet>& set) {
+ return total + set.second.size();
+ });
+ extraIndexEntriesBySize.reserve(totalExtraIndexEntriesCount);
for (const auto& extraIndexEntry : _extraIndexEntries) {
const SimpleBSONObjSet& entries = extraIndexEntry.second;
- for (const auto& entry : entries) {
- numExtraIndexEntriesSizeBytes += entry.objsize();
- if (numExtraIndexEntriesSizeBytes <= kErrorSizeBytes) {
- results->extraIndexEntries.push_back(entry);
- } else if (!extraIndexEntrySizeLimitWarning) {
- StringBuilder ss;
- ss << "Not all extra index entry inconsistencies are listed due to size "
- "limitations.";
- results->errors.push_back(ss.str());
-
- extraIndexEntrySizeLimitWarning = true;
- }
-
- std::string indexName = entry["indexName"].String();
- if (!indexNsResultsMap->at(indexName).valid) {
- continue;
- }
+ for (auto it = entries.begin(); it != entries.end(); ++it) {
+ extraIndexEntriesBySize.push_back(it);
+ }
+ }
+ std::sort(extraIndexEntriesBySize.begin(),
+ extraIndexEntriesBySize.end(),
+ [](const ExtraIt& a, const ExtraIt& b) { return a->objsize() < b->objsize(); });
+ bool extraIndexEntrySizeLimitWarning = false;
+ for (const auto& entry : extraIndexEntriesBySize) {
+ numExtraIndexEntriesSizeBytes += entry->objsize();
+ if (first || numExtraIndexEntriesSizeBytes <= kErrorSizeBytes) {
+ results->extraIndexEntries.push_back(*entry);
+ first = false;
+ } else if (!extraIndexEntrySizeLimitWarning) {
StringBuilder ss;
- ss << "Index with name '" << indexName << "' has inconsistencies.";
+ ss << "Not all extra index entry inconsistencies are listed due to size "
+ "limitations.";
results->errors.push_back(ss.str());
- indexNsResultsMap->at(indexName).valid = false;
+ extraIndexEntrySizeLimitWarning = true;
+ }
+
+ std::string indexName = (*entry)["indexName"].String();
+ if (!indexNsResultsMap->at(indexName).valid) {
+ continue;
}
+
+ StringBuilder ss;
+ ss << "Index with name '" << indexName << "' has inconsistencies.";
+ results->errors.push_back(ss.str());
+
+ indexNsResultsMap->at(indexName).valid = false;
}
// Inform how many inconsistencies were detected.
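
The reserve-before-flatten step above can be exercised in isolation: sum the set sizes with std::accumulate so a single allocation holds every element's iterator. A sketch with standard containers (note that passing std::size_t{0} as the initial value keeps the whole sum in size_t, whereas the patch's plain 0 accumulates in int, and that taking the map's value_type by const auto& avoids an implicit per-element pair copy):

    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <numeric>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        std::map<std::string, std::set<int>> extras{{"a_1", {1, 2, 3}}, {"b_1", {4, 5}}};

        const std::size_t total = std::accumulate(
            extras.begin(), extras.end(), std::size_t{0},
            [](std::size_t sum, const auto& kv) { return sum + kv.second.size(); });

        std::vector<std::set<int>::const_iterator> flattened;
        flattened.reserve(total);  // one up-front allocation, as the patch's comment argues
        for (const auto& kv : extras)
            for (auto it = kv.second.begin(); it != kv.second.end(); ++it)
                flattened.push_back(it);

        std::cout << "flattened " << flattened.size() << " of " << total << " entries\n";
    }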
@@ -224,8 +265,8 @@ void IndexConsistency::addDocKey(OperationContext* opCtx,
auto& upper = _indexKeyBuckets[hashUpper];
if (_firstPhase) {
- // During the first phase of validation we only keep track of the count for the document
- // keys encountered.
+ // During the first phase of validation we only keep track of the count for the
+ // document keys encountered.
lower.indexKeyCount++;
lower.bucketSizeBytes += ks.getSize();
upper.indexKeyCount++;
@@ -260,7 +301,8 @@ void IndexConsistency::addDocKey(OperationContext* opCtx,
KeyString::toBsonSafe(ks.getBuffer(), ks.getSize(), indexInfo->ord, ks.getTypeBits());
BSONObj info = _generateInfo(*indexInfo, recordId, indexKey, idKey);
- // Cannot have duplicate KeyStrings during the document scan phase for the same index.
+ // Cannot have duplicate KeyStrings during the document scan phase for the same
+ // index.
IndexKey key = _generateKeyForMap(*indexInfo, ks);
invariant(_missingIndexEntries.count(key) == 0);
_missingIndexEntries.insert(std::make_pair(key, info));
@@ -277,8 +319,8 @@ void IndexConsistency::addIndexKey(const KeyString::Value& ks,
auto& upper = _indexKeyBuckets[hashUpper];
if (_firstPhase) {
- // During the first phase of validation we only keep track of the count for the index entry
- // keys encountered.
+ // During the first phase of validation we only keep track of the count for the
+ // index entry keys encountered.
lower.indexKeyCount--;
lower.bucketSizeBytes += ks.getSize();
upper.indexKeyCount--;
@@ -298,9 +340,9 @@ void IndexConsistency::addIndexKey(const KeyString::Value& ks,
}
} else if (lower.indexKeyCount || upper.indexKeyCount) {
// Found an index key for a bucket that has inconsistencies.
- // If there is a corresponding document key for the index entry key, we remove the key from
- // the '_missingIndexEntries' map. However if there was no document key for the index entry
- // key, we add the key to the '_extraIndexEntries' map.
+ // If there is a corresponding document key for the index entry key, we remove the
+ // key from the '_missingIndexEntries' map. However if there was no document key for
+ // the index entry key, we add the key to the '_extraIndexEntries' map.
auto indexKey =
KeyString::toBsonSafe(ks.getBuffer(), ks.getSize(), indexInfo->ord, ks.getTypeBits());
BSONObj info = _generateInfo(*indexInfo, recordId, indexKey, boost::none);
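
The comment in this hunk describes the second-phase reconciliation. A toy model of it (plain strings instead of KeyStrings; the addDocKey/addIndexKey lambdas are stand-ins for the IndexConsistency methods of the same names):

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    int main() {
        std::map<std::string, std::string> missing;  // key -> info recorded by the doc scan
        std::set<std::string> extra;

        auto addDocKey = [&](const std::string& key) { missing.emplace(key, "info"); };
        auto addIndexKey = [&](const std::string& key) {
            // A matching doc key resolves the pair; an unmatched index key is extra.
            if (missing.erase(key) == 0)
                extra.insert(key);
        };

        addDocKey("k1");    // document key the index never produces -> stays missing
        addDocKey("k2");
        addIndexKey("k2");  // pairs up with its doc key -> removed from missing
        addIndexKey("k3");  // index-only key -> extra

        std::cout << "missing=" << missing.size() << " extra=" << extra.size() << "\n";  // 1 / 1
    }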
@@ -334,54 +376,67 @@ bool IndexConsistency::limitMemoryUsageForSecondPhase(ValidateResults* result) {
return bucket.indexKeyCount ? bytes + bucket.bucketSizeBytes : bytes;
});
- // Allows twice the "maxValidateMemoryUsageMB" because each KeyString has two hashes stored.
+ // Allows twice the "maxValidateMemoryUsageMB" because each KeyString has two hashes
+ // stored.
if (totalMemoryNeededBytes <= maxMemoryUsageBytes * 2) {
// The amount of memory we need is under the limit, so no need to do anything else.
return true;
}
- bool hasNonZeroBucket = false;
- uint64_t memoryUsedSoFarBytes = 0;
- uint32_t smallestBucketBytes = std::numeric_limits<uint32_t>::max();
- // Zero out any nonzero buckets that would put us over maxMemoryUsageBytes.
- std::for_each(_indexKeyBuckets.begin(), _indexKeyBuckets.end(), [&](IndexKeyBucket& bucket) {
- if (bucket.indexKeyCount == 0) {
- return;
- }
-
- smallestBucketBytes = std::min(smallestBucketBytes, bucket.bucketSizeBytes);
- if (bucket.bucketSizeBytes + memoryUsedSoFarBytes > maxMemoryUsageBytes) {
- // Including this bucket would put us over the memory limit, so zero this bucket. We
- // don't want to keep any entry that will exceed the memory limit in the second phase so
- // we don't double the 'maxMemoryUsageBytes' here.
- bucket.indexKeyCount = 0;
- return;
- }
- memoryUsedSoFarBytes += bucket.bucketSizeBytes;
- hasNonZeroBucket = true;
- });
-
- StringBuilder memoryLimitMessage;
- memoryLimitMessage << "Memory limit for validation is currently set to "
- << maxValidateMemoryUsageMB.load()
- << "MB and can be configured via the 'maxValidateMemoryUsageMB' parameter.";
-
- if (!hasNonZeroBucket) {
- const uint32_t minMemoryNeededMB = (smallestBucketBytes / (1024 * 1024)) + 1;
- StringBuilder ss;
- ss << "Unable to report index entry inconsistencies due to memory limitations. Need at "
- "least "
- << minMemoryNeededMB << "MB to report at least one index entry inconsistency. "
- << memoryLimitMessage.str();
- result->errors.push_back(ss.str());
- result->valid = false;
-
- return false;
+ // At this point we know we'll exceed the memory limit, and will pare back some of the
+ // buckets. First we'll see what the smallest bucket is, and if that's over the limit by
+ // itself, then we can zero out all the other buckets. Otherwise we'll keep as many
+ // buckets as we can.
+
+ auto smallestBucketWithAnInconsistency = std::min_element(
+ _indexKeyBuckets.begin(),
+ _indexKeyBuckets.end(),
+ [](const IndexKeyBucket& lhs, const IndexKeyBucket& rhs) {
+ if (lhs.indexKeyCount != 0) {
+ return rhs.indexKeyCount == 0 || lhs.bucketSizeBytes < rhs.bucketSizeBytes;
+ }
+ return false;
+ });
+ invariant(smallestBucketWithAnInconsistency->indexKeyCount != 0);
+
+ if (smallestBucketWithAnInconsistency->bucketSizeBytes > maxMemoryUsageBytes) {
+ // We're going to just keep the smallest bucket, and zero everything else.
+ std::for_each(
+ _indexKeyBuckets.begin(), _indexKeyBuckets.end(), [&](IndexKeyBucket& bucket) {
+ if (&bucket == &(*smallestBucketWithAnInconsistency)) {
+ // We keep the smallest bucket.
+ return;
+ }
+
+ bucket.indexKeyCount = 0;
+ });
+ } else {
+ // We're going to scan through the buckets and keep as many as we can.
+ std::uint32_t memoryUsedSoFarBytes = 0;
+ std::for_each(
+ _indexKeyBuckets.begin(), _indexKeyBuckets.end(), [&](IndexKeyBucket& bucket) {
+ if (bucket.indexKeyCount == 0) {
+ return;
+ }
+
+ if (bucket.bucketSizeBytes + memoryUsedSoFarBytes > maxMemoryUsageBytes) {
+ // Including this bucket would put us over the memory limit, so zero
+ // this bucket. We don't want to keep any entry that will exceed the
+ // memory limit in the second phase so we don't double the
+ // 'maxMemoryUsageBytes' here.
+ bucket.indexKeyCount = 0;
+ return;
+ }
+ memoryUsedSoFarBytes += bucket.bucketSizeBytes;
+ });
}
StringBuilder ss;
ss << "Not all index entry inconsistencies are reported due to memory limitations. "
- << memoryLimitMessage.str();
+ "Memory "
+ "limit for validation is currently set to "
+ << maxValidateMemoryUsageMB.load()
+ << "MB and can be configured via the 'maxValidateMemoryUsageMB' parameter.";
result->errors.push_back(ss.str());
return true;
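
The paring strategy in this final hunk condenses to: find the smallest bucket that still records an inconsistency; if even that bucket exceeds the budget, keep only it and zero everything else; otherwise walk the buckets and zero each one that would push the running total over the budget. A self-contained sketch (Bucket, keyCount, and sizeBytes are hypothetical stand-ins for IndexKeyBucket, indexKeyCount, and bucketSizeBytes):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Bucket {
        std::int64_t keyCount = 0;  // nonzero -> bucket holds an inconsistency
        std::uint32_t sizeBytes = 0;
    };

    int main() {
        std::vector<Bucket> buckets{{2, 700}, {0, 10}, {1, 300}, {3, 500}};
        const std::uint32_t budget = 900;

        // Empty buckets compare as "largest" so min_element lands on a nonzero one.
        auto smallest = std::min_element(
            buckets.begin(), buckets.end(), [](const Bucket& a, const Bucket& b) {
                if (a.keyCount != 0)
                    return b.keyCount == 0 || a.sizeBytes < b.sizeBytes;
                return false;
            });

        if (smallest->sizeBytes > budget) {
            for (auto& b : buckets)
                if (&b != &*smallest)
                    b.keyCount = 0;  // keep only the smallest bucket
        } else {
            std::uint32_t used = 0;
            for (auto& b : buckets) {
                if (b.keyCount == 0)
                    continue;
                if (used + b.sizeBytes > budget)
                    b.keyCount = 0;  // would cross the budget: drop this bucket
                else
                    used += b.sizeBytes;
            }
        }

        for (const auto& b : buckets)
            std::cout << b.keyCount << "/" << b.sizeBytes << " ";  // 2/700 0/10 0/300 0/500
        std::cout << "\n";
    }

As in the patch, the keep-as-many branch walks buckets in table order rather than smallest-first, so which inconsistencies survive depends on where their hashes happen to land.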