author     Louis Williams <louis.williams@mongodb.com>   2020-02-25 17:49:45 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2020-03-03 20:22:43 +0000
commit     8cdcfd2ab0d28dca863557a02cafc86ae80f960e (patch)
tree       61eb159e53415614e0e2a226029d015dc339d0bc /src/mongo
parent     5f3625880188f9ca76bacb02c541abc2bd2d0f96 (diff)
SERVER-46410 Validate should check for duplicate keys in unique indexes
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/catalog/validate_adaptor.cpp |  61
-rw-r--r--  src/mongo/dbtests/validate_tests.cpp      | 160
2 files changed, 212 insertions(+), 9 deletions(-)
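What the change does: during index traversal, validate now compares each index entry against the previous one as KeyStrings, which encode (Key, RecordId). A full comparison catches entries that are out of order; a comparison that ignores the trailing RecordId catches two entries in a unique index that share a key. Below is a minimal standalone sketch of that adjacency check, using plain (key, recordId) pairs as hypothetical stand-ins for MongoDB's KeyString API:

// Standalone sketch, not MongoDB code: models index entries as (key, recordId)
// pairs in cursor order and applies the same two adjacent-entry checks that the
// new _validateKeyOrder helper performs.
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    // Entries as a unique-index cursor would return them, sorted by (key, rid).
    std::vector<std::pair<std::string, long>> entries = {
        {"a=1", 11}, {"a=1", 12}, {"b=1", 13}};

    for (std::size_t i = 1; i < entries.size(); ++i) {
        const auto& prev = entries[i - 1];
        const auto& curr = entries[i];
        if (curr <= prev) {
            // Full (key, recordId) comparison: entries must be strictly ascending.
            std::cout << "index is not in strictly ascending or descending order\n";
        } else if (curr.first == prev.first) {
            // Equal key portions under distinct recordIds: a duplicate key in a
            // unique index, the new error this commit teaches validate to report.
            std::cout << "duplicate key '" << curr.first
                      << "', first record: " << prev.second
                      << ", second record: " << curr.second << '\n';
        }
    }
    return 0;
}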
diff --git a/src/mongo/db/catalog/validate_adaptor.cpp b/src/mongo/db/catalog/validate_adaptor.cpp
index ec37e24c913..84183e6dce2 100644
--- a/src/mongo/db/catalog/validate_adaptor.cpp
+++ b/src/mongo/db/catalog/validate_adaptor.cpp
@@ -139,6 +139,55 @@ Status ValidateAdaptor::validateRecord(OperationContext* opCtx,
     return status;
 }
 
+namespace {
+// Ensures that index entries are in order and that unique indexes contain no duplicate keys.
+void _validateKeyOrder(OperationContext* opCtx,
+                       const IndexCatalogEntry* index,
+                       const KeyString::Value& currKey,
+                       const KeyString::Value& prevKey,
+                       ValidateResults* results) {
+    auto descriptor = index->descriptor();
+    bool unique = descriptor->unique();
+
+    // KeyStrings are expected to be in strictly increasing order because index keys are
+    // stored sorted and encoded as (Key, RID), where every RecordId is unique.
+    if (currKey.compare(prevKey) <= 0) {
+        if (results && results->valid) {
+            results->errors.push_back(str::stream()
+                                      << "index '" << descriptor->indexName()
+                                      << "' is not in strictly ascending or descending order");
+        }
+        if (results) {
+            results->valid = false;
+        }
+        return;
+    }
+
+    if (unique) {
+        // Unique indexes must not have duplicate keys.
+        int cmp = currKey.compareWithoutRecordId(prevKey);
+        if (cmp != 0) {
+            return;
+        }
+
+        if (results && results->valid) {
+            auto bsonKey = KeyString::toBson(currKey, Ordering::make(descriptor->keyPattern()));
+            auto firstRecordId =
+                KeyString::decodeRecordIdAtEnd(prevKey.getBuffer(), prevKey.getSize());
+            auto secondRecordId =
+                KeyString::decodeRecordIdAtEnd(currKey.getBuffer(), currKey.getSize());
+            results->errors.push_back(str::stream() << "Unique index '" << descriptor->indexName()
+                                                    << "' has duplicate key: " << bsonKey
+                                                    << ", first record: " << firstRecordId
+                                                    << ", second record: " << secondRecordId);
+        }
+        if (results) {
+            results->valid = false;
+        }
+    }
+}
+}  // namespace
+
 void ValidateAdaptor::traverseIndex(OperationContext* opCtx,
                                     const IndexCatalogEntry* index,
                                     int64_t* numTraversedKeys,
@@ -173,16 +222,10 @@ void ValidateAdaptor::traverseIndex(OperationContext* opCtx,
     for (auto indexEntry = indexCursor->seekForKeyString(opCtx, firstKeyString.getValueCopy());
          indexEntry;
          indexEntry = indexCursor->nextKeyString(opCtx)) {
-        // Ensure that the index entries are in increasing or decreasing order.
-        if (!isFirstEntry && indexEntry->keyString < prevIndexKeyStringValue) {
-            if (results && results->valid) {
-                results->errors.push_back(
-                    "one or more indexes are not in strictly ascending or descending order");
-            }
-            if (results) {
-                results->valid = false;
-            }
+        if (!isFirstEntry) {
+            _validateKeyOrder(
+                opCtx, index, indexEntry->keyString, prevIndexKeyStringValue, results);
         }
 
         const RecordId kWildcardMultikeyMetadataRecordId{
             RecordId::ReservedId::kWildcardMultikeyMetadataId};
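The regression test in the second file corrupts a collection deliberately: it writes the record through the RecordStore and its index keys through the IndexAccessMethod with dupsAllowed set, so the uniqueness check that a normal document insert performs never runs. A rough standalone model of that bypass, with a hypothetical FakeCollection standing in for the real catalog types:

// Standalone model, hypothetical types rather than the real test harness: a
// normal document insert enforces the unique index, while inserting the record
// and its key through separate low-level paths plants the duplicate that
// validate must detect.
#include <iostream>
#include <map>
#include <string>

struct FakeCollection {
    std::map<long, std::string> recordStore;       // recordId -> document
    std::multimap<std::string, long> uniqueIndex;  // key -> recordId

    // Normal path: rejects a duplicate key before touching storage.
    bool insertDocument(long rid, const std::string& doc, const std::string& key) {
        if (uniqueIndex.count(key) > 0) {
            return false;  // duplicate key error
        }
        recordStore[rid] = doc;
        uniqueIndex.emplace(key, rid);
        return true;
    }

    // Low-level path the test takes: record and key inserted separately,
    // duplicates allowed, no constraint checking.
    void insertRecordAndKeySeparately(long rid,
                                      const std::string& doc,
                                      const std::string& key) {
        recordStore[rid] = doc;         // like RecordStore::insertRecord
        uniqueIndex.emplace(key, rid);  // like insertKeys with dupsAllowed set
    }
};

int main() {
    FakeCollection coll;
    coll.insertDocument(1, "{_id: 1, a: 1}", "a=1");

    // The normal path refuses the duplicate, confirming the constraint works.
    std::cout << "normal insert of duplicate ok? "
              << coll.insertDocument(2, "{_id: 2, a: 1}", "a=1") << '\n';  // 0

    // The bypass plants two index entries for the same key.
    coll.insertRecordAndKeySeparately(2, "{_id: 2, a: 1}", "a=1");
    std::cout << "entries for key a=1: " << coll.uniqueIndex.count("a=1")
              << '\n';  // 2: exactly the corruption validate should now report
    return 0;
}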
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 27c737c71a5..275fd35e0eb 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -1596,6 +1596,163 @@ public:
     }
 };
 
+template <bool full, bool background>
+class ValidateDuplicateKeysUniqueIndex : public ValidateBase {
+public:
+    ValidateDuplicateKeysUniqueIndex() : ValidateBase(full, background) {}
+
+    void run() {
+        // Cannot run validate with {background:true} if either
+        // - the RecordStore cursor does not retrieve documents in RecordId order
+        // - or the storage engine does not support checkpoints.
+        if (_background && (!_isInRecordIdOrder || !_engineSupportsCheckpoints)) {
+            return;
+        }
+
+        // Create a new collection.
+        lockDb(MODE_X);
+        Collection* coll;
+        {
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+            coll = _db->createCollection(&_opCtx, _nss);
+            wunit.commit();
+        }
+
+        // Create a unique index.
+        const auto indexName = "a";
+        {
+            const auto indexKey = BSON("a" << 1);
+            auto status = dbtests::createIndexFromSpec(
+                &_opCtx,
+                coll->ns().ns(),
+                BSON("name" << indexName << "key" << indexKey << "v"
+                            << static_cast<int>(kIndexVersion) << "background" << false << "unique"
+                            << true));
+            ASSERT_OK(status);
+        }
+
+        // Insert a document.
+        OpDebug* const nullOpDebug = nullptr;
+        lockDb(MODE_X);
+        {
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_OK(coll->insertDocument(
+                &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+            wunit.commit();
+        }
+
+        // Confirm that inserting a document with the same value for "a" fails, verifying the
+        // uniqueness constraint.
+        BSONObj dupObj = BSON("_id" << 2 << "a" << 1);
+        {
+            WriteUnitOfWork wunit(&_opCtx);
+            ASSERT_NOT_OK(
+                coll->insertDocument(&_opCtx, InsertStatement(dupObj), nullOpDebug, true));
+        }
+        releaseDb();
+        ensureValidateWorked();
+
+        // Insert a document with a duplicate key for "a".
+        {
+            lockDb(MODE_X);
+
+            IndexCatalog* indexCatalog = coll->getIndexCatalog();
+
+            WriteUnitOfWork wunit(&_opCtx);
+            InsertDeleteOptions options;
+            options.logIfError = true;
+            options.dupsAllowed = true;
+
+            // Insert a record and its keys separately. We do this to bypass duplicate constraint
+            // checking. Inserting a record and all of its keys ensures that validation fails
+            // because there are duplicate keys, and not just because there are keys without
+            // corresponding records.
+            auto swRecordId = coll->getRecordStore()->insertRecord(
+                &_opCtx, dupObj.objdata(), dupObj.objsize(), Timestamp());
+            ASSERT_OK(swRecordId);
+
+            // Insert the key on _id.
+            {
+                auto descriptor = indexCatalog->findIdIndex(&_opCtx);
+                auto iam = const_cast<IndexAccessMethod*>(
+                    indexCatalog->getEntry(descriptor)->accessMethod());
+                KeyStringSet keys;
+                iam->getKeys(dupObj,
+                             IndexAccessMethod::GetKeysMode::kRelaxConstraints,
+                             IndexAccessMethod::GetKeysContext::kReadOrAddKeys,
+                             &keys,
+                             nullptr,
+                             nullptr,
+                             swRecordId.getValue(),
+                             IndexAccessMethod::kNoopOnSuppressedErrorFn);
+                ASSERT_EQ(1, keys.size());
+
+                InsertResult result;
+                auto insertStatus = iam->insertKeys(&_opCtx,
+                                                    {keys.begin(), keys.end()},
+                                                    {},
+                                                    MultikeyPaths{},
+                                                    swRecordId.getValue(),
+                                                    options,
+                                                    &result);
+
+                ASSERT_EQUALS(result.dupsInserted.size(), 0);
+                ASSERT_EQUALS(result.numInserted, 1);
+                ASSERT_OK(insertStatus);
+            }
+
+            // Insert the key on "a".
+            {
+                auto descriptor = indexCatalog->findIndexByName(&_opCtx, indexName);
+                auto iam = const_cast<IndexAccessMethod*>(
+                    indexCatalog->getEntry(descriptor)->accessMethod());
+
+                KeyStringSet keys;
+                InsertResult result;
+                iam->getKeys(dupObj,
+                             IndexAccessMethod::GetKeysMode::kRelaxConstraints,
+                             IndexAccessMethod::GetKeysContext::kReadOrAddKeys,
+                             &keys,
+                             nullptr,
+                             nullptr,
+                             swRecordId.getValue(),
+                             IndexAccessMethod::kNoopOnSuppressedErrorFn);
+                ASSERT_EQ(1, keys.size());
+                auto insertStatus = iam->insertKeys(&_opCtx,
+                                                    {keys.begin(), keys.end()},
+                                                    {},
+                                                    MultikeyPaths{},
+                                                    swRecordId.getValue(),
+                                                    options,
+                                                    &result);
+
+                ASSERT_EQUALS(result.dupsInserted.size(), 1);
+                ASSERT_EQUALS(result.numInserted, 1);
+                ASSERT_OK(insertStatus);
+            }
+            wunit.commit();
+
+            releaseDb();
+        }
+
+        ValidateResults results = runValidate();
+
+        auto dumpOnErrorGuard = makeGuard([&] {
+            StorageDebugUtil::printValidateResults(results);
+            StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
+        });
+
+        ASSERT_FALSE(results.valid) << "Validation worked when it should have failed.";
+        ASSERT_EQ(static_cast<size_t>(1), results.errors.size());
+        ASSERT_EQ(static_cast<size_t>(0), results.warnings.size());
+        ASSERT_EQ(static_cast<size_t>(0), results.extraIndexEntries.size());
+        ASSERT_EQ(static_cast<size_t>(0), results.missingIndexEntries.size());
+
+        dumpOnErrorGuard.dismiss();
+    }
+};
+
 class ValidateTests : public OldStyleSuiteSpecification {
 public:
     ValidateTests() : OldStyleSuiteSpecification("validate_tests") {}
@@ -1638,6 +1795,9 @@ public:
         add<ValidateExtraIndexEntryResults<false, false>>();
         add<ValidateDuplicateDocumentIndexKeySet>();
+
+        add<ValidateDuplicateKeysUniqueIndex<false, false>>();
+        add<ValidateDuplicateKeysUniqueIndex<false, true>>();
     }
 };