diff options

-rw-r--r--   jstests/noPassthroughWithMongod/reindex_duplicate_keys.js |  46
-rw-r--r--   src/mongo/db/catalog/index_catalog.h                      |   2
-rw-r--r--   src/mongo/db/catalog/multi_index_block.cpp                |   7
-rw-r--r--   src/mongo/db/catalog/multi_index_block.h                  |   9

4 files changed, 55 insertions, 9 deletions
diff --git a/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js b/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js
new file mode 100644
index 00000000000..aa4a363a72a
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js
@@ -0,0 +1,46 @@
+/**
+ * Tests that reIndex command fails with duplicate key error when there are duplicates in the
+ * collection.
+ */
+
+(function() {
+"use strict";
+
+const collNamePrefix = "reindex_duplicate_keys_";
+let count = 0;
+
+// Bypasses DuplicateKey insertion error for testing via failpoint.
+let addDuplicateDocumentsToCol = function(db, coll, doc) {
+    jsTestLog("Inserts documents without index entries.");
+    assert.commandWorked(
+        db.adminCommand({configureFailPoint: "skipIndexNewRecords", mode: "alwaysOn"}));
+
+    assert.commandWorked(coll.insert(doc));
+    assert.commandWorked(coll.insert(doc));
+
+    assert.commandWorked(db.adminCommand({configureFailPoint: "skipIndexNewRecords", mode: "off"}));
+};
+
+let runTest = function(doc) {
+    const collName = collNamePrefix + count++;
+    const coll = db.getCollection(collName);
+    coll.drop();
+
+    // Makes sure to create the _id index.
+    assert.commandWorked(db.createCollection(collName));
+    if (doc) {
+        assert.commandWorked(coll.createIndex(doc, {unique: true}));
+    } else {
+        doc = {_id: 1};
+    }
+
+    // Inserts two violating documents without indexing them.
+    addDuplicateDocumentsToCol(db, coll, doc);
+
+    // Checks reIndex command fails with duplicate key error.
+    assert.commandFailedWithCode(coll.reIndex(), ErrorCodes.DuplicateKey);
+};
+
+runTest();
+runTest({a: 1});
+})();
\ No newline at end of file
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 2f922b0bcc3..93480e3a0b6
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -76,7 +76,7 @@ enum class IndexBuildMethod {
      */
     kHybrid,
     /**
-     * Perform a collection scan to dump all keys into the exteral sorter, then into the index.
+     * Perform a collection scan to dump all keys into the external sorter, then into the index.
      * During this process, callers guarantee that no writes will be accepted on this collection.
      */
     kForeground,
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 1ad98278b22..47fe26aa513
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -324,10 +324,13 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
         collection->getIndexCatalog()->prepareInsertDeleteOptions(
             opCtx, collection->ns(), descriptor, &index.options);
 
-        // Index builds always relax constraints and check for violations at commit-time.
+        // Foreground index builds have to check for duplicates. Other index builds can relax
+        // constraints and check for violations at commit-time.
         index.options.getKeysMode =
             InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraints;
-        index.options.dupsAllowed = true;
+        index.options.dupsAllowed = _method == IndexBuildMethod::kForeground
+            ? !descriptor->unique() || _ignoreUnique
+            : true;
         index.options.fromIndexBuilder = true;
 
         LOGV2(20384,
diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h
index fa747afd8a3..840770595cf
--- a/src/mongo/db/catalog/multi_index_block.h
+++ b/src/mongo/db/catalog/multi_index_block.h
@@ -82,12 +82,9 @@ public:
     ~MultiIndexBlock();
 
     /**
-     * By default we enforce the 'unique' flag in specs when building an index by failing.
-     * If this is called before init(), we will ignore unique violations. This has no effect if
-     * no specs are unique.
-     *
-     * If this is called, any 'dupRecords' set passed to dumpInsertsFromBulk() will never be
-     * filled.
+     * When this is called:
+     * For hybrid index builds, the index interceptor will not track duplicates.
+     * For foreground index builds, the uniqueness constraint will be relaxed.
      */
     void ignoreUniqueConstraint();