summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorYuhong Zhang <danielzhangyh@gmail.com>2022-01-26 21:26:16 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-01-28 16:50:38 +0000
commitf02247dfac78ed35cbd4bc8b0a510209463c6080 (patch)
tree2aff8e3c3eddbd7cd4ec524847776b11e4f596ae
parentdb799be5aebf432380cb5f7acb0f204fbc120a13 (diff)
downloadmongo-f02247dfac78ed35cbd4bc8b0a510209463c6080.tar.gz
SERVER-62886 Add the option `disallowNewDuplicateKeys` to collMod command
-rw-r--r--jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js260
-rw-r--r--jstests/noPassthrough/collmod_convert_to_unique_side_writes.js2
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp33
-rw-r--r--src/mongo/db/catalog/coll_mod_index.cpp87
-rw-r--r--src/mongo/db/catalog/coll_mod_index.h1
-rw-r--r--src/mongo/db/catalog/index_catalog_impl.cpp2
-rw-r--r--src/mongo/db/coll_mod.idl12
-rw-r--r--src/mongo/db/coll_mod_reply_validation.cpp12
-rw-r--r--src/mongo/db/coll_mod_reply_validation.h2
-rw-r--r--src/mongo/db/op_observer.h2
-rw-r--r--src/mongo/db/op_observer_impl.cpp4
11 files changed, 386 insertions, 31 deletions
diff --git a/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js b/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js
new file mode 100644
index 00000000000..903d1b36b5b
--- /dev/null
+++ b/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js
@@ -0,0 +1,260 @@
+/**
+ * Tests that the collMod command disallows concurrent writes that introduce new duplicate keys
+ * while converting regular indexes to unique indexes.
+ *
+ * @tags: [
+ * # TODO(SERVER-61181): Fix validation errors under ephemeralForTest.
+ * incompatible_with_eft,
+ * # TODO(SERVER-61182): Fix WiredTigerKVEngine::alterIdentMetadata() under inMemory.
+ * requires_persistence,
+ * # Replication requires journaling support so this tag also implies exclusion from
+ * # --nojournal test configurations.
+ * requires_replication,
+ * ]
+ */
+
+(function() {
+'use strict';
+
+load('jstests/libs/fail_point_util.js');
+load('jstests/libs/parallel_shell_helpers.js');
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const collModIndexUniqueEnabled =
+ assert.commandWorked(primary.adminCommand({getParameter: 1, featureFlagCollModIndexUnique: 1}))
+ .featureFlagCollModIndexUnique.value;
+
+if (!collModIndexUniqueEnabled) {
+ jsTestLog('Skipping test because the collMod unique index feature flag is disabled');
+ rst.stopSet();
+ return;
+}
+
+let collCount = 0;
+const collPrefix = 'collmod_convert_to_unique_disallow_duplicates_';
+
+/**
+ * Returns the number of unique indexes with the given key pattern.
+ */
+const countUnique = function(coll, key) {
+ const all = coll.getIndexes().filter(function(z) {
+ return z.unique && friendlyEqual(z.key, key);
+ });
+ return all.length;
+};
+
+/**
+ * Starts and pauses a unique index conversion in the collection.
+ * While the 'collMod' command is paused, runs 'performCrudOpsFunc' before resuming the
+ * conversion process. Confirms expected 'collMod' behavior.
+ */
+const testCollModConvertUniqueWithSideWrites = function(initialDocs,
+ performCrudOpsFunc,
+ duplicateDoc = {
+ _id: 100,
+ a: 100
+ },
+ expectedViolations = undefined) {
+ const testDB = primary.getDB('test');
+ const collName = collPrefix + collCount++;
+ const coll = testDB.getCollection(collName);
+
+ jsTestLog('Starting test on collection: ' + coll.getFullName());
+ assert.commandWorked(testDB.createCollection(collName));
+
+ // Creates a regular index and uses collMod to convert it to a unique index.
+ assert.commandWorked(coll.createIndex({a: 1}));
+
+ // Initial documents.
+ assert.commandWorked(coll.insert(initialDocs));
+
+ // Disallows new duplicate keys on the index.
+ assert.commandWorked(testDB.runCommand(
+ {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+
+ let awaitCollMod = () => {};
+ const failPoint = configureFailPoint(
+ primary, 'hangAfterCollModIndexUniqueSideWriteTracker', {nss: coll.getFullName()});
+ try {
+ // Starts collMod unique index conversion.
+ if (!expectedViolations) {
+ awaitCollMod = assertCommandWorkedInParallelShell(
+ primary, testDB, {collMod: collName, index: {keyPattern: {a: 1}, unique: true}});
+ } else {
+ const assertViolations = function(result, expectedViolations) {
+ const compareIds = function(lhs, rhs) {
+ try {
+ assert.sameMembers(lhs.ids, rhs.ids);
+ } catch (e) {
+ return false;
+ }
+ return true;
+ };
+ assert.sameMembers(result.violations, expectedViolations, '', compareIds);
+ };
+ awaitCollMod = assertCommandFailedWithCodeInParallelShell(
+ primary,
+ testDB,
+ {collMod: collName, index: {keyPattern: {a: 1}, unique: true}},
+ ErrorCodes.CannotConvertIndexToUnique,
+ assertViolations,
+ expectedViolations);
+ }
+ failPoint.wait();
+
+ // Checks locks held by collMod while waiting on fail point.
+ const currentOpResult = testDB.getSiblingDB("admin")
+ .aggregate(
+ [
+ {$currentOp: {allUsers: true, idleConnections: true}},
+ {
+ $match: {
+ type: 'op',
+ op: 'command',
+ connectionId: {$exists: true},
+ ns: `${coll.getDB().$cmd.getFullName()}`,
+ 'command.collMod': coll.getName(),
+ 'locks.Collection': 'w'
+ }
+ },
+ ],
+ {readConcern: {level: "local"}})
+ .toArray();
+ assert.eq(
+ currentOpResult.length,
+ 1,
+ 'unable to find collMod command in db.currentOp() result: ' + tojson(currentOpResult));
+ const collModOp = currentOpResult[0];
+ assert(collModOp.hasOwnProperty('locks'),
+ 'no lock info in collMod op from db.currentOp(): ' + tojson(collModOp));
+ assert.eq(collModOp.locks.Collection,
+ 'w',
+ 'collMod is not holding collection lock in read mode: ' + tojson(collModOp));
+
+ jsTestLog('Performing CRUD ops on collection while collMod is paused: ' +
+ performCrudOpsFunc);
+ try {
+ performCrudOpsFunc(coll);
+ } catch (ex) {
+ jsTestLog('CRUD ops failed: ' + ex);
+ doassert('CRUD ops failed: ' + ex + ': ' + performCrudOpsFunc);
+ }
+ } finally {
+ failPoint.off();
+ awaitCollMod();
+ }
+
+ if (!expectedViolations) {
+ assert.eq(countUnique(coll, {a: 1}),
+ 1,
+ 'index should be unique now: ' + tojson(coll.getIndexes()));
+
+ // Tests uniqueness constraint.
+ assert.commandFailedWithCode(coll.insert(duplicateDoc), ErrorCodes.DuplicateKey);
+ } else {
+ assert.eq(
+ countUnique(coll, {a: 1}), 0, 'index should not unique: ' + tojson(coll.getIndexes()));
+
+ // Resets to allow duplicates on the regular index.
+ assert.commandWorked(testDB.runCommand(
+ {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: false}}));
+
+ // Checks that uniqueness constraint is not enforced.
+ assert.commandWorked(coll.insert(duplicateDoc));
+ }
+ jsTestLog('Successfully completed test on collection: ' + coll.getFullName());
+};
+
+const initialDocsUnique = [
+ {_id: 1, a: 100},
+ {_id: 2, a: 200},
+ {_id: 3, a: 300},
+];
+
+const initialDocsDuplicate = [
+ {_id: 1, a: 100},
+ {_id: 2, a: 100},
+ {_id: 3, a: 200},
+ {_id: 4, a: 200},
+];
+
+// Checks successful conversion with non-conflicting documents inserted during collMod.
+testCollModConvertUniqueWithSideWrites(initialDocsUnique, (coll) => {
+ const docs = [
+ {_id: 4, a: 400},
+ {_id: 5, a: 500},
+ {_id: 6, a: 600},
+ ];
+ jsTestLog('Inserting additional documents after collMod completed index scan: ' + tojson(docs));
+ assert.commandWorked(coll.insert(docs));
+ jsTestLog('Successfully inserted documents. Resuming collMod index conversion: ' +
+ tojson(docs));
+});
+
+// Checks successful conversion with a conflicting document rejected during collMod.
+testCollModConvertUniqueWithSideWrites(initialDocsUnique, (coll) => {
+ jsTestLog('Inserting additional documents after collMod completed index scan.');
+ assert.commandFailedWithCode(coll.insert({_id: 1000, a: 100}), ErrorCodes.DuplicateKey);
+ jsTestLog('Failed to insert documents. Resuming collMod index conversion.');
+});
+
+// Checks successful conversion with a conflicting update rejected during collMod.
+testCollModConvertUniqueWithSideWrites(initialDocsUnique, (coll) => {
+ jsTestLog('Updating single document after collMod completed index scan.');
+ assert.commandFailedWithCode(coll.update({_id: 1}, {a: 200}), ErrorCodes.DuplicateKey);
+ jsTestLog('Failed to update document. Resuming collMod index conversion.');
+});
+
+// Inserting a non-conflicting document containing an unindexed field should not affect conversion.
+testCollModConvertUniqueWithSideWrites(initialDocsUnique, (coll) => {
+ jsTestLog('Inserting a non-conflicting document containing an unindexed field.');
+ assert.commandWorked(coll.insert({_id: 7, a: 700, b: 2222}));
+ jsTestLog('Successfully inserted a non-conflicting document containing an unindexed field. ' +
+ 'Resuming collMod index conversion.');
+});
+
+// Removing the last entry in the index should not throw off the index scan.
+testCollModConvertUniqueWithSideWrites(initialDocsUnique, (coll) => {
+ jsTestLog('Removing the last index entry.');
+ assert.commandWorked(coll.remove({_id: 3}));
+ jsTestLog('Successfully removed the last index entry. Resuming collMod index conversion.');
+});
+
+// Makes the index multikey with a non-conflicting document.
+testCollModConvertUniqueWithSideWrites(initialDocsUnique, (coll) => {
+ jsTestLog('Converting the index to multikey with non-conflicting document.');
+ assert.commandWorked(coll.insert({_id: 8, a: [400, 500]}));
+ jsTestLog('Successfully converted the index to multikey with non-conflicting document.');
+});
+
+// Makes the index multikey with a conflicting document.
+testCollModConvertUniqueWithSideWrites(initialDocsUnique, (coll) => {
+ jsTestLog('Converting the index to multikey with conflicting document.');
+ assert.commandFailedWithCode(coll.insert({_id: 9, a: [900, 100]}), ErrorCodes.DuplicateKey);
+ jsTestLog('Failed to convert the index to multikey with a conflicting document.');
+});
+
+// All duplicates will be rejected during collMod. The conversion still succeeds eventually.
+testCollModConvertUniqueWithSideWrites(initialDocsUnique, (coll) => {
+ jsTestLog('Inserting additional documents after collMod completed index scan.');
+ assert.commandFailedWithCode(coll.insert({_id: 1000, a: 100}), ErrorCodes.DuplicateKey);
+ assert.commandFailedWithCode(coll.insert({_id: 1001, a: 100}), ErrorCodes.DuplicateKey);
+ assert.commandFailedWithCode(coll.insert({_id: 1002, a: 200}), ErrorCodes.DuplicateKey);
+ assert.commandFailedWithCode(coll.insert({_id: 1003, a: 200}), ErrorCodes.DuplicateKey);
+ jsTestLog('Failed to insert documents. Resuming collMod index conversion.');
+});
+
+// Checks unsuccessful conversion due to duplicates in the initial collection as well as rejects a
+// conflicting document during collMod.
+testCollModConvertUniqueWithSideWrites(initialDocsDuplicate, (coll) => {
+ jsTestLog('Inserting additional documents after collMod completed index scan.');
+ assert.commandFailedWithCode(coll.insert({_id: 1000, a: 100}), ErrorCodes.DuplicateKey);
+ jsTestLog('Failed to insert documents. Resuming collMod index conversion.');
+}, {_id: 1000, a: 100} /* duplicateDoc */, [{ids: [1, 2]}, {ids: [3, 4]}] /* expectedViolations */);
+
+rst.stopSet();
+})(); \ No newline at end of file
diff --git a/jstests/noPassthrough/collmod_convert_to_unique_side_writes.js b/jstests/noPassthrough/collmod_convert_to_unique_side_writes.js
index 8ceee831013..c30b9d6c97b 100644
--- a/jstests/noPassthrough/collmod_convert_to_unique_side_writes.js
+++ b/jstests/noPassthrough/collmod_convert_to_unique_side_writes.js
@@ -49,7 +49,7 @@ const countUnique = function(coll, key) {
/**
* Starts and pauses a unique index conversion in the collection.
- * While the 'collMod' command in paused, runs 'doCrudOpsFunc' before resuming the
+ * While the 'collMod' command is paused, runs 'performCrudOpsFunc' before resuming the
* conversion process. Confirms expected 'collMod' behavior.
*/
const testCollModConvertUniqueWithSideWrites = function(performCrudOpsFunc, expectedViolations) {
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 2b4bc8ee5f2..ac2b11d73b1 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -164,15 +164,16 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
}
if (!cmdIndex.getExpireAfterSeconds() && !cmdIndex.getHidden() &&
- !cmdIndex.getUnique()) {
- return Status(ErrorCodes::InvalidOptions,
- "no expireAfterSeconds, hidden, or unique field");
+ !cmdIndex.getUnique() && !cmdIndex.getDisallowNewDuplicateKeys()) {
+ return Status(
+ ErrorCodes::InvalidOptions,
+ "no expireAfterSeconds, hidden, unique, or disallowNewDuplicateKeys field");
}
auto cmrIndex = &cmr.indexRequest;
auto indexObj = e.Obj();
- if (cmdIndex.getUnique()) {
+ if (cmdIndex.getUnique() || cmdIndex.getDisallowNewDuplicateKeys()) {
uassert(ErrorCodes::InvalidOptions,
"collMod does not support converting an index to unique",
feature_flags::gCollModIndexUnique.isEnabled(
@@ -309,6 +310,15 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
}
}
+ // The 'disallowNewDuplicateKeys' option is an ephemeral setting. It is replicated but
+ // still susceptible to process restarts. We do not compare the requested change with
+ // the existing state, so there is no need for the no-op conversion logic that we have
+ // for 'hidden' or 'unique'.
+ if (cmdIndex.getDisallowNewDuplicateKeys()) {
+ cmr.numModifications++;
+ cmrIndex->indexDisallowNewDuplicateKeys = cmdIndex.getDisallowNewDuplicateKeys();
+ }
+
// The index options doc must contain either the name or key pattern, but not both.
// If we have just one field, the index modifications requested matches the current
// state in catalog and there is nothing further to do.
@@ -450,6 +460,12 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
oplogEntryBuilder->append(e);
}
+ // Currently disallows the use of 'indexDisallowNewDuplicateKeys' with other collMod options.
+ if (cmr.indexRequest.indexDisallowNewDuplicateKeys && cmr.numModifications > 1) {
+ return {ErrorCodes::InvalidOptions,
+ "disallowNewDuplicateKeys cannot be combined with any other modification."};
+ }
+
return {std::move(cmr)};
}
@@ -572,10 +588,6 @@ StatusWith<std::unique_ptr<CollModWriteOpsTracker::Token>> _setUpCollModIndexUni
const auto& cmr = statusW.getValue();
auto idx = cmr.indexRequest.idx;
auto violatingRecordsList = scanIndexForDuplicates(opCtx, collection, idx);
- if (!violatingRecordsList.empty()) {
- uassertStatusOK(buildConvertUniqueErrorStatus(
- buildDuplicateViolations(opCtx, collection, violatingRecordsList)));
- }
CurOpFailpointHelpers::waitWhileFailPointEnabled(&hangAfterCollModIndexUniqueSideWriteTracker,
opCtx,
@@ -583,6 +595,11 @@ StatusWith<std::unique_ptr<CollModWriteOpsTracker::Token>> _setUpCollModIndexUni
[]() {},
nss);
+ if (!violatingRecordsList.empty()) {
+ uassertStatusOK(buildConvertUniqueErrorStatus(
+ buildDuplicateViolations(opCtx, collection, violatingRecordsList)));
+ }
+
return std::move(writeOpsToken);
}
diff --git a/src/mongo/db/catalog/coll_mod_index.cpp b/src/mongo/db/catalog/coll_mod_index.cpp
index d9d94e0933f..480afa91916 100644
--- a/src/mongo/db/catalog/coll_mod_index.cpp
+++ b/src/mongo/db/catalog/coll_mod_index.cpp
@@ -183,6 +183,25 @@ void _processCollModIndexRequestUnique(OperationContext* opCtx,
*newUnique = true;
autoColl->getWritableCollection(opCtx)->updateUniqueSetting(opCtx, idx->indexName());
+ idx->getEntry()->accessMethod()->setEnforceDuplicateConstraints(false);
+}
+
+/**
+ * Adjusts enforceDuplicateConstraints setting on an index.
+ */
+void _processCollModIndexRequestDisallowNewDuplicateKeys(
+ OperationContext* opCtx,
+ AutoGetCollection* autoColl,
+ const IndexDescriptor* idx,
+ bool indexDisallowNewDuplicateKeys,
+ boost::optional<bool>* newDisallowNewDuplicateKeys,
+ boost::optional<bool>* oldDisallowNewDuplicateKeys) {
+ *newDisallowNewDuplicateKeys = indexDisallowNewDuplicateKeys;
+ auto accessMethod = idx->getEntry()->accessMethod();
+ *oldDisallowNewDuplicateKeys = accessMethod->isEnforcingDuplicateConstraints();
+ if (*oldDisallowNewDuplicateKeys != *newDisallowNewDuplicateKeys) {
+ accessMethod->setEnforceDuplicateConstraints(indexDisallowNewDuplicateKeys);
+ }
}
} // namespace
@@ -198,9 +217,11 @@ void processCollModIndexRequest(OperationContext* opCtx,
auto indexExpireAfterSeconds = collModIndexRequest.indexExpireAfterSeconds;
auto indexHidden = collModIndexRequest.indexHidden;
auto indexUnique = collModIndexRequest.indexUnique;
+ auto indexDisallowNewDuplicateKeys = collModIndexRequest.indexDisallowNewDuplicateKeys;
// Return early if there are no index modifications requested.
- if (!indexExpireAfterSeconds && !indexHidden && !indexUnique) {
+ if (!indexExpireAfterSeconds && !indexHidden && !indexUnique &&
+ !indexDisallowNewDuplicateKeys) {
return;
}
@@ -209,6 +230,8 @@ void processCollModIndexRequest(OperationContext* opCtx,
boost::optional<bool> newHidden;
boost::optional<bool> oldHidden;
boost::optional<bool> newUnique;
+ boost::optional<bool> newDisallowNewDuplicateKeys;
+ boost::optional<bool> oldDisallowNewDuplicateKeys;
// TTL Index
if (indexExpireAfterSeconds) {
@@ -230,12 +253,23 @@ void processCollModIndexRequest(OperationContext* opCtx,
opCtx, autoColl, idx, mode, docsForUniqueIndex, &newUnique);
}
+ if (indexDisallowNewDuplicateKeys) {
+ _processCollModIndexRequestDisallowNewDuplicateKeys(opCtx,
+ autoColl,
+ idx,
+ *indexDisallowNewDuplicateKeys,
+ &newDisallowNewDuplicateKeys,
+ &oldDisallowNewDuplicateKeys);
+ }
+
*indexCollModInfo =
IndexCollModInfo{!newExpireSecs ? boost::optional<Seconds>() : Seconds(*newExpireSecs),
!oldExpireSecs ? boost::optional<Seconds>() : Seconds(*oldExpireSecs),
newHidden,
oldHidden,
newUnique,
+ oldDisallowNewDuplicateKeys,
+ newDisallowNewDuplicateKeys,
idx->indexName()};
// This matches the default for IndexCatalog::refreshEntry().
@@ -251,26 +285,37 @@ void processCollModIndexRequest(OperationContext* opCtx,
autoColl->getWritableCollection(opCtx)->getIndexCatalog()->refreshEntry(
opCtx, autoColl->getWritableCollection(opCtx), idx, flags);
- opCtx->recoveryUnit()->onCommit(
- [oldExpireSecs, newExpireSecs, oldHidden, newHidden, newUnique, result](
- boost::optional<Timestamp>) {
- // add the fields to BSONObjBuilder result
- if (oldExpireSecs) {
- result->append("expireAfterSeconds_old", *oldExpireSecs);
- }
- if (newExpireSecs) {
- result->append("expireAfterSeconds_new", *newExpireSecs);
- }
- if (newHidden) {
- invariant(oldHidden);
- result->append("hidden_old", *oldHidden);
- result->append("hidden_new", *newHidden);
- }
- if (newUnique) {
- invariant(*newUnique);
- result->appendBool("unique_new", true);
- }
- });
+ opCtx->recoveryUnit()->onCommit([oldExpireSecs,
+ newExpireSecs,
+ oldHidden,
+ newHidden,
+ newUnique,
+ oldDisallowNewDuplicateKeys,
+ newDisallowNewDuplicateKeys,
+ result](boost::optional<Timestamp>) {
+ // add the fields to BSONObjBuilder result
+ if (oldExpireSecs) {
+ result->append("expireAfterSeconds_old", *oldExpireSecs);
+ }
+ if (newExpireSecs) {
+ result->append("expireAfterSeconds_new", *newExpireSecs);
+ }
+ if (newHidden) {
+ invariant(oldHidden);
+ result->append("hidden_old", *oldHidden);
+ result->append("hidden_new", *newHidden);
+ }
+ if (newUnique) {
+ invariant(*newUnique);
+ result->appendBool("unique_new", true);
+ }
+ if (newDisallowNewDuplicateKeys) {
+ // Unlike other fields, 'disallowNewDuplicateKeys' can have the same old and new values.
+ invariant(oldDisallowNewDuplicateKeys);
+ result->append("disallowNewDuplicateKeys_old", *oldDisallowNewDuplicateKeys);
+ result->append("disallowNewDuplicateKeys_new", *newDisallowNewDuplicateKeys);
+ }
+ });
if (MONGO_unlikely(assertAfterIndexUpdate.shouldFail())) {
LOGV2(20307, "collMod - assertAfterIndexUpdate fail point enabled");
diff --git a/src/mongo/db/catalog/coll_mod_index.h b/src/mongo/db/catalog/coll_mod_index.h
index ba4e9ba65de..f7b43353ec2 100644
--- a/src/mongo/db/catalog/coll_mod_index.h
+++ b/src/mongo/db/catalog/coll_mod_index.h
@@ -49,6 +49,7 @@ struct ParsedCollModIndexRequest {
boost::optional<long long> indexExpireAfterSeconds;
boost::optional<bool> indexHidden;
boost::optional<bool> indexUnique;
+ boost::optional<bool> indexDisallowNewDuplicateKeys;
};
/**
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index fa180b98be7..e3463a5b033 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -1434,6 +1434,7 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
// CollectionIndexUsageTrackerDecoration (shared state among Collection instances).
auto oldEntry = _readyIndexes.release(oldDesc);
invariant(oldEntry);
+ auto enforceDuplicateConstraints = oldEntry->accessMethod()->isEnforcingDuplicateConstraints();
opCtx->recoveryUnit()->registerChange(std::make_unique<IndexRemoveChange>(
std::move(oldEntry), collection->getSharedDecorations()));
CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations())
@@ -1448,6 +1449,7 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
auto newDesc = std::make_unique<IndexDescriptor>(_getAccessMethodName(keyPattern), spec);
auto newEntry = createIndexEntry(opCtx, collection, std::move(newDesc), flags);
invariant(newEntry->isReady(opCtx, collection));
+ newEntry->accessMethod()->setEnforceDuplicateConstraints(enforceDuplicateConstraints);
auto desc = newEntry->descriptor();
CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations())
.registerIndex(desc->indexName(), desc->keyPattern());
diff --git a/src/mongo/db/coll_mod.idl b/src/mongo/db/coll_mod.idl
index 68eee65ffdb..821aed598d6 100644
--- a/src/mongo/db/coll_mod.idl
+++ b/src/mongo/db/coll_mod.idl
@@ -63,6 +63,10 @@ structs:
optional: true
type: safeBool
unstable: true
+ disallowNewDuplicateKeys:
+ optional: true
+ type: safeBool
+ unstable: true
CollModReply:
description: "The collMod command's reply."
@@ -88,6 +92,14 @@ structs:
optional: true
type: safeBool
unstable: true
+ disallowNewDuplicateKeys_old:
+ optional: true
+ type: safeBool
+ unstable: true
+ disallowNewDuplicateKeys_new:
+ optional: true
+ type: safeBool
+ unstable: true
CollModRequest:
description: "The collMod command's request."
diff --git a/src/mongo/db/coll_mod_reply_validation.cpp b/src/mongo/db/coll_mod_reply_validation.cpp
index 46ac5b0b64d..3ebe4a58675 100644
--- a/src/mongo/db/coll_mod_reply_validation.cpp
+++ b/src/mongo/db/coll_mod_reply_validation.cpp
@@ -40,5 +40,17 @@ void validateReply(const CollModReply& reply) {
<< "(hidden_new and hidden_old) or none of them.",
false);
}
+
+ auto disallowNewDuplicateKeys_new = reply.getDisallowNewDuplicateKeys_new().is_initialized();
+ auto disallowNewDuplicateKeys_old = reply.getDisallowNewDuplicateKeys_old().is_initialized();
+
+ if ((!disallowNewDuplicateKeys_new && disallowNewDuplicateKeys_old) ||
+ (disallowNewDuplicateKeys_new && !disallowNewDuplicateKeys_old)) {
+ uassert(ErrorCodes::CommandResultSchemaViolation,
+ str::stream() << "Invalid CollModReply: Reply should define either both fields "
+ << "(disallowNewDuplicateKeys_new and disallowNewDuplicateKeys_old) "
+ "or none of them.",
+ false);
+ }
}
} // namespace mongo::coll_mod_reply_validation
diff --git a/src/mongo/db/coll_mod_reply_validation.h b/src/mongo/db/coll_mod_reply_validation.h
index cf0fca8359f..e4a9ac9e49a 100644
--- a/src/mongo/db/coll_mod_reply_validation.h
+++ b/src/mongo/db/coll_mod_reply_validation.h
@@ -36,8 +36,8 @@ namespace mongo::coll_mod_reply_validation {
* CollMod reply object requires extra validation, as the current IDL validation capabilities
* are not sufficient in this case.
* It is used to check that reply includes:
- * - (expireAfterSeconds_new and expireAfterSeconds_old) together or none of them.
* - (hidden_new and hidden_old) together or none of them.
+ * - (disallowNewDuplicateKeys_new and disallowNewDuplicateKeys_old) together or none of them.
*/
void validateReply(const CollModReply& reply);
} // namespace mongo::coll_mod_reply_validation
diff --git a/src/mongo/db/op_observer.h b/src/mongo/db/op_observer.h
index 9c2a30501d6..e42f9533939 100644
--- a/src/mongo/db/op_observer.h
+++ b/src/mongo/db/op_observer.h
@@ -99,6 +99,8 @@ struct IndexCollModInfo {
boost::optional<bool> hidden;
boost::optional<bool> oldHidden;
boost::optional<bool> unique;
+ boost::optional<bool> disallowNewDuplicateKeys;
+ boost::optional<bool> oldDisallowNewDuplicateKeys;
std::string indexName;
};
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 40f7d2569bf..a47a6055603 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -992,6 +992,10 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx,
auto oldHidden = indexInfo->oldHidden.get();
o2Builder.append("hidden_old", oldHidden);
}
+ if (indexInfo->oldDisallowNewDuplicateKeys) {
+ auto oldDisallowNewDuplicateKeys = indexInfo->oldDisallowNewDuplicateKeys.get();
+ o2Builder.append("disallowNewDuplicates_old", oldDisallowNewDuplicateKeys);
+ }
}
MutableOplogEntry oplogEntry;