-rw-r--r--  jstests/noPassthrough/collmod_disallow_duplicates_restart.js   63
-rw-r--r--  jstests/noPassthrough/collmod_disallow_duplicates_step_up.js   63
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp                              17
-rw-r--r--  src/mongo/db/catalog/coll_mod_index.cpp                        14
-rw-r--r--  src/mongo/db/catalog/collection.h                               7
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp                       11
-rw-r--r--  src/mongo/db/catalog/collection_impl.h                          4
-rw-r--r--  src/mongo/db/catalog/collection_mock.h                          6
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp                     2
-rw-r--r--  src/mongo/db/catalog/index_key_validate.cpp                     2
-rw-r--r--  src/mongo/db/index/index_access_method.cpp                      7
-rw-r--r--  src/mongo/db/index/index_access_method.h                       18
-rw-r--r--  src/mongo/db/index/index_descriptor.cpp                        11
-rw-r--r--  src/mongo/db/index/index_descriptor.h                           6
-rw-r--r--  src/mongo/db/list_indexes.idl                                   4
-rw-r--r--  src/mongo/db/op_observer_util.cpp                               4
-rw-r--r--  src/mongo/db/storage/bson_collection_catalog_entry.cpp         17
-rw-r--r--  src/mongo/db/storage/bson_collection_catalog_entry.h            2
-rw-r--r--  src/mongo/dbtests/index_access_method_test.cpp                171
19 files changed, 306 insertions(+), 123 deletions(-)
diff --git a/jstests/noPassthrough/collmod_disallow_duplicates_restart.js b/jstests/noPassthrough/collmod_disallow_duplicates_restart.js
new file mode 100644
index 00000000000..ea03c03613d
--- /dev/null
+++ b/jstests/noPassthrough/collmod_disallow_duplicates_restart.js
@@ -0,0 +1,63 @@
+/**
+ * Tests that the collMod command disallows writes that introduce new duplicate keys and that the
+ * option is persisted across restarts.
+ *
+ * @tags: [
+ * # TODO(SERVER-61181): Fix validation errors under ephemeralForTest.
+ * incompatible_with_eft,
+ * requires_persistence,
+ * # Replication requires journaling support so this tag also implies exclusion from
+ * # --nojournal test configurations.
+ * requires_replication,
+ * ]
+ */
+
+(function() {
+'use strict';
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+let primary = rst.getPrimary();
+const collModIndexUniqueEnabled =
+ assert.commandWorked(primary.adminCommand({getParameter: 1, featureFlagCollModIndexUnique: 1}))
+ .featureFlagCollModIndexUnique.value;
+
+if (!collModIndexUniqueEnabled) {
+ jsTestLog('Skipping test because the collMod unique index feature flag is disabled');
+ rst.stopSet();
+ return;
+}
+
+const collName = 'collmod_disallow_duplicates_restart';
+let db_primary = primary.getDB('test');
+let coll_primary = db_primary.getCollection(collName);
+
+// Sets 'disallowNewDuplicateKeys' and checks that the index rejects duplicates.
+coll_primary.drop();
+assert.commandWorked(coll_primary.createIndex({a: 1}));
+assert.commandWorked(coll_primary.insert({_id: 0, a: 1}));
+assert.commandWorked(db_primary.runCommand(
+ {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+assert.commandFailedWithCode(coll_primary.insert({_id: 1, a: 1}), ErrorCodes.DuplicateKey);
+
+// Restarts the primary and checks the index spec is persisted.
+rst.restart(primary);
+rst.waitForPrimary();
+primary = rst.getPrimary();
+db_primary = primary.getDB('test');
+coll_primary = db_primary.getCollection(collName);
+assert.commandFailedWithCode(coll_primary.insert({_id: 1, a: 1}), ErrorCodes.DuplicateKey);
+
+// Converts the index to unique.
+assert.commandWorked(
+ db_primary.runCommand({collMod: collName, index: {keyPattern: {a: 1}, unique: true}}));
+
+const uniqueIndexes = coll_primary.getIndexes().filter(function(doc) {
+ return doc.unique && friendlyEqual(doc.key, {a: 1});
+});
+assert.eq(1, uniqueIndexes.length);
+
+rst.stopSet();
+})();
\ No newline at end of file
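
For reference, a minimal shell sketch of how the persisted option can be inspected after the collMod above runs; it assumes featureFlagCollModIndexUnique is enabled and that listIndexes surfaces the stored field, as the list_indexes.idl change further down suggests.

    // Sketch: after collMod sets 'disallowNewDuplicateKeys', the stored index
    // spec carries the field, and it is still there after the restart.
    const specs = db.getSiblingDB('test')
                      .getCollection('collmod_disallow_duplicates_restart')
                      .getIndexes()
                      .filter(spec => friendlyEqual(spec.key, {a: 1}));
    assert.eq(1, specs.length);
    assert.eq(true, specs[0].disallowNewDuplicateKeys);
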
diff --git a/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js b/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js
new file mode 100644
index 00000000000..9d6908e3590
--- /dev/null
+++ b/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js
@@ -0,0 +1,63 @@
+/**
+ * Tests that the collMod command disallows writes that introduce new duplicate keys and that the
+ * option is replicated to all nodes in the replica set.
+ *
+ * @tags: [
+ * # TODO(SERVER-61181): Fix validation errors under ephemeralForTest.
+ * incompatible_with_eft,
+ * # TODO(SERVER-61182): Fix WiredTigerKVEngine::alterIdentMetadata() under inMemory.
+ * requires_persistence,
+ * # Replication requires journaling support so this tag also implies exclusion from
+ * # --nojournal test configurations.
+ * requires_replication,
+ * ]
+ */
+
+(function() {
+'use strict';
+
+const rst = new ReplSetTest({nodes: 3});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const [secondary, _] = rst.getSecondaries();
+const collModIndexUniqueEnabled =
+ assert.commandWorked(primary.adminCommand({getParameter: 1, featureFlagCollModIndexUnique: 1}))
+ .featureFlagCollModIndexUnique.value;
+
+if (!collModIndexUniqueEnabled) {
+ jsTestLog('Skipping test because the collMod unique index feature flag is disabled');
+ rst.stopSet();
+ return;
+}
+
+const collName = 'collmod_disallow_duplicates_step_up';
+const db_primary = primary.getDB('test');
+const coll_primary = db_primary.getCollection(collName);
+const db_secondary = secondary.getDB('test');
+const coll_secondary = db_secondary.getCollection(collName);
+
+// Sets 'disallowNewDuplicateKeys' on the old primary and checks that the index rejects duplicates.
+coll_primary.drop();
+assert.commandWorked(coll_primary.createIndex({a: 1}));
+assert.commandWorked(coll_primary.insert({_id: 0, a: 1}));
+assert.commandWorked(db_primary.runCommand(
+ {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+assert.commandFailedWithCode(coll_primary.insert({_id: 1, a: 1}), ErrorCodes.DuplicateKey);
+
+// Steps up a new primary and checks the index spec is replicated.
+rst.stepUp(secondary);
+assert.commandFailedWithCode(coll_secondary.insert({_id: 1, a: 1}), ErrorCodes.DuplicateKey);
+
+// Converts the index to unique on the new primary.
+assert.commandWorked(
+ db_secondary.runCommand({collMod: collName, index: {keyPattern: {a: 1}, unique: true}}));
+
+const uniqueIndexes = coll_secondary.getIndexes().filter(function(doc) {
+ return doc.unique && friendlyEqual(doc.key, {a: 1});
+});
+assert.eq(1, uniqueIndexes.length);
+
+rst.stopSet();
+})();
\ No newline at end of file
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 515b655fa3b..48194767c48 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -175,7 +175,8 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
if (cmdIndex.getUnique() || cmdIndex.getDisallowNewDuplicateKeys()) {
uassert(ErrorCodes::InvalidOptions,
- "collMod does not support converting an index to unique",
+ "collMod does not support converting an index to 'unique' or to "
+ "'disallowNewDuplicateKeys' mode",
feature_flags::gCollModIndexUnique.isEnabled(
serverGlobalParams.featureCompatibility));
}
@@ -310,13 +311,17 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
}
}
- // The 'disallowNewDuplicateKeys' option is an ephemeral setting. It is replicated but
- // still susceptible to process restarts. We do not compare the requested change with
- // the existing state, so there is no need for the no-op conversion logic that we have
- // for 'hidden' or 'unique'.
if (cmdIndex.getDisallowNewDuplicateKeys()) {
cmr.numModifications++;
- cmrIndex->indexDisallowNewDuplicateKeys = cmdIndex.getDisallowNewDuplicateKeys();
+ // Attempting to modify with the same value should be treated as a no-op.
+ if (cmrIndex->idx->disallowNewDuplicateKeys() ==
+ *cmdIndex.getDisallowNewDuplicateKeys()) {
+ indexObjForOplog = indexObjForOplog.removeField(
+ CollModIndex::kDisallowNewDuplicateKeysFieldName);
+ } else {
+ cmrIndex->indexDisallowNewDuplicateKeys =
+ cmdIndex.getDisallowNewDuplicateKeys();
+ }
}
// The index options doc must contain either the name or key pattern, but not both.
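
The parse logic above makes an unchanged 'disallowNewDuplicateKeys' value a no-op. A minimal shell sketch of that behavior, assuming the feature flag is enabled ('c' is a hypothetical collection):

    // Sketch: setting the option twice with the same value succeeds; the field
    // is stripped from the second request's oplog entry rather than rejected.
    assert.commandWorked(db.c.createIndex({a: 1}));
    assert.commandWorked(db.runCommand(
        {collMod: 'c', index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
    assert.commandWorked(db.runCommand(
        {collMod: 'c', index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
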
diff --git a/src/mongo/db/catalog/coll_mod_index.cpp b/src/mongo/db/catalog/coll_mod_index.cpp
index b66a2b50db5..700c658f399 100644
--- a/src/mongo/db/catalog/coll_mod_index.cpp
+++ b/src/mongo/db/catalog/coll_mod_index.cpp
@@ -184,11 +184,12 @@ void _processCollModIndexRequestUnique(OperationContext* opCtx,
*newUnique = true;
autoColl->getWritableCollection(opCtx)->updateUniqueSetting(opCtx, idx->indexName());
- idx->getEntry()->accessMethod()->setEnforceDuplicateConstraints(false);
+ autoColl->getWritableCollection(opCtx)->updateDisallowNewDuplicateKeysSetting(
+ opCtx, idx->indexName(), false);
}
/**
- * Adjusts enforceDuplicateConstraints setting on an index.
+ * Adjusts the 'disallowNewDuplicateKeys' setting on an index.
*/
void _processCollModIndexRequestDisallowNewDuplicateKeys(
OperationContext* opCtx,
@@ -198,10 +199,10 @@ void _processCollModIndexRequestDisallowNewDuplicateKeys(
boost::optional<bool>* newDisallowNewDuplicateKeys,
boost::optional<bool>* oldDisallowNewDuplicateKeys) {
*newDisallowNewDuplicateKeys = indexDisallowNewDuplicateKeys;
- auto accessMethod = idx->getEntry()->accessMethod();
- *oldDisallowNewDuplicateKeys = accessMethod->isEnforcingDuplicateConstraints();
+ *oldDisallowNewDuplicateKeys = idx->disallowNewDuplicateKeys();
if (*oldDisallowNewDuplicateKeys != *newDisallowNewDuplicateKeys) {
- accessMethod->setEnforceDuplicateConstraints(indexDisallowNewDuplicateKeys);
+ autoColl->getWritableCollection(opCtx)->updateDisallowNewDuplicateKeysSetting(
+ opCtx, idx->indexName(), indexDisallowNewDuplicateKeys);
}
}
@@ -269,8 +270,8 @@ void processCollModIndexRequest(OperationContext* opCtx,
newHidden,
oldHidden,
newUnique,
- oldDisallowNewDuplicateKeys,
newDisallowNewDuplicateKeys,
+ oldDisallowNewDuplicateKeys,
idx->indexName()};
// This matches the default for IndexCatalog::refreshEntry().
@@ -311,7 +312,6 @@ void processCollModIndexRequest(OperationContext* opCtx,
result->appendBool("unique_new", true);
}
if (newDisallowNewDuplicateKeys) {
- // Unlike other fields, 'disallowNewDuplicateKeys' can have the same old and new values.
invariant(oldDisallowNewDuplicateKeys);
result->append("disallowNewDuplicateKeys_old", *oldDisallowNewDuplicateKeys);
result->append("disallowNewDuplicateKeys_new", *newDisallowNewDuplicateKeys);
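
As the hunk above shows, the reply reports both the old and the new value when the setting actually changes. A hedged sketch of what a caller sees (the field names come from the diff; the starting state, duplicates allowed, is an assumption):

    // Sketch: the collMod reply carries the before/after values appended in
    // processCollModIndexRequest() above.
    const res = assert.commandWorked(db.runCommand(
        {collMod: 'c', index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
    assert.eq(false, res.disallowNewDuplicateKeys_old);
    assert.eq(true, res.disallowNewDuplicateKeys_new);
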
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 6fd99c61da8..145054d8498 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -607,6 +607,13 @@ public:
*/
virtual void updateUniqueSetting(OperationContext* opCtx, StringData idxName) = 0;
+ /**
+ * Disallows or allows new duplicate keys in the given index.
+ */
+ virtual void updateDisallowNewDuplicateKeysSetting(OperationContext* opCtx,
+ StringData idxName,
+ bool disallowNewDuplicateKeys) = 0;
+
/**
* Removes invalid index options on all indexes in this collection. Returns a list of index
* names that contained invalid index options.
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 77828ef564f..0e9c4a28239 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -1980,6 +1980,17 @@ void CollectionImpl::updateUniqueSetting(OperationContext* opCtx, StringData idx
});
}
+void CollectionImpl::updateDisallowNewDuplicateKeysSetting(OperationContext* opCtx,
+ StringData idxName,
+ bool disallowNewDuplicateKeys) {
+ int offset = _metadata->findIndexOffset(idxName);
+ invariant(offset >= 0);
+
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.indexes[offset].updateDisallowNewDuplicateKeysSetting(disallowNewDuplicateKeys);
+ });
+}
+
std::vector<std::string> CollectionImpl::removeInvalidIndexOptions(OperationContext* opCtx) {
std::vector<std::string> indexesWithInvalidOptions;
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index 30342ef3730..c7fc2cc314f 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -443,6 +443,10 @@ public:
void updateUniqueSetting(OperationContext* opCtx, StringData idxName) final;
+ void updateDisallowNewDuplicateKeysSetting(OperationContext* opCtx,
+ StringData idxName,
+ bool disallowNewDuplicateKeys) final;
+
std::vector<std::string> removeInvalidIndexOptions(OperationContext* opCtx) final;
void setIsTemp(OperationContext* opCtx, bool isTemp) final;
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index f111892b265..00711f95cfc 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -413,6 +413,12 @@ public:
std::abort();
}
+ void updateDisallowNewDuplicateKeysSetting(OperationContext* opCtx,
+ StringData idxName,
+ bool disallowNewDuplicateKeys) {
+ std::abort();
+ }
+
std::vector<std::string> removeInvalidIndexOptions(OperationContext* opCtx) {
std::abort();
}
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index c6d64ddf583..163d1807b57 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -1433,7 +1433,6 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
// CollectionIndexUsageTrackerDecoration (shared state among Collection instances).
auto oldEntry = _readyIndexes.release(oldDesc);
invariant(oldEntry);
- auto enforceDuplicateConstraints = oldEntry->accessMethod()->isEnforcingDuplicateConstraints();
opCtx->recoveryUnit()->registerChange(std::make_unique<IndexRemoveChange>(
std::move(oldEntry), collection->getSharedDecorations()));
CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations())
@@ -1448,7 +1447,6 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
auto newDesc = std::make_unique<IndexDescriptor>(_getAccessMethodName(keyPattern), spec);
auto newEntry = createIndexEntry(opCtx, collection, std::move(newDesc), flags);
invariant(newEntry->isReady(opCtx));
- newEntry->accessMethod()->setEnforceDuplicateConstraints(enforceDuplicateConstraints);
auto desc = newEntry->descriptor();
CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations())
.registerIndex(desc->indexName(), desc->keyPattern());
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index caa209fd3c3..3a663298d5d 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -94,6 +94,7 @@ static std::set<StringData> allowedFieldNames = {
IndexDescriptor::kUniqueFieldName,
IndexDescriptor::kWeightsFieldName,
IndexDescriptor::kOriginalSpecFieldName,
+ IndexDescriptor::kDisallowNewDuplicateKeysFieldName,
// Index creation under legacy writeMode can result in an index spec with an _id field.
"_id"};
@@ -497,6 +498,7 @@ StatusWith<BSONObj> validateIndexSpec(OperationContext* opCtx, const BSONObj& in
IndexDescriptor::k2dsphereCoarsestIndexedLevel == indexSpecElemFieldName ||
IndexDescriptor::k2dsphereFinestIndexedLevel == indexSpecElemFieldName ||
IndexDescriptor::kDropDuplicatesFieldName == indexSpecElemFieldName ||
+ IndexDescriptor::kDisallowNewDuplicateKeysFieldName == indexSpecElemFieldName ||
"clustered" == indexSpecElemFieldName) &&
!indexSpecElem.isNumber() && !indexSpecElem.isBoolean()) {
return {ErrorCodes::TypeMismatch,
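
The hunk above adds the field to the allow-list and to the number-or-boolean type check. A sketch of the resulting validation behavior; whether createIndex accepts the option end to end is an assumption here, but the TypeMismatch rejection follows directly from the check above:

    // Sketch: validateIndexSpec() accepts boolean (or numeric) values for the
    // field and rejects other types with TypeMismatch.
    assert.commandWorked(db.c.createIndex({b: 1}, {disallowNewDuplicateKeys: true}));
    assert.commandFailedWithCode(
        db.c.createIndex({d: 1}, {disallowNewDuplicateKeys: 'yes'}),
        ErrorCodes.TypeMismatch);
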
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 52e02ccbce8..d291efd6105 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -268,6 +268,7 @@ Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx,
*numInserted = 0;
}
bool unique = _descriptor->unique();
+ bool disallowNewDuplicateKeys = _descriptor->disallowNewDuplicateKeys();
bool dupsAllowed;
if (!_descriptor->isIdIndex() && !opCtx->isEnforcingConstraints() &&
coll->isIndexReady(_descriptor->indexName())) {
@@ -279,7 +280,7 @@ Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx,
// Additionally, unique indexes conflict checking can cause out-of-order updates in
// wiredtiger. See SERVER-59831.
dupsAllowed = true;
- } else if (isEnforcingDuplicateConstraints()) {
+ } else if (disallowNewDuplicateKeys) {
// This currently is only used by collMod command when converting a regular index to a
// unique index. The regular index will start rejecting duplicates even before the
// conversion finishes.
@@ -294,7 +295,7 @@ Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx,
// When duplicates are encountered and allowed, retry with dupsAllowed. Call
// onDuplicateKey() with the inserted duplicate key.
if (ErrorCodes::DuplicateKey == result.getStatus().code() && options.dupsAllowed &&
- !isEnforcingDuplicateConstraints()) {
+ !disallowNewDuplicateKeys) {
invariant(unique);
result = _newInterface->insert(opCtx, keyString, true /* dupsAllowed */);
@@ -541,7 +542,7 @@ Status SortedDataIndexAccessMethod::doUpdate(OperationContext* opCtx,
// Add all new data keys into the index.
for (const auto& keyString : ticket.added) {
- bool dupsAllowed = !isEnforcingDuplicateConstraints() && ticket.dupsAllowed;
+ bool dupsAllowed = !_descriptor->disallowNewDuplicateKeys() && ticket.dupsAllowed;
auto result = _newInterface->insert(opCtx, keyString, dupsAllowed);
if (!result.isOK())
return result.getStatus();
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 8b911c01603..2fe9efc1778 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -43,7 +43,6 @@
#include "mongo/db/sorter/sorter.h"
#include "mongo/db/storage/sorted_data_interface.h"
#include "mongo/db/yieldable.h"
-#include "mongo/platform/atomic_word.h"
namespace mongo {
@@ -240,23 +239,6 @@ public:
size_t maxMemoryUsageBytes,
const boost::optional<IndexStateInfo>& stateInfo,
StringData dbName) = 0;
-
- void setEnforceDuplicateConstraints(bool enforceDuplicateConstraints) {
- _enforceDuplicateConstraints.swap(enforceDuplicateConstraints);
- }
-
- /**
- * When `true`, disallows duplicates when inserting to or updating the index. Otherwise, sets
- * `dupsAllowed` according to other options.
- * Currently only temporarily set to `true` during collMod converting index to unique. This
- * should always remain `false` otherwise.
- */
- bool isEnforcingDuplicateConstraints() const {
- return _enforceDuplicateConstraints.load();
- }
-
-private:
- AtomicWord<bool> _enforceDuplicateConstraints{false};
};
/**
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index 69fac811cd6..b276fb9b38c 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/server_options.h"
+#include "mongo/db/storage/storage_parameters_gen.h"
namespace mongo {
@@ -106,6 +107,7 @@ constexpr StringData IndexDescriptor::kTextVersionFieldName;
constexpr StringData IndexDescriptor::kUniqueFieldName;
constexpr StringData IndexDescriptor::kHiddenFieldName;
constexpr StringData IndexDescriptor::kWeightsFieldName;
+constexpr StringData IndexDescriptor::kDisallowNewDuplicateKeysFieldName;
IndexDescriptor::IndexDescriptor(const std::string& accessMethodName, BSONObj infoObj)
: _accessMethodName(accessMethodName),
@@ -133,6 +135,15 @@ IndexDescriptor::IndexDescriptor(const std::string& accessMethodName, BSONObj in
invariant(collationElement.isABSONObj());
_collation = collationElement.Obj().getOwned();
}
+
+ if (BSONElement disallowNewDuplicateKeysElement =
+ _infoObj[kDisallowNewDuplicateKeysFieldName]) {
+ uassert(
+ ErrorCodes::InvalidOptions,
+ "Index does not support the 'disallowNewDuplicateKeys' field",
+ feature_flags::gCollModIndexUnique.isEnabled(serverGlobalParams.featureCompatibility));
+ _disallowNewDuplicateKeys = disallowNewDuplicateKeysElement.trueValue();
+ }
}
bool IndexDescriptor::isIndexVersionSupported(IndexVersion indexVersion) {
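
The uassert in the constructor above gates the field on the feature flag. A sketch of the user-visible effect when the flag is disabled (reaching the check through createIndex is an assumption; the InvalidOptions code comes from the uassert):

    // Sketch: with featureFlagCollModIndexUnique disabled, a spec carrying the
    // field fails IndexDescriptor construction with InvalidOptions.
    assert.commandFailedWithCode(
        db.c.createIndex({e: 1}, {disallowNewDuplicateKeys: true}),
        ErrorCodes.InvalidOptions);
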
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 76e5dce4c91..35a005371b4 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -88,6 +88,7 @@ public:
static constexpr StringData kUniqueFieldName = "unique"_sd;
static constexpr StringData kWeightsFieldName = "weights"_sd;
static constexpr StringData kOriginalSpecFieldName = "originalSpec"_sd;
+ static constexpr StringData kDisallowNewDuplicateKeysFieldName = "disallowNewDuplicateKeys"_sd;
/**
* infoObj is a copy of the index-describing BSONObj contained in the catalog.
@@ -226,6 +227,10 @@ public:
return _partialFilterExpression;
}
+ bool disallowNewDuplicateKeys() const {
+ return _disallowNewDuplicateKeys;
+ }
+
/**
* Returns true if the key pattern is for the _id index.
* The _id index must have form exactly {_id : 1} or {_id : -1}.
@@ -275,6 +280,7 @@ private:
IndexVersion _version;
BSONObj _collation;
BSONObj _partialFilterExpression;
+ bool _disallowNewDuplicateKeys = false;
// Many query stages require going from an IndexDescriptor to its IndexCatalogEntry, so for
// now we need this.
diff --git a/src/mongo/db/list_indexes.idl b/src/mongo/db/list_indexes.idl
index 10d78a0479d..5d4773d5a12 100644
--- a/src/mongo/db/list_indexes.idl
+++ b/src/mongo/db/list_indexes.idl
@@ -156,6 +156,10 @@ structs:
type: safeBool
optional: true
unstable: false
+ disallowNewDuplicateKeys:
+ type: safeBool
+ optional: true
+ unstable: true
#
# Depending on the values of includeIndexBuildInfo and includeBuildUUIDs, indexes may
# appear with a combination of these three fields. Specifically, if includeIndexBuildInfo
diff --git a/src/mongo/db/op_observer_util.cpp b/src/mongo/db/op_observer_util.cpp
index 5c394ac57ec..e871f7c1cb2 100644
--- a/src/mongo/db/op_observer_util.cpp
+++ b/src/mongo/db/op_observer_util.cpp
@@ -64,6 +64,10 @@ BSONObj makeCollModCmdObj(const BSONObj& collModCmd,
if (indexInfo->unique)
indexObjBuilder.append("unique", indexInfo->unique.get());
+ if (indexInfo->disallowNewDuplicateKeys)
+ indexObjBuilder.append("disallowNewDuplicateKeys",
+ indexInfo->disallowNewDuplicateKeys.get());
+
cmdObjBuilder.append(indexFieldName, indexObjBuilder.obj());
} else {
cmdObjBuilder.append(elem);
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index 1266e07ac37..def3dc283d5 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -146,6 +146,23 @@ void BSONCollectionCatalogEntry::IndexMetaData::updateUniqueSetting() {
spec = b.obj();
}
+void BSONCollectionCatalogEntry::IndexMetaData::updateDisallowNewDuplicateKeysSetting(
+ bool disallowNewDuplicateKeys) {
+ // If disallowNewDuplicateKeys == false, we remove this field from the catalog rather than
+ // storing the field with a false value.
+ BSONObjBuilder b;
+ for (BSONObjIterator bi(spec); bi.more();) {
+ BSONElement e = bi.next();
+ if (e.fieldNameStringData() != "disallowNewDuplicateKeys") {
+ b.append(e);
+ }
+ }
+
+ if (disallowNewDuplicateKeys) {
+ b.append("disallowNewDuplicateKeys", disallowNewDuplicateKeys);
+ }
+ spec = b.obj();
+}
// --------------------------
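
Because a false value removes the field from the stored spec rather than persisting 'disallowNewDuplicateKeys: false', turning the option off restores the original spec exactly. A hedged shell sketch (assumes the stored spec round-trips through listIndexes):

    // Sketch: disabling the option drops the field from the catalog entry
    // instead of storing it with a false value.
    assert.commandWorked(db.runCommand(
        {collMod: 'c', index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: false}}));
    const spec = db.c.getIndexes().filter(s => friendlyEqual(s.key, {a: 1}))[0];
    assert(!spec.hasOwnProperty('disallowNewDuplicateKeys'));
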
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index cf9c907e491..c85bc6755d0 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -100,6 +100,8 @@ public:
void updateUniqueSetting();
+ void updateDisallowNewDuplicateKeysSetting(bool disallowNewDuplicateKeys);
+
StringData nameStringData() const {
return spec["name"].valueStringDataSafe();
}
diff --git a/src/mongo/dbtests/index_access_method_test.cpp b/src/mongo/dbtests/index_access_method_test.cpp
index e4e92f8d99d..4d24fb5c11f 100644
--- a/src/mongo/dbtests/index_access_method_test.cpp
+++ b/src/mongo/dbtests/index_access_method_test.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/client.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/json.h"
+#include "mongo/db/storage/storage_parameters_gen.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/unittest/unittest.h"
@@ -258,95 +259,91 @@ TEST(IndexAccessMethodInsertKeys, DuplicatesCheckingOnSecondaryUniqueIndexes) {
ASSERT_EQ(numInserted, 2);
}
-TEST(IndexAccessMethodInsertKeys, InsertWhenEnforcingDuplicateConstraints) {
- ServiceContext::UniqueOperationContext opCtxRaii = cc().makeOperationContext();
- OperationContext* opCtx = opCtxRaii.get();
- NamespaceString nss("unittests.InsertWhenEnforcingDuplicateConstraints");
- auto indexName = "a_1";
- auto indexSpec = BSON("name" << indexName << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(IndexDescriptor::IndexVersion::kV2));
- ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns(), indexSpec));
-
- AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_X);
- const auto& coll = autoColl.getCollection();
- auto indexDescriptor = coll->getIndexCatalog()->findIndexByName(opCtx, indexName);
- auto indexAccessMethod =
- coll->getIndexCatalog()->getEntry(indexDescriptor)->accessMethod()->asSortedData();
-
- KeyString::HeapBuilder keyString1(
- KeyString::Version::kLatestVersion, BSON("" << 1), Ordering::make(BSONObj()), RecordId(1));
- KeyString::HeapBuilder keyString2(
- KeyString::Version::kLatestVersion, BSON("" << 1), Ordering::make(BSONObj()), RecordId(2));
- KeyString::HeapBuilder keyString3(
- KeyString::Version::kLatestVersion, BSON("" << 1), Ordering::make(BSONObj()), RecordId(3));
- KeyStringSet keys1{keyString1.release(), keyString2.release()};
- KeyStringSet keys2{keyString3.release()};
- struct InsertDeleteOptions options;
- int64_t numInserted;
-
- // Allows duplicates in a regular index.
- ASSERT_OK(indexAccessMethod->insertKeys(opCtx, coll, keys1, options, {}, &numInserted));
- ASSERT_EQ(numInserted, 2);
-
- // Enforces the duplicate constraint on the index and rejects duplicates when inserting.
- indexAccessMethod->setEnforceDuplicateConstraints(true);
- auto status = indexAccessMethod->insertKeys(opCtx, coll, keys2, options, {}, &numInserted);
- ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey);
- ASSERT_EQ(numInserted, 0);
-
- // Resets the duplicate constraint and accepts duplicates again.
- indexAccessMethod->setEnforceDuplicateConstraints(false);
- ASSERT_OK(indexAccessMethod->insertKeys(opCtx, coll, keys2, options, {}, &numInserted));
- ASSERT_EQ(numInserted, 1);
+TEST(IndexAccessMethodInsertKeys, InsertWhenDisallowNewDuplicateKeys) {
+ if (feature_flags::gCollModIndexUnique.isEnabled(serverGlobalParams.featureCompatibility)) {
+ ServiceContext::UniqueOperationContext opCtxRaii = cc().makeOperationContext();
+ OperationContext* opCtx = opCtxRaii.get();
+ NamespaceString nss("unittests.InsertWhenDisallowNewDuplicateKeys");
+ auto indexName = "a_1";
+ auto indexSpec =
+ BSON("name" << indexName << "key" << BSON("a" << 1) << "disallowNewDuplicateKeys"
+ << true << "v" << static_cast<int>(IndexDescriptor::IndexVersion::kV2));
+ ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns(), indexSpec));
+
+ AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_X);
+ const auto& coll = autoColl.getCollection();
+ auto indexDescriptor = coll->getIndexCatalog()->findIndexByName(opCtx, indexName);
+ auto indexAccessMethod =
+ coll->getIndexCatalog()->getEntry(indexDescriptor)->accessMethod()->asSortedData();
+
+ KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion,
+ BSON("" << 1),
+ Ordering::make(BSONObj()),
+ RecordId(1));
+ KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion,
+ BSON("" << 1),
+ Ordering::make(BSONObj()),
+ RecordId(2));
+ KeyStringSet keys{keyString1.release(), keyString2.release()};
+ struct InsertDeleteOptions options;
+ int64_t numInserted;
+
+ // Disallows new duplicates in a regular index and rejects the insert.
+ auto status = indexAccessMethod->insertKeys(opCtx, coll, keys, options, {}, &numInserted);
+ ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey);
+ ASSERT_EQ(numInserted, 0);
+ }
}
-TEST(IndexAccessMethodUpdateKeys, UpdateWhenEnforcingDuplicateConstraints) {
- ServiceContext::UniqueOperationContext opCtxRaii = cc().makeOperationContext();
- OperationContext* opCtx = opCtxRaii.get();
- NamespaceString nss("unittests.UpdateWhenEnforcingDuplicateConstraints");
- auto indexName = "a_1";
- auto indexSpec = BSON("name" << indexName << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(IndexDescriptor::IndexVersion::kV2));
- ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns(), indexSpec));
-
- AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_X);
- const auto& coll = autoColl.getCollection();
- auto indexDescriptor = coll->getIndexCatalog()->findIndexByName(opCtx, indexName);
- auto indexAccessMethod =
- coll->getIndexCatalog()->getEntry(indexDescriptor)->accessMethod()->asSortedData();
-
- KeyString::HeapBuilder keyString1(
- KeyString::Version::kLatestVersion, BSON("" << 1), Ordering::make(BSONObj()), RecordId(1));
- KeyString::HeapBuilder keyString2_old(
- KeyString::Version::kLatestVersion, BSON("" << 2), Ordering::make(BSONObj()), RecordId(2));
- KeyString::HeapBuilder keyString2_new(
- KeyString::Version::kLatestVersion, BSON("" << 1), Ordering::make(BSONObj()), RecordId(2));
- KeyStringSet key1{keyString1.release()};
- KeyStringSet key2_old{keyString2_old.release()};
- KeyStringSet key2_new{keyString2_new.release()};
- struct InsertDeleteOptions options;
- UpdateTicket ticket{true, {}, {}, {}, key2_old, key2_new, RecordId(2), true, {}};
- int64_t numInserted;
- int64_t numDeleted;
-
- // Inserts two keys.
- ASSERT_OK(indexAccessMethod->insertKeys(opCtx, coll, key1, options, {}, &numInserted));
- ASSERT_EQ(numInserted, 1);
- ASSERT_OK(indexAccessMethod->insertKeys(opCtx, coll, key2_old, options, {}, &numInserted));
- ASSERT_EQ(numInserted, 1);
-
- // Enforces the duplicate constraint on the index and rejects duplicates when updating.
- indexAccessMethod->setEnforceDuplicateConstraints(true);
- auto status = indexAccessMethod->doUpdate(opCtx, coll, ticket, &numInserted, &numDeleted);
- ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey);
- ASSERT_EQ(numInserted, 0);
- ASSERT_EQ(numDeleted, 0);
-
- // Resets the duplicate constraint and accepts duplicates again.
- indexAccessMethod->setEnforceDuplicateConstraints(false);
- ASSERT_OK(indexAccessMethod->doUpdate(opCtx, coll, ticket, &numInserted, &numDeleted));
- ASSERT_EQ(numInserted, 1);
- ASSERT_EQ(numDeleted, 1);
+TEST(IndexAccessMethodUpdateKeys, UpdateWhenDisallowNewDuplicateKeys) {
+ if (feature_flags::gCollModIndexUnique.isEnabled(serverGlobalParams.featureCompatibility)) {
+ ServiceContext::UniqueOperationContext opCtxRaii = cc().makeOperationContext();
+ OperationContext* opCtx = opCtxRaii.get();
+ NamespaceString nss("unittests.UpdateWhenDisallowNewDuplicateKeys");
+ auto indexName = "a_1";
+ auto indexSpec =
+ BSON("name" << indexName << "key" << BSON("a" << 1) << "disallowNewDuplicateKeys"
+ << true << "v" << static_cast<int>(IndexDescriptor::IndexVersion::kV2));
+ ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns(), indexSpec));
+
+ AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_X);
+ const auto& coll = autoColl.getCollection();
+ auto indexDescriptor = coll->getIndexCatalog()->findIndexByName(opCtx, indexName);
+ auto indexAccessMethod =
+ coll->getIndexCatalog()->getEntry(indexDescriptor)->accessMethod()->asSortedData();
+
+ KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion,
+ BSON("" << 1),
+ Ordering::make(BSONObj()),
+ RecordId(1));
+ KeyString::HeapBuilder keyString2_old(KeyString::Version::kLatestVersion,
+ BSON("" << 2),
+ Ordering::make(BSONObj()),
+ RecordId(2));
+ KeyString::HeapBuilder keyString2_new(KeyString::Version::kLatestVersion,
+ BSON("" << 1),
+ Ordering::make(BSONObj()),
+ RecordId(2));
+ KeyStringSet key1{keyString1.release()};
+ KeyStringSet key2_old{keyString2_old.release()};
+ KeyStringSet key2_new{keyString2_new.release()};
+ struct InsertDeleteOptions options;
+ UpdateTicket ticket{true, {}, {}, {}, key2_old, key2_new, RecordId(2), true, {}};
+ int64_t numInserted;
+ int64_t numDeleted;
+
+ // Inserts two keys.
+ ASSERT_OK(indexAccessMethod->insertKeys(opCtx, coll, key1, options, {}, &numInserted));
+ ASSERT_EQ(numInserted, 1);
+ ASSERT_OK(indexAccessMethod->insertKeys(opCtx, coll, key2_old, options, {}, &numInserted));
+ ASSERT_EQ(numInserted, 1);
+
+ // Disallows new duplicates in a regular index and rejects the update.
+ auto status = indexAccessMethod->doUpdate(opCtx, coll, ticket, &numInserted, &numDeleted);
+ ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey);
+ ASSERT_EQ(numInserted, 0);
+ ASSERT_EQ(numDeleted, 0);
+ }
}
} // namespace