summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPavi Vetriselvan <pavithra.vetriselvan@mongodb.com>2022-02-24 15:20:53 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-02-24 16:22:57 +0000
commita23d9da0518f79325b7dda04796dba0f856a08cd (patch)
treed31b1cd5ec01588521cb59860b0135d1f2e0612e
parent07ec8eb7a18490026a8a716f359a2d5c416e0006 (diff)
downloadmongo-a23d9da0518f79325b7dda04796dba0f856a08cd.tar.gz
SERVER-63664 Rename disallowNewDuplicateKeys option to prepareUnique
-rw-r--r--jstests/core/collmod_convert_to_unique.js24
-rw-r--r--jstests/core/collmod_convert_to_unique_apply_ops.js6
-rw-r--r--jstests/core/collmod_convert_to_unique_violations.js12
-rw-r--r--jstests/core/index_prepareUnique.js (renamed from jstests/core/index_disallowNewDuplicateKeys.js)20
-rw-r--r--jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js6
-rw-r--r--jstests/noPassthrough/collmod_convert_to_unique_locking.js4
-rw-r--r--jstests/noPassthrough/collmod_disallow_duplicates_restart.js6
-rw-r--r--jstests/noPassthrough/collmod_disallow_duplicates_step_up.js6
-rw-r--r--jstests/noPassthrough/collmod_index_noop.js30
-rw-r--r--jstests/noPassthrough/index_prepareUnique_downgrade.js (renamed from jstests/noPassthrough/index_disallowNewDuplicateKeys_downgrade.js)16
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp35
-rw-r--r--src/mongo/db/catalog/coll_mod_index.cpp64
-rw-r--r--src/mongo/db/catalog/coll_mod_index.h2
-rw-r--r--src/mongo/db/catalog/collection.h6
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp8
-rw-r--r--src/mongo/db/catalog/collection_impl.h6
-rw-r--r--src/mongo/db/catalog/collection_mock.h6
-rw-r--r--src/mongo/db/catalog/index_catalog_impl.cpp6
-rw-r--r--src/mongo/db/catalog/index_key_validate.cpp4
-rw-r--r--src/mongo/db/coll_mod.idl6
-rw-r--r--src/mongo/db/coll_mod_reply_validation.cpp9
-rw-r--r--src/mongo/db/coll_mod_reply_validation.h2
-rw-r--r--src/mongo/db/commands/set_feature_compatibility_version_command.cpp4
-rw-r--r--src/mongo/db/create_indexes.idl2
-rw-r--r--src/mongo/db/index/index_access_method.cpp8
-rw-r--r--src/mongo/db/index/index_descriptor.cpp9
-rw-r--r--src/mongo/db/index/index_descriptor.h8
-rw-r--r--src/mongo/db/list_indexes.idl2
-rw-r--r--src/mongo/db/op_observer.h4
-rw-r--r--src/mongo/db/op_observer_impl.cpp6
-rw-r--r--src/mongo/db/op_observer_util.cpp5
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.cpp11
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.h2
-rw-r--r--src/mongo/dbtests/index_access_method_test.cpp16
34 files changed, 172 insertions, 189 deletions
diff --git a/jstests/core/collmod_convert_to_unique.js b/jstests/core/collmod_convert_to_unique.js
index 1491213c917..2d5871e69bd 100644
--- a/jstests/core/collmod_convert_to_unique.js
+++ b/jstests/core/collmod_convert_to_unique.js
@@ -42,14 +42,14 @@ function countUnique(key) {
// Creates a regular index and use collMod to convert it to a unique index.
assert.commandWorked(coll.createIndex({a: 1}));
-// Tries to convert to unique without setting `disallowNewDuplicateKeys`.
+// Tries to convert to unique without setting `prepareUnique`.
assert.commandFailedWithCode(
db.runCommand({collMod: collName, index: {keyPattern: {a: 1}, unique: true}}),
ErrorCodes.InvalidOptions);
-// First sets 'disallowNewDuplicateKeys' before converting the index to unique.
-assert.commandWorked(db.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+// First sets 'prepareUnique' before converting the index to unique.
+assert.commandWorked(
+ db.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: true}}));
// Tries to modify with a string 'unique' value.
assert.commandFailedWithCode(
@@ -71,12 +71,12 @@ assert.commandFailedWithCode(db.runCommand({
ErrorCodes.NamespaceNotFound);
// Conversion should fail when there are existing duplicates.
-assert.commandWorked(db.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: false}}));
+assert.commandWorked(
+ db.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: false}}));
assert.commandWorked(coll.insert({_id: 1, a: 100}));
assert.commandWorked(coll.insert({_id: 2, a: 100}));
-assert.commandWorked(db.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+assert.commandWorked(
+ db.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: true}}));
const cannotConvertIndexToUniqueError = assert.commandFailedWithCode(
db.runCommand({collMod: collName, index: {keyPattern: {a: 1}, unique: true}}),
ErrorCodes.CannotConvertIndexToUnique);
@@ -124,11 +124,11 @@ assert.commandWorked(
assert.eq(countUnique({a: 1}), 0, 'index should not be unique: ' + tojson(coll.getIndexes()));
// Conversion should report errors if there are duplicates.
-assert.commandWorked(db.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: false}}));
+assert.commandWorked(
+ db.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: false}}));
assert.commandWorked(coll.insert({_id: 3, a: 100}));
-assert.commandWorked(db.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+assert.commandWorked(
+ db.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: true}}));
assert.commandFailedWithCode(
db.runCommand({collMod: collName, index: {keyPattern: {a: 1}, unique: true}, dryRun: true}),
ErrorCodes.CannotConvertIndexToUnique);
diff --git a/jstests/core/collmod_convert_to_unique_apply_ops.js b/jstests/core/collmod_convert_to_unique_apply_ops.js
index 7ec5b1d69e2..8bcfe11c5c4 100644
--- a/jstests/core/collmod_convert_to_unique_apply_ops.js
+++ b/jstests/core/collmod_convert_to_unique_apply_ops.js
@@ -67,9 +67,9 @@ const applyOpsCmd = {
// Conversion should fail when there are existing duplicates.
assert.commandWorked(coll.insert({_id: 1, a: 100}));
assert.commandWorked(coll.insert({_id: 2, a: 100}));
-// First sets 'disallowNewDuplicateKeys' before converting the index to unique.
-assert.commandWorked(db.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+// First sets 'prepareUnique' before converting the index to unique.
+assert.commandWorked(
+ db.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: true}}));
const cannotConvertIndexToUniqueError = assert.commandFailedWithCode(
db.adminCommand(applyOpsCmd), ErrorCodes.CannotConvertIndexToUnique);
jsTestLog('Cannot enable index constraint error from failed conversion: ' +
diff --git a/jstests/core/collmod_convert_to_unique_violations.js b/jstests/core/collmod_convert_to_unique_violations.js
index 0994ad9d44b..aa4a50adecd 100644
--- a/jstests/core/collmod_convert_to_unique_violations.js
+++ b/jstests/core/collmod_convert_to_unique_violations.js
@@ -53,9 +53,9 @@ coll.drop();
// Checks that the violations match what we expect.
function assertFailedWithViolations(keyPattern, violations) {
- // First sets 'disallowNewDuplicateKeys' before converting the index to unique.
- assert.commandWorked(db.runCommand(
- {collMod: collName, index: {keyPattern: keyPattern, disallowNewDuplicateKeys: true}}));
+ // First sets 'prepareUnique' before converting the index to unique.
+ assert.commandWorked(
+ db.runCommand({collMod: collName, index: {keyPattern: keyPattern, prepareUnique: true}}));
const result =
db.runCommand({collMod: collName, index: {keyPattern: keyPattern, unique: true}});
assert.commandFailedWithCode(result, ErrorCodes.CannotConvertIndexToUnique);
@@ -63,9 +63,9 @@ function assertFailedWithViolations(keyPattern, violations) {
bsonWoCompare(sortViolationsArray(result.violations), sortViolationsArray(violations)),
0,
tojson(result));
- // Resets 'disallowNewDuplicateKeys'.
- assert.commandWorked(db.runCommand(
- {collMod: collName, index: {keyPattern: keyPattern, disallowNewDuplicateKeys: false}}));
+ // Resets 'prepareUnique'.
+ assert.commandWorked(
+ db.runCommand({collMod: collName, index: {keyPattern: keyPattern, prepareUnique: false}}));
}
assert.commandWorked(db.createCollection(collName));
diff --git a/jstests/core/index_disallowNewDuplicateKeys.js b/jstests/core/index_prepareUnique.js
index cbcdee2ce9e..97b3d0ef874 100644
--- a/jstests/core/index_disallowNewDuplicateKeys.js
+++ b/jstests/core/index_prepareUnique.js
@@ -1,5 +1,5 @@
/**
- * Tests that the createIndex command accepts a disallowNewDuplicateKeys field and works accordingly
+ * Tests that the createIndex command accepts a prepareUnique field and works accordingly
* then.
*
* @tags: [requires_fcv_53]
@@ -7,7 +7,7 @@
(function() {
"use strict";
-const coll = db.index_disallowNewDuplicateKeys;
+const coll = db.index_prepareUnique;
coll.drop();
const collModIndexUniqueEnabled = assert
@@ -23,31 +23,29 @@ if (!collModIndexUniqueEnabled) {
assert.commandWorked(coll.insert({_id: 0, a: 1}));
// Starts rejecting new duplicate keys.
-assert.commandWorked(coll.createIndex({a: 1}, {disallowNewDuplicateKeys: true}));
+assert.commandWorked(coll.createIndex({a: 1}, {prepareUnique: true}));
// Disallows creating another index on the same key with a different option.
-assert.commandFailedWithCode(coll.createIndex({a: 1}, {disallowNewDuplicateKeys: false}),
+assert.commandFailedWithCode(coll.createIndex({a: 1}, {prepareUnique: false}),
ErrorCodes.IndexOptionsConflict);
// Checks the index is rejecting duplicates but accepting other keys.
assert.commandFailedWithCode(coll.insert({_id: 1, a: 1}), ErrorCodes.DuplicateKey);
assert.commandWorked(coll.insert({_id: 2, a: 2}));
-// Checks that the disallowNewDuplicateKeys field exists when getIndexes is called.
+// Checks that the prepareUnique field exists when getIndexes is called.
let indexesWithComments = coll.getIndexes().filter(function(doc) {
- return friendlyEqual(doc.disallowNewDuplicateKeys, true);
+ return friendlyEqual(doc.prepareUnique, true);
});
assert.eq(1, indexesWithComments.length);
// Removes the field and checks the index works as a regular index.
-assert.commandWorked(coll.runCommand({
- collMod: "index_disallowNewDuplicateKeys",
- index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: false}
-}));
+assert.commandWorked(coll.runCommand(
+ {collMod: "index_prepareUnique", index: {keyPattern: {a: 1}, prepareUnique: false}}));
assert.commandWorked(coll.insert({_id: 1, a: 1}));
indexesWithComments = coll.getIndexes().filter(function(doc) {
- return friendlyEqual(doc.disallowNewDuplicateKeys, true);
+ return friendlyEqual(doc.prepareUnique, true);
});
assert.eq(0, indexesWithComments.length);
})(); \ No newline at end of file
diff --git a/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js b/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js
index 038346448fa..11fe01ce8fb 100644
--- a/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js
+++ b/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js
@@ -73,8 +73,8 @@ const testCollModConvertUniqueWithSideWrites = function(initialDocs,
assert.commandWorked(coll.insert(initialDocs));
// Disallows new duplicate keys on the index.
- assert.commandWorked(testDB.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+ assert.commandWorked(
+ testDB.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: true}}));
let awaitCollMod = () => {};
const failPoint = configureFailPoint(
@@ -161,7 +161,7 @@ const testCollModConvertUniqueWithSideWrites = function(initialDocs,
// Resets to allow duplicates on the regular index.
assert.commandWorked(testDB.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: false}}));
+ {collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: false}}));
// Checks that uniqueness constraint is not enforced.
assert.commandWorked(coll.insert(duplicateDoc));
diff --git a/jstests/noPassthrough/collmod_convert_to_unique_locking.js b/jstests/noPassthrough/collmod_convert_to_unique_locking.js
index 4bc08a7958e..1c217272f2b 100644
--- a/jstests/noPassthrough/collmod_convert_to_unique_locking.js
+++ b/jstests/noPassthrough/collmod_convert_to_unique_locking.js
@@ -43,8 +43,8 @@ assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.insert({a: 1}));
// Disallows new duplicate keys on the index.
-assert.commandWorked(testDB.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+assert.commandWorked(
+ testDB.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: true}}));
let awaitCollMod = () => {};
const failPoint = configureFailPoint(
diff --git a/jstests/noPassthrough/collmod_disallow_duplicates_restart.js b/jstests/noPassthrough/collmod_disallow_duplicates_restart.js
index ea03c03613d..7846252d5b2 100644
--- a/jstests/noPassthrough/collmod_disallow_duplicates_restart.js
+++ b/jstests/noPassthrough/collmod_disallow_duplicates_restart.js
@@ -34,12 +34,12 @@ const collName = 'collmod_disallow_duplicates_step_up';
let db_primary = primary.getDB('test');
let coll_primary = db_primary.getCollection(collName);
-// Sets 'disallowNewDuplicateKeys' and checks that the index rejects duplicates.
+// Sets 'prepareUnique' and checks that the index rejects duplicates.
coll_primary.drop();
assert.commandWorked(coll_primary.createIndex({a: 1}));
assert.commandWorked(coll_primary.insert({_id: 0, a: 1}));
-assert.commandWorked(db_primary.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+assert.commandWorked(
+ db_primary.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: true}}));
assert.commandFailedWithCode(coll_primary.insert({_id: 1, a: 1}), ErrorCodes.DuplicateKey);
// Restarts the primary and checks the index spec is persisted.
diff --git a/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js b/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js
index 9d6908e3590..ae1a8f3c474 100644
--- a/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js
+++ b/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js
@@ -38,12 +38,12 @@ const coll_primary = db_primary.getCollection(collName);
const db_secondary = secondary.getDB('test');
const coll_secondary = db_secondary.getCollection(collName);
-// Sets 'disallowNewDuplicateKeys' on the old primary and checks that the index rejects duplicates.
+// Sets 'prepareUnique' on the old primary and checks that the index rejects duplicates.
coll_primary.drop();
assert.commandWorked(coll_primary.createIndex({a: 1}));
assert.commandWorked(coll_primary.insert({_id: 0, a: 1}));
-assert.commandWorked(db_primary.runCommand(
- {collMod: collName, index: {keyPattern: {a: 1}, disallowNewDuplicateKeys: true}}));
+assert.commandWorked(
+ db_primary.runCommand({collMod: collName, index: {keyPattern: {a: 1}, prepareUnique: true}}));
assert.commandFailedWithCode(coll_primary.insert({_id: 1, a: 1}), ErrorCodes.DuplicateKey);
// Steps up a new primary and checks the index spec is replicated.
diff --git a/jstests/noPassthrough/collmod_index_noop.js b/jstests/noPassthrough/collmod_index_noop.js
index 90272e41dc9..2e16352bd6e 100644
--- a/jstests/noPassthrough/collmod_index_noop.js
+++ b/jstests/noPassthrough/collmod_index_noop.js
@@ -1,5 +1,5 @@
/**
- * Validate that the 'collMod' command with 'hidden,' 'unique,' or 'disallowNewDuplicateKeys' fields
+ * Validate that the 'collMod' command with 'hidden,' 'unique,' or 'prepareUnique' fields
* will return expected result document for the command and generate expected oplog entries in which
* the index modifications (hiding/unhiding/convert to unique/allowing duplicates/disallowing
* duplicates) will be no-ops if no other index option (TTL, for example) is involved.
@@ -53,10 +53,8 @@ function validateResultForCollMod(result, expectedResult) {
assert.eq(result.expireAfterSeconds_old, expectedResult.expireAfterSeconds_old, result);
assert.eq(result.expireAfterSeconds_new, expectedResult.expireAfterSeconds_new, result);
assert.eq(result.unique_new, expectedResult.unique_new, result);
- assert.eq(
- result.disallowNewDuplicateKeys_old, expectedResult.disallowNewDuplicateKeys_old, result);
- assert.eq(
- result.disallowNewDuplicateKeys_new, expectedResult.disallowNewDuplicateKeys_new, result);
+ assert.eq(result.prepareUnique_old, expectedResult.prepareUnique_old, result);
+ assert.eq(result.prepareUnique_new, expectedResult.prepareUnique_new, result);
}
primaryColl.drop();
@@ -69,7 +67,7 @@ assert.commandWorked(primaryColl.createIndex({f: 1}, {unique: true, expireAfterS
assert.commandWorked(
primaryColl.createIndex({g: 1}, {hidden: true, unique: true, expireAfterSeconds: 25}));
if (collModIndexUniqueEnabled) {
- assert.commandWorked(primaryColl.createIndex({h: 1}, {disallowNewDuplicateKeys: true}));
+ assert.commandWorked(primaryColl.createIndex({h: 1}, {prepareUnique: true}));
}
// Hiding a non-hidden index will generate the oplog entry with a 'hidden_old: false'.
@@ -118,7 +116,7 @@ assert.eq(idxSpec.expireAfterSeconds, 10);
// oplog entry with only 'unique'. Ditto for the command result returned to the user.
if (collModIndexUniqueEnabled) {
assert.commandWorked(primaryDB.runCommand(
- {collMod: collName, index: {keyPattern: {c: 1}, disallowNewDuplicateKeys: true}}));
+ {collMod: collName, index: {keyPattern: {c: 1}, prepareUnique: true}}));
assert.commandFailedWithCode(primaryDB.runCommand({
"collMod": primaryColl.getName(),
"index": {"name": "c_1", "unique": false, "hidden": false},
@@ -143,7 +141,7 @@ if (collModIndexUniqueEnabled) {
// Validate that if the 'unique' option is specified but is a no-op, the operation as a whole
// will be a no-op.
assert.commandWorked(primaryDB.runCommand(
- {collMod: collName, index: {keyPattern: {d: 1}, disallowNewDuplicateKeys: true}}));
+ {collMod: collName, index: {keyPattern: {d: 1}, prepareUnique: true}}));
result = assert.commandWorked(primaryDB.runCommand({
"collMod": primaryColl.getName(),
"index": {"name": "d_1", "unique": true},
@@ -151,7 +149,7 @@ if (collModIndexUniqueEnabled) {
validateResultForCollMod(result, {});
validateCollModOplogEntryCount({
"o.index.name": "d_1",
- "o.index.disallowNewDuplicateKeys": {$exists: false},
+ "o.index.prepareUnique": {$exists: false},
},
0);
@@ -164,7 +162,7 @@ if (collModIndexUniqueEnabled) {
// Validate that if both the 'hidden' and 'unique' options are specified but the
// 'hidden' and 'unique' options are no-ops, the operation as a whole will be a no-op.
assert.commandWorked(primaryDB.runCommand(
- {collMod: collName, index: {keyPattern: {e: 1}, disallowNewDuplicateKeys: true}}));
+ {collMod: collName, index: {keyPattern: {e: 1}, prepareUnique: true}}));
result = assert.commandWorked(primaryDB.runCommand({
"collMod": primaryColl.getName(),
"index": {"name": "e_1", "hidden": true, "unique": true},
@@ -172,7 +170,7 @@ if (collModIndexUniqueEnabled) {
validateResultForCollMod(result, {});
validateCollModOplogEntryCount({
"o.index.name": "e_1",
- "o.index.disallowNewDuplicateKeys": {$exists: false},
+ "o.index.prepareUnique": {$exists: false},
},
0);
@@ -187,7 +185,7 @@ if (collModIndexUniqueEnabled) {
// generate an oplog entry with only 'expireAfterSeconds'. Ditto for the command result returned
// to the user.
assert.commandWorked(primaryDB.runCommand(
- {collMod: collName, index: {keyPattern: {f: 1}, disallowNewDuplicateKeys: true}}));
+ {collMod: collName, index: {keyPattern: {f: 1}, prepareUnique: true}}));
result = assert.commandWorked(primaryDB.runCommand({
"collMod": primaryColl.getName(),
"index": {"name": "f_1", "expireAfterSeconds": 20, "unique": true},
@@ -212,7 +210,7 @@ if (collModIndexUniqueEnabled) {
// instead, it will generate an oplog entry with only 'expireAfterSeconds'. Ditto for the
// command result returned to the user.
assert.commandWorked(primaryDB.runCommand(
- {collMod: collName, index: {keyPattern: {g: 1}, disallowNewDuplicateKeys: true}}));
+ {collMod: collName, index: {keyPattern: {g: 1}, prepareUnique: true}}));
result = assert.commandWorked(primaryDB.runCommand({
"collMod": primaryColl.getName(),
"index": {"name": "g_1", "expireAfterSeconds": 30, "hidden": true, "unique": true},
@@ -233,11 +231,11 @@ if (collModIndexUniqueEnabled) {
assert.eq(idxSpec.expireAfterSeconds, 30);
assert(idxSpec.unique, tojson(idxSpec));
- // Validate that if the 'disallowNewDuplicateKeys' option is specified but is a no-op, the
+ // Validate that if the 'prepareUnique' option is specified but is a no-op, the
// operation as a whole will be a no-op.
result = assert.commandWorked(primaryDB.runCommand({
"collMod": primaryColl.getName(),
- "index": {"name": "h_1", "disallowNewDuplicateKeys": true},
+ "index": {"name": "h_1", "prepareUnique": true},
}));
validateResultForCollMod(result, {});
validateCollModOplogEntryCount({
@@ -249,7 +247,7 @@ if (collModIndexUniqueEnabled) {
idxSpec = GetIndexHelpers.findByName(primaryColl.getIndexes(), "h_1");
assert.eq(idxSpec.hidden, undefined);
assert.eq(idxSpec.expireAfterSeconds, undefined);
- assert(idxSpec.disallowNewDuplicateKeys, tojson(idxSpec));
+ assert(idxSpec.prepareUnique, tojson(idxSpec));
}
rst.stopSet();
diff --git a/jstests/noPassthrough/index_disallowNewDuplicateKeys_downgrade.js b/jstests/noPassthrough/index_prepareUnique_downgrade.js
index 63dc7befd18..92b8d9e7793 100644
--- a/jstests/noPassthrough/index_disallowNewDuplicateKeys_downgrade.js
+++ b/jstests/noPassthrough/index_prepareUnique_downgrade.js
@@ -1,6 +1,6 @@
/**
* Tests that the cluster cannot be downgraded when there are indexes with the
- * 'disallowNewDuplicateKeys' field present.
+ * 'prepareUnique' field present.
*
* TODO SERVER-63564: Remove this test once kLastLTS is 6.0.
*
@@ -23,19 +23,19 @@ if (!collModIndexUniqueEnabled) {
return;
}
-const collName = "index_disallowNewDuplicateKeys_downgrade";
+const collName = "index_prepareUnique_downgrade";
const coll = db.getCollection(collName);
assert.commandWorked(db.createCollection(coll.getName()));
function checkIndexForDowngrade(withFCV, fixIndex, isCompatible) {
- assert.commandWorked(coll.createIndex({a: 1}, {disallowNewDuplicateKeys: true}));
- assert.commandWorked(coll.createIndex({b: 1}, {disallowNewDuplicateKeys: true}));
+ assert.commandWorked(coll.createIndex({a: 1}, {prepareUnique: true}));
+ assert.commandWorked(coll.createIndex({b: 1}, {prepareUnique: true}));
if (fixIndex) {
// Resolves the incompatibility before the downgrade.
assert.commandWorked(coll.dropIndex({a: 1}));
- assert.commandWorked(db.runCommand(
- {collMod: collName, index: {keyPattern: {b: 1}, disallowNewDuplicateKeys: false}}));
+ assert.commandWorked(
+ db.runCommand({collMod: collName, index: {keyPattern: {b: 1}, prepareUnique: false}}));
} else if (!isCompatible) {
assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: withFCV}),
ErrorCodes.CannotDowngrade);
@@ -55,10 +55,10 @@ checkIndexForDowngrade(lastContinuousFCV, false, false);
// Fails to downgrade to 5.0.
checkIndexForDowngrade(lastLTSFCV, false, false);
-// Successfully downgrades to 5.2 after removing the 'disallowNewDuplicateKeys' field.
+// Successfully downgrades to 5.2 after removing the 'prepareUnique' field.
checkIndexForDowngrade(lastContinuousFCV, true, true);
-// Successfully downgrades to 5.0 after removing the 'disallowNewDuplicateKeys' field.
+// Successfully downgrades to 5.0 after removing the 'prepareUnique' field.
checkIndexForDowngrade(lastLTSFCV, true, true);
MongoRunner.stopMongod(conn);
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 10847bfce86..490ab5be065 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -164,19 +164,18 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
}
if (!cmdIndex.getExpireAfterSeconds() && !cmdIndex.getHidden() &&
- !cmdIndex.getUnique() && !cmdIndex.getDisallowNewDuplicateKeys()) {
- return Status(
- ErrorCodes::InvalidOptions,
- "no expireAfterSeconds, hidden, unique, or disallowNewDuplicateKeys field");
+ !cmdIndex.getUnique() && !cmdIndex.getPrepareUnique()) {
+ return Status(ErrorCodes::InvalidOptions,
+ "no expireAfterSeconds, hidden, unique, or prepareUnique field");
}
auto cmrIndex = &cmr.indexRequest;
auto indexObj = e.Obj();
- if (cmdIndex.getUnique() || cmdIndex.getDisallowNewDuplicateKeys()) {
+ if (cmdIndex.getUnique() || cmdIndex.getPrepareUnique()) {
uassert(ErrorCodes::InvalidOptions,
"collMod does not support converting an index to 'unique' or to "
- "'disallowNewDuplicateKeys' mode",
+ "'prepareUnique' mode",
feature_flags::gCollModIndexUnique.isEnabled(
serverGlobalParams.featureCompatibility));
}
@@ -276,10 +275,10 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
if (cmdIndex.getUnique()) {
// Disallow one-step unique convertion. The user has to set
- // 'disallowNewDuplicateKeys' to true first.
- if (!cmrIndex->idx->disallowNewDuplicateKeys()) {
+ // 'prepareUnique' to true first.
+ if (!cmrIndex->idx->prepareUnique()) {
return Status(ErrorCodes::InvalidOptions,
- "Cannot make index unique with 'disallowNewDuplicateKeys=false'. "
+ "Cannot make index unique with 'prepareUnique=false'. "
"Run collMod to set it first.");
}
@@ -319,16 +318,14 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
}
}
- if (cmdIndex.getDisallowNewDuplicateKeys()) {
+ if (cmdIndex.getPrepareUnique()) {
cmr.numModifications++;
// Attempting to modify with the same value should be treated as a no-op.
- if (cmrIndex->idx->disallowNewDuplicateKeys() ==
- *cmdIndex.getDisallowNewDuplicateKeys()) {
- indexObjForOplog = indexObjForOplog.removeField(
- CollModIndex::kDisallowNewDuplicateKeysFieldName);
+ if (cmrIndex->idx->prepareUnique() == *cmdIndex.getPrepareUnique()) {
+ indexObjForOplog =
+ indexObjForOplog.removeField(CollModIndex::kPrepareUniqueFieldName);
} else {
- cmrIndex->indexDisallowNewDuplicateKeys =
- cmdIndex.getDisallowNewDuplicateKeys();
+ cmrIndex->indexPrepareUnique = cmdIndex.getPrepareUnique();
}
}
@@ -473,10 +470,10 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
oplogEntryBuilder->append(e);
}
- // Currently disallows the use of 'indexDisallowNewDuplicateKeys' with other collMod options.
- if (cmr.indexRequest.indexDisallowNewDuplicateKeys && cmr.numModifications > 1) {
+ // Currently disallows the use of 'indexPrepareUnique' with other collMod options.
+ if (cmr.indexRequest.indexPrepareUnique && cmr.numModifications > 1) {
return {ErrorCodes::InvalidOptions,
- "disallowNewDuplicateKeys cannot be combined with any other modification."};
+ "prepareUnique cannot be combined with any other modification."};
}
return {std::move(cmr)};
diff --git a/src/mongo/db/catalog/coll_mod_index.cpp b/src/mongo/db/catalog/coll_mod_index.cpp
index e90ce013a82..6bcb123e3aa 100644
--- a/src/mongo/db/catalog/coll_mod_index.cpp
+++ b/src/mongo/db/catalog/coll_mod_index.cpp
@@ -152,26 +152,25 @@ void _processCollModIndexRequestUnique(OperationContext* opCtx,
*newUnique = true;
autoColl->getWritableCollection(opCtx)->updateUniqueSetting(opCtx, idx->indexName());
- // Resets 'disallowNewDuplicateKeys' to false after converting to unique index;
- autoColl->getWritableCollection(opCtx)->updateDisallowNewDuplicateKeysSetting(
+ // Resets 'prepareUnique' to false after converting to unique index;
+ autoColl->getWritableCollection(opCtx)->updatePrepareUniqueSetting(
opCtx, idx->indexName(), false);
}
/**
- * Adjusts disallowNewDuplicateKeys setting on an index.
+ * Adjusts prepareUnique setting on an index.
*/
-void _processCollModIndexRequestDisallowNewDuplicateKeys(
- OperationContext* opCtx,
- AutoGetCollection* autoColl,
- const IndexDescriptor* idx,
- bool indexDisallowNewDuplicateKeys,
- boost::optional<bool>* newDisallowNewDuplicateKeys,
- boost::optional<bool>* oldDisallowNewDuplicateKeys) {
- *newDisallowNewDuplicateKeys = indexDisallowNewDuplicateKeys;
- *oldDisallowNewDuplicateKeys = idx->disallowNewDuplicateKeys();
- if (*oldDisallowNewDuplicateKeys != *newDisallowNewDuplicateKeys) {
- autoColl->getWritableCollection(opCtx)->updateDisallowNewDuplicateKeysSetting(
- opCtx, idx->indexName(), indexDisallowNewDuplicateKeys);
+void _processCollModIndexRequestPrepareUnique(OperationContext* opCtx,
+ AutoGetCollection* autoColl,
+ const IndexDescriptor* idx,
+ bool indexPrepareUnique,
+ boost::optional<bool>* newPrepareUnique,
+ boost::optional<bool>* oldPrepareUnique) {
+ *newPrepareUnique = indexPrepareUnique;
+ *oldPrepareUnique = idx->prepareUnique();
+ if (*oldPrepareUnique != *newPrepareUnique) {
+ autoColl->getWritableCollection(opCtx)->updatePrepareUniqueSetting(
+ opCtx, idx->indexName(), indexPrepareUnique);
}
}
@@ -187,11 +186,10 @@ void processCollModIndexRequest(OperationContext* opCtx,
auto indexExpireAfterSeconds = collModIndexRequest.indexExpireAfterSeconds;
auto indexHidden = collModIndexRequest.indexHidden;
auto indexUnique = collModIndexRequest.indexUnique;
- auto indexDisallowNewDuplicateKeys = collModIndexRequest.indexDisallowNewDuplicateKeys;
+ auto indexPrepareUnique = collModIndexRequest.indexPrepareUnique;
// Return early if there are no index modifications requested.
- if (!indexExpireAfterSeconds && !indexHidden && !indexUnique &&
- !indexDisallowNewDuplicateKeys) {
+ if (!indexExpireAfterSeconds && !indexHidden && !indexUnique && !indexPrepareUnique) {
return;
}
@@ -200,8 +198,8 @@ void processCollModIndexRequest(OperationContext* opCtx,
boost::optional<bool> newHidden;
boost::optional<bool> oldHidden;
boost::optional<bool> newUnique;
- boost::optional<bool> newDisallowNewDuplicateKeys;
- boost::optional<bool> oldDisallowNewDuplicateKeys;
+ boost::optional<bool> newPrepareUnique;
+ boost::optional<bool> oldPrepareUnique;
// TTL Index
if (indexExpireAfterSeconds) {
@@ -222,13 +220,9 @@ void processCollModIndexRequest(OperationContext* opCtx,
_processCollModIndexRequestUnique(opCtx, autoColl, idx, mode, &newUnique);
}
- if (indexDisallowNewDuplicateKeys) {
- _processCollModIndexRequestDisallowNewDuplicateKeys(opCtx,
- autoColl,
- idx,
- *indexDisallowNewDuplicateKeys,
- &newDisallowNewDuplicateKeys,
- &oldDisallowNewDuplicateKeys);
+ if (indexPrepareUnique) {
+ _processCollModIndexRequestPrepareUnique(
+ opCtx, autoColl, idx, *indexPrepareUnique, &newPrepareUnique, &oldPrepareUnique);
}
*indexCollModInfo =
@@ -237,8 +231,8 @@ void processCollModIndexRequest(OperationContext* opCtx,
newHidden,
oldHidden,
newUnique,
- newDisallowNewDuplicateKeys,
- oldDisallowNewDuplicateKeys,
+ newPrepareUnique,
+ oldPrepareUnique,
idx->indexName()};
// This matches the default for IndexCatalog::refreshEntry().
@@ -259,8 +253,8 @@ void processCollModIndexRequest(OperationContext* opCtx,
oldHidden,
newHidden,
newUnique,
- oldDisallowNewDuplicateKeys,
- newDisallowNewDuplicateKeys,
+ oldPrepareUnique,
+ newPrepareUnique,
result](boost::optional<Timestamp>) {
// add the fields to BSONObjBuilder result
if (oldExpireSecs) {
@@ -278,10 +272,10 @@ void processCollModIndexRequest(OperationContext* opCtx,
invariant(*newUnique);
result->appendBool("unique_new", true);
}
- if (newDisallowNewDuplicateKeys) {
- invariant(oldDisallowNewDuplicateKeys);
- result->append("disallowNewDuplicateKeys_old", *oldDisallowNewDuplicateKeys);
- result->append("disallowNewDuplicateKeys_new", *newDisallowNewDuplicateKeys);
+ if (newPrepareUnique) {
+ invariant(oldPrepareUnique);
+ result->append("prepareUnique_old", *oldPrepareUnique);
+ result->append("prepareUnique_new", *newPrepareUnique);
}
});
diff --git a/src/mongo/db/catalog/coll_mod_index.h b/src/mongo/db/catalog/coll_mod_index.h
index b1323ab7788..e5f78be7f55 100644
--- a/src/mongo/db/catalog/coll_mod_index.h
+++ b/src/mongo/db/catalog/coll_mod_index.h
@@ -48,7 +48,7 @@ struct ParsedCollModIndexRequest {
boost::optional<long long> indexExpireAfterSeconds;
boost::optional<bool> indexHidden;
boost::optional<bool> indexUnique;
- boost::optional<bool> indexDisallowNewDuplicateKeys;
+ boost::optional<bool> indexPrepareUnique;
};
/**
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 145054d8498..2ec74f97a8f 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -610,9 +610,9 @@ public:
/*
* Disallows or allows new duplicates in the given index.
*/
- virtual void updateDisallowNewDuplicateKeysSetting(OperationContext* opCtx,
- StringData idxName,
- bool disallowNewDuplicateKeys) = 0;
+ virtual void updatePrepareUniqueSetting(OperationContext* opCtx,
+ StringData idxName,
+ bool prepareUnique) = 0;
/**
* Removes invalid index options on all indexes in this collection. Returns a list of index
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 0e9c4a28239..4fd48515b5f 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -1980,14 +1980,14 @@ void CollectionImpl::updateUniqueSetting(OperationContext* opCtx, StringData idx
});
}
-void CollectionImpl::updateDisallowNewDuplicateKeysSetting(OperationContext* opCtx,
- StringData idxName,
- bool disallowNewDuplicateKeys) {
+void CollectionImpl::updatePrepareUniqueSetting(OperationContext* opCtx,
+ StringData idxName,
+ bool prepareUnique) {
int offset = _metadata->findIndexOffset(idxName);
invariant(offset >= 0);
_writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
- md.indexes[offset].updateDisallowNewDuplicateKeysSetting(disallowNewDuplicateKeys);
+ md.indexes[offset].updatePrepareUniqueSetting(prepareUnique);
});
}
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index c7fc2cc314f..be5153833d7 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -443,9 +443,9 @@ public:
void updateUniqueSetting(OperationContext* opCtx, StringData idxName) final;
- void updateDisallowNewDuplicateKeysSetting(OperationContext* opCtx,
- StringData idxName,
- bool disallowNewDuplicateKeys) final;
+ void updatePrepareUniqueSetting(OperationContext* opCtx,
+ StringData idxName,
+ bool prepareUnique) final;
std::vector<std::string> removeInvalidIndexOptions(OperationContext* opCtx) final;
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index aeaf4798afd..f656b232a88 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -413,9 +413,9 @@ public:
std::abort();
}
- void updateDisallowNewDuplicateKeysSetting(OperationContext* opCtx,
- StringData idxName,
- bool disallowNewDuplicateKeys) {
+ void updatePrepareUniqueSetting(OperationContext* opCtx,
+ StringData idxName,
+ bool prepareUnique) {
std::abort();
}
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 2525730604f..4315c583234 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -1782,8 +1782,8 @@ StatusWith<BSONObj> IndexCatalogImpl::_fixIndexSpec(OperationContext* opCtx,
if (o["hidden"].trueValue())
b.appendBool("hidden", true); // normalize to bool true in case was int 1 or something...
- if (o["disallowNewDuplicateKeys"].trueValue())
- b.appendBool("disallowNewDuplicateKeys",
+ if (o["prepareUnique"].trueValue())
+ b.appendBool("prepareUnique",
true); // normalize to bool true in case was int 1 or something...
BSONObj key = fixIndexKey(o["key"].Obj());
@@ -1815,7 +1815,7 @@ StatusWith<BSONObj> IndexCatalogImpl::_fixIndexSpec(OperationContext* opCtx,
// dropDups is silently ignored and removed from the spec as of SERVER-14710.
// ns is removed from the spec as of 4.4.
} else if (s == "v" || s == "unique" || s == "key" || s == "name" || s == "hidden" ||
- s == "disallowNewDuplicateKeys") {
+ s == "prepareUnique") {
// covered above
} else {
b.append(e);
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index 3a663298d5d..f6081f0ff73 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -94,7 +94,7 @@ static std::set<StringData> allowedFieldNames = {
IndexDescriptor::kUniqueFieldName,
IndexDescriptor::kWeightsFieldName,
IndexDescriptor::kOriginalSpecFieldName,
- IndexDescriptor::kDisallowNewDuplicateKeysFieldName,
+ IndexDescriptor::kPrepareUniqueFieldName,
// Index creation under legacy writeMode can result in an index spec with an _id field.
"_id"};
@@ -498,7 +498,7 @@ StatusWith<BSONObj> validateIndexSpec(OperationContext* opCtx, const BSONObj& in
IndexDescriptor::k2dsphereCoarsestIndexedLevel == indexSpecElemFieldName ||
IndexDescriptor::k2dsphereFinestIndexedLevel == indexSpecElemFieldName ||
IndexDescriptor::kDropDuplicatesFieldName == indexSpecElemFieldName ||
- IndexDescriptor::kDisallowNewDuplicateKeysFieldName == indexSpecElemFieldName ||
+ IndexDescriptor::kPrepareUniqueFieldName == indexSpecElemFieldName ||
"clustered" == indexSpecElemFieldName) &&
!indexSpecElem.isNumber() && !indexSpecElem.isBoolean()) {
return {ErrorCodes::TypeMismatch,
diff --git a/src/mongo/db/coll_mod.idl b/src/mongo/db/coll_mod.idl
index 821aed598d6..5d9422e111f 100644
--- a/src/mongo/db/coll_mod.idl
+++ b/src/mongo/db/coll_mod.idl
@@ -63,7 +63,7 @@ structs:
optional: true
type: safeBool
unstable: true
- disallowNewDuplicateKeys:
+ prepareUnique:
optional: true
type: safeBool
unstable: true
@@ -92,11 +92,11 @@ structs:
optional: true
type: safeBool
unstable: true
- disallowNewDuplicateKeys_old:
+ prepareUnique_old:
optional: true
type: safeBool
unstable: true
- disallowNewDuplicateKeys_new:
+ prepareUnique_new:
optional: true
type: safeBool
unstable: true
diff --git a/src/mongo/db/coll_mod_reply_validation.cpp b/src/mongo/db/coll_mod_reply_validation.cpp
index 3ebe4a58675..07de476ddde 100644
--- a/src/mongo/db/coll_mod_reply_validation.cpp
+++ b/src/mongo/db/coll_mod_reply_validation.cpp
@@ -41,14 +41,13 @@ void validateReply(const CollModReply& reply) {
false);
}
- auto disallowNewDuplicateKeys_new = reply.getDisallowNewDuplicateKeys_new().is_initialized();
- auto disallowNewDuplicateKeys_old = reply.getDisallowNewDuplicateKeys_old().is_initialized();
+ auto prepareUnique_new = reply.getPrepareUnique_new().is_initialized();
+ auto prepareUnique_old = reply.getPrepareUnique_old().is_initialized();
- if ((!disallowNewDuplicateKeys_new && disallowNewDuplicateKeys_old) ||
- (disallowNewDuplicateKeys_new && !disallowNewDuplicateKeys_old)) {
+ if ((!prepareUnique_new && prepareUnique_old) || (prepareUnique_new && !prepareUnique_old)) {
uassert(ErrorCodes::CommandResultSchemaViolation,
str::stream() << "Invalid CollModReply: Reply should define either both fields "
- << "(disallowNewDuplicateKeys_new and disallowNewDuplicateKeys_old) "
+ << "(prepareUnique_new and prepareUnique_old) "
"or none of them.",
false);
}
diff --git a/src/mongo/db/coll_mod_reply_validation.h b/src/mongo/db/coll_mod_reply_validation.h
index e4a9ac9e49a..bb35a3791dd 100644
--- a/src/mongo/db/coll_mod_reply_validation.h
+++ b/src/mongo/db/coll_mod_reply_validation.h
@@ -37,7 +37,7 @@ namespace mongo::coll_mod_reply_validation {
* are not sufficient in this case.
* It is used to check that reply includes:
* - (hidden_new and hidden_old) together or none of them.
- * - (disallowNewDuplicateKeys_new and disallowNewDuplicateKeys_old) together or none of them."
+ * - (prepareUnique_new and prepareUnique_old) together or none of them.
*/
void validateReply(const CollModReply& reply);
} // namespace mongo::coll_mod_reply_validation
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index d4ac59cfec3..119b9149557 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -745,7 +745,7 @@ private:
ErrorCodes::CannotDowngrade,
fmt::format(
"Cannot downgrade the cluster when there are indexes that have "
- "the 'disallowNewDuplicateKeys' field. Use listIndexes to find "
+ "the 'prepareUnique' field. Use listIndexes to find "
"them and drop "
"the indexes or use collMod to manually set it to false to "
"remove the field "
@@ -753,7 +753,7 @@ private:
"'{}' on collection: '{}'",
indexEntry->descriptor()->indexName(),
collection->ns().toString()),
- !indexEntry->descriptor()->disallowNewDuplicateKeys());
+ !indexEntry->descriptor()->prepareUnique());
}
return true;
});
diff --git a/src/mongo/db/create_indexes.idl b/src/mongo/db/create_indexes.idl
index 33815ce79b8..902731f9edb 100644
--- a/src/mongo/db/create_indexes.idl
+++ b/src/mongo/db/create_indexes.idl
@@ -180,7 +180,7 @@ structs:
type: safeBool
optional: true
unstable: false
- disallowNewDuplicateKeys:
+ prepareUnique:
type: safeBool
optional: true
unstable: true
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index aa0f401c91c..be710445eb1 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -268,7 +268,7 @@ Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx,
*numInserted = 0;
}
bool unique = _descriptor->unique();
- bool disallowNewDuplicateKeys = _descriptor->disallowNewDuplicateKeys();
+ bool prepareUnique = _descriptor->prepareUnique();
bool dupsAllowed;
if (!_descriptor->isIdIndex() && !opCtx->isEnforcingConstraints() &&
coll->isIndexReady(_descriptor->indexName())) {
@@ -280,7 +280,7 @@ Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx,
// Additionally, unique indexes conflict checking can cause out-of-order updates in
// wiredtiger. See SERVER-59831.
dupsAllowed = true;
- } else if (disallowNewDuplicateKeys) {
+ } else if (prepareUnique) {
// This currently is only used by collMod command when converting a regular index to a
// unique index. The regular index will start rejecting duplicates even before the
// conversion finishes.
@@ -295,7 +295,7 @@ Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx,
// When duplicates are encountered and allowed, retry with dupsAllowed. Call
// onDuplicateKey() with the inserted duplicate key.
if (ErrorCodes::DuplicateKey == result.getStatus().code() && options.dupsAllowed &&
- !disallowNewDuplicateKeys) {
+ !prepareUnique) {
invariant(unique);
result = _newInterface->insert(opCtx, keyString, true /* dupsAllowed */);
@@ -542,7 +542,7 @@ Status SortedDataIndexAccessMethod::doUpdate(OperationContext* opCtx,
// Add all new data keys into the index.
for (const auto& keyString : ticket.added) {
- bool dupsAllowed = !_descriptor->disallowNewDuplicateKeys() && ticket.dupsAllowed;
+ bool dupsAllowed = !_descriptor->prepareUnique() && ticket.dupsAllowed;
auto result = _newInterface->insert(opCtx, keyString, dupsAllowed);
if (!result.isOK())
return result.getStatus();
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index b276fb9b38c..3f4dbd088ed 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -107,7 +107,7 @@ constexpr StringData IndexDescriptor::kTextVersionFieldName;
constexpr StringData IndexDescriptor::kUniqueFieldName;
constexpr StringData IndexDescriptor::kHiddenFieldName;
constexpr StringData IndexDescriptor::kWeightsFieldName;
-constexpr StringData IndexDescriptor::kDisallowNewDuplicateKeysFieldName;
+constexpr StringData IndexDescriptor::kPrepareUniqueFieldName;
IndexDescriptor::IndexDescriptor(const std::string& accessMethodName, BSONObj infoObj)
: _accessMethodName(accessMethodName),
@@ -136,13 +136,12 @@ IndexDescriptor::IndexDescriptor(const std::string& accessMethodName, BSONObj in
_collation = collationElement.Obj().getOwned();
}
- if (BSONElement disallowNewDuplicateKeysElement =
- _infoObj[kDisallowNewDuplicateKeysFieldName]) {
+ if (BSONElement prepareUniqueElement = _infoObj[kPrepareUniqueFieldName]) {
uassert(
ErrorCodes::InvalidOptions,
- "Index does not support the 'disallowNewDuplicateKeys' field",
+ "Index does not support the 'prepareUnique' field",
feature_flags::gCollModIndexUnique.isEnabled(serverGlobalParams.featureCompatibility));
- _disallowNewDuplicateKeys = disallowNewDuplicateKeysElement.trueValue();
+ _prepareUnique = prepareUniqueElement.trueValue();
}
}
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 35a005371b4..f4f48cb35b9 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -88,7 +88,7 @@ public:
static constexpr StringData kUniqueFieldName = "unique"_sd;
static constexpr StringData kWeightsFieldName = "weights"_sd;
static constexpr StringData kOriginalSpecFieldName = "originalSpec"_sd;
- static constexpr StringData kDisallowNewDuplicateKeysFieldName = "disallowNewDuplicateKeys"_sd;
+ static constexpr StringData kPrepareUniqueFieldName = "prepareUnique"_sd;
/**
* infoObj is a copy of the index-describing BSONObj contained in the catalog.
@@ -227,8 +227,8 @@ public:
return _partialFilterExpression;
}
- bool disallowNewDuplicateKeys() const {
- return _disallowNewDuplicateKeys;
+ bool prepareUnique() const {
+ return _prepareUnique;
}
/**
@@ -280,7 +280,7 @@ private:
IndexVersion _version;
BSONObj _collation;
BSONObj _partialFilterExpression;
- bool _disallowNewDuplicateKeys = false;
+ bool _prepareUnique = false;
// Many query stages require going from an IndexDescriptor to its IndexCatalogEntry, so for
// now we need this.
diff --git a/src/mongo/db/list_indexes.idl b/src/mongo/db/list_indexes.idl
index 5d4773d5a12..286d393998f 100644
--- a/src/mongo/db/list_indexes.idl
+++ b/src/mongo/db/list_indexes.idl
@@ -156,7 +156,7 @@ structs:
type: safeBool
optional: true
unstable: false
- disallowNewDuplicateKeys:
+ prepareUnique:
type: safeBool
optional: true
unstable: true
diff --git a/src/mongo/db/op_observer.h b/src/mongo/db/op_observer.h
index e42f9533939..030521b9f4d 100644
--- a/src/mongo/db/op_observer.h
+++ b/src/mongo/db/op_observer.h
@@ -99,8 +99,8 @@ struct IndexCollModInfo {
boost::optional<bool> hidden;
boost::optional<bool> oldHidden;
boost::optional<bool> unique;
- boost::optional<bool> disallowNewDuplicateKeys;
- boost::optional<bool> oldDisallowNewDuplicateKeys;
+ boost::optional<bool> prepareUnique;
+ boost::optional<bool> oldPrepareUnique;
std::string indexName;
};
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index b8a7b4f28e3..b05d0f6e021 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -1029,9 +1029,9 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx,
auto oldHidden = indexInfo->oldHidden.get();
o2Builder.append("hidden_old", oldHidden);
}
- if (indexInfo->oldDisallowNewDuplicateKeys) {
- auto oldDisallowNewDuplicateKeys = indexInfo->oldDisallowNewDuplicateKeys.get();
- o2Builder.append("disallowNewDuplicates_old", oldDisallowNewDuplicateKeys);
+ if (indexInfo->oldPrepareUnique) {
+ auto oldPrepareUnique = indexInfo->oldPrepareUnique.get();
+ o2Builder.append("prepareUnique_old", oldPrepareUnique);
}
}
diff --git a/src/mongo/db/op_observer_util.cpp b/src/mongo/db/op_observer_util.cpp
index e871f7c1cb2..cd79324a2aa 100644
--- a/src/mongo/db/op_observer_util.cpp
+++ b/src/mongo/db/op_observer_util.cpp
@@ -64,9 +64,8 @@ BSONObj makeCollModCmdObj(const BSONObj& collModCmd,
if (indexInfo->unique)
indexObjBuilder.append("unique", indexInfo->unique.get());
- if (indexInfo->disallowNewDuplicateKeys)
- indexObjBuilder.append("disallowNewDuplicateKeys",
- indexInfo->disallowNewDuplicateKeys.get());
+ if (indexInfo->prepareUnique)
+ indexObjBuilder.append("prepareUnique", indexInfo->prepareUnique.get());
cmdObjBuilder.append(indexFieldName, indexObjBuilder.obj());
} else {
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index def3dc283d5..58cdc01fe39 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -146,20 +146,19 @@ void BSONCollectionCatalogEntry::IndexMetaData::updateUniqueSetting() {
spec = b.obj();
}
-void BSONCollectionCatalogEntry::IndexMetaData::updateDisallowNewDuplicateKeysSetting(
- bool disallowNewDuplicateKeys) {
- // If disallowNewDuplicateKeys == false, we remove this field from catalog rather than add a
+void BSONCollectionCatalogEntry::IndexMetaData::updatePrepareUniqueSetting(bool prepareUnique) {
+ // If prepareUnique == false, we remove this field from catalog rather than add a
// field with false.
BSONObjBuilder b;
for (BSONObjIterator bi(spec); bi.more();) {
BSONElement e = bi.next();
- if (e.fieldNameStringData() != "disallowNewDuplicateKeys") {
+ if (e.fieldNameStringData() != "prepareUnique") {
b.append(e);
}
}
- if (disallowNewDuplicateKeys) {
- b.append("disallowNewDuplicateKeys", disallowNewDuplicateKeys);
+ if (prepareUnique) {
+ b.append("prepareUnique", prepareUnique);
}
spec = b.obj();
}
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index c85bc6755d0..18bca894a71 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -100,7 +100,7 @@ public:
void updateUniqueSetting();
- void updateDisallowNewDuplicateKeysSetting(bool disallowNewDuplicateKeys);
+ void updatePrepareUniqueSetting(bool prepareUnique);
StringData nameStringData() const {
return spec["name"].valueStringDataSafe();
diff --git a/src/mongo/dbtests/index_access_method_test.cpp b/src/mongo/dbtests/index_access_method_test.cpp
index 4d24fb5c11f..a1c20d7a5d8 100644
--- a/src/mongo/dbtests/index_access_method_test.cpp
+++ b/src/mongo/dbtests/index_access_method_test.cpp
@@ -259,15 +259,15 @@ TEST(IndexAccessMethodInsertKeys, DuplicatesCheckingOnSecondaryUniqueIndexes) {
ASSERT_EQ(numInserted, 2);
}
-TEST(IndexAccessMethodInsertKeys, InsertWhenDisallowNewDuplicateKeys) {
+TEST(IndexAccessMethodInsertKeys, InsertWhenPrepareUnique) {
if (feature_flags::gCollModIndexUnique.isEnabled(serverGlobalParams.featureCompatibility)) {
ServiceContext::UniqueOperationContext opCtxRaii = cc().makeOperationContext();
OperationContext* opCtx = opCtxRaii.get();
- NamespaceString nss("unittests.InsertWhenDisallowNewDuplicateKeys");
+ NamespaceString nss("unittests.InsertWhenPrepareUnique");
auto indexName = "a_1";
auto indexSpec =
- BSON("name" << indexName << "key" << BSON("a" << 1) << "disallowNewDuplicateKeys"
- << true << "v" << static_cast<int>(IndexDescriptor::IndexVersion::kV2));
+ BSON("name" << indexName << "key" << BSON("a" << 1) << "prepareUnique" << true << "v"
+ << static_cast<int>(IndexDescriptor::IndexVersion::kV2));
ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns(), indexSpec));
AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_X);
@@ -295,15 +295,15 @@ TEST(IndexAccessMethodInsertKeys, InsertWhenDisallowNewDuplicateKeys) {
}
}
-TEST(IndexAccessMethodUpdateKeys, UpdateWhenDisallowNewDuplicateKeys) {
+TEST(IndexAccessMethodUpdateKeys, UpdateWhenPrepareUnique) {
if (feature_flags::gCollModIndexUnique.isEnabled(serverGlobalParams.featureCompatibility)) {
ServiceContext::UniqueOperationContext opCtxRaii = cc().makeOperationContext();
OperationContext* opCtx = opCtxRaii.get();
- NamespaceString nss("unittests.UpdateWhenDisallowNewDuplicateKeys");
+ NamespaceString nss("unittests.UpdateWhenPrepareUnique");
auto indexName = "a_1";
auto indexSpec =
- BSON("name" << indexName << "key" << BSON("a" << 1) << "disallowNewDuplicateKeys"
- << true << "v" << static_cast<int>(IndexDescriptor::IndexVersion::kV2));
+ BSON("name" << indexName << "key" << BSON("a" << 1) << "prepareUnique" << true << "v"
+ << static_cast<int>(IndexDescriptor::IndexVersion::kV2));
ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns(), indexSpec));
AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_X);