 buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml |   3
 jstests/core/index_big1.js                                                           |  43
 jstests/core/index_bigkeys.js                                                        |  86
 jstests/core/index_bigkeys_background.js                                             |  32
 jstests/core/index_bigkeys_update.js                                                 |  19
 jstests/core/insert_long_index_key.js                                                |  12
 jstests/core/txns/do_txn_atomicity.js                                                |   5
 jstests/libs/index_bigkeys.js                                                        | 113
 jstests/multiVersion/index_bigkeys.js                                                | 143
 jstests/multiVersion/index_bigkeys_mixed_version_replset.js                          |  42
 jstests/replsets/lastop.js                                                           |   8
 src/mongo/base/error_codes.err                                                       |   1
 src/mongo/db/catalog/index_consistency.cpp                                           |  20
 src/mongo/db/catalog/index_consistency.h                                             |   1
 src/mongo/db/catalog/private/record_store_validate_adaptor.cpp                       |  12
 src/mongo/db/index/index_access_method.cpp                                           |  60
 src/mongo/db/index/index_access_method.h                                             |  17
 src/mongo/db/repl/sync_tail_test.cpp                                                 |  23
 src/mongo/db/storage/biggie/biggie_sorted_impl.cpp                                   |  10
 src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp            |  11
 src/mongo/db/storage/mobile/mobile_index.cpp                                         |  21
 src/mongo/db/storage/mobile/mobile_index.h                                           |   5
 src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp                                 |  34
 23 files changed, 448 insertions(+), 273 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml
index fead09408d2..f1226141fcf 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml
@@ -34,14 +34,11 @@ selector:
- jstests/core/geonear_key.js
- jstests/core/in.js
- jstests/core/index8.js # No explicit check for failed command.
- - jstests/core/index_big1.js
- - jstests/core/index_bigkeys.js
- jstests/core/index_decimal.js
- jstests/core/index_multiple_compatibility.js
- jstests/core/index_partial_write_ops.js
- jstests/core/indexa.js # No explicit check for failed command.
- jstests/core/indexes_multiple_commands.js
- - jstests/core/insert_long_index_key.js
- jstests/core/js2.js
- jstests/core/json_schema/json_schema.js
- jstests/core/mr_bigobject.js
diff --git a/jstests/core/index_big1.js b/jstests/core/index_big1.js
deleted file mode 100644
index 304656dda9a..00000000000
--- a/jstests/core/index_big1.js
+++ /dev/null
@@ -1,43 +0,0 @@
-// Cannot implicitly shard accessed collections because of extra shard key index in sharded
-// collection.
-// @tags: [assumes_no_implicit_index_creation]
-
-// check where "key to big" happens
-
-t = db.index_big1;
-
-N = 3200;
-t.drop();
-
-var s = "";
-
-t.ensureIndex({a: 1, x: 1});
-
-var bulk = t.initializeUnorderedBulkOp();
-for (i = 0; i < N; i++) {
- bulk.insert({a: i + .5, x: s});
- s += "x";
-}
-assert.throws(function() {
- bulk.execute();
-});
-
-assert.eq(2, t.getIndexes().length);
-
-flip = -1;
-
-for (i = 0; i < N; i++) {
- var c = t.find({a: i + .5}).count();
- if (c == 1) {
- assert.eq(-1, flip, "flipping : " + i);
- } else {
- if (flip == -1) {
- flip = i;
- }
- }
-}
-
-// print(flip);
-// print(flip/1024);
-
-assert.eq(/*v0 index : 797*/ 1002, flip, "flip changed");
diff --git a/jstests/core/index_bigkeys.js b/jstests/core/index_bigkeys.js
index 1c995ed4273..925252e2d76 100644
--- a/jstests/core/index_bigkeys.js
+++ b/jstests/core/index_bigkeys.js
@@ -1,61 +1,25 @@
-// Cannot implicitly shard accessed collections because of extra shard key index in sharded
-// collection.
-// @tags: [assumes_no_implicit_index_creation, requires_fastcount]
-
-t = db.bigkeysidxtest;
-
-var keys = [];
-
-var str = "aaaabbbbccccddddeeeeffffgggghhhh";
-
-while (str.length < 20000) {
- keys.push(str);
- str = str + str;
-}
-
-function doInsert(order) {
- if (order == 1) {
- for (var i = 0; i < 10; i++) {
- t.insert({_id: i, k: keys[i]});
- }
- } else {
- for (var i = 9; i >= 0; i--) {
- t.insert({_id: i, k: keys[i]});
- }
- }
-}
-
-var expect = null;
-
-function check() {
- assert(t.validate().valid);
- assert.eq(5, t.count());
-
- var c = t.find({k: /^a/}).count();
- assert.eq(5, c);
-}
-
-function runTest(order) {
- t.drop();
- t.ensureIndex({k: 1});
- doInsert(order);
- check(); // check incremental addition
-
- t.reIndex();
- check(); // check bottom up
-
- t.drop();
- doInsert(order);
- assert.eq(1, t.getIndexes().length);
- t.ensureIndex({k: 1});
- assert.eq(1, t.getIndexes().length);
-
- t.drop();
- doInsert(order);
- assert.eq(1, t.getIndexes().length);
- t.ensureIndex({k: 1}, {background: true});
- assert.eq(1, t.getIndexes().length);
-}
-
-runTest(1);
-runTest(2);
+/**
+ * Test interactions with big index keys. There should be no size limit for index keys.
+ *
+ * assumes_no_implicit_index_creation: Cannot implicitly shard accessed collections because of extra
+ * shard key index in sharded collection.
+ * requires_non_retryable_writes: This test uses delete, which is not retryable.
+ * @tags: [assumes_no_implicit_index_creation, requires_non_retryable_writes]
+ */
+(function() {
+ "use strict";
+
+ load("jstests/libs/index_bigkeys.js");
+
+ // Case 1
+ runTest({unique: true}, bigStringKeys, bigStringCheck);
+ runTest({}, bigStringKeys, bigStringCheck);
+
+ // Case 2
+ runTest({unique: true}, docArrayKeys, docArrayCheck);
+ runTest({}, docArrayKeys, docArrayCheck);
+
+ // Case 3
+ runTest({unique: true}, arrayArrayKeys, arrayArrayCheck);
+ runTest({}, arrayArrayKeys, arrayArrayCheck);
+}());
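With the per-key size limit gone, a key far past the old 1024-byte cap both indexes and stays queryable. A minimal shell sketch of the behavior these cases exercise (not part of this patch; the collection name is illustrative, assuming a 4.2 mongod at FCV 4.2):

    const coll = db.bigkeys_demo;  // hypothetical collection
    coll.drop();
    assert.commandWorked(coll.createIndex({k: 1}));
    // This insert used to fail with ErrorCodes.KeyTooLong.
    assert.writeOK(coll.insert({k: "x".repeat(2000)}));
    assert.eq(1, coll.find({k: /^x/}).hint({k: 1}).itcount());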
diff --git a/jstests/core/index_bigkeys_background.js b/jstests/core/index_bigkeys_background.js
new file mode 100644
index 00000000000..c91a12f27e2
--- /dev/null
+++ b/jstests/core/index_bigkeys_background.js
@@ -0,0 +1,32 @@
+/**
+ * Test interactions with big index keys. There should be no size limit for index keys.
+ * Note: the mobile storage engine does not support background index builds, so the background
+ * index build tests were moved from index_bigkeys.js to this file. index_bigkeys.js still runs
+ * against the mobile storage engine.
+ *
+ * assumes_no_implicit_index_creation: Cannot implicitly shard accessed collections because of extra
+ * shard key index in sharded collection.
+ * requires_non_retryable_writes: This test uses delete, which is not retryable.
+ * @tags: [
+ * assumes_no_implicit_index_creation,
+ * requires_background_index,
+ * requires_non_retryable_writes
+ * ]
+ */
+(function() {
+ "use strict";
+
+ load("jstests/libs/index_bigkeys.js");
+
+ // Case 1
+ runTest({background: true, unique: true}, bigStringKeys, bigStringCheck);
+ runTest({background: true}, bigStringKeys, bigStringCheck);
+
+ // Case 2
+ runTest({background: true, unique: true}, docArrayKeys, docArrayCheck);
+ runTest({background: true}, docArrayKeys, docArrayCheck);
+
+ // Case 3
+ runTest({background: true, unique: true}, arrayArrayKeys, arrayArrayCheck);
+ runTest({background: true}, arrayArrayKeys, arrayArrayCheck);
+}());
diff --git a/jstests/core/index_bigkeys_update.js b/jstests/core/index_bigkeys_update.js
deleted file mode 100644
index 9ecc5e4ecd6..00000000000
--- a/jstests/core/index_bigkeys_update.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// @tags: [requires_fastcount]
-
-bigString = "";
-while (bigString.length < 16000)
- bigString += ".";
-
-t = db.index_bigkeys_update;
-t.drop();
-
-t.insert({_id: 0, x: "asd"});
-t.ensureIndex({x: 1});
-
-assert.eq(1, t.count());
-
-assert.writeError(t.update({}, {$set: {x: bigString}}));
-
-assert.eq(1, t.count());
-assert.eq("asd", t.findOne().x); // make sure doc is the old version
-assert.eq("asd", t.findOne({_id: 0}).x); // make sure doc is the old version
diff --git a/jstests/core/insert_long_index_key.js b/jstests/core/insert_long_index_key.js
deleted file mode 100644
index edbbd48200a..00000000000
--- a/jstests/core/insert_long_index_key.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// @tags: [requires_fastcount]
-
-t = db.insert_long_index_key;
-t.drop();
-
-var s = new Array(2000).toString();
-t.ensureIndex({x: 1});
-
-t.insert({x: 1});
-t.insert({x: s});
-
-assert.eq(1, t.count());
diff --git a/jstests/core/txns/do_txn_atomicity.js b/jstests/core/txns/do_txn_atomicity.js
index 6ca08890afd..f7077bd863e 100644
--- a/jstests/core/txns/do_txn_atomicity.js
+++ b/jstests/core/txns/do_txn_atomicity.js
@@ -24,15 +24,14 @@
assert.eq(t.count({x: 1}), 0);
// Operations only including CRUD commands should be atomic, so the next insert will fail.
- var tooLong = Array(2000).join("hello");
assert.commandFailedWithCode(sessionDb.adminCommand({
doTxn: [
{op: 'i', ns: t.getFullName(), o: {_id: ObjectId(), x: 1}},
- {op: 'i', ns: t.getFullName(), o: {_id: tooLong, x: 1}},
+ {op: 'i', ns: "invalid", o: {_id: ObjectId(), x: 1}},
],
txnNumber: NumberLong(txnNumber++)
}),
- ErrorCodes.KeyTooLong);
+ 16886); // nsToCollectionSubstring: no .
assert.eq(t.count({x: 1}), 0);
// Operations on non-existent databases cannot be atomic.
diff --git a/jstests/libs/index_bigkeys.js b/jstests/libs/index_bigkeys.js
new file mode 100644
index 00000000000..329d2ccf68a
--- /dev/null
+++ b/jstests/libs/index_bigkeys.js
@@ -0,0 +1,113 @@
+/*
+ * Shared functions and constants between index_bigkeys.js and index_bigkeys_background.js
+ */
+"use strict";
+
+var createCheckFunc = function(keyPattern) {
+ return (testColl, numItems, numIndexes) => {
+ assert(testColl.validate().valid);
+ assert.eq(numItems, testColl.count());
+ // Find by index
+ var c = testColl.find({k: keyPattern}).count();
+ assert.eq(numItems, c);
+ assert.eq(numIndexes, testColl.getIndexes().length);
+ };
+};
+
+function doInsert(collName, keys, expectDupError = false) {
+ for (let i = 0; i < keys.length; i++) {
+ let res = db.runCommand({insert: collName, documents: [{_id: i, k: keys[i], c: 0}]});
+ if (!expectDupError)
+ assert.commandWorked(res);
+ else
+ assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
+ }
+}
+
+function doUpdate(collName, keys) {
+ for (let key of keys) {
+ let res = db.runCommand({update: collName, updates: [{q: {k: key}, u: {$inc: {c: 1}}}]});
+ assert.commandWorked(res);
+ assert.eq(1, res.nModified);
+ }
+}
+
+function doDelete(collName, keys) {
+ for (let key of keys) {
+ let res = db.runCommand({delete: collName, deletes: [{q: {k: key}, limit: 0}]});
+ assert.commandWorked(res);
+ assert.eq(1, res.n);
+ }
+}
+
+function runTest(createIndexOpts, keys, checkFunc) {
+ const collName = "big_keys_index_test";
+ const testColl = db[collName];
+
+ // 1. Insert documents when index exists.
+ testColl.drop();
+ assert.commandWorked(testColl.createIndex({k: 1}, createIndexOpts));
+ doInsert(collName, keys);
+ checkFunc(testColl, keys.length, 2);
+
+ // Make sure unique index works.
+ if ("unique" in createIndexOpts) {
+ doInsert(collName, keys, true);
+ checkFunc(testColl, keys.length, 2);
+ }
+
+ // 2. Reindex when documents exist.
+ assert.commandWorked(testColl.dropIndex({k: 1}));
+ assert.commandWorked(testColl.createIndex({k: 1}, createIndexOpts));
+ checkFunc(testColl, keys.length, 2);
+
+ // 3. Create the index when documents exist.
+ testColl.drop();
+ doInsert(collName, keys);
+ assert.eq(1, testColl.getIndexes().length);
+ assert.commandWorked(testColl.createIndex({k: 1}, createIndexOpts));
+ checkFunc(testColl, keys.length, 2);
+
+ // 4. Update the documents.
+ doUpdate(collName, keys);
+ checkFunc(testColl, keys.length, 2);
+
+ // 5. Remove all the documents.
+ doDelete(collName, keys);
+ checkFunc(testColl, 0, 2);
+
+ // 6. Drop the index when documents exist.
+ doInsert(collName, keys);
+ assert.commandWorked(db.runCommand({dropIndexes: collName, index: {k: 1}}));
+ checkFunc(testColl, keys.length, 1);
+}
+
+// Case 1: Big string as key
+var bigStringKeys = (() => {
+ let keys = [];
+ var str = "aaaabbbbccccddddeeeeffffgggghhhh";
+ while (str.length < 20000) {
+ keys.push(str);
+ str = str + str;
+ }
+ return keys;
+})();
+var bigStringCheck = createCheckFunc(/^a/);
+
+// Case 2: Document containing large array as key
+var docArrayKeys = (() => {
+ let keys = [];
+ // {..., k: {a : [0,1,2, ... ,9999] } }
+ keys.push({a: Array.apply(null, {length: 10000}).map(Number.call, Number)});
+ return keys;
+})();
+var docArrayCheck = createCheckFunc(docArrayKeys[0]);
+
+// Case 3: Array containing large array as key
+var arrayArrayKeys = (() => {
+ let keys = [];
+ // {..., k: [ [0,1,2, ... ,9999] ] }
+ keys.push([Array.apply(null, {length: 10000}).map(Number.call, Number)]);
+ return keys;
+})();
+var arrayArrayCheck = createCheckFunc(arrayArrayKeys[0]);
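The helpers above are deliberately generic: runTest accepts any createIndex options object, an array of key values, and a check callback built by createCheckFunc. A sketch of how a fourth key shape could be plugged in (hypothetical, not in this patch):

    load("jstests/libs/index_bigkeys.js");

    // Case 4 (hypothetical): big string nested inside a subdocument.
    var nestedKeys = [{a: "x".repeat(5000)}];
    var nestedCheck = createCheckFunc(nestedKeys[0]);  // exact match on {k: {a: ...}}
    runTest({}, nestedKeys, nestedCheck);
    runTest({unique: true}, nestedKeys, nestedCheck);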
diff --git a/jstests/multiVersion/index_bigkeys.js b/jstests/multiVersion/index_bigkeys.js
new file mode 100644
index 00000000000..66c41ebe874
--- /dev/null
+++ b/jstests/multiVersion/index_bigkeys.js
@@ -0,0 +1,143 @@
+/**
+ * Test interactions with big index keys across binary versions and FCV settings.
+ * TODO SERVER-36280: Add test cases for the feature tracker bit for overlong TypeBits.
+ * TODO SERVER-36385: Remove this test in 4.4
+ */
+
+"use strict";
+
+const dbName = "test";
+const collName = "index_bigkeys";
+
+const largeKey = 's'.repeat(12345);
+const documentWithLargeKey = {
+ x: largeKey
+};
+
+function testInsertDocumentWithLargeKey(conn, expectKeyTooLongFailure) {
+ let testDB = conn.getDB(dbName);
+ let testColl = testDB[collName];
+
+ testColl.drop();
+ assert.commandWorked(
+ testDB.runCommand({createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1"}]}));
+
+ if (expectKeyTooLongFailure) {
+ assert.commandFailedWithCode(
+ testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}),
+ ErrorCodes.KeyTooLong);
+ assert.eq(0, testColl.count());
+ } else {
+ assert.commandWorked(
+ testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}));
+ assert.eq(1, testColl.count());
+ }
+}
+
+function downgradeAndVerifyBehavior(testDowngradeBehaviorFunc) {
+ const dbpath = MongoRunner.dataPath + "index_bigkeys";
+
+    // Test setting FCV 4.0.
+ let conn = MongoRunner.runMongod({binVersion: "latest", cleanData: true, dbpath: dbpath});
+ let testDB = conn.getDB(dbName);
+ let testColl = testDB[collName];
+ assert.commandWorked(
+ testDB.runCommand({createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1"}]}));
+ assert.commandWorked(testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}));
+ assert.eq(1, testColl.count());
+ assert.eq(2, testColl.getIndexes().length);
+
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+ testDowngradeBehaviorFunc(testDB, testColl);
+ MongoRunner.stopMongod(conn, null, {skipValidation: true});
+
+ // Test downgrading to 4.0 binary.
+ conn = MongoRunner.runMongod({binVersion: "latest", cleanData: true, dbpath: dbpath});
+ testDB = conn.getDB(dbName);
+ testColl = testDB[collName];
+ assert.commandWorked(
+ testDB.runCommand({createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1"}]}));
+ assert.commandWorked(testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}));
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+ MongoRunner.stopMongod(conn, null, {skipValidation: true});
+
+ conn = MongoRunner.runMongod({binVersion: "4.0", noCleanData: true, dbpath: dbpath});
+ testDowngradeBehaviorFunc(conn.getDB(dbName), conn.getDB(dbName)[collName]);
+ MongoRunner.stopMongod(conn, null, {skipValidation: false});
+}
+
+function upgradeAndVerifyBehavior(testUpgradeBehaviorFunc) {
+ const dbpath = MongoRunner.dataPath + "index_bigkeys";
+
+ // Test upgrading to 4.2 binary.
+ let conn = MongoRunner.runMongod({binVersion: "4.0", cleanData: true, dbpath: dbpath});
+ let testDB = conn.getDB(dbName);
+ let testColl = testDB[collName];
+ assert.commandWorked(
+ testDB.runCommand({createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1"}]}));
+ assert.commandFailedWithCode(
+ testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}),
+ ErrorCodes.KeyTooLong);
+ assert.eq(0, testColl.count());
+
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+ MongoRunner.stopMongod(conn);
+ conn = MongoRunner.runMongod({binVersion: "latest", noCleanData: true, dbpath: dbpath});
+ testDB = conn.getDB(dbName);
+ testColl = testDB[collName];
+ assert.commandFailedWithCode(
+ testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}),
+ ErrorCodes.KeyTooLong);
+ assert.eq(0, testColl.count());
+
+    // Set the FCV to 4.2.
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+
+ testUpgradeBehaviorFunc(testDB, testColl);
+ MongoRunner.stopMongod(conn, null, {skipValidation: false});
+}
+
+(function() {
+ load("jstests/libs/feature_compatibility_version.js");
+
+    // 1. Test the behavior of inserting a large index key on each binary version.
+ // 4.2 binary (with FCV 4.2)
+ let conn = MongoRunner.runMongod({binVersion: "latest", cleanData: true});
+ testInsertDocumentWithLargeKey(conn, false);
+ // 4.2 binary (with FCV 4.0)
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+ testInsertDocumentWithLargeKey(conn, true);
+ MongoRunner.stopMongod(conn);
+ // 4.0 binary
+ conn = MongoRunner.runMongod({binVersion: "4.0", cleanData: true});
+ testInsertDocumentWithLargeKey(conn, true);
+ MongoRunner.stopMongod(conn);
+
+    // 2. Test that the 4.0 binary can query with large keys and remove docs with large keys
+    // that were inserted by the 4.2 binary.
+ downgradeAndVerifyBehavior((testDB, testColl) => {
+ assert.commandWorked(
+ testDB.runCommand({delete: collName, deletes: [{q: {x: largeKey}, limit: 0}]}));
+ assert.eq(0, testColl.count());
+ });
+
+    // 3. Test that the 4.0 binary can overwrite large keys with short keys.
+ downgradeAndVerifyBehavior((testDB, testColl) => {
+ assert.commandWorked(
+ testDB.runCommand({update: collName, updates: [{q: {x: largeKey}, u: {x: "sss"}}]}));
+ assert.eq("sss", testColl.find({x: "sss"}).toArray()[0].x);
+ });
+
+    // 4. Test that the 4.0 binary can drop an index that has large keys.
+ downgradeAndVerifyBehavior((testDB, testColl) => {
+ assert.eq(2, testColl.getIndexes().length);
+ assert.commandWorked(testDB.runCommand({dropIndexes: collName, index: "x_1"}));
+ assert.eq(1, testColl.getIndexes().length);
+ });
+
+ // 5. Test the normal upgrade path.
+ upgradeAndVerifyBehavior((testDB, testColl) => {
+ assert.commandWorked(
+ testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}));
+ });
+}());
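All of the upgrade/downgrade assertions above hinge on the server's FCV document. As a side note (not part of this patch), the effective FCV can be inspected from the shell:

    const res = db.adminCommand({getParameter: 1, featureCompatibilityVersion: 1});
    assert.commandWorked(res);
    // e.g. {featureCompatibilityVersion: {version: "4.0"}, ok: 1}
    print(tojson(res.featureCompatibilityVersion));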
diff --git a/jstests/multiVersion/index_bigkeys_mixed_version_replset.js b/jstests/multiVersion/index_bigkeys_mixed_version_replset.js
new file mode 100644
index 00000000000..09b971e1f24
--- /dev/null
+++ b/jstests/multiVersion/index_bigkeys_mixed_version_replset.js
@@ -0,0 +1,42 @@
+/**
+ * Unlimited index key length is supported in 4.2.
+ * However, it must still be disallowed under FCV 4.0.
+ * TODO SERVER-36385: remove this test in 4.4.
+ */
+(function() {
+ 'use strict';
+
+ load("jstests/libs/feature_compatibility_version.js");
+
+ TestData.replSetFeatureCompatibilityVersion = "4.0";
+ const rst = new ReplSetTest({
+ nodes: [
+ {binVersion: 'latest'},
+ {rsConfig: {priority: 0, votes: 0}},
+ ]
+ });
+ rst.startSet();
+ rst.initiate();
+ rst.restart(1, {binVersion: '4.0'});
+
+ const dbName = "test";
+ const collName = "index_bigkeys";
+
+ const largeKey = 's'.repeat(12345);
+ const documentWithLargeKey = {x: largeKey};
+
+ const primary = rst.getPrimary();
+ const testDB = primary.getDB(dbName);
+ const testColl = testDB[collName];
+
+ testColl.drop();
+ assert.commandWorked(
+ testDB.runCommand({createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1"}]}));
+
+ assert.commandFailedWithCode(
+ testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}),
+ ErrorCodes.KeyTooLong);
+ assert.eq(0, testColl.count());
+
+ rst.stopSet();
+}());
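The pin here is TestData.replSetFeatureCompatibilityVersion, which makes ReplSetTest initiate the set at FCV 4.0, so the latest-binary primary keeps enforcing the limit its 4.0-binary secondary requires. A sketch of asserting that pin, assuming the checkFCV helper from jstests/libs/feature_compatibility_version.js:

    // The primary should report FCV 4.0 while the set is mixed-version.
    checkFCV(rst.getPrimary().getDB("admin"), "4.0");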
diff --git a/jstests/replsets/lastop.js b/jstests/replsets/lastop.js
index bf6c5cba438..cc368c1b95c 100644
--- a/jstests/replsets/lastop.js
+++ b/jstests/replsets/lastop.js
@@ -97,14 +97,6 @@
var bigString = new Array(3000).toString();
assert.writeOK(m2.getCollection("test.foo").insert({m2: 994, m3: bigString}));
- // createIndex with a >1024 byte field fails.
- var twelfthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.commandFailed(m1.getCollection("test.foo").createIndex({m3: 1}));
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // Index builds, whether a success of failure, change the metadata state twice. For primaries,
- // this will advance the optime twice.
- assert.gt(noOp.ts, twelfthOp.ts);
-
// No-op insert
assert.writeOK(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
var thirteenthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err
index c869f99f704..a978b90e915 100644
--- a/src/mongo/base/error_codes.err
+++ b/src/mongo/base/error_codes.err
@@ -279,6 +279,7 @@ error_code("InterruptedAtShutdown", 11600)
error_code("Interrupted", 11601)
error_code("InterruptedDueToStepDown", 11602)
error_code("OutOfDiskSpace", 14031 )
+# TODO SERVER-36385: Mark KeyTooLong error as obsolete
error_code("KeyTooLong", 17280);
error_code("BackgroundOperationInProgressForDatabase", 12586);
error_code("BackgroundOperationInProgressForNamespace", 12587);
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
index 594b0265a43..474ebf8112e 100644
--- a/src/mongo/db/catalog/index_consistency.cpp
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/query/query_yield.h"
+#include "mongo/db/server_options.h"
#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/sorted_data_interface.h"
#include "mongo/util/elapsed_tracker.h"
@@ -47,6 +48,12 @@ namespace mongo {
namespace {
// The number of items we can scan before we must yield.
static const int kScanLimit = 1000;
+
+// TODO SERVER-36385: Completely remove the key size check in 4.4
+bool largeKeyDisallowed() {
+ return (serverGlobalParams.featureCompatibility.getVersion() ==
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40);
+}
} // namespace
IndexConsistency::IndexConsistency(OperationContext* opCtx,
@@ -227,8 +234,9 @@ void IndexConsistency::applyChange(const IndexDescriptor* descriptor,
_setYieldAtRecord_inlock(indexEntry->loc);
if (_isBeforeLastProcessedRecordId_inlock(indexEntry->loc)) {
if (operation == ValidationOperation::INSERT) {
- if (indexEntry->key.objsize() >=
- static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
+ if (largeKeyDisallowed() &&
+ indexEntry->key.objsize() >=
+ static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
// Index keys >= 1024 bytes are not indexed but are stored in the document key
// set.
_indexesInfo[indexNumber].numRecords++;
@@ -237,8 +245,9 @@ void IndexConsistency::applyChange(const IndexDescriptor* descriptor,
_addDocKey_inlock(ks, indexNumber);
}
} else if (operation == ValidationOperation::REMOVE) {
- if (indexEntry->key.objsize() >=
- static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
+ if (largeKeyDisallowed() &&
+ indexEntry->key.objsize() >=
+ static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
_indexesInfo[indexNumber].numRecords--;
_indexesInfo[indexNumber].numLongKeys--;
} else {
@@ -249,7 +258,8 @@ void IndexConsistency::applyChange(const IndexDescriptor* descriptor,
} else if (_stage == ValidationStage::INDEX) {
// Index entries with key sizes >= 1024 bytes are not indexed.
- if (indexEntry->key.objsize() >= static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
+ if (largeKeyDisallowed() &&
+ indexEntry->key.objsize() >= static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
return;
}
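Observable effect of the largeKeyDisallowed() gate (a sketch, not in this patch): at FCV 4.0 an oversized key is still tolerated by validation as a counted "long key" with no index entry, while at FCV 4.2 every key is expected to appear in the index.

    // Validation passes in both modes; only the long-key accounting differs.
    const res = db.bigkeys_demo.validate({full: true});  // hypothetical collection
    assert.commandWorked(res);
    assert(res.valid);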
diff --git a/src/mongo/db/catalog/index_consistency.h b/src/mongo/db/catalog/index_consistency.h
index eac80d3341f..753e12c5ece 100644
--- a/src/mongo/db/catalog/index_consistency.h
+++ b/src/mongo/db/catalog/index_consistency.h
@@ -102,6 +102,7 @@ public:
/**
* Add one to the `_longKeys` count for the given `indexNs`.
* This is required because index keys > `KeyString::kMaxKeyBytes` are not indexed.
+ * TODO SERVER-36385: Completely remove the key size check in 4.4
*/
void addLongIndexKey(int indexNumber);
diff --git a/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp b/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
index cf26dc31084..58d92ce2a49 100644
--- a/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
+++ b/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
@@ -46,6 +46,14 @@
namespace mongo {
+namespace {
+// TODO SERVER-36385: Completely remove the key size check in 4.4
+bool isLargeKeyDisallowed() {
+ return serverGlobalParams.featureCompatibility.getVersion() ==
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40;
+}
+}  // namespace
+
Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
const RecordData& record,
size_t* dataSize) {
@@ -101,9 +109,11 @@ Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
const auto& pattern = descriptor->keyPattern();
const Ordering ord = Ordering::make(pattern);
+ bool largeKeyDisallowed = isLargeKeyDisallowed();
for (const auto& key : documentKeySet) {
- if (key.objsize() >= static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
+ if (largeKeyDisallowed &&
+ key.objsize() >= static_cast<int64_t>(KeyString::TypeBits::kMaxKeyBytes)) {
// Index keys >= 1024 bytes are not indexed.
_indexConsistency->addLongIndexKey(indexNumber);
continue;
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 0dbe51ffbc9..9331c7ef395 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -85,6 +85,19 @@ std::vector<BSONObj> asVector(const BSONObjSet& objSet) {
return {objSet.begin(), objSet.end()};
}
+// TODO SERVER-36385: Remove this
+const int TempKeyMaxSize = 1024;
+
+// TODO SERVER-36385: Completely remove the key size check in 4.4
+Status checkKeySize(const BSONObj& key) {
+ if (key.objsize() >= TempKeyMaxSize) {
+ std::string msg = mongoutils::str::stream() << "Index key too large to index, failing "
+ << key.objsize() << ' ' << redact(key);
+ return Status(ErrorCodes::KeyTooLong, msg);
+ }
+ return Status::OK();
+}
+
} // namespace
// TODO SERVER-36386: Remove the server parameter
@@ -125,18 +138,31 @@ IndexAccessMethod::IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataIn
verify(IndexDescriptor::isIndexVersionSupported(_descriptor->version()));
}
-bool IndexAccessMethod::ignoreKeyTooLong(OperationContext* opCtx) {
- // Ignore this error if we cannot write to the collection or if the user requested it
+// TODO SERVER-36385: Remove this when there is no KeyTooLong error.
+bool IndexAccessMethod::ignoreKeyTooLong() {
+ return !failIndexKeyTooLongParam();
+}
+
+// TODO SERVER-36385: Remove this when there is no KeyTooLong error.
+bool IndexAccessMethod::shouldCheckIndexKeySize(OperationContext* opCtx) {
+ // Don't check index key size if we cannot write to the collection. That indicates we are a
+ // secondary node and we should accept any index key.
+ const NamespaceString collName(_btreeState->ns());
const auto shouldRelaxConstraints =
- repl::ReplicationCoordinator::get(opCtx)->shouldRelaxIndexConstraints(
- opCtx, NamespaceString(_btreeState->ns()));
- return shouldRelaxConstraints || !failIndexKeyTooLongParam();
+ repl::ReplicationCoordinator::get(opCtx)->shouldRelaxIndexConstraints(opCtx, collName);
+
+ // Don't check index key size if FCV hasn't been initialized.
+ return !shouldRelaxConstraints &&
+ serverGlobalParams.featureCompatibility.isVersionInitialized() &&
+ serverGlobalParams.featureCompatibility.getVersion() ==
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40;
}
bool IndexAccessMethod::isFatalError(OperationContext* opCtx, Status status, BSONObj key) {
// If the status is Status::OK(), or if it is ErrorCodes::KeyTooLong and the user has chosen to
// ignore this error, return false immediately.
- if (status.isOK() || (status == ErrorCodes::KeyTooLong && ignoreKeyTooLong(opCtx))) {
+ // TODO SERVER-36385: Remove this when there is no KeyTooLong error.
+ if (status.isOK() || (status == ErrorCodes::KeyTooLong && ignoreKeyTooLong())) {
return false;
}
@@ -157,6 +183,7 @@ Status IndexAccessMethod::insert(OperationContext* opCtx,
int64_t* numInserted) {
invariant(numInserted);
*numInserted = 0;
+ bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);
BSONObjSet multikeyMetadataKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
MultikeyPaths multikeyPaths;
@@ -169,7 +196,9 @@ Status IndexAccessMethod::insert(OperationContext* opCtx,
for (const auto keySet : {&keys, &multikeyMetadataKeys}) {
const auto& recordId = (keySet == &keys ? loc : kMultikeyMetadataKeyId);
for (const auto& key : *keySet) {
- Status status = _newInterface->insert(opCtx, key, recordId, options.dupsAllowed);
+ Status status = checkIndexKeySize ? checkKeySize(key) : Status::OK();
+ if (status.isOK())
+ status = _newInterface->insert(opCtx, key, recordId, options.dupsAllowed);
if (isFatalError(opCtx, status, key)) {
return status;
}
@@ -401,6 +430,8 @@ Status IndexAccessMethod::update(OperationContext* opCtx,
_newInterface->unindex(opCtx, remKey, ticket.loc, ticket.dupsAllowed);
}
+ bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);
+
// Add all new data keys, and all new multikey metadata keys, into the index. When iterating
// over the data keys, each of them should point to the doc's RecordId. When iterating over
// the multikey metadata keys, they should point to the reserved 'kMultikeyMetadataKeyId'.
@@ -408,7 +439,9 @@ Status IndexAccessMethod::update(OperationContext* opCtx,
for (const auto keySet : {&ticket.added, &newMultikeyMetadataKeys}) {
const auto& recordId = (keySet == &ticket.added ? ticket.loc : kMultikeyMetadataKeyId);
for (const auto& key : *keySet) {
- Status status = _newInterface->insert(opCtx, key, recordId, ticket.dupsAllowed);
+ Status status = checkIndexKeySize ? checkKeySize(key) : Status::OK();
+ if (status.isOK())
+ status = _newInterface->insert(opCtx, key, recordId, ticket.dupsAllowed);
if (isFatalError(opCtx, status, key)) {
return status;
}
@@ -505,6 +538,8 @@ Status IndexAccessMethod::commitBulk(OperationContext* opCtx,
auto builder = std::unique_ptr<SortedDataBuilderInterface>(
_newInterface->getBulkBuilder(opCtx, dupsAllowed));
+ bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);
+
while (it->more()) {
if (mayInterrupt) {
opCtx->checkForInterrupt();
@@ -514,11 +549,15 @@ Status IndexAccessMethod::commitBulk(OperationContext* opCtx,
// Get the next datum and add it to the builder.
BulkBuilder::Sorter::Data data = it->next();
- Status status = builder->addKey(data.first, data.second);
+
+ Status status = checkIndexKeySize ? checkKeySize(data.first) : Status::OK();
+ if (status.isOK())
+ status = builder->addKey(data.first, data.second);
if (!status.isOK()) {
// Overlong key that's OK to skip?
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(opCtx)) {
+ // TODO SERVER-36385: Remove this when there is no KeyTooLong error.
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong()) {
continue;
}
@@ -563,6 +602,7 @@ void IndexAccessMethod::getKeys(const BSONObj& obj,
BSONObjSet* keys,
BSONObjSet* multikeyMetadataKeys,
MultikeyPaths* multikeyPaths) const {
+ // TODO SERVER-36385: Remove ErrorCodes::KeyTooLong.
static stdx::unordered_set<int> whiteList{ErrorCodes::CannotBuildIndexKeys,
// Btree
ErrorCodes::KeyTooLong,
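Net effect of shouldCheckIndexKeySize(): the old 1024-byte limit is now enforced only on writable nodes whose FCV is initialized and fully downgraded to 4.0; secondaries and FCV 4.2 nodes accept keys of any size. A shell sketch of the toggle (assuming a standalone 4.2 mongod; names are illustrative):

    const coll = db.key_size_demo;  // hypothetical collection
    coll.drop();
    assert.commandWorked(coll.createIndex({x: 1}));

    // Enforced while the FCV is 4.0...
    assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
    assert.writeErrorWithCode(coll.insert({x: "a".repeat(2000)}), ErrorCodes.KeyTooLong);

    // ...and lifted once the FCV is raised back to 4.2.
    assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
    assert.writeOK(coll.insert({x: "a".repeat(2000)}));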
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 07d8fa28c75..440fae9dedc 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -348,11 +348,6 @@ protected:
MultikeyPaths* multikeyPaths) const = 0;
/**
- * Determines whether it's OK to ignore ErrorCodes::KeyTooLong for this OperationContext
- */
- bool ignoreKeyTooLong(OperationContext* opCtx);
-
- /**
* Determine whether the given Status represents an exception that should cause the indexing
* process to abort. The 'key' argument is passed in to allow the offending entry to be logged
* in the event that a non-fatal 'ErrorCodes::DuplicateKeyValue' is encountered during a
@@ -364,6 +359,18 @@ protected:
const IndexDescriptor* _descriptor;
private:
+ /**
+     * Determines whether it's OK to ignore ErrorCodes::KeyTooLong.
+ * TODO SERVER-36385: Remove this function.
+ */
+ bool ignoreKeyTooLong();
+
+ /**
+     * Returns true if we should check whether the index key exceeds the hardcoded limit.
+ * TODO SERVER-36385: Remove this function.
+ */
+ bool shouldCheckIndexKeySize(OperationContext* opCtx);
+
void removeOneKey(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 968e94780b2..9417ae01589 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -1221,29 +1221,6 @@ TEST_F(IdempotencyTest, ParallelArrayError) {
ASSERT_EQ(status.code(), ErrorCodes::CannotIndexParallelArrays);
}
-TEST_F(IdempotencyTest, IndexKeyTooLongError) {
- ASSERT_OK(
- ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING));
-
- ASSERT_OK(runOpInitialSync(createCollection()));
- ASSERT_OK(runOpInitialSync(insert(fromjson("{_id: 1}"))));
-
- // Key size limit is 1024 for ephemeral storage engine, so two 800 byte fields cannot
- // co-exist.
- std::string longStr(800, 'a');
- auto updateOp1 = update(1, BSON("$set" << BSON("x" << longStr)));
- auto updateOp2 = update(1, fromjson("{$set: {x: 1}}"));
- auto updateOp3 = update(1, BSON("$set" << BSON("y" << longStr)));
- auto indexOp = buildIndex(fromjson("{x: 1, y: 1}"));
-
- auto ops = {updateOp1, updateOp2, updateOp3, indexOp};
- testOpsAreIdempotent(ops);
-
- ASSERT_OK(ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY));
- auto status = runOpsInitialSync(ops);
- ASSERT_EQ(status.code(), ErrorCodes::KeyTooLong);
-}
-
TEST_F(IdempotencyTest, IndexWithDifferentOptions) {
ASSERT_OK(
ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING));
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp b/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
index 66beebd5aba..d77252eb524 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
+++ b/src/mongo/db/storage/biggie/biggie_sorted_impl.cpp
@@ -55,8 +55,6 @@ namespace mongo {
namespace biggie {
namespace {
-const int TempKeyMaxSize = 1024; // this goes away with SERVER-3372
-
// This function is the same as the one in record store--basically, using the git analogy, create
// a working branch if one does not exist.
StringStore* getRecoveryUnitBranch_forking(OperationContext* opCtx) {
@@ -242,10 +240,6 @@ void SortedDataBuilderInterface::commit(bool mayInterrupt) {
Status SortedDataBuilderInterface::addKey(const BSONObj& key, const RecordId& loc) {
StringStore* workingCopy = getRecoveryUnitBranch_forking(_opCtx);
- if (key.objsize() >= TempKeyMaxSize) {
- return Status(ErrorCodes::KeyTooLong, "key too big");
- }
-
invariant(loc.isNormal());
invariant(!hasFieldNames(key));
@@ -322,10 +316,6 @@ Status SortedDataInterface::insert(OperationContext* opCtx,
StringStore* workingCopy = getRecoveryUnitBranch_forking(opCtx);
- if (key.objsize() >= TempKeyMaxSize) {
- return Status(ErrorCodes::KeyTooLong, "Error: key too long");
- }
-
if (workingCopy->find(workingCopyInsertKey) != workingCopy->end()) {
return Status::OK();
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
index 75c30bc0f68..2e7b939c26f 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
@@ -48,7 +48,6 @@ using std::vector;
namespace {
-const int TempKeyMaxSize = 1024; // this goes away with SERVER-3372
bool hasFieldNames(const BSONObj& obj) {
BSONForEach(e, obj) {
@@ -102,10 +101,6 @@ public:
Status addKey(const BSONObj& key, const RecordId& loc) {
// inserts should be in ascending (key, RecordId) order.
- if (key.objsize() >= TempKeyMaxSize) {
- return Status(ErrorCodes::KeyTooLong, "key too big");
- }
-
invariant(loc.isValid());
invariant(!hasFieldNames(key));
@@ -153,12 +148,6 @@ public:
invariant(loc.isValid());
invariant(!hasFieldNames(key));
- if (key.objsize() >= TempKeyMaxSize) {
- string msg = mongoutils::str::stream()
- << "EphemeralForTestBtree::insert: key too large to index, failing " << ' '
- << key.objsize() << ' ' << key;
- return Status(ErrorCodes::KeyTooLong, msg);
- }
// TODO optimization: save the iterator from the dup-check to speed up insert
if (!dupsAllowed && isDup(*_data, key, loc))
diff --git a/src/mongo/db/storage/mobile/mobile_index.cpp b/src/mongo/db/storage/mobile/mobile_index.cpp
index bf61b8fe5cd..c42a186707b 100644
--- a/src/mongo/db/storage/mobile/mobile_index.cpp
+++ b/src/mongo/db/storage/mobile/mobile_index.cpp
@@ -48,8 +48,6 @@ using std::vector;
// BTree stuff
-const int TempKeyMaxSize = 1024; // This goes away with SERVER-3372.
-
bool hasFieldNames(const BSONObj& obj) {
BSONForEach(e, obj) {
if (e.fieldName()[0])
@@ -83,11 +81,6 @@ Status MobileIndex::insert(OperationContext* opCtx,
invariant(recId.isValid());
invariant(!hasFieldNames(key));
- Status status = _checkKeySize(key);
- if (!status.isOK()) {
- return status;
- }
-
return _insert(opCtx, key, recId, dupsAllowed);
}
@@ -284,13 +277,6 @@ Status MobileIndex::_dupKeyError(const BSONObj& key) {
return Status(ErrorCodes::DuplicateKey, sb.str());
}
-Status MobileIndex::_checkKeySize(const BSONObj& key) {
- if (key.objsize() >= TempKeyMaxSize) {
- return Status(ErrorCodes::KeyTooLong, "key too big");
- }
- return Status::OK();
-}
-
class MobileIndex::BulkBuilderBase : public SortedDataBuilderInterface {
public:
BulkBuilderBase(MobileIndex* index, OperationContext* opCtx, bool dupsAllowed)
@@ -302,12 +288,7 @@ public:
invariant(recId.isValid());
invariant(!hasFieldNames(key));
- Status status = _checkKeySize(key);
- if (!status.isOK()) {
- return status;
- }
-
- status = _checkNextKey(key);
+ Status status = _checkNextKey(key);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/storage/mobile/mobile_index.h b/src/mongo/db/storage/mobile/mobile_index.h
index a22ba4f4e1c..dbf0d7d41da 100644
--- a/src/mongo/db/storage/mobile/mobile_index.h
+++ b/src/mongo/db/storage/mobile/mobile_index.h
@@ -113,11 +113,6 @@ protected:
Status _dupKeyError(const BSONObj& key);
/**
- * Checks if key size is too long.
- */
- static Status _checkKeySize(const BSONObj& key);
-
- /**
* Performs the deletion from the table matching the given key.
*/
void _doDelete(OperationContext* opCtx, const KeyString& key, KeyString* value = nullptr);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 72d7c851266..8dfdd598764 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -80,8 +80,6 @@ MONGO_FAIL_POINT_DEFINE(WTEmulateOutOfOrderNextIndexKey);
using std::string;
using std::vector;
-static const int TempKeyMaxSize = 1024; // this goes away with SERVER-3372
-
static const WiredTigerItem emptyItem(NULL, 0);
@@ -103,16 +101,6 @@ BSONObj stripFieldNames(const BSONObj& query) {
}
return bb.obj();
}
-
-Status checkKeySize(const BSONObj& key) {
- if (key.objsize() >= TempKeyMaxSize) {
- string msg = mongoutils::str::stream()
- << "WiredTigerIndex::insert: key too large to index, failing " << ' ' << key.objsize()
- << ' ' << key;
- return Status(ErrorCodes::KeyTooLong, msg);
- }
- return Status::OK();
-}
} // namespace
@@ -321,10 +309,6 @@ Status WiredTigerIndex::insert(OperationContext* opCtx,
invariant(id.isValid());
dassert(!hasFieldNames(key));
- Status s = checkKeySize(key);
- if (!s.isOK())
- return s;
-
WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
@@ -628,12 +612,6 @@ public:
: BulkBuilder(idx, opCtx, prefix), _idx(idx) {}
Status addKey(const BSONObj& key, const RecordId& id) override {
- {
- const Status s = checkKeySize(key);
- if (!s.isOK())
- return s;
- }
-
KeyString data(_idx->keyStringVersion(), key, _idx->_ordering, id);
// Can't use WiredTigerCursor since we aren't using the cache.
@@ -699,12 +677,6 @@ public:
private:
Status addKeyTimestampSafe(const BSONObj& newKey, const RecordId& id) {
- {
- const Status s = checkKeySize(newKey);
- if (!s.isOK())
- return s;
- }
-
// Do a duplicate check
const int cmp = newKey.woCompare(_previousKey, _ordering);
if (cmp == 0) {
@@ -738,12 +710,6 @@ private:
}
Status addKeyTimestampUnsafe(const BSONObj& newKey, const RecordId& id) {
- {
- const Status s = checkKeySize(newKey);
- if (!s.isOK())
- return s;
- }
-
const int cmp = newKey.woCompare(_previousKey, _ordering);
if (cmp != 0) {
if (!_previousKey.isEmpty()) {