author     Pierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>    2021-10-27 13:55:13 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>         2021-10-27 14:23:55 +0000
commit     0ff1013b7da4f6e2b61cc6eab6f846d05bc80206 (patch)
tree       c629f7d868af07f093696fc918917ec69ad70cd7
parent     5c466f92034e113ac3006b956492a7b51c0a6644 (diff)
download   mongo-0ff1013b7da4f6e2b61cc6eab6f846d05bc80206.tar.gz
SERVER-60654 Backport the autoSplitVector logic to v4.0
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml  1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_misc.yml  1
-rw-r--r--  jstests/core/views/views_all_commands.js  8
-rw-r--r--  jstests/replsets/db_reads_while_recovering_all_commands.js  1
-rw-r--r--  jstests/sharding/autosplit_heuristics.js  90
-rw-r--r--  jstests/sharding/autosplit_with_balancer.js  5
-rw-r--r--  jstests/sharding/jumbo1.js  38
-rw-r--r--  jstests/sharding/presplit.js  9
-rw-r--r--  jstests/sharding/shard_existing.js  12
-rw-r--r--  jstests/sharding/sharding_balance1.js  10
-rw-r--r--  jstests/sharding/sharding_rs1.js  9
-rw-r--r--  jstests/sharding/write_cmd_auto_split.js  4
-rw-r--r--  jstests/ssl/libs/ssl_helpers.js  4
-rw-r--r--  src/mongo/db/s/SConscript  3
-rw-r--r--  src/mongo/db/s/auto_split_vector.cpp  358
-rw-r--r--  src/mongo/db/s/auto_split_vector.h  134
-rw-r--r--  src/mongo/db/s/auto_split_vector_command.cpp  98
-rw-r--r--  src/mongo/db/s/auto_split_vector_test.cpp  262
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp  3
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp  23
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp  3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp  14
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp  2
-rw-r--r--  src/mongo/s/SConscript  1
-rw-r--r--  src/mongo/s/request_types/auto_split_vector.idl  71
-rw-r--r--  src/mongo/s/shard_util.cpp  78
-rw-r--r--  src/mongo/s/shard_util.h  3
-rw-r--r--  src/mongo/s/write_ops/cluster_write.cpp  3
28 files changed, 1056 insertions, 192 deletions
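
Overview: this backport introduces a dedicated, shard-only autoSplitVector command (IDL, command class and splitting logic under src/mongo/db/s/) and switches the chunk splitter, the initial split policy and the shard_util helpers over to it, with a fallback to the legacy splitVector command for older binaries. As a quick orientation, here is a hypothetical mongo-shell sketch of the request/response shape implied by the IDL added below; the connection, namespace and sizes are made up for illustration.

    // Hypothetical invocation against a shard primary. Field names come from
    // auto_split_vector.idl in this patch; "shardConn", "test.coll" and the 64MB
    // value are illustrative assumptions.
    const res = shardConn.getDB("test").runCommand({
        autoSplitVector: "coll",             // namespace is concatenate_with_db -> "test.coll"
        keyPattern: {_id: 1},                // shard key pattern of the collection
        min: {_id: MinKey},                  // lower bound of the chunk
        max: {_id: MaxKey},                  // upper bound of the chunk
        maxChunkSizeBytes: NumberLong(64 * 1024 * 1024)
    });
    assert.commandWorked(res);
    printjson(res.splitKeys);  // sorted shard keys: lower bounds of the chunks to create
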
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
index 6e45219d2d4..03270732696 100644
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
@@ -20,7 +20,6 @@ selector:
- jstests/sharding/autosplit.js
- jstests/sharding/auto_rebalance_parallel.js
- jstests/sharding/auto_rebalance_parallel_replica_sets.js
- - jstests/sharding/autosplit_heuristics.js
- jstests/sharding/basic_drop_coll.js
- jstests/sharding/basic_merge.js
- jstests/sharding/count1.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_misc.yml b/buildscripts/resmokeconfig/suites/sharding_misc.yml
index 765416e3444..6285d7bffea 100644
--- a/buildscripts/resmokeconfig/suites/sharding_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_misc.yml
@@ -164,7 +164,6 @@ selector:
- jstests/sharding/secondary_shard_versioning.js
- jstests/sharding/move_chunk_wc.js
- jstests/sharding/shard_aware_init_secondaries.js
- - jstests/sharding/autosplit_heuristics.js
- jstests/sharding/move_chunk_open_cursors.js
- jstests/sharding/basic_split.js
- jstests/sharding/merge_chunks_test_with_md_ops.js
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 9c49bdbf4a6..ad017402d5b 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -120,6 +120,14 @@
skipSharded: true,
},
authenticate: {skip: isUnrelated},
+ autoSplitVector: {
+ command: {
+ splitVector: "test.view",
+ keyPattern: {x: 1},
+ maxChunkSize: 1,
+ },
+ expectFailure: true,
+ },
availableQueryOptions: {skip: isAnInternalCommand},
balancerStart: {skip: isUnrelated},
balancerStatus: {skip: isUnrelated},
diff --git a/jstests/replsets/db_reads_while_recovering_all_commands.js b/jstests/replsets/db_reads_while_recovering_all_commands.js
index 4004245070a..6bcdce6d723 100644
--- a/jstests/replsets/db_reads_while_recovering_all_commands.js
+++ b/jstests/replsets/db_reads_while_recovering_all_commands.js
@@ -78,6 +78,7 @@
appendOplogNote: {skip: isPrimaryOnly},
applyOps: {skip: isPrimaryOnly},
authenticate: {skip: isNotAUserDataRead},
+ autoSplitVector: {skip: isNotAUserDataRead},
availableQueryOptions: {skip: isNotAUserDataRead},
buildInfo: {skip: isNotAUserDataRead},
captrunc: {skip: isPrimaryOnly},
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
deleted file mode 100644
index 46daff22e57..00000000000
--- a/jstests/sharding/autosplit_heuristics.js
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Tests autosplitting heuristics, and that the heuristic counting of chunk sizes
- * works as expected even after splitting.
- *
- * This test is labeled resource intensive because its total io_write is 53MB compared to a median
- * of 5MB across all sharding tests in wiredTiger.
- * @tags: [resource_intensive]
- */
-(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
-
- // The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk
- // moves/splits depending on the timing.
-
- // Test is not valid for debug build, heuristics get all mangled by debug reload behavior
- var isDebugBuild = st.s0.getDB("admin").serverBuildInfo().debug;
-
- if (!isDebugBuild) {
- var mongos = st.s0;
- var config = mongos.getDB("config");
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.hashBar");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-
- var numChunks = 10;
-
- // Split off the low and high chunks, to get non-special-case behavior
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: numChunks + 1}}));
-
- // Split all the other chunks, and an extra chunk. We need the extra chunk to compensate for
- // the fact that the chunk differ resets the highest chunk's (i.e. the last-split-chunk's)
- // data count on reload.
- for (var i = 1; i < numChunks + 1; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- }
-
- jsTest.log("Setup collection...");
- st.printShardingStatus(true);
-
- var approxSize = Object.bsonsize({_id: 0.0});
-
- jsTest.log("Starting inserts of approx size: " + approxSize + "...");
-
- var chunkSizeBytes = 1024 * 1024;
-
- // We insert slightly more than the max number of docs per chunk, to test
- // if resetting the chunk size happens during reloads. If the size is
- // reset, we'd expect to split less, since the first split would then
- // disable further splits (statistically, since the decision is randomized).
- // We choose 1.4 since split attempts happen about once every 1/5 chunkSize,
- // and we want to be sure we def get a split attempt at a full chunk.
- var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
- var totalInserts = insertsForSplit * numChunks;
-
- printjson({
- chunkSizeBytes: chunkSizeBytes,
- insertsForSplit: insertsForSplit,
- totalInserts: totalInserts
- });
-
- // Insert enough docs to trigger splits into all chunks
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < totalInserts; i++) {
- bulk.insert({_id: i % numChunks + (i / totalInserts)});
- }
- assert.writeOK(bulk.execute());
-
- jsTest.log("Inserts completed...");
-
- st.printShardingStatus(true);
- printjson(coll.stats());
-
- // Check that all chunks (except the two extreme chunks)
- // have been split at least once + 1 extra chunk as reload buffer
- assert.gte(config.chunks.count({"ns": "foo.hashBar"}), numChunks * 2 + 3);
-
- jsTest.log("DONE!");
-
- } else {
- jsTest.log("Disabled test in debug builds.");
- }
-
- st.stop();
-
-})();
diff --git a/jstests/sharding/autosplit_with_balancer.js b/jstests/sharding/autosplit_with_balancer.js
index c6bce40aa9a..83c63f3907d 100644
--- a/jstests/sharding/autosplit_with_balancer.js
+++ b/jstests/sharding/autosplit_with_balancer.js
@@ -7,10 +7,7 @@
s.ensurePrimaryShard('test', s.shard1.shardName);
s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
- var bigString = "";
- while (bigString.length < 1024 * 50) {
- bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
- }
+ var bigString = "X".repeat(1024 * 1024); // 1MB
var db = s.getDB("test");
var coll = db.foo;
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index 573d7b1e5f7..f01d51942f8 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -9,18 +9,18 @@
var db = s.getDB("test");
- const big = 'X'.repeat(10000);
+ const big = 'X'.repeat(1024 * 1024); // 1MB
- // Create sufficient documents to create a jumbo chunk, and use the same shard key in all of
+ // Insert 3MB of documents to create a jumbo chunk, and use the same shard key in all of
// them so that the chunk cannot be split.
var x = 0;
var bulk = db.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 500; i++) {
+ for (var i = 0; i < 3; i++) {
bulk.insert({x: x, big: big});
}
// Create documents with different shard keys that can be split and moved without issue.
- for (; x < 1500; x++) {
+ for (; x < 20; x++) {
bulk.insert({x: x, big: big});
}
@@ -30,25 +30,17 @@
s.startBalancer();
- function diff1() {
- var x = s.chunkCounts("foo");
- printjson(x);
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
- }
-
- assert.soon(function() {
- var d = diff1();
- print("diff: " + d);
- s.printShardingStatus(true);
- return d < 5;
- }, "balance didn't happen", 1000 * 60 * 10, 5000);
-
- // Check that the jumbo chunk did not move, which shouldn't be possible.
- var jumboChunk =
- s.getDB('config').chunks.findOne({ns: 'test.foo', min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
- assert.eq(
- s.shard1.shardName, jumboChunk.shard, 'jumbo chunk ' + tojson(jumboChunk) + ' was moved');
+ assert.soon(() => {
+ // Check that the jumbo chunk did not move (moving it shouldn't be possible) and that
+ // the jumbo flag has been added.
+ var jumboChunk = s.getDB('config').chunks.findOne(
+ {ns: 'test.foo', min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
+ assert.eq(s.shard1.shardName,
+ jumboChunk.shard,
+ 'jumbo chunk ' + tojson(jumboChunk) + ' was moved');
+ return jumboChunk.jumbo;
+ });
// TODO: SERVER-26531 Make sure that balancer marked the first chunk as jumbo.
// Assumption: balancer favors moving the lowest valued chunk out of a shard.
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index ec71924fc53..392d451d3ad 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -1,3 +1,9 @@
+/*
+ * @tags: [
+ * requires_fcv_40, # autoSplitVector not present in v3.6 binaries
+ * ]
+ */
+
(function() {
var s = new ShardingTest({name: "presplit", shards: 2, mongos: 1, other: {chunkSize: 1}});
@@ -31,10 +37,9 @@
// Make sure the collection's original chunk got split
s.printChunks();
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "many chunks assertion");
+ assert.lte(20, s.config.chunks.count({"ns": "test.foo"}), "many chunks assertion");
assert.eq(num, primary.foo.count());
s.printChangeLog();
s.stop();
-
})();
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 1c8415662a7..30c3b0c3188 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,3 +1,9 @@
+/*
+ * @tags: [
+ * requires_fcv_40, # autoSplitVector not present in v3.6 binaries
+ * ]
+ */
+
(function() {
'use strict';
@@ -28,10 +34,10 @@
var res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
printjson(res);
- // number of chunks should be approx equal to the total data size / half the chunk size
+ // number of chunks should be approx equal to the total data size / chunk size
var numChunks = s.config.chunks.find({ns: 'test.data'}).itcount();
- var guess = Math.ceil(dataSize / (512 * 1024 + avgObjSize));
- assert(Math.abs(numChunks - guess) < 2, "not right number of chunks");
+ var guess = Math.ceil(dataSize / (1024 * 1024 + avgObjSize));
+ assert.lte(Math.abs(numChunks - guess), 2, "not right number of chunks");
s.stop();
})();
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index 413a7194c22..57fb1c3ddb3 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -1,3 +1,9 @@
+/*
+ * @tags: [
+ * requires_fcv_40, # autoSplitVector not present in v3.6 binaries
+ * ]
+ */
+
(function() {
'use strict';
@@ -23,7 +29,7 @@
assert.writeOK(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
+ assert.lte(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
function diff1() {
var x = s.chunkCounts("foo");
@@ -37,7 +43,7 @@
return x[s.shard0.shardName] + x[s.shard1.shardName];
}
- assert.lt(20, diff1(), "big differential here");
+ assert.lte(20, diff1(), "big differential here");
print(diff1());
assert.soon(function() {
diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js
index 07af5b03862..43b8294cbf3 100644
--- a/jstests/sharding/sharding_rs1.js
+++ b/jstests/sharding/sharding_rs1.js
@@ -8,7 +8,7 @@
(function() {
'use strict';
- var s = new ShardingTest({shards: 3, other: {rs: true, chunkSize: 1, enableBalancer: true}});
+ var s = new ShardingTest({shards: 3, other: {rs: true, chunkSize: 2, enableBalancer: true}});
s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', s.shard0.shardName);
@@ -16,14 +16,14 @@
var db = s.getDB("test");
- var bigString = "X".repeat(256 * 1024);
+ var bigString = "X".repeat(256 * 1024); // 256 KB
var insertedBytes = 0;
var num = 0;
- // Insert 10 MB of data to result in 10+ chunks
+ // Insert 20 MB of data to result in 20+ chunks
var bulk = db.foo.initializeUnorderedBulkOp();
- while (insertedBytes < (10 * 1024 * 1024)) {
+ while (insertedBytes < (20 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString, x: Math.random()});
insertedBytes += bigString.length;
}
@@ -61,5 +61,4 @@
assert.eq(num, db.foo.find().sort({x: -1}).itcount(), "C6");
s.stop();
-
})();
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index 74155d8b0bf..d93686bc1ac 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -40,7 +40,7 @@
assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
- for (var x = 0; x < 2100; x++) {
+ for (var x = 0; x < 2500; x++) {
assert.writeOK(testDB.runCommand({
update: 'update',
updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
@@ -59,7 +59,7 @@
assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- for (var x = 0; x < 1100; x++) {
+ for (var x = 0; x < 1500; x++) {
assert.writeOK(testDB.runCommand({
delete: 'delete',
deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js
index 79bd2721819..0e4b311f932 100644
--- a/jstests/ssl/libs/ssl_helpers.js
+++ b/jstests/ssl/libs/ssl_helpers.js
@@ -144,11 +144,11 @@ function mixedShardTest(options1, options2, shouldSucceed) {
var bigstr = Array(1024 * 1024).join("#");
var bulk = db1.col.initializeUnorderedBulkOp();
- for (var i = 0; i < 128; i++) {
+ for (var i = 0; i < 64; i++) {
bulk.insert({_id: i, string: bigstr});
}
assert.writeOK(bulk.execute());
- assert.eq(128, db1.col.count(), "error retrieving documents from cluster");
+ assert.eq(64, db1.col.count(), "error retrieving documents from cluster");
// Test shards talking to each other
r = st.getDB('test').adminCommand(
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 75fa982f880..7f96b5499ff 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -33,6 +33,7 @@ env.Library(
'active_migrations_registry.cpp',
'active_move_primaries_registry.cpp',
'active_shard_collection_registry.cpp',
+ 'auto_split_vector.cpp',
'chunk_move_write_concern_options.cpp',
'chunk_splitter.cpp',
'collection_range_deleter.cpp',
@@ -220,6 +221,7 @@ env.Library(
env.Library(
target='sharding_commands_d',
source=[
+ 'auto_split_vector_command.cpp',
'check_sharding_index_command.cpp',
'cleanup_orphaned_cmd.cpp',
'clone_catalog_data_command.cpp',
@@ -304,6 +306,7 @@ env.CppUnitTest(
'active_migrations_registry_test.cpp',
'active_move_primaries_registry_test.cpp',
'active_shard_collection_registry_test.cpp',
+ 'auto_split_vector_test.cpp',
'catalog_cache_loader_mock.cpp',
'implicit_create_collection_test.cpp',
'migration_chunk_cloner_source_legacy_test.cpp',
diff --git a/src/mongo/db/s/auto_split_vector.cpp b/src/mongo/db/s/auto_split_vector.cpp
new file mode 100644
index 00000000000..a430d84593c
--- /dev/null
+++ b/src/mongo/db/s/auto_split_vector.cpp
@@ -0,0 +1,358 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/s/auto_split_vector.h"
+
+#include "mongo/base/status_with.h"
+#include "mongo/db/bson/dotted_path_support.h"
+#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/dbhelpers.h"
+#include "mongo/db/exec/working_set_common.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/keypattern.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/query/internal_plans.h"
+#include "mongo/db/query/plan_executor.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+namespace {
+
+constexpr int estimatedAdditionalBytesPerItemInBSONArray{2};
+
+constexpr int kMaxSplitPointsToReposition{3};
+
+BSONObj prettyKey(const BSONObj& keyPattern, const BSONObj& key) {
+ return key.replaceFieldNames(keyPattern).clientReadable();
+}
+
+/*
+ * Takes the given min/max BSON objects that are a prefix of the shardKey and returns two new BSON
+ * objects extended to cover the entire shardKey. See KeyPattern::extendRangeBound documentation for
+ * some examples.
+ */
+const std::tuple<BSONObj, BSONObj> getMinMaxExtendedBounds(const IndexDescriptor* shardKeyIdx,
+ const BSONObj& min,
+ const BSONObj& max) {
+ KeyPattern kp(shardKeyIdx->keyPattern());
+
+ // Extend min to get (min, MinKey, MinKey, ....)
+ BSONObj minKey = Helpers::toKeyFormat(kp.extendRangeBound(min, false /* upperInclusive */));
+ BSONObj maxKey;
+ if (max.isEmpty()) {
+ // if max is not specified, make it (MaxKey, MaxKey, MaxKey...)
+ maxKey = Helpers::toKeyFormat(kp.extendRangeBound(max, true /* upperInclusive */));
+ } else {
+ // otherwise make it (max,MinKey,MinKey...) so that bound is non-inclusive
+ maxKey = Helpers::toKeyFormat(kp.extendRangeBound(max, false /* upperInclusive*/));
+ }
+
+ return std::tuple<BSONObj, BSONObj>(minKey, maxKey);
+}
+
+/*
+ * Returns true if the final key in the range is the same as the first key, false otherwise.
+ */
+bool maxKeyEqualToMinKey(OperationContext* opCtx,
+ const Collection* collection,
+ const IndexDescriptor* shardKeyIdx,
+ const BSONObj& minBound,
+ const BSONObj& maxBound,
+ const BSONObj& minKeyInChunk) {
+ BSONObj maxKeyInChunk;
+ {
+ auto backwardIdxScanner = InternalPlanner::indexScan(opCtx,
+ collection,
+ shardKeyIdx,
+ maxBound,
+ minBound,
+ BoundInclusion::kIncludeEndKeyOnly,
+ PlanExecutor::YIELD_AUTO,
+ InternalPlanner::BACKWARD);
+
+ PlanExecutor::ExecState state = backwardIdxScanner->getNext(&maxKeyInChunk, nullptr);
+ uassert(ErrorCodes::OperationFailed,
+ "can't open a cursor to find final key in range (desired range is possibly empty)",
+ state == PlanExecutor::ADVANCED);
+ }
+
+ if (minKeyInChunk.woCompare(maxKeyInChunk) == 0) {
+ // Range contains only documents with a single key value. So we cannot possibly find a
+ // split point, and there is no need to scan any further.
+ warning() << "possible low cardinality key detected in " << collection->ns().toString()
+ << " - range " << redact(prettyKey(shardKeyIdx->keyPattern(), minKeyInChunk))
+ << " -->> " << redact(prettyKey(shardKeyIdx->keyPattern(), maxKeyInChunk))
+ << " contains only the key "
+ << redact(prettyKey(shardKeyIdx->keyPattern(), minKeyInChunk));
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Reshuffle fields according to the shard key pattern.
+ */
+auto orderShardKeyFields(const BSONObj& keyPattern, BSONObj& key) {
+ return dotted_path_support::extractElementsBasedOnTemplate(
+ prettyKey(keyPattern, key.getOwned()), keyPattern);
+}
+
+} // namespace
+
+std::vector<BSONObj> autoSplitVector(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& keyPattern,
+ const BSONObj& min,
+ const BSONObj& max,
+ long long maxChunkSizeBytes) {
+ std::vector<BSONObj> splitKeys;
+
+ int elapsedMillisToFindSplitPoints;
+
+ // Keys that appear multiple times and are each estimated to be able to fill a chunk on their own
+ auto tooFrequentKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
+
+ {
+ AutoGetCollection autoGetColl(opCtx, nss, MODE_IS);
+ auto* collection = autoGetColl.getCollection();
+
+ uassert(ErrorCodes::NamespaceNotFound, "ns not found", collection);
+
+ // Get the size estimate for this namespace
+ const long long totalLocalCollDocuments = collection->numRecords(opCtx);
+ const long long dataSize = collection->dataSize(opCtx);
+
+ // Return empty vector if current estimated data size is less than max chunk size
+ if (dataSize < maxChunkSizeBytes || totalLocalCollDocuments == 0) {
+ return {};
+ }
+
+ // Allow multiKey based on the invariant that shard keys must be single-valued. Therefore,
+ // any multi-key index prefixed by shard key cannot be multikey over the shard key fields.
+ auto catalog = collection->getIndexCatalog();
+ auto shardKeyIdx =
+ catalog->findShardKeyPrefixedIndex(opCtx, keyPattern, false /* requireSingleKey */);
+ uassert(ErrorCodes::IndexNotFound,
+ str::stream() << "couldn't find index over splitting key "
+ << keyPattern.clientReadable().toString(),
+ shardKeyIdx);
+
+ const auto minKeyAndMaxKey = getMinMaxExtendedBounds(shardKeyIdx, min, max);
+ const auto minKey = std::get<0>(minKeyAndMaxKey);
+ const auto maxKey = std::get<1>(minKeyAndMaxKey);
+
+ // Setup the index scanner that will be used to find the split points
+ auto forwardIdxScanner = InternalPlanner::indexScan(opCtx,
+ &(*collection),
+ shardKeyIdx,
+ minKey,
+ maxKey,
+ BoundInclusion::kIncludeStartKeyOnly,
+ PlanExecutor::YIELD_AUTO,
+ InternalPlanner::FORWARD);
+
+ // Get minimum key belonging to the chunk
+ BSONObj minKeyInOriginalChunk;
+ {
+ PlanExecutor::ExecState state =
+ forwardIdxScanner->getNext(&minKeyInOriginalChunk, nullptr);
+ uassert(ErrorCodes::OperationFailed,
+ "can't open a cursor to scan the range (desired range is possibly empty)",
+ state == PlanExecutor::ADVANCED);
+ }
+
+ // Return empty vector if chunk's min and max keys are the same.
+ if (maxKeyEqualToMinKey(
+ opCtx, collection, shardKeyIdx, minKey, maxKey, minKeyInOriginalChunk)) {
+ return {};
+ }
+
+ log() << "request split points lookup for chunk " << nss.toString() << " " << redact(minKey)
+ << " -->> " << redact(maxKey);
+
+ // Use the average document size and number of documents to find the approximate number of
+ // keys each chunk should contain
+ const long long avgDocSize = dataSize / totalLocalCollDocuments;
+
+ // Split at max chunk size
+ long long maxDocsPerChunk = maxChunkSizeBytes / avgDocSize;
+
+ BSONObj currentKey; // Last key seen during the index scan
+ long long numScannedKeys = 1; // minKeyInOriginalChunk has already been scanned
+ std::size_t resultArraySize = 0; // Approximate size in bytes of the split points array
+ bool reachedMaxBSONSize = false; // True if the split points vector becomes too big
+
+ // Lambda to check whether the split points vector would exceed BSONObjMaxUserSize if an
+ // additional split key of the specified size were added.
+ auto checkMaxBSONSize = [&resultArraySize](const int additionalKeySize) {
+ return resultArraySize + additionalKeySize > BSONObjMaxUserSize;
+ };
+
+ // Reference to last split point that needs to be checked in order to avoid adding duplicate
+ // split points. Initialized to the min of the first chunk being split.
+ auto minKeyElement = orderShardKeyFields(keyPattern, minKeyInOriginalChunk);
+ auto lastSplitPoint = minKeyElement;
+
+ Timer timer; // To measure time elapsed while searching split points
+
+ // Traverse the index and add the maxDocsPerChunk-th key to the result vector
+ while (forwardIdxScanner->getNext(&currentKey, nullptr) == PlanExecutor::ADVANCED) {
+ if (++numScannedKeys >= maxDocsPerChunk) {
+ currentKey = orderShardKeyFields(keyPattern, currentKey);
+
+ if (currentKey.woCompare(lastSplitPoint) == 0) {
+ // Do not add the same split point again in case of a frequent shard key.
+ tooFrequentKeys.insert(currentKey.getOwned());
+ continue;
+ }
+
+ const auto additionalKeySize =
+ currentKey.objsize() + estimatedAdditionalBytesPerItemInBSONArray;
+ if (checkMaxBSONSize(additionalKeySize)) {
+ if (splitKeys.empty()) {
+ // Keep trying until finding at least one split point that isn't above
+ // the max object user size. Very improbable corner case: the shard key
+ // size for the chosen split point is exactly 16MB.
+ continue;
+ }
+ reachedMaxBSONSize = true;
+ break;
+ }
+
+ resultArraySize += additionalKeySize;
+ splitKeys.push_back(currentKey.getOwned());
+ lastSplitPoint = splitKeys.back();
+ numScannedKeys = 0;
+
+ LOG(4) << "picked a split key: " << redact(currentKey);
+ }
+ }
+
+ // Avoid creating small chunks by fairly recalculating the last split points if the last
+ // chunk would be too small (containing less than `80% maxDocsPerChunk` documents).
+ bool lastChunk80PercentFull = numScannedKeys >= maxDocsPerChunk * 0.8;
+ if (!lastChunk80PercentFull && !splitKeys.empty() && !reachedMaxBSONSize) {
+ // Possibly recalculate the last split points (at most `kMaxSplitPointsToReposition`).
+ int nSplitPointsToReposition = splitKeys.size() > kMaxSplitPointsToReposition
+ ? kMaxSplitPointsToReposition
+ : splitKeys.size();
+
+ // Equivalent to: (nSplitPointsToReposition * maxDocsPerChunk + numScannedKeys) divided
+ // by the number of reshuffled chunks (nSplitPointsToReposition + 1).
+ const auto maxDocsPerNewChunk = maxDocsPerChunk -
+ ((maxDocsPerChunk - numScannedKeys) / (nSplitPointsToReposition + 1));
+
+ if (numScannedKeys < maxDocsPerChunk - maxDocsPerNewChunk) {
+ // If the surplus is not too much, simply keep a bigger last chunk.
+ // The surplus is considered enough if repositioning the split points would imply
+ // generating chunks with a number of documents lower than `67% maxDocsPerChunk`.
+ splitKeys.pop_back();
+ } else {
+ // Fairly recalculate the last `nSplitPointsToReposition` split points.
+ splitKeys.erase(splitKeys.end() - nSplitPointsToReposition, splitKeys.end());
+
+ auto forwardIdxScanner =
+ InternalPlanner::indexScan(opCtx,
+ collection,
+ shardKeyIdx,
+ splitKeys.empty() ? minKeyElement : splitKeys.back(),
+ maxKey,
+ BoundInclusion::kIncludeStartKeyOnly,
+ PlanExecutor::YIELD_AUTO,
+ InternalPlanner::FORWARD);
+
+ numScannedKeys = 0;
+
+ auto previousSplitPoint = splitKeys.empty() ? minKeyElement : splitKeys.back();
+ while (forwardIdxScanner->getNext(&currentKey, nullptr) == PlanExecutor::ADVANCED) {
+ if (++numScannedKeys >= maxDocsPerNewChunk) {
+ currentKey = orderShardKeyFields(keyPattern, currentKey);
+
+ const auto compareWithPreviousSplitPoint =
+ currentKey.woCompare(previousSplitPoint);
+ if (compareWithPreviousSplitPoint > 0) {
+ const auto additionalKeySize =
+ currentKey.objsize() + estimatedAdditionalBytesPerItemInBSONArray;
+ if (checkMaxBSONSize(additionalKeySize)) {
+ reachedMaxBSONSize = true;
+ break;
+ }
+
+ splitKeys.push_back(currentKey.getOwned());
+ previousSplitPoint = splitKeys.back();
+ numScannedKeys = 0;
+
+ if (--nSplitPointsToReposition == 0) {
+ break;
+ }
+ } else if (compareWithPreviousSplitPoint == 0) {
+ // Don't add the same split point again in case of a frequent shard key.
+ tooFrequentKeys.insert(currentKey.getOwned());
+ }
+ }
+ }
+ }
+ }
+
+ elapsedMillisToFindSplitPoints = timer.millis();
+
+ if (reachedMaxBSONSize) {
+ log() << "Max BSON response size reached for split vector before the end of chunk for "
+ "namespace "
+ << nss.toString() << " - range "
+ << redact(prettyKey(shardKeyIdx->keyPattern(), minKey)) << " --> "
+ << redact(prettyKey(shardKeyIdx->keyPattern(), maxKey));
+ }
+ }
+
+ // Emit a warning for each frequent key
+ for (const auto& frequentKey : tooFrequentKeys) {
+ warning() << "possible low cardinality key detected in " << nss.toString()
+ << " - key: " << redact(prettyKey(keyPattern, frequentKey));
+ }
+
+ if (elapsedMillisToFindSplitPoints > serverGlobalParams.slowMS) {
+ warning() << "Finding the auto split vector for " << nss.toString() << " completed over "
+ << redact(keyPattern) << " - numSplits: " << splitKeys.size()
+ << " - duration: " << elapsedMillisToFindSplitPoints << "ms";
+ }
+
+ std::sort(
+ splitKeys.begin(), splitKeys.end(), SimpleBSONObjComparator::kInstance.makeLessThan());
+
+ return splitKeys;
+}
+
+} // namespace mongo
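
To make the thresholds in the code above concrete, the following shell-style arithmetic sketch mirrors the forward scan using the numbers of the NoRecalculateIfBigLastChunk unit test added later in this patch (18 documents of roughly 1MB, 10MB max chunk size); it is an illustration of the 80% rule, not code from the patch.

    // Assumed test values: 18 documents of ~1MB (_id 0..17), 10MB max chunk size.
    const dataSizeMB = 18, numDocs = 18, maxChunkSizeMB = 10;
    const avgDocSizeMB = dataSizeMB / numDocs;                          // 1
    const maxDocsPerChunk = Math.floor(maxChunkSizeMB / avgDocSizeMB);  // 10
    // The forward index scan picks a split point every maxDocsPerChunk keys: [{_id: 9}].
    const keysAfterLastSplitPoint = numDocs - maxDocsPerChunk;          // 8 (_id 10..17)
    // 8 >= 80% of maxDocsPerChunk, so the last chunk is considered big enough and the
    // split points are not repositioned.
    assert(keysAfterLastSplitPoint >= 0.8 * maxDocsPerChunk);
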
diff --git a/src/mongo/db/s/auto_split_vector.h b/src/mongo/db/s/auto_split_vector.h
new file mode 100644
index 00000000000..559c1f814d6
--- /dev/null
+++ b/src/mongo/db/s/auto_split_vector.h
@@ -0,0 +1,134 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
+
+namespace mongo {
+
+/**
+ * Given a chunk, determines whether it satisfies the requirements to be auto-split and - if so -
+ * returns the split points (shard keys representing the lower bounds of the new chunks to create).
+ *
+ * The logic implemented can be summarized as follows: given a `maxChunkSize` of `x` MB, the
+ * algorithm aims to choose the split points so that the resulting chunks' size would be around
+ * `maxChunkSize`. As it is too expensive to precisely determine the size of a chunk, a uniform
+ * distribution of document sizes is assumed, hence the aim is to balance the number of documents
+ * per chunk.
+ *
+ * ======= ALGORITHM DESCRIPTION =======
+ *
+ * The split points for a chunk `C` belonging to a collection `coll` are calculated as follows:
+ * - `averageDocumentSize` = `totalCollSizeOnShard / numberOfCollDocs`
+ * - `maxNumberOfDocsPerChunk` = `maxChunkSize / averageDocumentSize`
+ * - Scan forward the shard key index entries for `coll` that belong to chunk `C`:
+ * - (1) Choose a split point every `maxNumberOfDocsPerChunk` scanned keys.
+ * - (2) To avoid the creation of small chunks, consider the number of documents `S` that the
+ * right-most chunk would contain given the calculated split points:
+ * --- (2.1) IF `S >= 80% maxNumberOfDocsPerChunk`, return the list of calculated split points.
+ * --- (2.2) ELSE IF `S` documents could be fairly redistributed in the last chunks so that their
+ * size would be at least `67% maxNumberOfDocsPerChunk`: recalculate the last split points (max 3).
+ * --- (2.3) ELSE simply remove the last split point and keep a bigger last chunk.
+ *
+ *
+ * ============== EXAMPLES =============
+ *
+ * ========= EXAMPLE (CASE 2.1) ========
+ * `maxChunkSize` = 100MB
+ * `averageDocumentSize` = 1MB
+ * `maxNumberOfDocsPerChunk` = 100
+ *
+ * Shard key type: integer
+ * Chunk `C` bounds: `[0, maxKey)`. Chunk `C` contains 190 documents with shard keys [0...189].
+ *
+ * (1) Initially calculated split points: [99].
+ * (2) The last chunk would contain the interval `[99-189]` so `S = 90`
+ * (2.1) `S >= 80% maxChunkSize`, so keep the current split points.
+ *
+ * Returned split points: [99].
+ *
+ * ========= EXAMPLE (CASE 2.2) ========
+ * `maxChunkSize` = 100MB
+ * `averageDocumentSize` = 1MB
+ * `maxNumberOfDocsPerChunk` = 100
+ *
+ * Shard key type: integer
+ * Chunk `C` bounds: `[0, maxKey)`. Chunk `C` contains 140 documents with shard keys [0...139].
+ *
+ * (1) Initially calculated split points: [99].
+ * (2) The last chunk would contain the interval `[99-139]` so `S = 40`
+ * (2.2) `S` documents can be redistributed on the last split point by generating chunks of size >=
+ * 67% maxChunkSize. Recalculate.
+ *
+ * Returned split points: [69].
+ *
+ * ========= EXAMPLE (CASE 2.3) ========
+ * `maxChunkSize` = 100MB
+ * `averageDocumentSize` = 1MB
+ * `maxNumberOfDocsPerChunk` = 100
+ *
+ * Shard key type: integer
+ * Chunk `C` bounds: `[0, maxKey)`. Chunk `C` contains 120 documents with shard keys [0...119].
+ *
+ * (1) Initially calculated split points: [99].
+ * (2) The last chunk would contain the interval `[99-119]` so `S = 20`
+ * (2.3) `S` documents can't be redistributed on the last split point by generating chunks of size
+ * >= 67% maxChunkSize. So remove the last split point.
+ *
+ * Returned split points: [].
+ */
+std::vector<BSONObj> autoSplitVector(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& keyPattern,
+ const BSONObj& min,
+ const BSONObj& max,
+ long long maxChunkSizeBytes);
+
+/*
+ * Utility function for deserializing autoSplitVector/splitVector responses.
+ */
+static std::vector<BSONObj> parseSplitKeys(const BSONElement& splitKeysArray) {
+ uassert(ErrorCodes::TypeMismatch,
+ "The split keys vector must be represented as a BSON array",
+ !splitKeysArray.eoo() && splitKeysArray.type() == BSONType::Array);
+
+ std::vector<BSONObj> splitKeys;
+ for (const auto& elem : splitKeysArray.Obj()) {
+ uassert(ErrorCodes::TypeMismatch,
+ "Each element of the split keys array must be an object",
+ elem.type() == BSONType::Object);
+ splitKeys.push_back(elem.embeddedObject().getOwned());
+ }
+
+ return splitKeys;
+}
+
+} // namespace mongo
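
As a sanity check of EXAMPLE (CASE 2.2) above, this shell-style sketch re-derives the repositioned split point with the same maxDocsPerNewChunk formula used in auto_split_vector.cpp; the values are taken from the example and the sketch is purely illustrative.

    // Case 2.2: 140 documents (_id 0..139), maxDocsPerChunk = 100, initial split point
    // at _id 99, so S = 40 documents would be left in the right-most chunk.
    const maxDocsPerChunk = 100, numScannedKeys = 40, nSplitPointsToReposition = 1;
    // The last chunk would hold less than 80% of maxDocsPerChunk, so repositioning is evaluated.
    assert(numScannedKeys < 0.8 * maxDocsPerChunk);
    const maxDocsPerNewChunk = maxDocsPerChunk -
        Math.floor((maxDocsPerChunk - numScannedKeys) / (nSplitPointsToReposition + 1));
    // numScannedKeys (40) is not below maxDocsPerChunk - maxDocsPerNewChunk (30), so the last
    // split point is recalculated: rescanning from _id 0, the 70th key is _id 69.
    print(maxDocsPerNewChunk);  // 70 -> returned split points: [{_id: 69}]
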
diff --git a/src/mongo/db/s/auto_split_vector_command.cpp b/src/mongo/db/s/auto_split_vector_command.cpp
new file mode 100644
index 00000000000..b789fb86855
--- /dev/null
+++ b/src/mongo/db/s/auto_split_vector_command.cpp
@@ -0,0 +1,98 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/s/auto_split_vector.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/s/request_types/auto_split_vector_gen.h"
+
+namespace mongo {
+namespace {
+
+class AutoSplitVectorCommand final : public TypedCommand<AutoSplitVectorCommand> {
+public:
+ AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+ return Command::AllowedOnSecondary::kNever;
+ }
+
+ std::string help() const override {
+ return "Internal command returning the split points for a chunk, given the maximum chunk "
+ "size.";
+ }
+
+ using Request = AutoSplitVectorRequest;
+ using Response = AutoSplitVectorResponse;
+
+ class Invocation final : public InvocationBase {
+ public:
+ using InvocationBase::InvocationBase;
+
+ Response typedRun(OperationContext* opCtx) {
+ uassert(ErrorCodes::IllegalOperation,
+ "The autoSplitVector command can only be invoked on shards (no CSRS).",
+ serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+
+ uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());
+
+ const auto& req = request();
+
+ auto splitKeys = autoSplitVector(opCtx,
+ ns(),
+ req.getKeyPattern(),
+ req.getMin(),
+ req.getMax(),
+ req.getMaxChunkSizeBytes());
+
+ AutoSplitVectorResponse res;
+ res.setSplitKeys(splitKeys);
+ return res;
+ }
+
+ private:
+ NamespaceString ns() const override {
+ return request().getNamespace();
+ }
+
+ bool supportsWriteConcern() const override {
+ return false;
+ }
+
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::splitVector));
+ }
+ };
+} autoSplitVectorCommand;
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/s/auto_split_vector_test.cpp b/src/mongo/db/s/auto_split_vector_test.cpp
new file mode 100644
index 00000000000..88825cc25f3
--- /dev/null
+++ b/src/mongo/db/s/auto_split_vector_test.cpp
@@ -0,0 +1,262 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
+
+#include "mongo/db/s/auto_split_vector.h"
+#include "mongo/db/catalog/create_collection.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/s/collection_sharding_runtime.h"
+#include "mongo/db/s/operation_sharding_state.h"
+#include "mongo/db/s/split_vector.h"
+#include "mongo/platform/random.h"
+#include "mongo/s/shard_server_test_fixture.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+namespace {
+
+const NamespaceString kNss = NamespaceString("autosplitDB", "coll");
+const std::string kPattern = "_id";
+
+/*
+ * Calls the autoSplitVector function on the test collection over a chunk with bounds [0, 1000)
+ * and with the specified `maxChunkSizeMB`.
+ */
+std::vector<BSONObj> autoSplit(OperationContext* opCtx, int maxChunkSizeMB) {
+ return autoSplitVector(opCtx,
+ kNss,
+ BSON(kPattern << 1) /* shard key pattern */,
+ BSON(kPattern << 0) /* min */,
+ BSON(kPattern << 1000) /* max */,
+ maxChunkSizeMB * 1024 * 1024 /* max chunk size in bytes*/);
+}
+
+class AutoSplitVectorTest : public ShardServerTestFixture {
+public:
+ /*
+ * Before each test case:
+ * - Creates a sharded collection with shard key `_id`
+ */
+ void setUp() {
+ ShardServerTestFixture::setUp();
+
+ auto opCtx = operationContext();
+
+ {
+ uassertStatusOK(createCollection(
+ operationContext(), kNss.db().toString(), BSON("create" << kNss.coll())));
+ }
+
+ DBDirectClient client(opCtx);
+ client.createIndex(kNss.ns(), BSON(kPattern << 1));
+ }
+
+ /*
+ * Insert the specified number of documents in the test collection, with incremental shard key
+ * `_id` starting from `_nextShardKey`.
+ */
+ void insertNDocsOf1MB(OperationContext* opCtx, int nDocs) {
+ DBDirectClient client(opCtx);
+
+ std::string s(1024 * 1024 - 24, 'a'); // To get a 1MB document
+ for (int i = 0; i < nDocs; i++) {
+ BSONObjBuilder builder;
+ builder.append(kPattern, _nextShardKey++);
+ builder.append("str", s);
+ BSONObj obj = builder.obj();
+ ASSERT(obj.objsize() == 1024 * 1024); // 1 MB document
+ client.insert(kNss.toString(), obj);
+ }
+ }
+
+ /*
+ * Get the number of documents inserted until now.
+ */
+ int getInsertedSize() {
+ return _nextShardKey;
+ }
+
+private:
+ int _nextShardKey = 0;
+};
+
+class AutoSplitVectorTest10MB : public AutoSplitVectorTest {
+ /*
+ * Before each test case:
+ * - Creates a sharded collection with shard key `_id`
+ * - Inserts `10` documents of ~1MB size (shard keys [0...9])
+ */
+ void setUp() {
+ AutoSplitVectorTest::setUp();
+
+ auto opCtx = operationContext();
+
+ DBDirectClient client(opCtx);
+ client.createIndex(kNss.ns(), BSON(kPattern << 1));
+
+ insertNDocsOf1MB(opCtx, 10 /* nDocs */);
+ ASSERT_EQUALS(10UL, client.count(kNss.ns()));
+ }
+};
+
+// Throws an exception when calling autoSplitVector on a dropped/non-existent collection
+TEST_F(AutoSplitVectorTest10MB, NoCollection) {
+ ASSERT_THROWS_CODE(autoSplitVector(operationContext(),
+ NamespaceString("dummy", "collection"),
+ BSON(kPattern << 1) /* shard key pattern */,
+ BSON(kPattern << 0) /* min */,
+ BSON(kPattern << 100) /* max */,
+ 1 * 1024 * 1024 /* max chunk size in bytes*/),
+ DBException,
+ ErrorCodes::NamespaceNotFound);
+}
+
+// No split points if estimated `data size < max chunk size`
+TEST_F(AutoSplitVectorTest10MB, NoSplitIfDataLessThanMaxChunkSize) {
+ std::vector<BSONObj> splitKeys = autoSplit(operationContext(), 11 /* maxChunkSizeMB */);
+ ASSERT_EQ(splitKeys.size(), 0UL);
+}
+
+// Do not split in case of `chunk size == maxChunkSize`
+TEST_F(AutoSplitVectorTest10MB, NoSplitIfDataEqualMaxChunkSize) {
+ std::vector<BSONObj> splitKeys = autoSplit(operationContext(), 10 /* maxChunkSizeMB */);
+ ASSERT_EQ(splitKeys.size(), 0UL);
+}
+
+// No split points if `chunk size > max chunk size` but threshold not reached
+TEST_F(AutoSplitVectorTest10MB, NoSplitIfDataLessThanThreshold) {
+ const auto surplus = 2;
+ {
+ // Increase the collection size so that the auto splitter can actually be triggered. Use a
+ // different range so as not to interfere with the chunk being split.
+ insertNDocsOf1MB(operationContext(), surplus /* nDocs */);
+ }
+ std::vector<BSONObj> splitKeys = autoSplit(operationContext(), 10 /* maxChunkSizeMB */);
+ ASSERT_EQ(splitKeys.size(), 0UL);
+}
+
+// One split point if `chunk size > max chunk size` and threshold reached
+TEST_F(AutoSplitVectorTest10MB, SplitIfDataSlightlyMoreThanThreshold) {
+ const auto surplus = 4;
+ insertNDocsOf1MB(operationContext(), surplus /* nDocs */);
+ std::vector<BSONObj> splitKeys = autoSplit(operationContext(), 10 /* maxChunkSizeMB */);
+ ASSERT_EQ(splitKeys.size(), 1UL);
+ ASSERT_EQ(6, splitKeys.front().getIntField(kPattern));
+}
+
+// Split points if `data size > max chunk size * 2` and threshold reached
+TEST_F(AutoSplitVectorTest10MB, SplitIfDataMoreThanThreshold) {
+ const auto surplus = 14;
+ insertNDocsOf1MB(operationContext(), surplus /* nDocs */);
+ std::vector<BSONObj> splitKeys = autoSplit(operationContext(), 10 /* maxChunkSizeMB */);
+ ASSERT_EQ(splitKeys.size(), 2UL);
+ ASSERT_EQ(7, splitKeys.front().getIntField(kPattern));
+ ASSERT_EQ(15, splitKeys.back().getIntField(kPattern));
+}
+
+// Split points are not recalculated if the right-most chunk is at least `80% maxChunkSize`
+TEST_F(AutoSplitVectorTest10MB, NoRecalculateIfBigLastChunk) {
+ const auto surplus = 8;
+ insertNDocsOf1MB(operationContext(), surplus /* nDocs */);
+ std::vector<BSONObj> splitKeys = autoSplit(operationContext(), 10 /* maxChunkSizeMB */);
+ ASSERT_EQ(splitKeys.size(), 1UL);
+ ASSERT_EQ(9, splitKeys.front().getIntField(kPattern));
+}
+
+class RepositionLastSplitPointsTest : public AutoSplitVectorTest {
+public:
+ /*
+ * Tests that the last split points are properly repositioned when the surplus allows it, and
+ * not repositioned otherwise.
+ */
+ void checkRepositioning(int maxDocsPerChunk, int surplus, int nSplitPoints) {
+ ASSERT(surplus >= 0 && surplus < maxDocsPerChunk);
+
+ const auto maxDocsPerNewChunk =
+ maxDocsPerChunk - ((maxDocsPerChunk - surplus) / (nSplitPoints + 1));
+ bool mustReposition =
+ surplus >= maxDocsPerChunk - maxDocsPerNewChunk && surplus < maxDocsPerChunk * 0.8;
+
+ int toInsert = (maxDocsPerChunk * nSplitPoints) - getInsertedSize() + surplus;
+ insertNDocsOf1MB(operationContext(), toInsert);
+
+ int expectedChunkSize =
+ mustReposition ? getInsertedSize() / (nSplitPoints + 1) : maxDocsPerChunk;
+ std::vector<BSONObj> splitKeys =
+ autoSplit(operationContext(), maxDocsPerChunk /* maxChunkSizeMB */);
+
+ int approximateNextMin = expectedChunkSize;
+ for (const auto& splitKey : splitKeys) {
+ int _id = splitKey.getIntField(kPattern);
+ // Expect an approximate match due to integer rounding in the split points algorithm.
+ ASSERT(_id >= approximateNextMin - 2 && _id <= approximateNextMin + 2)
+ << BSON("approximateNextMin" << approximateNextMin << "splitKeys" << splitKeys
+ << "maxDocsPerChunk"
+ << maxDocsPerChunk
+ << "surplus"
+ << surplus
+ << "nSplitPoints"
+ << nSplitPoints
+ << "maxDocsPerNewChunk"
+ << maxDocsPerNewChunk
+ << "mustReposition"
+ << mustReposition
+ << "toInsert"
+ << toInsert
+ << "expectedChunkSize"
+ << expectedChunkSize);
+ approximateNextMin = _id + expectedChunkSize;
+ }
+ }
+};
+
+
+// Test that the last split points are recalculated fairly (if the surplus allows it)
+TEST_F(RepositionLastSplitPointsTest, RandomRepositioningTest) {
+ PseudoRandom random(SecureRandom::create()->nextInt64());
+ // Avoid small sizes already checked in other test cases.
+ // Random maxDocsPerChunk in interval: [10, 110).
+ int maxDocsPerChunk = random.nextInt32(100) + 10;
+ // Random surplus in interval: [0, maxDocsPerChunk).
+ int surplus = random.nextInt32(maxDocsPerChunk);
+
+ log() << "RandomRepositioningTest parameters - maxDocsPerChunk: " << maxDocsPerChunk
+ << " - surplus: " << surplus;
+
+ for (int nSplitPointsToReposition = 1; nSplitPointsToReposition < 4;
+ nSplitPointsToReposition++) {
+ checkRepositioning(maxDocsPerChunk, surplus, nSplitPointsToReposition);
+ }
+}
+
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 4f7caa95c1f..ae9b5e12f77 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -639,8 +639,7 @@ void Balancer::_splitOrMarkJumbo(OperationContext* opCtx,
nss,
cm->getShardKeyPattern(),
ChunkRange(chunk.getMin(), chunk.getMax()),
- Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
- boost::none));
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes()));
uassert(ErrorCodes::CannotSplit, "No split points found", !splitPoints.empty());
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index bd4c8878544..73eb4266aa5 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -39,9 +39,9 @@
#include "mongo/db/client.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/s/auto_split_vector.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/split_chunk.h"
-#include "mongo/db/s/split_vector.h"
#include "mongo/db/service_context.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -320,18 +320,15 @@ void ChunkSplitter::_runAutosplit(const NamespaceString& nss,
<< " dataWritten since last check: " << dataWritten
<< " maxChunkSizeBytes: " << maxChunkSizeBytes;
- auto splitPoints = uassertStatusOK(splitVector(opCtx.get(),
- nss,
- cm->getShardKeyPattern().toBSON(),
- chunk.getMin(),
- chunk.getMax(),
- false,
- boost::none,
- boost::none,
- boost::none,
- maxChunkSizeBytes));
-
- if (splitPoints.size() <= 1) {
+ const auto& shardKeyPattern = cm->getShardKeyPattern();
+ auto splitPoints = autoSplitVector(opCtx.get(),
+ nss,
+ shardKeyPattern.toBSON(),
+ chunk.getMin(),
+ chunk.getMax(),
+ maxChunkSizeBytes);
+
+ if (splitPoints.empty()) {
// No split points means there isn't enough data to split on; 1 split point means we
// have between half the chunk size to full chunk size so there is no need to split yet
return;
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 9d1c8a7eee4..86d73344e4a 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -394,8 +394,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::createFirstChunksU
nss,
shardKeyPattern,
ChunkRange(keyPattern.globalMin(), keyPattern.globalMax()),
- balancerConfig->getMaxChunkSizeBytes(),
- 0));
+ balancerConfig->getMaxChunkSizeBytes()));
return generateShardCollectionInitialChunks(nss,
shardKeyPattern,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
index 3b9645eb03a..62f0a9633a5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
@@ -68,21 +68,21 @@ class ShardCollectionTestBase : public ConfigServerTestFixture {
protected:
void expectSplitVector(const HostAndPort& shardHost,
const ShardKeyPattern& keyPattern,
- const BSONObj& splitPoints) {
+ const BSONArray& splitPoints) {
onCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(shardHost, request.target);
string cmdName = request.cmdObj.firstElement().fieldName();
- ASSERT_EQUALS("splitVector", cmdName);
- ASSERT_EQUALS(kNamespace.ns(),
- request.cmdObj["splitVector"].String()); // splitVector uses full ns
+ ASSERT_EQUALS("autoSplitVector", cmdName);
+ // autoSplitVector concatenates the collection name to the command's db
+ const auto receivedNs =
+ request.dbname + '.' + request.cmdObj["autoSplitVector"].String();
+ ASSERT_EQUALS(kNamespace.ns(), receivedNs);
ASSERT_BSONOBJ_EQ(keyPattern.toBSON(), request.cmdObj["keyPattern"].Obj());
ASSERT_BSONOBJ_EQ(keyPattern.getKeyPattern().globalMin(), request.cmdObj["min"].Obj());
ASSERT_BSONOBJ_EQ(keyPattern.getKeyPattern().globalMax(), request.cmdObj["max"].Obj());
ASSERT_EQUALS(64 * 1024 * 1024ULL,
static_cast<uint64_t>(request.cmdObj["maxChunkSizeBytes"].numberLong()));
- ASSERT_EQUALS(0, request.cmdObj["maxSplitPoints"].numberLong());
- ASSERT_EQUALS(0, request.cmdObj["maxChunkObjects"].numberLong());
ASSERT_BSONOBJ_EQ(
ReadPreferenceSetting(ReadPreference::PrimaryPreferred).toContainingBSON(),
@@ -338,7 +338,7 @@ TEST_F(ConfigServerShardCollectionTest, RangeSharding_NoInitialSplitPoints_NoSpl
});
// Respond to the splitVector command sent to the shard to figure out initial split points.
- expectSplitVector(shardHost, keyPattern, BSONObj());
+ expectSplitVector(shardHost, keyPattern, BSONArray());
// Expect the set shard version for that namespace.
// We do not check for a specific ChunkVersion, because we cannot easily know the OID that was
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index e07695ac82f..c8300788eaf 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -737,7 +737,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
if (totalRecs > 0) {
avgRecSize = collection->dataSize(opCtx) / totalRecs;
maxRecsWhenFull = _args.getMaxChunkSizeBytes() / avgRecSize;
- maxRecsWhenFull = 130 * maxRecsWhenFull / 100; // pad some slack
+ maxRecsWhenFull = 2 * maxRecsWhenFull; // pad some slack
} else {
avgRecSize = 0;
maxRecsWhenFull = kMaxObjectPerChunk + 1;
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 5c409c2b9e5..e0cdbe622f4 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -145,6 +145,7 @@ env.Library(
'stale_exception.cpp',
env.Idlc('catalog/type_chunk_base.idl')[0],
env.Idlc('database_version.idl')[0],
+ env.Idlc('request_types/auto_split_vector.idl')[0],
env.Idlc('request_types/clone_catalog_data.idl')[0],
env.Idlc('request_types/clear_jumbo_flag.idl')[0],
env.Idlc('request_types/create_collection.idl')[0],
diff --git a/src/mongo/s/request_types/auto_split_vector.idl b/src/mongo/s/request_types/auto_split_vector.idl
new file mode 100644
index 00000000000..42e55424c4e
--- /dev/null
+++ b/src/mongo/s/request_types/auto_split_vector.idl
@@ -0,0 +1,71 @@
+# Copyright(C) 2021 - present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+# This IDL file describes the BSON format for the autoSplitVector command.
+
+global:
+ cpp_namespace: "mongo"
+ cpp_includes:
+ - "mongo/db/s/auto_split_vector.h"
+
+imports:
+ - "mongo/idl/basic_types.idl"
+
+types:
+ bson_vector:
+ bson_serialization_type: any
+ description: "An array of objects representing the split keys."
+ cpp_type: "std::vector<mongo::BSONObj>"
+ deserializer: ::mongo::parseSplitKeys
+
+structs:
+ AutoSplitVectorResponse:
+ description: "The reply of an autoSplitVector command."
+ strict: false
+ fields:
+ splitKeys: bson_vector
+
+commands:
+ autoSplitVector:
+ cpp_name: AutoSplitVectorRequest
+ description: "Internal autoSplitVector command"
+ strict: false
+ namespace: concatenate_with_db
+ fields:
+ keyPattern:
+ type: object
+ description: "Shard key pattern of the collection"
+ min:
+ type: object
+ description: "Min key of the chunk"
+ max:
+ type: object
+ description: "Max key of the chunk"
+ maxChunkSizeBytes:
+ type: safeInt64
+ description: "Max chunk size of the collection expressed in bytes"
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index a861e69e9e7..116d636ef92 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -39,8 +39,10 @@
#include "mongo/client/read_preference.h"
#include "mongo/client/remote_command_targeter.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/s/auto_split_vector.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
+#include "mongo/s/request_types/auto_split_vector_gen.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -94,45 +96,65 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* opCtx,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
const ChunkRange& chunkRange,
- long long chunkSizeBytes,
- boost::optional<int> maxObjs) {
- BSONObjBuilder cmd;
- cmd.append("splitVector", nss.ns());
- cmd.append("keyPattern", shardKeyPattern.toBSON());
- chunkRange.append(&cmd);
- cmd.append("maxChunkSizeBytes", chunkSizeBytes);
- if (maxObjs) {
- cmd.append("maxChunkObjects", *maxObjs);
- }
-
+ long long chunkSizeBytes) {
auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
- auto cmdStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- cmd.obj(),
- Shard::RetryPolicy::kIdempotent);
- if (!cmdStatus.isOK()) {
- return std::move(cmdStatus.getStatus());
- }
- if (!cmdStatus.getValue().commandStatus.isOK()) {
- return std::move(cmdStatus.getValue().commandStatus);
+ auto invokeSplitCommand = [&](const BSONObj& command, const StringData db) {
+ return shardStatus.getValue()->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ db.toString(),
+ command,
+ Shard::RetryPolicy::kIdempotent);
+ };
+
+ AutoSplitVectorRequest req(nss);
+ req.setKeyPattern(shardKeyPattern.toBSON());
+ req.setMin(chunkRange.getMin());
+ req.setMax(chunkRange.getMax());
+ req.setMaxChunkSizeBytes(chunkSizeBytes);
+
+
+ auto cmdStatus = invokeSplitCommand(req.toBSON({}), nss.db());
+
+ // Fall back to the splitVector command in case of mixed binaries not supporting autoSplitVector
+ bool fallback = [&]() {
+ auto status = Shard::CommandResponse::getEffectiveStatus(cmdStatus);
+ return !status.isOK() && status.code() == ErrorCodes::CommandNotFound;
+ }();
+
+ if (fallback) {
+ BSONObjBuilder cmd;
+ cmd.append("splitVector", nss.ns());
+ cmd.append("keyPattern", shardKeyPattern.toBSON());
+ chunkRange.append(&cmd);
+ cmd.append("maxChunkSizeBytes", chunkSizeBytes);
+ cmdStatus = invokeSplitCommand(cmd.obj(), NamespaceString::kAdminDb);
}
- const auto response = std::move(cmdStatus.getValue().response);
- std::vector<BSONObj> splitPoints;
+ auto status = Shard::CommandResponse::getEffectiveStatus(cmdStatus);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ if (fallback) {
+ const auto response = std::move(cmdStatus.getValue().response);
+ std::vector<BSONObj> splitPoints;
- BSONObjIterator it(response.getObjectField("splitKeys"));
- while (it.more()) {
- splitPoints.push_back(it.next().Obj().getOwned());
+ BSONObjIterator it(response.getObjectField("splitKeys"));
+ while (it.more()) {
+ splitPoints.push_back(it.next().Obj().getOwned());
+ }
+ return std::move(splitPoints);
}
- return std::move(splitPoints);
+ const auto response = AutoSplitVectorResponse::parse(
+ IDLParserErrorContext("AutoSplitVectorResponse"), std::move(cmdStatus.getValue().response));
+ return response.getSplitKeys();
}
StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
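
The selectChunkSplitPoints() change above first issues autoSplitVector against the shard's database and only falls back to the legacy splitVector admin command when the shard replies with CommandNotFound (mixed-binary clusters during an upgrade). A hedged shell-style sketch of that decision follows; the function and variable names are illustrative and not part of the patch.

    // Illustrative only: mirrors the mixed-binary fallback in selectChunkSplitPoints().
    function selectSplitPoints(shardConn, dbName, collName, keyPattern, min, max, maxChunkSizeBytes) {
        let res = shardConn.getDB(dbName).runCommand({
            autoSplitVector: collName,
            keyPattern: keyPattern,
            min: min,
            max: max,
            maxChunkSizeBytes: maxChunkSizeBytes
        });
        if (!res.ok && res.code === ErrorCodes.CommandNotFound) {
            // Older shard binary (e.g. v3.6): retry with the legacy splitVector admin command.
            res = shardConn.getDB("admin").runCommand({
                splitVector: dbName + "." + collName,
                keyPattern: keyPattern,
                min: min,
                max: max,
                maxChunkSizeBytes: maxChunkSizeBytes
            });
        }
        assert.commandWorked(res);
        return res.splitKeys;
    }
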
diff --git a/src/mongo/s/shard_util.h b/src/mongo/s/shard_util.h
index a44b3fae919..dba785df22c 100644
--- a/src/mongo/s/shard_util.h
+++ b/src/mongo/s/shard_util.h
@@ -78,8 +78,7 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* opCtx,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
const ChunkRange& chunkRange,
- long long chunkSizeBytes,
- boost::optional<int> maxObjs);
+ long long chunkSizeBytes);
/**
* Asks the specified shard to split the chunk described by min/maxKey into the respective split
diff --git a/src/mongo/s/write_ops/cluster_write.cpp b/src/mongo/s/write_ops/cluster_write.cpp
index 1e9af832516..493c3ef7f28 100644
--- a/src/mongo/s/write_ops/cluster_write.cpp
+++ b/src/mongo/s/write_ops/cluster_write.cpp
@@ -302,8 +302,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
nss,
manager->getShardKeyPattern(),
chunkRange,
- chunkSizeToUse,
- boost::none));
+ chunkSizeToUse));
if (splitPoints.size() <= 1) {
// No split points means there isn't enough data to split on; 1 split point means we