author    Kevin Pulo <kevin.pulo@mongodb.com>    2017-07-16 11:07:49 +1000
committer Kevin Pulo <kevin.pulo@mongodb.com>    2017-08-25 11:09:24 +1000
commit    e1f5f40fc17f99fc06dda4621564db7e31be1132 (patch)
tree      f9d065919b002e2d7d944964e372de7eb9dd82c3 /src/mongo
parent    d78c225444bf4ab93e8cbe824f622f7d8940bd8d (diff)
download  mongo-e1f5f40fc17f99fc06dda4621564db7e31be1132.tar.gz
SERVER-20392 remove early chunksize autosplit heuristic
Plus some additional 3.4-specific jstest fixes. (cherry picked from commit ad6a668da49c61a4276749aef7529088dc3524ea)
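For context, this change drops the ramp-up heuristic that shrank the auto-split target for collections with only a few chunks, and uses the configured maximum chunk size directly instead. A minimal standalone sketch of the before/after behaviour follows; the names desiredChunkSizeBefore and desiredChunkSizeAfter are illustrative and do not appear in the tree:

#include <algorithm>
#include <cstdint>

// Heuristic removed by this commit: collections with few chunks were split at
// a target far below the configured max chunk size, so an initial load would
// split (and spread across shards) quickly.
uint64_t desiredChunkSizeBefore(uint64_t maxChunkSizeBytes, uint64_t numChunks) {
    const uint64_t minChunkSize = 1 << 20;  // 1 MB
    if (numChunks <= 1)
        return 1024;
    if (numChunks < 3)
        return minChunkSize / 2;
    if (numChunks < 10)
        return std::max(maxChunkSizeBytes / 4, minChunkSize);
    if (numChunks < 20)
        return std::max(maxChunkSizeBytes / 2, minChunkSize);
    return maxChunkSizeBytes;
}

// After this commit the target is simply the balancer's configured max chunk
// size, regardless of how many chunks the collection already has.
uint64_t desiredChunkSizeAfter(uint64_t maxChunkSizeBytes) {
    return maxChunkSizeBytes;
}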
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/s/commands/cluster_write.cpp | 25
1 file changed, 1 insertion(+), 24 deletions(-)
diff --git a/src/mongo/s/commands/cluster_write.cpp b/src/mongo/s/commands/cluster_write.cpp
index b17035993d9..565a1ddc4d7 100644
--- a/src/mongo/s/commands/cluster_write.cpp
+++ b/src/mongo/s/commands/cluster_write.cpp
@@ -66,28 +66,6 @@ void toBatchError(const Status& status, BatchedCommandResponse* response) {
}
/**
- * Given a maxChunkSize configuration and the number of chunks in a particular sharded collection,
- * returns an optimal chunk size to use in order to achieve a good ratio between number of chunks
- * and their size.
- */
-uint64_t calculateDesiredChunkSize(uint64_t maxChunkSizeBytes, uint64_t numChunks) {
- // Splitting faster in early chunks helps spread out an initial load better
- const uint64_t minChunkSize = 1 << 20; // 1 MBytes
-
- if (numChunks <= 1) {
- return 1024;
- } else if (numChunks < 3) {
- return minChunkSize / 2;
- } else if (numChunks < 10) {
- return std::max(maxChunkSizeBytes / 4, minChunkSize);
- } else if (numChunks < 20) {
- return std::max(maxChunkSizeBytes / 2, minChunkSize);
- } else {
- return maxChunkSizeBytes;
- }
-}
-
-/**
* Returns the split point that will result in one of the chunk having exactly one document. Also
* returns an empty document if the split point cannot be determined.
*
@@ -341,8 +319,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
const uint64_t chunkBytesWritten = chunk->addBytesWritten(dataWritten);
- const uint64_t desiredChunkSize =
- calculateDesiredChunkSize(balancerConfig->getMaxChunkSizeBytes(), manager->numChunks());
+ const uint64_t desiredChunkSize = balancerConfig->getMaxChunkSizeBytes();
// If this chunk is at either end of the range, trigger auto-split at 10% less data written in
// order to trigger the top-chunk optimization.
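The hunk excerpt ends just before the threshold check itself. A hedged sketch of how the 10% adjustment described in the comment above could be applied to the (now unscaled) desired chunk size; the function name, parameters, and exact factor handling are assumptions, not code from this file:

#include <cstdint>

// Illustrative only: a chunk holding the MinKey or MaxKey end of the shard key
// range is split at ~90% of the target size, so the top-chunk optimization
// (relocating the new end chunk to another shard) kicks in sooner.
uint64_t splitThresholdBytes(uint64_t desiredChunkSize, bool minIsInf, bool maxIsInf) {
    if (minIsInf || maxIsInf) {
        return static_cast<uint64_t>(desiredChunkSize * 0.9);
    }
    return desiredChunkSize;
}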