author    Pierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>  2021-10-05 08:32:40 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-10-05 09:23:27 +0000
commit    033e43fe45b29c5d6658b0dc1e1d53b8cc50aecc (patch)
tree      d572f51d460c136ce163ba61b45f7b524dda34e0
parent    4bb27a5a497b002fa0a70234ffd89c5a24a22612 (diff)
download  mongo-033e43fe45b29c5d6658b0dc1e1d53b8cc50aecc.tar.gz
SERVER-60267 Change jumbo chunk size threshold
-rw-r--r--  jstests/sharding/jumbo1.js                               9
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp  2
2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index 94803227a28..fa5d13b9f2a 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -5,8 +5,7 @@ load("jstests/sharding/libs/find_chunks_util.js");
var s = new ShardingTest({shards: 2, other: {chunkSize: 1}});
-assert.commandWorked(s.s.adminCommand({enablesharding: "test"}));
-s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s.adminCommand({enablesharding: "test", primaryShard: s.shard1.shardName}));
assert.commandWorked(
s.s.adminCommand({addShardToZone: s.shard0.shardName, zone: 'finalDestination'}));
@@ -18,12 +17,12 @@ assert.commandWorked(s.s.adminCommand(
var db = s.getDB("test");
-const big = 'X'.repeat(10000);
+const big = 'X'.repeat(1024 * 1024); // 1MB
-// Create sufficient documents to create a jumbo chunk, and use the same shard key in all of
+// Insert 3MB of documents to create a jumbo chunk, and use the same shard key in all of
// them so that the chunk cannot be split.
var bulk = db.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 200; i++) {
+for (var i = 0; i < 3; i++) {
bulk.insert({x: 0, big: big});
}
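Taken together, the test changes amount to the following condensed sketch (assembled from the hunks above; the bulk.execute() call is the standard follow-up to the bulk op shown, not part of this diff):

// With the 1MB chunkSize configured in ShardingTest, three 1MB documents
// that all carry the shard key value {x: 0} push the chunk over the size
// limit while leaving no split point, so the chunk becomes jumbo.
const big = 'X'.repeat(1024 * 1024);  // 1MB payload per document
var bulk = db.foo.initializeUnorderedBulkOp();
for (var i = 0; i < 3; i++) {
    bulk.insert({x: 0, big: big});  // identical shard key => chunk cannot be split
}
assert.commandWorked(bulk.execute());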
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index a2bf431c63b..4819be823a9 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -892,7 +892,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
avgRecSize = BSONObj::kMinBSONLength;
}
maxRecsWhenFull = _args.getMaxChunkSizeBytes() / avgRecSize;
- maxRecsWhenFull = 130 * maxRecsWhenFull / 100; // pad some slack
+ maxRecsWhenFull = 2 * maxRecsWhenFull; // pad some slack
} else {
avgRecSize = 0;
maxRecsWhenFull = kMaxObjectPerChunk + 1;
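The effect of the cloner change can be illustrated with assumed numbers (the chunk size and avgRecSize below are hypothetical; in the cloner they come from _args.getMaxChunkSizeBytes() and collection stats), shown here in shell JavaScript for consistency with the test above:

// Sketch of the slack computation before and after this commit.
const maxChunkSizeBytes = 64 * 1024 * 1024;  // assume the default 64MB max chunk size
const avgRecSize = 1024;                     // assumed average record size in bytes
const maxRecsWhenFull = Math.floor(maxChunkSizeBytes / avgRecSize);   // 65536 records
const oldLimit = Math.floor(130 * maxRecsWhenFull / 100);  // old: 30% slack -> 85196
const newLimit = 2 * maxRecsWhenFull;                      // new: 100% slack -> 131072

With the new threshold, a chunk is considered too large to migrate only once it holds more than twice the expected record count rather than 1.3 times it, which is the jumbo-threshold change the commit title describes.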