author     Allison Easton <allison.easton@mongodb.com>        2022-03-04 14:47:44 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-03-04 15:17:12 +0000
commit     6e9b39f474fe1cb58aae2d9791f1be726f73dbdb (patch)
tree       26cd215f2e9be1f6c0737ee851eed347c7a5196b
parent     676a969740251e412feb14d203e55948f703e9cb (diff)
download   mongo-6e9b39f474fe1cb58aae2d9791f1be726f73dbdb.tar.gz
SERVER-63758 Fix post-defragmentation checks
-rw-r--r--  jstests/sharding/balancer_defragmentation_merge_chunks.js |  3
-rw-r--r--  jstests/sharding/defragment_large_collection.js            | 10
-rw-r--r--  jstests/sharding/libs/defragmentation_util.js              | 45
3 files changed, 42 insertions, 16 deletions
diff --git a/jstests/sharding/balancer_defragmentation_merge_chunks.js b/jstests/sharding/balancer_defragmentation_merge_chunks.js
index 6268d786b15..99d654cc2bf 100644
--- a/jstests/sharding/balancer_defragmentation_merge_chunks.js
+++ b/jstests/sharding/balancer_defragmentation_merge_chunks.js
@@ -61,7 +61,8 @@ function setupCollection() {
                                                  targetChunkSizeMB / 2 /* maxChunkFillMB */,
                                                  0 /* numZones */,
                                                  32 * 1024 /* docSizeBytes */,
-                                                 1000 /* chunkSpacing */);
+                                                 1000 /* chunkSpacing */,
+                                                 false /* disableCollectionBalancing */);
     jsTest.log("Collection " + coll.getFullName() + ", number of chunks before defragmentation: " +
                findChunksUtil.countChunksForNs(st.s.getDB('config'), coll.getFullName()));
     return coll;
diff --git a/jstests/sharding/defragment_large_collection.js b/jstests/sharding/defragment_large_collection.js
index 1422c314fc3..12ce30040c4 100644
--- a/jstests/sharding/defragment_large_collection.js
+++ b/jstests/sharding/defragment_large_collection.js
@@ -47,8 +47,14 @@
 for (let i = 0; i < numCollections; ++i) {
     const coll = db[coll_prefix + i];
 
-    defragmentationUtil.createFragmentedCollection(
-        st.s, coll.getFullName(), numChunks, maxChunkFillMB, numZones, docSizeBytes, chunkSpacing);
+    defragmentationUtil.createFragmentedCollection(st.s,
+                                                   coll.getFullName(),
+                                                   numChunks,
+                                                   maxChunkFillMB,
+                                                   numZones,
+                                                   docSizeBytes,
+                                                   chunkSpacing,
+                                                   true);
 
     collections.push(coll);
 }
diff --git a/jstests/sharding/libs/defragmentation_util.js b/jstests/sharding/libs/defragmentation_util.js
index 5ef9e1d2c96..3172a18566b 100644
--- a/jstests/sharding/libs/defragmentation_util.js
+++ b/jstests/sharding/libs/defragmentation_util.js
@@ -1,12 +1,23 @@
 var defragmentationUtil = (function() {
     load("jstests/sharding/libs/find_chunks_util.js");
 
-    let createFragmentedCollection = function(
-        mongos, ns, numChunks, maxChunkFillMB, numZones, docSizeBytes, chunkSpacing) {
+    let createFragmentedCollection = function(mongos,
+                                              ns,
+                                              numChunks,
+                                              maxChunkFillMB,
+                                              numZones,
+                                              docSizeBytes,
+                                              chunkSpacing,
+                                              disableCollectionBalancing) {
         jsTest.log("Creating fragmented collection " + ns + " with parameters: numChunks = " +
                    numChunks + ", numZones = " + numZones + ", docSizeBytes = " + docSizeBytes +
                    ", maxChunkFillMB = " + maxChunkFillMB + ", chunkSpacing = " + chunkSpacing);
         assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {key: 1}}));
+        // Turn off balancer for this collection
+        if (disableCollectionBalancing) {
+            assert.commandWorked(
+                mongos.getDB('config').collections.update({_id: ns}, {$set: {"noBalance": true}}));
+        }
         createAndDistributeChunks(mongos, ns, numChunks, chunkSpacing);
         createRandomZones(mongos, ns, numZones, chunkSpacing);
@@ -84,8 +95,9 @@ var defragmentationUtil = (function() {
 
     let checkPostDefragmentationState = function(mongos, ns, maxChunkSizeMB, shardKey) {
         const oversizedChunkThreshold = maxChunkSizeMB * 1024 * 1024 * 4 / 3;
-        const chunks =
-            findChunksUtil.findChunksByNs(mongos.getDB('config'), ns).sort({shardKey: 1}).toArray();
+        const chunks = findChunksUtil.findChunksByNs(mongos.getDB('config'), ns)
+                           .sort({[shardKey]: 1})
+                           .toArray();
         const coll = mongos.getCollection(ns);
         const pipeline = [
             {'$collStats': {'storageStats': {}}},
@@ -102,7 +114,8 @@ var defragmentationUtil = (function() {
     let checkForOversizedChunk = function(
         coll, chunk, shardKey, avgObjSize, oversizedChunkThreshold) {
         let chunkSize =
-            coll.countDocuments({key: {$gte: chunk.min[shardKey], $lt: chunk.max[shardKey]}}) *
+            coll.countDocuments(
+                {[shardKey]: {$gte: chunk.min[shardKey], $lt: chunk.max[shardKey]}}) *
             avgObjSize;
         assert.lte(
             chunkSize,
@@ -114,16 +127,22 @@ var defragmentationUtil = (function() {
             let chunk1 = chunks[i - 1];
             let chunk2 = chunks[i];
             // Check for mergeable chunks with combined size less than maxChunkSize
-            if (chunk1["shard"] === chunk2["shard"] && chunk1["max"] === chunk2["min"]) {
+            if (chunk1["shard"] === chunk2["shard"] &&
+                bsonWoCompare(chunk1["max"], chunk2["min"]) === 0) {
                 let chunk1Zone = getZoneForRange(mongos, ns, chunk1.min, chunk1.max);
                 let chunk2Zone = getZoneForRange(mongos, ns, chunk2.min, chunk2.max);
-                if (chunk1Zone === chunk2Zone) {
-                    let combinedDataSize = coll.countDocuments({
-                        shardKey: {$gte: chunk1.min[shardKey], $lt: chunk2.max[shardKey]}
-                    }) * avgObjSizeByShard[chunk1['shard']];
-                    assert.lte(
+                if (bsonWoCompare(chunk1Zone, chunk2Zone) === 0) {
+                    let combinedDataSize =
+                        coll.countDocuments(
+                            {[shardKey]: {$gte: chunk1.min[shardKey], $lt: chunk2.max[shardKey]}}) *
+                        avgObjSizeByShard[chunk1['shard']];
+                    // The autosplitter should not split chunks whose combined size is < 133% of
+                    // maxChunkSize but this threshold may be off by a few documents depending on
+                    // rounding of avgObjSize.
+                    const autosplitRoundingTolerance = 3 * avgObjSizeByShard[chunk1['shard']];
+                    assert.gte(
                         combinedDataSize,
-                        oversizedChunkThreshold,
+                        oversizedChunkThreshold - autosplitRoundingTolerance,
                         `Chunks ${tojson(chunk1)} and ${
                             tojson(chunk2)} are mergeable with combined size ${combinedDataSize}`);
                 }
@@ -147,7 +166,7 @@ var defragmentationUtil = (function() {
         const tags = mongos.getDB('config')
                          .tags.find({ns: ns, min: {$lte: minKey}, max: {$gte: maxKey}})
                          .toArray();
-        assert.leq(tags.length, 1);
+        assert.lte(tags.length, 1);
         if (tags.length === 1) {
             return tags[0].tag;
         }
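
Note on the new disableCollectionBalancing parameter: it works by writing the noBalance flag into the collection's document in config.collections, which the balancer consults before scheduling migrations for that namespace. A minimal sketch of how a caller might verify the flag took effect; st and ns are assumed to come from the surrounding test fixture, and the snippet is illustrative rather than part of the patch:

    // Sketch: confirm the per-collection balancer flag written by
    // createFragmentedCollection(..., true /* disableCollectionBalancing */).
    const entry = st.s.getDB('config').collections.findOne({_id: ns});
    assert.eq(true, entry.noBalance);  // the balancer will skip this collection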
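
Several of the query and sort fixes replace a literal property name with a computed one. In JavaScript, {shardKey: 1} builds an object with a field literally named "shardKey", so the old sort() and countDocuments() calls referenced a nonexistent field; {[shardKey]: 1} substitutes the variable's value instead. A quick shell illustration:

    const shardKey = "key";      // the shard key field name used by these tests
    printjson({shardKey: 1});    // {"shardKey": 1} - literal name, wrong field
    printjson({[shardKey]: 1});  // {"key": 1}      - computed name, correct field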
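
The mergeable-chunk check had a related pitfall: chunk bounds are objects, and === on two distinct objects compares references, so chunk1["max"] === chunk2["min"] was always false and the assertion never ran. bsonWoCompare, a shell built-in, compares BSON values and returns 0 on equality:

    const chunk1Max = {key: 100};
    const chunk2Min = {key: 100};
    print(chunk1Max === chunk2Min);                    // false - distinct objects
    print(bsonWoCompare(chunk1Max, chunk2Min) === 0);  // true  - equal BSON values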
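
Finally, the relaxed assertion reflects that combinedDataSize is only an estimate: a document count multiplied by a rounded avgObjSize, so it can land a few documents short of the 133% threshold. A rough worked example with assumed numbers (not taken from the patch):

    const maxChunkSizeMB = 1;  // assumed 1 MiB max chunk size
    const avgObjSize = 1024;   // assumed 1 KiB average document size
    const oversizedChunkThreshold = maxChunkSizeMB * 1024 * 1024 * 4 / 3;  // ~1398101 bytes (133%)
    const autosplitRoundingTolerance = 3 * avgObjSize;                     // 3072 bytes of slack
    // assert.gte(combinedDataSize, oversizedChunkThreshold - autosplitRoundingTolerance)
    // tolerates an estimate up to three average documents below the threshold.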