Diffstat (limited to 'jstests/sharding/get_stats_for_balancing.js')
-rw-r--r-- | jstests/sharding/get_stats_for_balancing.js | 106
1 file changed, 106 insertions, 0 deletions
diff --git a/jstests/sharding/get_stats_for_balancing.js b/jstests/sharding/get_stats_for_balancing.js
new file mode 100644
index 00000000000..1787d75db99
--- /dev/null
+++ b/jstests/sharding/get_stats_for_balancing.js
@@ -0,0 +1,106 @@
+/*
+ * Basic tests for the _shardsvrGetStatsForBalancing command.
+ *
+ * @tags: [
+ *   featureFlagOrphanTracking,
+ *   requires_fcv_60,
+ * ]
+ */
+(function() {
+'use strict';
+
+load("jstests/libs/fail_point_util.js");  // For 'configureFailPoint()'.
+
+const rangeDeleterBatchSize = 128;
+
+const st = new ShardingTest({
+    shards: 2,
+    other: {shardOptions: {setParameter: {rangeDeleterBatchSize: rangeDeleterBatchSize}}}
+});
+
+function getCollSizeBytes(ns, node) {
+    let res;
+
+    assert.soon(() => {
+        res = assert.commandWorkedOrFailedWithCode(
+            node.adminCommand(
+                {_shardsvrGetStatsForBalancing: 1, collections: [ns], scaleFactor: 1}),
+            [ErrorCodes.NotYetInitialized]);
+        return res.ok;
+    });
+
+    jsTest.log("_shardsvrGetStatsForBalancing returned: " + tojson(res));
+    return res['stats'][0]['collSize'];
+}
+const kSizeSingleDocBytes = 18;
+// The command works on non-existent collections.
+st.forEachConnection((shard) => {
+    assert.eq(0, getCollSizeBytes("db.not_exists", shard.rs.getPrimary()));
+});
+
+const dbName = 'db';
+const db = st.getDB(dbName);
+assert.commandWorked(
+    st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName}));
+
+{
+    // The command works on unsharded collections.
+    let coll = db['unsharded1'];
+    assert.commandWorked(coll.insert({_id: 1}));
+    assert.eq(kSizeSingleDocBytes, getCollSizeBytes(coll.getFullName(), st.shard0.rs.getPrimary()));
+    assert.eq(0, getCollSizeBytes(coll.getFullName(), st.shard1.rs.getPrimary()));
+}
+
+// The command works on sharded collections.
+let coll = db['sharded1'];
+assert.commandWorked(st.s.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+st.forEachConnection((shard) => {
+    assert.eq(0, getCollSizeBytes(coll.getFullName(), shard.rs.getPrimary()));
+});
+
+const numDocs = 1000;
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numDocs; i++) {
+    bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
+
+// Split the collection into two chunks.
+assert.commandWorked(st.s.adminCommand({split: coll.getFullName(), middle: {_id: numDocs / 2}}));
+
+// Check that the reported data size is correct before the chunk is moved.
+assert.eq(kSizeSingleDocBytes * numDocs,
+          getCollSizeBytes(coll.getFullName(), st.shard0.rs.getPrimary()));
+assert.eq(0, getCollSizeBytes(coll.getFullName(), st.shard1.rs.getPrimary()));
+
+// Pause the range deleter before the first range deletion task.
+let beforeDeletionFailpoint = configureFailPoint(st.shard0, "hangBeforeDoingDeletion");
+let afterDeletionFailpoint = configureFailPoint(st.shard0, "hangAfterDoingDeletion");
+assert.commandWorked(db.adminCommand(
+    {moveChunk: coll.getFullName(), find: {_id: (numDocs / 2)}, to: st.shard1.shardName}));
+
+const expectedShardSizeBytes = kSizeSingleDocBytes * (numDocs / 2);
+st.forEachConnection((shard) => {
+    assert.eq(expectedShardSizeBytes, getCollSizeBytes(coll.getFullName(), shard.rs.getPrimary()));
+});
+
+// Check that the reported data size stays correct while range deletions are in progress.
+const numBatches = (numDocs / 2) / rangeDeleterBatchSize;
+for (let i = 0; i < numBatches; i++) {
+    // Wait for the failpoint, then check the size reported by each shard.
+    beforeDeletionFailpoint.wait();
+    st.forEachConnection((shard) => {
+        assert.eq(expectedShardSizeBytes,
+                  getCollSizeBytes(coll.getFullName(), shard.rs.getPrimary()));
+    });
+    // Leapfrog the failpoints so that no extra batch is deleted in the meantime.
+    afterDeletionFailpoint = configureFailPoint(st.shard0, "hangAfterDoingDeletion");
+    beforeDeletionFailpoint.off();
+    afterDeletionFailpoint.wait();
+    beforeDeletionFailpoint = configureFailPoint(st.shard0, "hangBeforeDoingDeletion");
+    afterDeletionFailpoint.off();
+}
+beforeDeletionFailpoint.off();
+
+st.stop();
+})();
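Note on the hard-coded kSizeSingleDocBytes = 18: a BSON document {_id: <number>} as inserted by the shell (numbers become doubles) occupies 4 bytes of length prefix, 1 type byte, 4 bytes for the "_id" key plus its NUL terminator, 8 bytes for the double value, and 1 terminating NUL, for 18 bytes total. A minimal shell check of this arithmetic (not part of the patch):

    // 4 (length) + 1 (type) + 4 ("_id" + NUL) + 8 (double) + 1 (terminator) = 18 bytes.
    assert.eq(18, Object.bsonsize({_id: 1}));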
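The loop at the end steps the range deleter one batch at a time by leapfrogging two failpoints: re-arming the rear hang before releasing the front one guarantees that exactly one deletion batch runs per iteration, so the size assertions always observe a quiesced deleter. A sketch of the pattern in isolation, assuming a hypothetical shard connection 'conn' and the configureFailPoint/wait/off helpers from fail_point_util.js:

    let before = configureFailPoint(conn, "hangBeforeDoingDeletion");
    let after = configureFailPoint(conn, "hangAfterDoingDeletion");
    // ...kick off range deletion, e.g. via moveChunk...
    before.wait();  // Deleter is parked right before batch i; assert invariants here.
    after = configureFailPoint(conn, "hangAfterDoingDeletion");  // Re-arm the rear gate.
    before.off();   // Let batch i run.
    after.wait();   // Batch i is done; batch i+1 has not started.
    before = configureFailPoint(conn, "hangBeforeDoingDeletion");  // Re-arm the front gate.
    after.off();    // Deleter proceeds and parks before batch i+1.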