summary refs log tree commit diff
path: root/jstests/sharding/move_chunk_concurrent_cloning.js
diff options
context:
space:
mode:
Diffstat (limited to 'jstests/sharding/move_chunk_concurrent_cloning.js')
-rw-r--r-- jstests/sharding/move_chunk_concurrent_cloning.js | 160
1 file changed, 75 insertions(+), 85 deletions(-)
diff --git a/jstests/sharding/move_chunk_concurrent_cloning.js b/jstests/sharding/move_chunk_concurrent_cloning.js
index 3166f823351..954c2d7cd35 100644
--- a/jstests/sharding/move_chunk_concurrent_cloning.js
+++ b/jstests/sharding/move_chunk_concurrent_cloning.js
@@ -6,101 +6,91 @@
load('./jstests/libs/chunk_manipulation_util.js');
-const runParallelMoveChunk = (numThreads) => {
- // For startParallelOps to write its state
- let staticMongod = MongoRunner.runMongod({});
-
- let st = new ShardingTest({shards: 2});
- st.stopBalancer();
-
- const kThreadCount = numThreads;
- const kPadding = new Array(1024).join("x");
-
- let testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
-
- let shardKeyVal = 0;
- const kDocsInBatch = 8 * 1000;
- const kMinCollSize = 128 * 1024 * 1024;
- let approxInsertedSize = 0;
- while (approxInsertedSize < kMinCollSize) {
- var bulk = testDB.user.initializeUnorderedBulkOp();
- for (let docs = 0; docs < kDocsInBatch; docs++) {
- shardKeyVal++;
- bulk.insert({_id: shardKeyVal, x: shardKeyVal, padding: kPadding});
- }
- assert.commandWorked(bulk.execute());
-
- approxInsertedSize = approxInsertedSize + (kDocsInBatch * 1024);
+// For startParallelOps to write its state
+let staticMongod = MongoRunner.runMongod({});
+
+let st = new ShardingTest({shards: 2});
+st.stopBalancer();
+
+const kThreadCount = Math.floor(Math.random() * 31) + 1;
+const kPadding = new Array(1024).join("x");
+
+let testDB = st.s.getDB('test');
+assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+
+let shardKeyVal = 0;
+const kDocsInBatch = 8 * 1000;
+const kMinCollSize = 128 * 1024 * 1024;
+let approxInsertedSize = 0;
+while (approxInsertedSize < kMinCollSize) {
+ var bulk = testDB.user.initializeUnorderedBulkOp();
+ for (let docs = 0; docs < kDocsInBatch; docs++) {
+ shardKeyVal++;
+ bulk.insert({_id: shardKeyVal, x: shardKeyVal, padding: kPadding});
}
+ assert.commandWorked(bulk.execute());
- const kInitialLoadFinalKey = shardKeyVal;
-
- print(`Running tests with migrationConcurrency == ${kThreadCount}`);
- st._rs.forEach((replSet) => {
- assert.commandWorked(replSet.test.getPrimary().adminCommand(
- {setParameter: 1, migrationConcurrency: kThreadCount}));
- });
-
- const configCollEntry =
- st.s.getDB('config').getCollection('collections').findOne({_id: 'test.user'});
- let chunks = st.s.getDB('config').chunks.find({uuid: configCollEntry.uuid}).toArray();
- assert.eq(1, chunks.length, tojson(chunks));
-
- let joinMoveChunk =
- moveChunkParallel(staticMongod, st.s0.host, {x: 0}, null, 'test.user', st.shard1.shardName);
-
- // Migration cloning scans by shard key order. Perform some writes against the collection on
- // both the lower and upper ends of the shard key values while migration is happening to
- // exercise xferMods logic.
- const kDeleteIndexOffset = kInitialLoadFinalKey - 3000;
- const kUpdateIndexOffset = kInitialLoadFinalKey - 5000;
- for (let x = 0; x < 1000; x++) {
- assert.commandWorked(testDB.user.remove({x: x}));
- assert.commandWorked(testDB.user.update({x: 4000 + x}, {$set: {updated: true}}));
-
- assert.commandWorked(testDB.user.remove({x: kDeleteIndexOffset + x}));
- assert.commandWorked(
- testDB.user.update({x: kUpdateIndexOffset + x}, {$set: {updated: true}}));
-
- let newShardKey = kInitialLoadFinalKey + x + 1;
- assert.commandWorked(testDB.user.insert({_id: newShardKey, x: newShardKey}));
- }
+ approxInsertedSize = approxInsertedSize + (kDocsInBatch * 1024);
+}
- joinMoveChunk();
+const kInitialLoadFinalKey = shardKeyVal;
- let shardKeyIdx = 1000; // Index starts at 1k since we deleted the first 1k docs.
- let cursor = testDB.user.find().sort({x: 1});
+print(`Running tests with migrationConcurrency == ${kThreadCount}`);
+st._rs.forEach((replSet) => {
+ assert.commandWorked(replSet.test.getPrimary().adminCommand(
+ {setParameter: 1, migrationConcurrency: kThreadCount}));
+});
- while (cursor.hasNext()) {
- let next = cursor.next();
- assert.eq(next.x, shardKeyIdx);
+const configCollEntry =
+ st.s.getDB('config').getCollection('collections').findOne({_id: 'test.user'});
+let chunks = st.s.getDB('config').chunks.find({uuid: configCollEntry.uuid}).toArray();
+assert.eq(1, chunks.length, tojson(chunks));
- if ((shardKeyIdx >= 4000 && shardKeyIdx < 5000) ||
- (shardKeyIdx >= kUpdateIndexOffset && shardKeyIdx < (kUpdateIndexOffset + 1000))) {
- assert.eq(true, next.updated, tojson(next));
- }
+let joinMoveChunk =
+ moveChunkParallel(staticMongod, st.s0.host, {x: 0}, null, 'test.user', st.shard1.shardName);
- shardKeyIdx++;
+// Migration cloning scans by shard key order. Perform some writes against the collection on both
+// the lower and upper ends of the shard key values while migration is happening to exercise
+// xferMods logic.
+const kDeleteIndexOffset = kInitialLoadFinalKey - 3000;
+const kUpdateIndexOffset = kInitialLoadFinalKey - 5000;
+for (let x = 0; x < 1000; x++) {
+ assert.commandWorked(testDB.user.remove({x: x}));
+ assert.commandWorked(testDB.user.update({x: 4000 + x}, {$set: {updated: true}}));
- if (shardKeyIdx == kDeleteIndexOffset) {
- shardKeyIdx += 1000;
- }
- }
+ assert.commandWorked(testDB.user.remove({x: kDeleteIndexOffset + x}));
+ assert.commandWorked(testDB.user.update({x: kUpdateIndexOffset + x}, {$set: {updated: true}}));
- shardKeyIdx--;
- assert.eq(shardKeyIdx, kInitialLoadFinalKey + 1000);
+ let newShardKey = kInitialLoadFinalKey + x + 1;
+ assert.commandWorked(testDB.user.insert({_id: newShardKey, x: newShardKey}));
+}
- st.stop();
- MongoRunner.stopMongod(staticMongod);
-};
+joinMoveChunk();
-// Run test 10 times with random concurrency levels.
-for (let i = 1; i <= 5; i++) {
- runParallelMoveChunk(Math.floor(Math.random() * 31) + 1);
-}
+let shardKeyIdx = 1000; // Index starts at 1k since we deleted the first 1k docs.
+let cursor = testDB.user.find().sort({x: 1});
+
+while (cursor.hasNext()) {
+ let next = cursor.next();
+ assert.eq(next.x, shardKeyIdx);
+
+ if ((shardKeyIdx >= 4000 && shardKeyIdx < 5000) ||
+ (shardKeyIdx >= kUpdateIndexOffset && shardKeyIdx < (kUpdateIndexOffset + 1000))) {
+ assert.eq(true, next.updated, tojson(next));
+ }
+
+ shardKeyIdx++;
+
+ if (shardKeyIdx == kDeleteIndexOffset) {
+ shardKeyIdx += 1000;
+ }
}
- )();
+shardKeyIdx--;
+assert.eq(shardKeyIdx, kInitialLoadFinalKey + 1000);
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
+})();