| author | Nicholas Zolnierz <nicholas.zolnierz@mongodb.com> | 2020-02-14 23:34:42 +0000 |
|---|---|---|
| committer | evergreen <evergreen@mongodb.com> | 2020-02-14 23:34:42 +0000 |
| commit | cb1a4f9b3cb40f895eaa8237e749ce40f8134c8f (patch) | |
| tree | be82196076aefa9459724a4e8a708be1323f0b89 | |
| parent | 7a91c2823d5c0f94ad0d0e6bb5e830df0276da2a (diff) | |
| download | mongo-cb1a4f9b3cb40f895eaa8237e749ce40f8134c8f.tar.gz | |
SERVER-46145 Remove mr_shard_version.js test in favor of the existing FSM tests
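The coverage this commit leans on comes from the concurrency (FSM) suites: each workload under jstests/concurrency/fsm_workloads exports a `$config` object, the framework runs its `states` from multiple threads according to `transitions`, and in the sharded concurrency suites it does so while chunks are being migrated, which is the same interleaving the deleted tests reproduced by hand with benchStart() and scripted moveChunk commands. The sketch below only illustrates that workload shape; the thread counts, state names, and map/reduce functions here are invented for illustration and are not the actual workloads this commit refers to.

```javascript
'use strict';

/**
 * Illustrative-only sketch of an FSM workload in the style of
 * jstests/concurrency/fsm_workloads/*.js. The framework runs the `states`
 * functions from `threadCount` threads, choosing the next state from
 * `transitions`; in the sharded concurrency suites this happens while chunk
 * migrations are in flight, so stale shard versions are hit continuously.
 */
var $config = (function() {
    var data = {numKeys: 50};

    var states = {
        // Insert a small batch of documents so map-reduce has input to group.
        insert: function insert(db, collName) {
            var bulk = db[collName].initializeUnorderedBulkOp();
            for (var i = 0; i < 100; ++i) {
                bulk.insert({key: i % this.numKeys, value: 1});
            }
            assertAlways.commandWorked(bulk.execute());
        },

        // Run an inline map-reduce; a migration may be in progress underneath.
        mapReduce: function mapReduce(db, collName) {
            var map = function() {
                emit(this.key, this.value);
            };
            var reduce = function(key, values) {
                return Array.sum(values);
            };
            var res = db[collName].mapReduce(map, reduce, {out: {inline: 1}});
            assertAlways.commandWorked(res);
        },
    };

    var transitions = {
        insert: {insert: 0.5, mapReduce: 0.5},
        mapReduce: {insert: 0.5, mapReduce: 0.5},
    };

    return {
        threadCount: 5,
        iterations: 20,
        startState: 'insert',
        data: data,
        states: states,
        transitions: transitions,
    };
})();
```

Because the framework owns the threading and the migrations, the map-reduce-during-migration path is exercised on every run of the sharded concurrency suites rather than by the fixed benchStart/moveChunk script the deleted tests used.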
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | buildscripts/resmokeconfig/suites/sharding_misc.yml | 1 |
| -rw-r--r-- | jstests/sharding/mr_shard_version.js | 87 |
| -rw-r--r-- | jstests/slow1/mr_during_migrate.js | 112 |
3 files changed, 0 insertions, 200 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_misc.yml b/buildscripts/resmokeconfig/suites/sharding_misc.yml
index 4bdc42ea1f4..23f5460dc4d 100644
--- a/buildscripts/resmokeconfig/suites/sharding_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_misc.yml
@@ -72,7 +72,6 @@ selector:
   - jstests/sharding/move_chunk_remove_with_write_retryability.js
   - jstests/sharding/move_chunk_update_with_write_retryability.js
   - jstests/sharding/addshard2.js
-  - jstests/sharding/mr_shard_version.js
   - jstests/sharding/move_chunk_insert_with_write_retryability.js
   - jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
   - jstests/sharding/replmonitor_bad_seed.js
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
deleted file mode 100644
index 195444c97f0..00000000000
--- a/jstests/sharding/mr_shard_version.js
+++ /dev/null
@@ -1,87 +0,0 @@
-// Test for SERVER-4158 (version changes during mapreduce)
-(function() {
-
-var st = new ShardingTest({shards: 2, mongos: 1});
-
-// Stop balancer, since it'll just get in the way of these
-st.stopBalancer();
-
-var coll = st.s.getCollection(jsTest.name() + ".coll");
-
-var numDocs = 50000;
-var numKeys = 1000;
-var numTests = 3;
-
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++) {
-    bulk.insert({_id: i, key: "" + (i % numKeys), value: i % numKeys});
-}
-assert.commandWorked(bulk.execute());
-
-assert.eq(numDocs, coll.find().itcount());
-
-var halfId = coll.find().itcount() / 2;
-
-// Shard collection in half
-st.shardColl(coll, {_id: 1}, {_id: halfId});
-
-st.printShardingStatus();
-
-jsTest.log("Collection now initialized with keys and values...");
-
-jsTest.log("Starting migrations...");
-
-var ops = {};
-for (var i = 0; i < st._connections.length; i++) {
-    for (var j = 0; j < 2; j++) {
-        ops["" + (i * 2 + j)] = {
-            op: "command",
-            ns: "admin",
-            command: {
-                moveChunk: "" + coll,
-                find: {_id: (j == 0 ? 0 : halfId)},
-                to: st._connections[i].shardName
-            },
-        };
-    }
-}
-
-var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
-
-jsTest.log("Starting m/r...");
-
-var map = function() {
-    emit(this.key, this.value);
-};
-var reduce = function(k, values) {
-    var total = 0;
-    for (var i = 0; i < values.length; i++)
-        total += values[i];
-    return total;
-};
-
-var outputColl = st.s.getCollection(jsTest.name() + ".mrOutput");
-
-jsTest.log("Output coll : " + outputColl);
-
-for (var t = 0; t < numTests; t++) {
-    var results = coll.mapReduce(map, reduce, {out: {replace: outputColl.getName()}});
-
-    // Assert that the results are actually correct, all keys have values of (numDocs / numKeys)
-    // x key
-    var output = outputColl.find().sort({_id: 1}).toArray();
-
-    // printjson( output )
-
-    assert.eq(output.length, numKeys);
-    printjson(output);
-    for (var i = 0; i < output.length; i++)
-        assert.eq(parseInt(output[i]._id) * (numDocs / numKeys), output[i].value);
-}
-
-jsTest.log("Finishing parallel migrations...");
-
-printjson(benchFinish(bid));
-
-st.stop();
-})();
diff --git a/jstests/slow1/mr_during_migrate.js b/jstests/slow1/mr_during_migrate.js
deleted file mode 100644
index 8bd8392154b..00000000000
--- a/jstests/slow1/mr_during_migrate.js
+++ /dev/null
@@ -1,112 +0,0 @@
-// Do parallel ops with migrates occurring
-// @tags: [requires_sharding]
-
-(function() {
-'use strict';
-
-var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
-
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var coll = st.s.getCollection(jsTest.name() + ".coll");
-
-var numDocs = 1024 * 1024;
-var dataSize = 1024;  // bytes, must be power of 2
-
-var data = "x";
-while (data.length < dataSize)
-    data += data;
-
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++) {
-    bulk.insert({_id: i, data: data});
-}
-assert.commandWorked(bulk.execute());
-
-// Make sure everything got inserted
-assert.eq(numDocs, coll.find().itcount());
-
-jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
-
-// Shard collection
-st.shardColl(coll, {_id: 1}, false);
-
-st.printShardingStatus();
-
-jsTest.log("Sharded collection now initialized, starting migrations...");
-
-var checkMigrate = function() {
-    print("Result of migrate : ");
-    printjson(this);
-};
-
-// Creates a number of migrations of random chunks to diff shard servers
-var ops = [];
-for (var i = 0; i < st._connections.length; i++) {
-    ops.push({
-        op: "command",
-        ns: "admin",
-        command: {
-            moveChunk: "" + coll,
-            find: {_id: {"#RAND_INT": [0, numDocs]}},
-            to: st._connections[i].shardName,
-            _waitForDelete: true
-        },
-        showResult: true
-    });
-}
-
-// TODO: Also migrate output collection
-
-jsTest.log("Starting migrations now...");
-
-var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
-
-//#######################
-// Tests during migration
-
-var numTests = 5;
-
-for (var t = 0; t < numTests; t++) {
-    jsTest.log("Test #" + t);
-
-    var mongos = st.s1;  // use other mongos so we get stale shard versions
-    var coll = mongos.getCollection(coll + "");
-    var outputColl = mongos.getCollection(coll + "_output");
-
-    var numTypes = 32;
-    var map = function() {
-        emit(this._id % 32 /* must be hardcoded */, {c: 1});
-    };
-
-    var reduce = function(k, vals) {
-        var total = 0;
-        for (var i = 0; i < vals.length; i++)
-            total += vals[i].c;
-        return {c: total};
-    };
-
-    printjson(coll.find({_id: 0}).itcount());
-
-    jsTest.log("Starting new mapReduce run #" + t);
-
-    // assert.eq( coll.find().itcount(), numDocs )
-
-    coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
-
-    printjson(coll.mapReduce(
-        map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
-
-    jsTest.log("MapReduce run #" + t + " finished.");
-
-    assert.eq(outputColl.find().itcount(), numTypes);
-
-    outputColl.find().forEach(function(x) {
-        assert.eq(x.value.c, numDocs / numTypes);
-    });
-}
-
-printjson(benchFinish(bid));
-
-st.stop();
-})();