Diffstat (limited to 'jstests/sharding/mrShardedOutput.js')
-rw-r--r--  jstests/sharding/mrShardedOutput.js  135
1 file changed, 0 insertions(+), 135 deletions(-)
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
deleted file mode 100644
index 53332b3c13d..00000000000
--- a/jstests/sharding/mrShardedOutput.js
+++ /dev/null
@@ -1,135 +0,0 @@
-// This test runs mapReduce on a sharded input collection and outputs the results to a sharded
-// collection. The test is done in two passes: the first pass runs mapReduce and outputs to a
-// non-existent collection; the second pass runs mapReduce with an input collection twice the
-// size of the first and outputs to the sharded collection created by the first pass.
-(function() {
-"use strict";
-
-const st = new ShardingTest({shards: 2, other: {chunkSize: 1}});
-
-const config = st.getDB("config");
-st.adminCommand({enablesharding: "test"});
-st.ensurePrimaryShard("test", st.shard1.shardName);
-st.adminCommand({shardcollection: "test.foo", key: {"a": 1}});
-
-const testDB = st.getDB("test");
-
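-// Every input document has a unique 'i' value, so each key is emitted exactly once
-// and reduce2 only needs to return the single value it receives.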
-function map2() {
- emit(this.i, {count: 1, y: this.y});
-}
-function reduce2(key, values) {
- return values[0];
-}
-
-let numDocs = 0;
-const numBatch = 5000;
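-// Each document carries ~1KB of padding so that a 5000-document batch comfortably
-// exceeds the 1MB chunk size configured above and gives the splitter work to do.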
-const str = new Array(1024).join('a');
-
-// Pre-split now so we don't have to balance the chunks later.
-// M/R is strange in that it chooses the output shards based on the currently sharded
-// collections in the database. The upshot is that we need a sharded collection on
-// both shards in order to ensure M/R will output to both of them.
-st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
-st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: st.shard0.shardName});
-
-// Create a split point every 400 documents so the input data is spread across many chunks.
-for (let splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({split: 'test.foo', middle: {a: splitPoint}});
-}
-
-let bulk = testDB.foo.initializeUnorderedBulkOp();
-for (let i = 0; i < numBatch; ++i) {
- bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
-}
-assert.commandWorked(bulk.execute());
-
-numDocs += numBatch;
-
-// Do the MapReduce step
-jsTest.log("Setup OK: count matches (" + numDocs + ") -- Starting MapReduce");
-let res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sharded: true}});
-jsTest.log("MapReduce results:" + tojson(res));
-
-let reduceOutputCount = res.counts.output;
-assert.eq(numDocs,
- reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
-
-jsTest.log("Checking that all MapReduce output documents are in output collection");
-let outColl = testDB["mrShardedOut"];
-let outCollCount = outColl.find().itcount();
-assert.eq(numDocs,
- outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
- numDocs + ": this may happen intermittently until resolution of SERVER-3627");
-
-// Make sure it's sharded and split
-let newNumChunks = config.chunks.count({ns: testDB.mrShardedOut.getFullName()});
-assert.gt(
-    newNumChunks, 1, "Sharding FAILURE: " + testDB.mrShardedOut.getFullName() + " has only 1 chunk");
-
-// Check that there are no "jumbo" chunks.
-const objSize = Object.bsonsize(testDB.mrShardedOut.findOne());
-const docsPerChunk = 1024 * 1024 / objSize * 1.1;  // 1MB chunk size plus a 10% allowance
-
-config.chunks.find({ns: testDB.mrShardedOut.getFullName()}).forEach(function(chunkDoc) {
- const count =
- testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}}).itcount();
- assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
-});
-
-// Check that chunks for the newly created sharded output collection are well distributed.
-const shard0Chunks =
-    config.chunks.find({ns: testDB.mrShardedOut.getFullName(), shard: st.shard0.shardName}).count();
-const shard1Chunks =
-    config.chunks.find({ns: testDB.mrShardedOut.getFullName(), shard: st.shard1.shardName}).count();
-assert.lte(Math.abs(shard0Chunks - shard1Chunks), 1);
-
-jsTest.log('Starting second pass');
-
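-// Pre-split and move a chunk for the second batch, mirroring the setup of the first pass.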
-st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
-st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: st.shard0.shardName});
-
-// Create additional split points covering the second batch of documents.
-for (let splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({split: 'test.foo', middle: {a: numDocs + splitPoint}});
-}
-
-bulk = testDB.foo.initializeUnorderedBulkOp();
-for (let i = 0; i < numBatch; ++i) {
- bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
-}
-assert.commandWorked(bulk.execute());
-numDocs += numBatch;
-
-// Do the MapReduce step
-jsTest.log("Setup OK: count matches (" + numDocs + ") -- Starting MapReduce");
-res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sharded: true}});
-jsTest.log("MapReduce results:" + tojson(res));
-
-reduceOutputCount = res.counts.output;
-assert.eq(numDocs,
- reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
-
-jsTest.log("Checking that all MapReduce output documents are in output collection");
-outColl = testDB.mrShardedOut;
-outCollCount = outColl.find().itcount();
-assert.eq(numDocs,
- outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
- numDocs + ": this may happen intermittently until resolution of SERVER-3627");
-
-// Make sure it's sharded and split
-newNumChunks = config.chunks.count({ns: testDB.mrShardedOut.getFullName()});
-assert.gt(
-    newNumChunks, 1, "Sharding FAILURE: " + testDB.mrShardedOut.getFullName() + " has only 1 chunk");
-
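-// As in the first pass, verify that no chunk of the output collection holds more
-// documents than fit in a single 1MB chunk (plus allowance).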
-config.chunks.find({ns: testDB.mrShardedOut.getFullName()}).forEach(function(chunkDoc) {
- const count =
- testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}}).itcount();
- assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
-});
-
-st.stop();
-}());