commit    4ae691e8edc87d0e3cfb633bb91c328426be007b (patch)
author    Jonathan Abrahams <jonathan@mongodb.com>  2016-03-09 12:17:50 -0500
committer Jonathan Abrahams <jonathan@mongodb.com>  2016-03-09 12:18:14 -0500
tree      52079a593f54382ca13a2e741633eab1b6271893  /jstests/sharding/mrShardedOutput.js
parent    a025d43f3ce2efc1fb1282a718f5d286fa0a4dc1  (diff)
download  mongo-4ae691e8edc87d0e3cfb633bb91c328426be007b.tar.gz
SERVER-22468 Format JS code with approved style in jstests/
Diffstat (limited to 'jstests/sharding/mrShardedOutput.js')
-rw-r--r--  jstests/sharding/mrShardedOutput.js  91
1 file changed, 49 insertions, 42 deletions
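
The change below is mechanical reformatting only; no behavior changes. As a minimal illustration of the approved style the commit applies (distilled from the hunks that follow, with invented names), object literals lose the padding spaces inside their braces and one-line function bodies are expanded:

// Before the reformat: padded braces and a single-line function body.
//   var t = new ShardingTest({ shards: 2, other: { chunkSize: 1 }});
//   function id(x) { return x; }
// After, in the approved style: no spaces just inside braces, and the
// body gets its own lines even for a single statement.
var t = new ShardingTest({shards: 2, other: {chunkSize: 1}});
function id(x) {
    return x;
}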
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index a4dac1db3d2..ab3bae28d74 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -4,17 +4,21 @@
// collection input twice the size of the first and outputs it to the new sharded
// collection created in the first pass.
-var st = new ShardingTest({ shards: 2, other: { chunkSize: 1 }});
+var st = new ShardingTest({shards: 2, other: {chunkSize: 1}});
var config = st.getDB("config");
-st.adminCommand( { enablesharding: "test" } );
-st.getDB("admin").runCommand( { movePrimary: "test", to: "shard0001"});
-st.adminCommand( { shardcollection: "test.foo", key: { "a": 1 } } );
+st.adminCommand({enablesharding: "test"});
+st.getDB("admin").runCommand({movePrimary: "test", to: "shard0001"});
+st.adminCommand({shardcollection: "test.foo", key: {"a": 1}});
-var testDB = st.getDB( "test" );
+var testDB = st.getDB("test");
-function map2() { emit(this.i, { count: 1, y: this.y }); }
-function reduce2(key, values) { return values[0]; }
+function map2() {
+ emit(this.i, {count: 1, y: this.y});
+}
+function reduce2(key, values) {
+ return values[0];
+}
var numDocs = 0;
var numBatch = 5000;
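
An aside on the reduce function reformatted above: reduce2 returns values[0] unchanged, which is only correct because map2 emits each document under a unique key (this.i), so the server never has to combine two values for one key. A quick shell-level sanity check of that identity behavior (the sample value here is invented):

// With a single value per key, reduce2 must act as the identity.
var sample = [{count: 1, y: 'a'}];
assert.eq(sample[0], reduce2(17, sample));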
@@ -24,17 +28,17 @@ var str = new Array(1024).join('a');
// M/R is strange in that it chooses the output shards based on currently sharded
// collections in the database. The upshot is that we need a sharded collection on
// both shards in order to ensure M/R will output to two shards.
-st.adminCommand({ split: 'test.foo', middle: { a: numDocs + numBatch / 2 }});
-st.adminCommand({ moveChunk: 'test.foo', find: { a: numDocs }, to: 'shard0000' });
+st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
+st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: 'shard0000'});
// Add some more data for input so that chunks will get split further
for (var splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({ split: 'test.foo', middle: { a: splitPoint }});
+ testDB.adminCommand({split: 'test.foo', middle: {a: splitPoint}});
}
var bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
- bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
+ bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
assert.writeOK(bulk.execute());
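
For scale, the pre-split loop in this hunk creates split points at a = 0, 400, ..., 4800, and each document carries a ~1KB string (str is 1023 'a' characters), so a 5000-document batch is roughly 5MB against the 1MB chunkSize this test configures. A small check of that arithmetic:

// 13 manual split points cover the first batch of 5000 docs.
var points = [];
for (var p = 0; p < 5000; p += 400) {
    points.push(p);
}
assert.eq(13, points.length);  // a = 0, 400, ..., 4800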
@@ -46,55 +50,58 @@ var res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sh
jsTest.log("MapReduce results:" + tojson(res));
var reduceOutputCount = res.counts.output;
-assert.eq(numDocs, reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount +
- ", should be " + numDocs);
+assert.eq(numDocs,
+ reduceOutputCount,
+ "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
jsTest.log("Checking that all MapReduce output documents are in output collection");
var outColl = testDB["mrShardedOut"];
var outCollCount = outColl.find().itcount();
-assert.eq(numDocs, outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount +
- ", should be " + numDocs +
- ": this may happen intermittently until resolution of SERVER-3627");
+assert.eq(numDocs,
+ outCollCount,
+ "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
+ numDocs + ": this may happen intermittently until resolution of SERVER-3627");
// Make sure it's sharded and split
var newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks, 1,
+assert.gt(newNumChunks,
+ 1,
"Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
// Check that there are no "jumbo" chunks.
var objSize = Object.bsonsize(testDB.mrShardedOut.findOne());
-var docsPerChunk = 1024 * 1024 / objSize * 1.1; // 1MB chunk size + allowance
+var docsPerChunk = 1024 * 1024 / objSize * 1.1; // 1MB chunk size + allowance
st.printShardingStatus(true);
-config.chunks.find({ ns: testDB.mrShardedOut.getFullName() }).forEach(function(chunkDoc) {
- var count = testDB.mrShardedOut.find({ _id: { $gte: chunkDoc.min._id,
- $lt: chunkDoc.max._id }}).itcount();
- assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
-});
+config.chunks.find({ns: testDB.mrShardedOut.getFullName()})
+ .forEach(function(chunkDoc) {
+ var count =
+ testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}})
+ .itcount();
+ assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
+ });
// Check that chunks for the newly created sharded output collection are well distributed.
-var shard0Chunks = config.chunks.find({ ns: testDB.mrShardedOut._fullName,
- shard: 'shard0000' }).count();
-var shard1Chunks = config.chunks.find({ ns: testDB.mrShardedOut._fullName,
- shard: 'shard0001' }).count();
+var shard0Chunks =
+ config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: 'shard0000'}).count();
+var shard1Chunks =
+ config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: 'shard0001'}).count();
assert.lte(Math.abs(shard0Chunks - shard1Chunks), 1);
jsTest.log('Starting second pass');
-st.adminCommand({ split: 'test.foo', middle: { a: numDocs + numBatch / 2 }});
-st.adminCommand({ moveChunk: 'test.foo', find: { a: numDocs }, to: 'shard0000' });
+st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
+st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: 'shard0000'});
// Add some more data for input so that chunks will get split further
for (splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({ split: 'test.foo', middle: { a: numDocs + splitPoint }});
+ testDB.adminCommand({split: 'test.foo', middle: {a: numDocs + splitPoint}});
}
bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
- bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
+ bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
assert.writeOK(bulk.execute());
jsTest.log("No errors on insert batch.");
@@ -106,21 +113,22 @@ res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sharde
jsTest.log("MapReduce results:" + tojson(res));
reduceOutputCount = res.counts.output;
-assert.eq(numDocs, reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount +
- ", should be " + numDocs);
+assert.eq(numDocs,
+ reduceOutputCount,
+ "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
jsTest.log("Checking that all MapReduce output documents are in output collection");
outColl = testDB["mrShardedOut"];
outCollCount = outColl.find().itcount();
-assert.eq(numDocs, outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount +
- ", should be " + numDocs +
- ": this may happen intermittently until resolution of SERVER-3627");
+assert.eq(numDocs,
+ outCollCount,
+ "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
+ numDocs + ": this may happen intermittently until resolution of SERVER-3627");
// Make sure it's sharded and split
newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks, 1,
+assert.gt(newNumChunks,
+ 1,
"Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
st.printShardingStatus(true);
@@ -138,4 +146,3 @@ config.chunks.find({ ns: testDB.mrShardedOut.getFullName() }).forEach(function(c
// to balance chunks.
st.stop();
-
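
One closing note on the jumbo-chunk check inside the diff: docsPerChunk = 1024 * 1024 / objSize * 1.1 is the nominal 1MB chunk size divided by the measured document size, with a 10% allowance. A worked example under the assumption that objSize comes out near 1100 bytes (the 1023-character y string plus field names and BSON overhead; the real value comes from Object.bsonsize at runtime):

// Hypothetical objSize of 1100 bytes gives a cap of ~1049 docs per chunk:
//   1024 * 1024 / 1100 * 1.1  ~=  1048.6
var objSize = 1100;  // assumed for illustration; the test measures this
var docsPerChunk = 1024 * 1024 / objSize * 1.1;
assert.eq(1048, Math.floor(docsPerChunk));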