author     Jonathan Abrahams <jonathan@mongodb.com>  2016-03-09 12:17:50 -0500
committer  Jonathan Abrahams <jonathan@mongodb.com>  2016-03-09 12:18:14 -0500
commit     4ae691e8edc87d0e3cfb633bb91c328426be007b (patch)
tree       52079a593f54382ca13a2e741633eab1b6271893 /jstests/sharding/mr_and_agg_versioning.js
parent     a025d43f3ce2efc1fb1282a718f5d286fa0a4dc1 (diff)
download   mongo-4ae691e8edc87d0e3cfb633bb91c328426be007b.tar.gz
SERVER-22468 Format JS code with approved style in jstests/
Diffstat (limited to 'jstests/sharding/mr_and_agg_versioning.js')
-rw-r--r--  jstests/sharding/mr_and_agg_versioning.js  |  93
1 file changed, 47 insertions(+), 46 deletions(-)
diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js
index 0167a23554d..e2d1c6f7869 100644
--- a/jstests/sharding/mr_and_agg_versioning.js
+++ b/jstests/sharding/mr_and_agg_versioning.js
@@ -1,64 +1,65 @@
 // Test that map reduce and aggregate properly handle shard versioning.
 (function() {
-"use strict";
+    "use strict";
 
-var st = new ShardingTest({shards: 2, mongos: 3});
+    var st = new ShardingTest({shards: 2, mongos: 3});
 
-var dbName = jsTest.name();
-var collName = dbName + ".coll";
-var numDocs = 50000;
-var numKeys = 1000;
+    var dbName = jsTest.name();
+    var collName = dbName + ".coll";
+    var numDocs = 50000;
+    var numKeys = 1000;
 
-st.s.adminCommand({enableSharding: dbName});
-st.ensurePrimaryShard(dbName, 'shard0000');
-st.s.adminCommand({shardCollection: collName, key: {key: 1}});
+    st.s.adminCommand({enableSharding: dbName});
+    st.ensurePrimaryShard(dbName, 'shard0000');
+    st.s.adminCommand({shardCollection: collName, key: {key: 1}});
 
-// Load chunk data to the stale mongoses before moving a chunk
-var staleMongos1 = st.s1;
-var staleMongos2 = st.s2;
-staleMongos1.getCollection(collName).find().itcount();
-staleMongos2.getCollection(collName).find().itcount();
+    // Load chunk data to the stale mongoses before moving a chunk
+    var staleMongos1 = st.s1;
+    var staleMongos2 = st.s2;
+    staleMongos1.getCollection(collName).find().itcount();
+    staleMongos2.getCollection(collName).find().itcount();
 
-st.s.adminCommand({split: collName, middle: {key: numKeys/2}});
-st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: 'shard0001'});
+    st.s.adminCommand({split: collName, middle: {key: numKeys / 2}});
+    st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: 'shard0001'});
 
-var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
-for(var i = 0; i < numDocs; i++) {
-    bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
-}
-assert.writeOK(bulk.execute());
+    var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
+    for (var i = 0; i < numDocs; i++) {
+        bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
+    }
+    assert.writeOK(bulk.execute());
 
-// Add orphaned documents directly to the shards to ensure they are properly filtered out.
-st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
-st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
+    // Add orphaned documents directly to the shards to ensure they are properly filtered out.
+    st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
+    st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
 
-jsTest.log("Doing mapReduce");
+    jsTest.log("Doing mapReduce");
 
-var map = function(){ emit( this.key, this.value ); };
-var reduce = function(k, values){
-    var total = 0;
-    for(var i = 0; i < values.length; i++) {
-        total += values[i];
-    }
-    return total;
-};
-function validateOutput(output) {
-    assert.eq(output.length, numKeys, tojson(output));
-    for(var i = 0; i < output.length; i++) {
-        assert.eq(output[i]._id * (numDocs/numKeys), output[i].value, tojson(output));
+    var map = function() {
+        emit(this.key, this.value);
+    };
+    var reduce = function(k, values) {
+        var total = 0;
+        for (var i = 0; i < values.length; i++) {
+            total += values[i];
+        }
+        return total;
+    };
+    function validateOutput(output) {
+        assert.eq(output.length, numKeys, tojson(output));
+        for (var i = 0; i < output.length; i++) {
+            assert.eq(output[i]._id * (numDocs / numKeys), output[i].value, tojson(output));
+        }
     }
-}
-var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
-validateOutput(res.results);
+    var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
+    validateOutput(res.results);
 
-jsTest.log("Doing aggregation");
+    jsTest.log("Doing aggregation");
 
-res = staleMongos2.getCollection(collName).aggregate([
-    {'$group': {_id: "$key", value: {"$sum": "$value"}}},
-    {'$sort': {_id: 1}}]);
-validateOutput(res.toArray());
+    res = staleMongos2.getCollection(collName).aggregate(
+        [{'$group': {_id: "$key", value: {"$sum": "$value"}}}, {'$sort': {_id: 1}}]);
+    validateOutput(res.toArray());
 
-st.stop();
+    st.stop();
 })();
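
The assertion inside validateOutput() encodes a small piece of arithmetic: each key k in [0, numKeys) is inserted numDocs / numKeys times, always with value equal to k, so a correctly shard-versioned mapReduce or $group/$sum run should report k * (numDocs / numKeys) for every key, and the two orphaned documents written directly to the shards must not change that. The following is an illustrative, self-contained JavaScript sketch of that expectation (not part of the commit; it only reuses the constants from the test):

// Illustrative sketch (not from the repository): derive the per-key totals
// that validateOutput() expects, using the same constants as the test.
var numDocs = 50000;
var numKeys = 1000;

// Every key k in [0, numKeys) occurs numDocs / numKeys times, each time with value == k.
var totals = {};
for (var i = 0; i < numDocs; i++) {
    var key = i % numKeys;
    totals[key] = (totals[key] || 0) + key;
}

// Both the mapReduce output and the $group/$sum output should therefore map each
// key k to k * (numDocs / numKeys), i.e. k * 50 with these constants.
for (var k = 0; k < numKeys; k++) {
    if (totals[k] !== k * (numDocs / numKeys)) {
        throw new Error("unexpected total for key " + k + ": " + totals[k]);
    }
}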