34 files changed, 204 insertions(+), 204 deletions(-)
diff --git a/jstests/noPassthroughWithMongod/benchrun_substitution.js b/jstests/noPassthroughWithMongod/benchrun_substitution.js index 520613bcdc8..afc79b4cc49 100644 --- a/jstests/noPassthroughWithMongod/benchrun_substitution.js +++ b/jstests/noPassthroughWithMongod/benchrun_substitution.js @@ -61,7 +61,7 @@ function benchrun_sub_remove(use_write_command) { ops = [{op: "remove", ns: "test.benchrun_sub", query: {x: {"#RAND_INT": [0, 100]}}, writeCmd: use_write_command, - }] + }]; for (var i = 0; i < 100; ++i) { t.insert({x: i}); diff --git a/jstests/noPassthroughWithMongod/bulk_api_limits.js b/jstests/noPassthroughWithMongod/bulk_api_limits.js index ab3e468f373..b7bab04f6d1 100644 --- a/jstests/noPassthroughWithMongod/bulk_api_limits.js +++ b/jstests/noPassthroughWithMongod/bulk_api_limits.js @@ -26,7 +26,7 @@ var executeTestsUnordered = function() { var hugeString = ""; // Create it bigger than 16MB for(var i = 0; i < (1024 * 1100); i++) { - hugeString = hugeString + "1234567890123456" + hugeString = hugeString + "1234567890123456"; } // Set up the batch @@ -49,7 +49,7 @@ var executeTestsUnordered = function() { var hugeString = ""; // Create 4 MB strings to test splitting for(var i = 0; i < (1024 * 256); i++) { - hugeString = hugeString + "1234567890123456" + hugeString = hugeString + "1234567890123456"; } // Insert the string a couple of times, should force split into multiple batches @@ -61,12 +61,12 @@ var executeTestsUnordered = function() { batch.insert({a:5, b: hugeString}); batch.insert({a:6, b: hugeString}); var result = batch.execute(); - printjson(JSON.stringify(result)) + printjson(JSON.stringify(result)); // Basic properties check assert.eq(6, result.nInserted); assert.eq(false, result.hasWriteErrors()); -} +}; /******************************************************** * @@ -82,7 +82,7 @@ var executeTestsOrdered = function() { var hugeString = ""; // Create it bigger than 16MB for(var i = 0; i < (1024 * 1100); i++) { - hugeString = hugeString + "1234567890123456" + hugeString = hugeString + "1234567890123456"; } // Set up the batch @@ -105,7 +105,7 @@ var executeTestsOrdered = function() { var hugeString = ""; // Create 4 MB strings to test splitting for(var i = 0; i < (1024 * 256); i++) { - hugeString = hugeString + "1234567890123456" + hugeString = hugeString + "1234567890123456"; } // Insert the string a couple of times, should force split into multiple batches @@ -125,7 +125,7 @@ var executeTestsOrdered = function() { // Create unique index coll.dropIndexes(); coll.remove({}); -} +}; var buildVersion = parseInt(db.runCommand({buildInfo:1}).versionArray.slice(0, 3).join(""), 10); // Save the existing useWriteCommands function @@ -138,7 +138,7 @@ if(buildVersion >= 255) { // Force the use of useWriteCommands coll._mongo.useWriteCommands = function() { return true; - } + }; // Execute tests using legacy operations executeTestsUnordered(); @@ -148,7 +148,7 @@ if(buildVersion >= 255) { // Force the use of legacy commands coll._mongo.useWriteCommands = function() { return false; -} +}; // Execute tests using legacy operations executeTestsUnordered(); diff --git a/jstests/noPassthroughWithMongod/clonecollection.js b/jstests/noPassthroughWithMongod/clonecollection.js index 9e55ac16e5b..022ef58f595 100644 --- a/jstests/noPassthroughWithMongod/clonecollection.js +++ b/jstests/noPassthroughWithMongod/clonecollection.js @@ -30,8 +30,8 @@ if ( t.a.getIndexes().length != 2 ) { } assert.eq( 2, t.a.getIndexes().length, "expected index missing" ); // Verify index works -x = t.a.find( { i: 
50 } ).hint( { i: 1 } ).explain("executionStats") -printjson( x ) +x = t.a.find( { i: 50 } ).hint( { i: 1 } ).explain("executionStats"); +printjson( x ); assert.eq( 1, x.executionStats.nReturned , "verify 1" ); assert.eq( 1, t.a.find( { i: 50 } ).hint( { i: 1 } ).toArray().length, "match length did not match expected" ); diff --git a/jstests/noPassthroughWithMongod/connections_opened.js b/jstests/noPassthroughWithMongod/connections_opened.js index 371e3031b7b..e3f25b11fce 100644 --- a/jstests/noPassthroughWithMongod/connections_opened.js +++ b/jstests/noPassthroughWithMongod/connections_opened.js @@ -32,7 +32,7 @@ function createTemporaryConnection() { "assert.soon(function() {" + "try { conn = new Mongo(\"" + db.getMongo().host + "\"); return conn" + "} catch (x) {return false;}}, " + - "\"Timed out waiting for temporary connection to connect\", 30000, 5000);" + "\"Timed out waiting for temporary connection to connect\", 30000, 5000);"; // Poll the signal collection until it is told to terminate. pollString += "assert.soon(function() {" + "return conn.getDB('" + testDB + "').getCollection('" + signalCollection + "')" diff --git a/jstests/noPassthroughWithMongod/dup_bgindex.js b/jstests/noPassthroughWithMongod/dup_bgindex.js index fbfb8a8a322..3d36e90a4ae 100644 --- a/jstests/noPassthroughWithMongod/dup_bgindex.js +++ b/jstests/noPassthroughWithMongod/dup_bgindex.js @@ -5,7 +5,7 @@ t.drop(); for (var i=0; i<10000; i++) { t.insert( { name : "foo" , z : { a : 17 , b : 4}, i: i } ); } -var cmd = "db.duplIndexTest.ensureIndex( { i : 1 }, {background:true} );" +var cmd = "db.duplIndexTest.ensureIndex( { i : 1 }, {background:true} );"; var join1 = startParallelShell(cmd); var join2 = startParallelShell(cmd); t.ensureIndex( { i : 1 }, {background:true} ); diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js index 084b839cabc..7cd33b2d638 100644 --- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js +++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js @@ -1,46 +1,46 @@ // Axis aligned circles - hard-to-find precision errors possible with exact distances here -t = db.axisaligned +t = db.axisaligned; t.drop(); -scale = [ 1, 10, 1000, 10000 ] -bits = [ 2, 3, 4, 5, 6, 7, 8, 9 ] -radius = [ 0.0001, 0.001, 0.01, 0.1 ] -center = [ [ 5, 52 ], [ 6, 53 ], [ 7, 54 ], [ 8, 55 ], [ 9, 56 ] ] +scale = [ 1, 10, 1000, 10000 ]; +bits = [ 2, 3, 4, 5, 6, 7, 8, 9 ]; +radius = [ 0.0001, 0.001, 0.01, 0.1 ]; +center = [ [ 5, 52 ], [ 6, 53 ], [ 7, 54 ], [ 8, 55 ], [ 9, 56 ] ]; -bound = [] +bound = []; for( var j = 0; j < center.length; j++ ) bound.push( [-180, 180] ); // Scale all our values to test different sizes -radii = [] -centers = [] -bounds = [] +radii = []; +centers = []; +bounds = []; for( var s = 0; s < scale.length; s++ ){ for ( var i = 0; i < radius.length; i++ ) { - radii.push( radius[i] * scale[s] ) + radii.push( radius[i] * scale[s] ); } for ( var j = 0; j < center.length; j++ ) { - centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] ) - bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] ) + centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] ); + bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] ); } } -radius = radii -center = centers -bound = bounds +radius = radii; +center = centers; +bound = bounds; for ( var b = 0; b < bits.length; b++ ) { - printjson( radius ) - printjson( centers ) + printjson( radius ); + printjson( centers ); for ( var i = 0; i < radius.length; i++ ) 
{ for ( var j = 0; j < center.length; j++ ) { printjson( { center : center[j], radius : radius[i], bits : bits[b] } ); - t.drop() + t.drop(); // Make sure our numbers are precise enough for this test if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) ) @@ -65,7 +65,7 @@ for ( var b = 0; b < bits.length; b++ ) { // These are invalid cases, so we skip them. if (!res.ok) continue; - print( "DOING WITHIN QUERY ") + print( "DOING WITHIN QUERY "); r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } ); assert.eq( 5, r.count() ); @@ -74,18 +74,18 @@ for ( var b = 0; b < bits.length; b++ ) { a = r.toArray(); x = []; for ( k in a ) - x.push( a[k]["_id"] ) - x.sort() + x.push( a[k]["_id"] ); + x.sort(); assert.eq( [ 1, 2, 3, 4, 5 ], x ); - print( " DOING NEAR QUERY ") + print( " DOING NEAR QUERY "); //printjson( center[j] ) - r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } ) + r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } ); assert.eq( 5, r.count() ); - print( " DOING DIST QUERY ") + print( " DOING DIST QUERY "); - a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results + a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results; assert.eq( 5, a.length ); var distance = 0; diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js index bc7935fa7a6..d70a2bdb60a 100644 --- a/jstests/noPassthroughWithMongod/geo_mnypts.js +++ b/jstests/noPassthroughWithMongod/geo_mnypts.js @@ -1,53 +1,53 @@ // Test sanity of geo queries with a lot of points -var coll = db.testMnyPts -coll.drop() +var coll = db.testMnyPts; +coll.drop(); -var totalPts = 500 * 1000 +var totalPts = 500 * 1000; // Add points in a 100x100 grid var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < totalPts; i++ ){ - var ii = i % 10000 + var ii = i % 10000; bulk.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] }); } assert.writeOK(bulk.execute()); -coll.ensureIndex({ loc : "2d" }) +coll.ensureIndex({ loc : "2d" }); // Check that quarter of points in each quadrant for( var i = 0; i < 4; i++ ){ - var x = i % 2 - var y = Math.floor( i / 2 ) + var x = i % 2; + var y = Math.floor( i / 2 ); - var box = [[0, 0], [49, 49]] - box[0][0] += ( x == 1 ? 50 : 0 ) - box[1][0] += ( x == 1 ? 50 : 0 ) - box[0][1] += ( y == 1 ? 50 : 0 ) - box[1][1] += ( y == 1 ? 50 : 0 ) + var box = [[0, 0], [49, 49]]; + box[0][0] += ( x == 1 ? 50 : 0 ); + box[1][0] += ( x == 1 ? 50 : 0 ); + box[0][1] += ( y == 1 ? 50 : 0 ); + box[1][1] += ( y == 1 ? 50 : 0 ); - assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).count() ) - assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).itcount() ) + assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).count() ); + assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).itcount() ); } // Check that half of points in each half for( var i = 0; i < 2; i++ ){ - var box = [[0, 0], [49, 99]] - box[0][0] += ( i == 1 ? 50 : 0 ) - box[1][0] += ( i == 1 ? 50 : 0 ) + var box = [[0, 0], [49, 99]]; + box[0][0] += ( i == 1 ? 50 : 0 ); + box[1][0] += ( i == 1 ? 
50 : 0 ); - assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).count() ) - assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).itcount() ) + assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).count() ); + assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).itcount() ); } // Check that all but corner set of points in radius -var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ] +var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ]; -assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).count() ) -assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).itcount() ) +assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).count() ); +assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).itcount() ); diff --git a/jstests/noPassthroughWithMongod/geo_near_random2.js b/jstests/noPassthroughWithMongod/geo_near_random2.js index d7dbc9792a5..ac729b140e6 100644 --- a/jstests/noPassthroughWithMongod/geo_near_random2.js +++ b/jstests/noPassthroughWithMongod/geo_near_random2.js @@ -12,7 +12,7 @@ test.testPt(test.mkPt(), opts); test.testPt(test.mkPt(), opts); test.testPt(test.mkPt(), opts); -opts.sphere = 1 +opts.sphere = 1; test.testPt([0,0], opts); test.testPt(test.mkPt(0.8), opts); test.testPt(test.mkPt(0.8), opts); diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js index 097eb2a53fe..50bc5c29d68 100644 --- a/jstests/noPassthroughWithMongod/geo_polygon.js +++ b/jstests/noPassthroughWithMongod/geo_polygon.js @@ -13,7 +13,7 @@ assert.writeOK(bulk.execute()); var numTests = 31; for( var n = 0; n < numTests; n++ ){ - t.dropIndexes() + t.dropIndexes(); t.ensureIndex( { loc : "2d" }, { bits : 2 + n } ); assert.between( 9 - 2 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,1], [0,2]] }}} ).count() , 9, "Triangle Test", true); diff --git a/jstests/noPassthroughWithMongod/huge_multikey_index.js b/jstests/noPassthroughWithMongod/huge_multikey_index.js index c646fc8c127..14f110ff3bb 100644 --- a/jstests/noPassthroughWithMongod/huge_multikey_index.js +++ b/jstests/noPassthroughWithMongod/huge_multikey_index.js @@ -1,19 +1,19 @@ // https://jira.mongodb.org/browse/SERVER-4534 // Building an index in the forground on a field with a large array and few documents in // the collection used to open too many files and crash the server. 
-t = db.huge_multikey_index -t.drop() +t = db.huge_multikey_index; +t.drop(); function doit() { - arr = [] + arr = []; for (var i=0; i< 1000*1000;i++) arr.push(i); - t.insert({a:arr}) + t.insert({a:arr}); //t.ensureIndex({a:1}, {background:true}) // always worked - t.ensureIndex({a:1}) // used to fail server with out of fds error + t.ensureIndex({a:1}); // used to fail server with out of fds error } doit(); diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js index dd88e094cf2..25d9eed5ca8 100644 --- a/jstests/noPassthroughWithMongod/index_check10.js +++ b/jstests/noPassthroughWithMongod/index_check10.js @@ -66,7 +66,7 @@ function doIt() { } spec[ fields[ i ] ] = s; } else { - var vals = [] + var vals = []; for( var j = 0; j < Random.randInt( 15 ); ++j ) { vals.push( r() ); } diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js index 51840645fe1..3271d7245f5 100644 --- a/jstests/noPassthroughWithMongod/index_check9.js +++ b/jstests/noPassthroughWithMongod/index_check9.js @@ -21,7 +21,7 @@ n = Random.randInt( 5 ) + 1; var idx = sort(); var chars = "abcdefghijklmnopqrstuvwxyz"; -var alphas = [] +var alphas = []; for( var i = 0; i < n; ++i ) { alphas.push( Random.rand() > 0.5 ); } @@ -79,7 +79,7 @@ function check() { break; } case 1 /* $in */ : { - var vals = [] + var vals = []; var inLength = Random.randInt( 15 ); for( var j = 0; j < inLength; ++j ) { vals.push( r( alphas[ i ] ) ); diff --git a/jstests/noPassthroughWithMongod/index_hammer1.js b/jstests/noPassthroughWithMongod/index_hammer1.js index 675a2f8db7c..4617eb3fd98 100644 --- a/jstests/noPassthroughWithMongod/index_hammer1.js +++ b/jstests/noPassthroughWithMongod/index_hammer1.js @@ -7,18 +7,18 @@ for ( i=0; i<10000; i++ ) bulk.insert({ x: i, y: i }); assert.writeOK(bulk.execute()); -ops = [] +ops = []; for ( i=0; i<50; i++ ) - ops.push( { op : "find" , ns : t.getFullName() , query : { x : { $gt : 5000 } , y : { $gt : 5000 } } } ) + ops.push( { op : "find" , ns : t.getFullName() , query : { x : { $gt : 5000 } , y : { $gt : 5000 } } } ); -ops[10] = { op : "createIndex" , ns : t.getFullName() , key : { x : 1 } } -ops[20] = { op : "createIndex" , ns : t.getFullName() , key : { y : 1 } } -ops[30] = { op : "dropIndex" , ns : t.getFullName() , key : { x : 1 } } -ops[40] = { op : "dropIndex" , ns : t.getFullName() , key : { y : 1 } } +ops[10] = { op : "createIndex" , ns : t.getFullName() , key : { x : 1 } }; +ops[20] = { op : "createIndex" , ns : t.getFullName() , key : { y : 1 } }; +ops[30] = { op : "dropIndex" , ns : t.getFullName() , key : { x : 1 } }; +ops[40] = { op : "dropIndex" , ns : t.getFullName() , key : { y : 1 } }; -res = benchRun( { ops : ops , parallel : 5 , seconds : 20 , host : db.getMongo().host } ) -printjson( res ) +res = benchRun( { ops : ops , parallel : 5 , seconds : 20 , host : db.getMongo().host } ); +printjson( res ); assert.eq( 10000 , t.count() ); diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js index 24a04775746..2d3ddd9099e 100644 --- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js +++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js @@ -26,7 +26,7 @@ var checkOp = function(checkDB) { } } return false; -} +}; var dbname = 'bgIndexSec'; var collection = 'jstests_feh'; diff --git a/jstests/noPassthroughWithMongod/logpath.js b/jstests/noPassthroughWithMongod/logpath.js index 5f2b809d6df..1634495cd17 100644 --- 
a/jstests/noPassthroughWithMongod/logpath.js +++ b/jstests/noPassthroughWithMongod/logpath.js @@ -6,7 +6,7 @@ var token = "logpath_token"; var dbdir = MongoRunner.dataPath + name + "/"; // this will work under windows as well as linux var basedir = MongoRunner.dataPath + name + "files" + "/"; var logdir = basedir + "logdir/"; -var testdir = basedir + "testdir/" +var testdir = basedir + "testdir/"; var sfile = _isWindows() ? "NUL" : "/dev/null"; var logs = [token + "1", token + "2"]; diff --git a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js index 1ff024fcb03..857c18c297c 100644 --- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js +++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js @@ -24,8 +24,8 @@ for (var i = 0; i < 10; i++) { }
assert.writeOK(bulk.execute());
-function mapFn() { emit(this.idx, 1); };
-function reduceFn(key, values) { return Array.sum(values); };
+function mapFn() { emit(this.idx, 1); }
+function reduceFn(key, values) { return Array.sum(values); }
var out = coll.mapReduce(mapFn, reduceFn, { out: { replace: "mrOutput" } });
diff --git a/jstests/noPassthroughWithMongod/moveprimary-replset.js b/jstests/noPassthroughWithMongod/moveprimary-replset.js index 50fe756463b..2725585a08e 100755 --- a/jstests/noPassthroughWithMongod/moveprimary-replset.js +++ b/jstests/noPassthroughWithMongod/moveprimary-replset.js @@ -18,7 +18,7 @@ var shardingTestConfig = { config : 3, rs : { nodes : 3 }, other : { manualAddShard : true } -} +}; var shardingTest = new ShardingTest(shardingTestConfig); jsTest.log("Geting connections to the individual shards"); diff --git a/jstests/noPassthroughWithMongod/newcollection2.js b/jstests/noPassthroughWithMongod/newcollection2.js index da13f6eadf2..104eec7e897 100644 --- a/jstests/noPassthroughWithMongod/newcollection2.js +++ b/jstests/noPassthroughWithMongod/newcollection2.js @@ -14,4 +14,4 @@ assert( v.valid ); db.runCommand({ applyOps: [ { op: 'u', ns: 'a\0b' } ] }); var res = db["a\0a"].insert({}); -assert(res.hasWriteError(), "A write to collection a\0a succceeded") +assert(res.hasWriteError(), "A write to collection a\0a succceeded"); diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js index bdb4a9b4b66..45736c26a4e 100644 --- a/jstests/noPassthroughWithMongod/no_balance_collection.js +++ b/jstests/noPassthroughWithMongod/no_balance_collection.js @@ -1,6 +1,6 @@ // Tests whether the noBalance flag disables balancing for collections -var st = new ShardingTest({ shards : 2, mongos : 1, verbose : 1 }) +var st = new ShardingTest({ shards : 2, mongos : 1, verbose : 1 }); // First, test that shell helpers require an argument assert.throws(sh.disableBalancing, [], "sh.disableBalancing requires a collection"); @@ -8,70 +8,70 @@ assert.throws(sh.enableBalancing, [], "sh.enableBalancing requires a collection" // Initially stop balancing -st.stopBalancer() +st.stopBalancer(); -var shardAName = st._shardNames[0] -var shardBName = st._shardNames[1] +var shardAName = st._shardNames[0]; +var shardBName = st._shardNames[1]; -var collA = st.s.getCollection( jsTest.name() + ".collA" ) -var collB = st.s.getCollection( jsTest.name() + ".collB" ) +var collA = st.s.getCollection( jsTest.name() + ".collA" ); +var collB = st.s.getCollection( jsTest.name() + ".collB" ); // Shard two collections -st.shardColl( collA, { _id : 1 }, false ) -st.shardColl( collB, { _id : 1 }, false ) +st.shardColl( collA, { _id : 1 }, false ); +st.shardColl( collB, { _id : 1 }, false ); // Split into a lot of chunks so balancing can occur for( var i = 0; i < 10 - 1; i++ ){ // 10 chunks total - collA.getMongo().getDB("admin").runCommand({ split : collA + "", middle : { _id : i } }) - collA.getMongo().getDB("admin").runCommand({ split : collB + "", middle : { _id : i } }) + collA.getMongo().getDB("admin").runCommand({ split : collA + "", middle : { _id : i } }); + collA.getMongo().getDB("admin").runCommand({ split : collB + "", middle : { _id : i } }); } // Disable balancing on one collection -sh.disableBalancing( collB ) +sh.disableBalancing( collB ); -jsTest.log( "Balancing disabled on " + collB ) -printjson( collA.getDB().getSisterDB( "config" ).collections.find().toArray() ) +jsTest.log( "Balancing disabled on " + collB ); +printjson( collA.getDB().getSisterDB( "config" ).collections.find().toArray() ); -st.startBalancer() +st.startBalancer(); // Make sure collA gets balanced assert.soon( function(){ - var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collA ), shard : shardAName }).itcount() - var shardBChunks = st.s.getDB( "config" 
).chunks.find({ _id : sh._collRE( collA ), shard : shardBName }).itcount() - printjson({ shardA : shardAChunks, shardB : shardBChunks }) - return shardAChunks == shardBChunks -}, "" + collA + " chunks not balanced!", 5 * 60 * 1000 ) + var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collA ), shard : shardAName }).itcount(); + var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collA ), shard : shardBName }).itcount(); + printjson({ shardA : shardAChunks, shardB : shardBChunks }); + return shardAChunks == shardBChunks; +}, "" + collA + " chunks not balanced!", 5 * 60 * 1000 ); -jsTest.log( "Chunks for " + collA + " are balanced." ) +jsTest.log( "Chunks for " + collA + " are balanced." ); // Check that the collB chunks were not moved -var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardAName }).itcount() -var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardBName }).itcount() -printjson({ shardA : shardAChunks, shardB : shardBChunks }) -assert( shardAChunks == 0 || shardBChunks == 0 ) +var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardAName }).itcount(); +var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardBName }).itcount(); +printjson({ shardA : shardAChunks, shardB : shardBChunks }); +assert( shardAChunks == 0 || shardBChunks == 0 ); // Re-enable balancing for collB -sh.enableBalancing( collB ) +sh.enableBalancing( collB ); // Make sure that collB is now balanced assert.soon( function(){ - var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardAName }).itcount() - var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardBName }).itcount() - printjson({ shardA : shardAChunks, shardB : shardBChunks }) - return shardAChunks == shardBChunks -}, "" + collB + " chunks not balanced!", 5 * 60 * 1000 ) + var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardAName }).itcount(); + var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardBName }).itcount(); + printjson({ shardA : shardAChunks, shardB : shardBChunks }); + return shardAChunks == shardBChunks; +}, "" + collB + " chunks not balanced!", 5 * 60 * 1000 ); -jsTest.log( "Chunks for " + collB + " are balanced." ) +jsTest.log( "Chunks for " + collB + " are balanced." 
); // Re-disable balancing for collB -sh.disableBalancing( collB ) +sh.disableBalancing( collB ); // Wait for the balancer to fully finish the last migration and write the changelog // MUST set db var here, ugly but necessary -db = st.s0.getDB("config") -sh.waitForBalancer(true) +db = st.s0.getDB("config"); +sh.waitForBalancer(true); // Make sure auto-migrates on insert don't move chunks -var lastMigration = sh._lastMigration( collB ) +var lastMigration = sh._lastMigration( collB ); var bulk = collB.initializeUnorderedBulkOp(); for( var i = 0; i < 1000000; i++ ){ @@ -79,8 +79,8 @@ for( var i = 0; i < 1000000; i++ ){ } assert.writeOK(bulk.execute()); -printjson( lastMigration ) -printjson( sh._lastMigration( collB ) ) +printjson( lastMigration ); +printjson( sh._lastMigration( collB ) ); if(lastMigration == null) { assert.eq(null, sh._lastMigration(collB)); diff --git a/jstests/noPassthroughWithMongod/query_oplogreplay.js b/jstests/noPassthroughWithMongod/query_oplogreplay.js index 1ae9be9071f..67982025ebc 100644 --- a/jstests/noPassthroughWithMongod/query_oplogreplay.js +++ b/jstests/noPassthroughWithMongod/query_oplogreplay.js @@ -5,7 +5,7 @@ function test(t) { assert.commandWorked(t.getDB().createCollection(t.getName(), {capped: true, size: 16*1024})); function makeTS(i) { - return Timestamp(1000, i) + return Timestamp(1000, i); } for (var i = 0; i < 100; i++) { diff --git a/jstests/noPassthroughWithMongod/replReads.js b/jstests/noPassthroughWithMongod/replReads.js index 09b09277011..a5b60ffea9c 100644 --- a/jstests/noPassthroughWithMongod/replReads.js +++ b/jstests/noPassthroughWithMongod/replReads.js @@ -5,15 +5,15 @@ function testReadLoadBalancing(numReplicas) { var s = new ShardingTest({ shards: { rs0: { nodes: numReplicas }}, verbose: 2, other: { chunkSize: 1 }}); - s.adminCommand({enablesharding : "test"}) - s.config.settings.find().forEach(printjson) + s.adminCommand({enablesharding : "test"}); + s.config.settings.find().forEach(printjson); - s.adminCommand({shardcollection : "test.foo", key : {_id : 1}}) + s.adminCommand({shardcollection : "test.foo", key : {_id : 1}}); - s.getDB("test").foo.insert({a : 123}) + s.getDB("test").foo.insert({a : 123}); - primary = s._rs[0].test.liveNodes.master - secondaries = s._rs[0].test.liveNodes.slaves + primary = s._rs[0].test.liveNodes.master; + secondaries = s._rs[0].test.liveNodes.slaves; function rsStats() { return s.getDB("admin").runCommand("connPoolStats")["replicaSets"][s.rs0.name]; @@ -32,7 +32,7 @@ function testReadLoadBalancing(numReplicas) { assert.soon( function() { var x = rsStats().hosts; - printjson(x) + printjson(x); for ( var i=0; i<x.length; i++ ) if ( ! isMasterOrSecondary( x[i] ) ) return false; @@ -41,19 +41,19 @@ function testReadLoadBalancing(numReplicas) { ); for (var i = 0; i < secondaries.length; i++) { - assert.soon( function(){ return secondaries[i].getDB("test").foo.count() > 0; } ) - secondaries[i].getDB('test').setProfilingLevel(2) + assert.soon( function(){ return secondaries[i].getDB("test").foo.count() > 0; } ); + secondaries[i].getDB('test').setProfilingLevel(2); } // Primary may change with reconfig - primary.getDB('test').setProfilingLevel(2) + primary.getDB('test').setProfilingLevel(2); // Store references to the connection so they won't be garbage collected. 
var connections = []; for (var i = 0; i < secondaries.length * 10; i++) { - conn = new Mongo(s._mongos[0].host) - conn.setSlaveOk() - conn.getDB('test').foo.findOne() + conn = new Mongo(s._mongos[0].host); + conn.setSlaveOk(); + conn.getDB('test').foo.findOne(); connections.push(conn); } @@ -88,7 +88,7 @@ function testReadLoadBalancing(numReplicas) { var numOk = 0; // Now wait until the host disappears, since now we actually update our // replica sets via isMaster in mongos - if( x.hosts.length == c["members"].length - 1 ) return true + if( x.hosts.length == c["members"].length - 1 ) return true; /* for ( var i=0; i<x.hosts.length; i++ ) if ( x.hosts[i].hidden ) @@ -99,16 +99,16 @@ function testReadLoadBalancing(numReplicas) { ); // Secondaries may change here - secondaries = s._rs[0].test.liveNodes.slaves + secondaries = s._rs[0].test.liveNodes.slaves; for (var i = 0; i < secondaries.length * 10; i++) { - conn = new Mongo(s._mongos[0].host) - conn.setSlaveOk() - conn.getDB('test').foo.findOne() + conn = new Mongo(s._mongos[0].host); + conn.setSlaveOk(); + conn.getDB('test').foo.findOne(); connections.push(conn); } - var counts = [] + var counts = []; for (var i = 0; i < secondaries.length; i++) { var profileCollection = secondaries[i].getDB('test').system.profile; counts.push( profileCollection.find(profileCriteria).count() ); @@ -117,7 +117,7 @@ function testReadLoadBalancing(numReplicas) { counts = counts.sort(); assert.eq( 20 , Math.abs( counts[1] - counts[0] ), "counts wrong: " + tojson( counts ) ); - s.stop() + s.stop(); } //for (var i = 1; i < 10; i++) { diff --git a/jstests/noPassthroughWithMongod/replica_set_shard_version.js b/jstests/noPassthroughWithMongod/replica_set_shard_version.js index 400c49a3a4c..f853c74603e 100644 --- a/jstests/noPassthroughWithMongod/replica_set_shard_version.js +++ b/jstests/noPassthroughWithMongod/replica_set_shard_version.js @@ -1,47 +1,47 @@ // Tests whether a Replica Set in a mongos cluster can cause versioning problems -jsTestLog( "Starting sharded cluster..." ) +jsTestLog( "Starting sharded cluster..." ); -var st = new ShardingTest( { shards : 1, mongos : 2, other : { rs : true } } ) +var st = new ShardingTest( { shards : 1, mongos : 2, other : { rs : true } } ); // Uncomment to stop the balancer, since the balancer usually initializes the shard automatically // SERVER-4921 is otherwise hard to manifest // st.stopBalancer() -var mongosA = st.s0 -var mongosB = st.s1 -var shard = st.shard0 +var mongosA = st.s0; +var mongosB = st.s1; +var shard = st.shard0; coll = mongosA.getCollection( jsTestName() + ".coll" ); // Wait for primary and then initialize shard SERVER-5130 -st.rs0.getPrimary() -coll.findOne() +st.rs0.getPrimary(); +coll.findOne(); -var sadmin = shard.getDB( "admin" ) +var sadmin = shard.getDB( "admin" ); assert.throws(function() { sadmin.runCommand({ replSetStepDown : 3000, force : true }); }); st.rs0.getPrimary(); -mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : true }) +mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : true }); try{ // This _almost_ always fails, unless the new primary is already detected. If if fails, it should // mark the master as bad, so mongos will reload the replica set master next request // TODO: Can we just retry and succeed here? - coll.findOne() + coll.findOne(); } catch( e ){ - print( "This error is expected : " ) - printjson( e ) + print( "This error is expected : " ); + printjson( e ); } -jsTest.log( "Running query which should succeed..." 
) +jsTest.log( "Running query which should succeed..." ); // This should always succeed without throwing an error -coll.findOne() +coll.findOne(); -mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : false }) +mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : false }); // now check secondary @@ -56,4 +56,4 @@ print( "eliot: " + tojson( other.findOne() ) ); -st.stop() +st.stop(); diff --git a/jstests/noPassthroughWithMongod/server7428.js b/jstests/noPassthroughWithMongod/server7428.js index 1c6c2997804..d077e126d8a 100644 --- a/jstests/noPassthroughWithMongod/server7428.js +++ b/jstests/noPassthroughWithMongod/server7428.js @@ -17,6 +17,6 @@ var admin = toDb.getDB("admin"); admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}); admin.auth("foo","bar"); -admin.copyDatabase('test', 'test', fromDb.host) +admin.copyDatabase('test', 'test', fromDb.host); })(); diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js index e0cc2dc82f5..b112590d0a0 100644 --- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js +++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js @@ -5,7 +5,7 @@ var st = new ShardingTest({ shards : 2, mongos : 1, other : { mongosOptions : { noAutoSplit : "" }, shardOptions : { /* binVersion : "latest" */ } } }); -st.stopBalancer() +st.stopBalancer(); var mongos = st.s0; var coll = mongos.getCollection( "foo.bar" ); @@ -20,7 +20,7 @@ assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok ); jsTest.log( "Preparing large insert..." ); -var data1MB = "x" +var data1MB = "x"; while ( data1MB.length < 1024 * 1024 ) data1MB += data1MB; diff --git a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js index 30570c261c5..ba3142bbe5c 100644 --- a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js +++ b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js @@ -1,4 +1,4 @@ -var name = "sharding_rs_arb1" +var name = "sharding_rs_arb1"; var replTest = new ReplSetTest( { name : name , nodes : 3 } ); replTest.startSet(); var port = replTest.ports; @@ -16,12 +16,12 @@ var master = replTest.getPrimary(); var db = master.getDB( "test" ); printjson( rs.status() ); -var st = new ShardingTest({numShards: 0}) +var st = new ShardingTest({numShards: 0}); var admin = st.getDB('admin'); var res = admin.runCommand( { addshard : replTest.getURL() } ); -printjson( res ) -assert( res.ok , tojson(res) ) +printjson( res ); +assert( res.ok , tojson(res) ); st.stop(); replTest.stopSet(); diff --git a/jstests/noPassthroughWithMongod/temp_namespace.js b/jstests/noPassthroughWithMongod/temp_namespace.js index d69fd135786..f74ac73bcea 100644 --- a/jstests/noPassthroughWithMongod/temp_namespace.js +++ b/jstests/noPassthroughWithMongod/temp_namespace.js @@ -3,10 +3,10 @@ // This test requires persistence beacuase it assumes data will survive a restart. 
// @tags: [requires_persistence] -testname = 'temp_namespace_sw' +testname = 'temp_namespace_sw'; var conn = MongoRunner.runMongod({smallfiles: "", noprealloc: "", nopreallocj: ""}); -d = conn.getDB('test') +d = conn.getDB('test'); d.runCommand({create: testname+'temp1', temp: true}); d[testname+'temp1'].ensureIndex({x:1}); d.runCommand({create: testname+'temp2', temp: 1}); @@ -21,8 +21,8 @@ function countCollectionNames( theDB, regex ) { return z.match( regex ); } ).length; } -assert.eq(countCollectionNames( d, /temp\d$/) , 2) -assert.eq(countCollectionNames( d, /keep\d$/) , 4) +assert.eq(countCollectionNames( d, /temp\d$/) , 2); +assert.eq(countCollectionNames( d, /keep\d$/) , 4); MongoRunner.stopMongod(conn); conn = MongoRunner.runMongod({restart:true, @@ -31,7 +31,7 @@ conn = MongoRunner.runMongod({restart:true, smallfiles: "", noprealloc: "", nopreallocj: ""}); -d = conn.getDB('test') -assert.eq(countCollectionNames( d, /temp\d$/) , 0) -assert.eq(countCollectionNames( d, /keep\d$/) , 4) +d = conn.getDB('test'); +assert.eq(countCollectionNames( d, /temp\d$/) , 0); +assert.eq(countCollectionNames( d, /keep\d$/) , 4); MongoRunner.stopMongod(conn); diff --git a/jstests/noPassthroughWithMongod/testing_only_commands.js b/jstests/noPassthroughWithMongod/testing_only_commands.js index 523b64cc6a3..51d104bee48 100644 --- a/jstests/noPassthroughWithMongod/testing_only_commands.js +++ b/jstests/noPassthroughWithMongod/testing_only_commands.js @@ -16,7 +16,7 @@ var assertCmdNotFound = function(db, cmdName) { var res = db.runCommand(cmdName); assert.eq(0, res.ok); assert.eq(59, res.code, 'expected CommandNotFound(59) error code for test command ' + cmdName); -} +}; var assertCmdFound = function(db, cmdName) { var res = db.runCommand(cmdName); @@ -25,7 +25,7 @@ var assertCmdFound = function(db, cmdName) { 'test command ' + cmdName + ' should either have succeeded or ' + 'failed with an error code other than CommandNotFound(59)'); } -} +}; jsTest.setOption('enableTestCommands', false); diff --git a/jstests/noPassthroughWithMongod/ttl1.js b/jstests/noPassthroughWithMongod/ttl1.js index 9f2926b18cb..906f2be75cf 100644 --- a/jstests/noPassthroughWithMongod/ttl1.js +++ b/jstests/noPassthroughWithMongod/ttl1.js @@ -18,7 +18,7 @@ assertEntryMatches = function(array, regex) { } assert(found, "The regex: " + regex + " did not match any entries in the array: " + array.join('\n')); -} +}; // Part 1 var t = db.ttl1; t.drop(); @@ -29,12 +29,12 @@ for (i=0; i<24; i++) { var past = new Date(now - (3600 * 1000 * i)); t.insert({x: past, y: past, z: past}); } -t.insert( { a : 1 } ) //no x value -t.insert( { x: null } ) //non-date value -t.insert( { x : true } ) //non-date value -t.insert( { x : "yo" } ) //non-date value -t.insert( { x : 3 } ) //non-date value -t.insert( { x : /foo/ } ) //non-date value +t.insert( { a : 1 } ); //no x value +t.insert( { x: null } ); //non-date value +t.insert( { x : true } ); //non-date value +t.insert( { x : "yo" } ); //non-date value +t.insert( { x : 3 } ); //non-date value +t.insert( { x : /foo/ } ); //non-date value assert.eq( 30 , t.count() ); diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js index 431154cb033..4c16c7f6306 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl.js +++ b/jstests/noPassthroughWithMongod/ttl_repl.js @@ -39,7 +39,7 @@ rt.awaitReplication(); assert.eq( 24 , mastercol.count() , "docs not inserted on primary" ); assert.eq( 24 , slave1col.count() , "docs not inserted on secondary" ); -print("Initial 
Stats:") +print("Initial Stats:"); print("Master:"); printjson( mastercol.stats() ); print("Slave1:"); @@ -52,7 +52,7 @@ rt.awaitReplication(); sleep(70*1000); // TTL monitor runs every 60 seconds, so wait 70 -print("Stats after waiting for TTL Monitor:") +print("Stats after waiting for TTL Monitor:"); print("Master:"); printjson( mastercol.stats() ); print("Slave1:"); diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js index 68619dcb3b3..740f49b665d 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js +++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js @@ -51,7 +51,7 @@ var restartWithoutConfig = function() { print("Create a TTL collection and put doc in local.system.replset"); primeSystemReplset(); -print("make sure TTL doesn't work when member is started with system.replset doc") +print("make sure TTL doesn't work when member is started with system.replset doc"); restartWithConfig(); print("remove system.replset entry & restart"); diff --git a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js index bf9317aad95..a4319a15c13 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js +++ b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js @@ -29,7 +29,7 @@ assert.commandWorked(slave1col.runCommand("godinsert", {obj: {_id: new Date(), x: new Date( (new Date()).getTime() - 600000 ) } })); assert.eq(1, slave1col.count(), "missing inserted doc" ); -sleep(70*1000) //wait for 70seconds +sleep(70*1000); //wait for 70seconds assert.eq(1, slave1col.count(), "ttl deleted my doc!" ); // looking for these errors : "Assertion: 13312:replSet error : logOp() but not primary", @@ -38,7 +38,7 @@ assert.eq(1, slave1col.count(), "ttl deleted my doc!" ); var errorStrings = ["Assertion: 13312", "Assertion 17405"]; var foundError = false; var foundLine = ""; -var globalLogLines = assert.commandWorked(slave1col.getDB().adminCommand({getLog:"global"})).log +var globalLogLines = assert.commandWorked(slave1col.getDB().adminCommand({getLog:"global"})).log; for (i in globalLogLines) { var line = globalLogLines[i]; errorStrings.forEach(function(errorString) { diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js index c7146b63cb7..bb154cc8180 100644 --- a/jstests/noPassthroughWithMongod/ttl_sharded.js +++ b/jstests/noPassthroughWithMongod/ttl_sharded.js @@ -52,9 +52,9 @@ assert.eq( 6 , t.count() ); var shard0 = s._connections[0].getDB( dbname ); var shard1 = s._connections[1].getDB( dbname ); -print("Shard 0 coll stats:") +print("Shard 0 coll stats:"); printjson( shard0.getCollection( coll ).stats() ); -print("Shard 1 coll stats:") +print("Shard 1 coll stats:"); printjson( shard1.getCollection( coll ).stats() ); diff --git a/jstests/noPassthroughWithMongod/unix_socket1.js b/jstests/noPassthroughWithMongod/unix_socket1.js index 1b607063923..3cd64c3370e 100644 --- a/jstests/noPassthroughWithMongod/unix_socket1.js +++ b/jstests/noPassthroughWithMongod/unix_socket1.js @@ -10,16 +10,16 @@ doesLogMatchRegex = function(logArray, regex) { if ( ! 
_isWindows() ) { - hoststring = db.getMongo().host - index = hoststring.lastIndexOf(':') + hoststring = db.getMongo().host; + index = hoststring.lastIndexOf(':'); if (index == -1){ - port = '27017' + port = '27017'; } else { - port = hoststring.substr(index + 1) + port = hoststring.substr(index + 1); } - sock = new Mongo('/tmp/mongodb-' + port + '.sock') - sockdb = sock.getDB(db.getName()) + sock = new Mongo('/tmp/mongodb-' + port + '.sock'); + sockdb = sock.getDB(db.getName()); assert( sockdb.runCommand('ping').ok ); // Test unix socket path @@ -30,7 +30,7 @@ if ( ! _isWindows() ) { var conn = MongoRunner.runMongod({dbpath: dataPath, unixSocketPrefix: path}); var sock2 = new Mongo(path + "/mongodb-" + conn.port + ".sock"); - sockdb2 = sock2.getDB(db.getName()) + sockdb2 = sock2.getDB(db.getName()); assert( sockdb2.runCommand('ping').ok ); // Test the naming of the unix socket diff --git a/jstests/noPassthroughWithMongod/validate_command.js b/jstests/noPassthroughWithMongod/validate_command.js index 9f92bc0bed8..6e243070142 100644 --- a/jstests/noPassthroughWithMongod/validate_command.js +++ b/jstests/noPassthroughWithMongod/validate_command.js @@ -5,15 +5,15 @@ var count = 10; function testValidate(output) { - assert.eq(output.nrecords, count, "validate returned an invalid count") - assert.eq(output.nIndexes, 3, "validate returned an invalid number of indexes") + assert.eq(output.nrecords, count, "validate returned an invalid count"); + assert.eq(output.nIndexes, 3, "validate returned an invalid number of indexes"); - var indexNames = output.keysPerIndex + var indexNames = output.keysPerIndex; for (var i in indexNames) { if (!indexNames.hasOwnProperty(i)) continue; - assert.eq(indexNames[i], count, "validate returned an invalid number of indexes") + assert.eq(indexNames[i], count, "validate returned an invalid number of indexes"); } } @@ -27,15 +27,15 @@ t.insert({x:i}); } - t.ensureIndex({x:1}, {name: "forward"}) - t.ensureIndex({x:-1}, {name: "reverse"}) + t.ensureIndex({x:1}, {name: "forward"}); + t.ensureIndex({x:-1}, {name: "reverse"}); // TEST NORMAL VALIDATE - var output = t.validate() + var output = t.validate(); testValidate(output); // TEST FULL - var output = t.validate({full:true}) + var output = t.validate({full:true}); testValidate(output); }());
\ No newline at end of file