author     David Storch <david.storch@10gen.com>    2014-05-06 19:00:56 -0400
committer  David Storch <david.storch@10gen.com>    2014-05-06 19:00:56 -0400
commit     72380726608df663a85bee24d69a20ed2ca8287d
tree       735b7724ddc814fdf385d754bd7921975b5de491 /jstests/noPassthroughWithMongod
parent     3061ab54eb2cc642a279becfca0b93f5e17db117
Revert "SERVER-13741 Migrate remaining tests to use write commands"
This reverts commit 87dc3ae516e1d12a632dc604710661e38ed7b3dd.
Diffstat (limited to 'jstests/noPassthroughWithMongod')
45 files changed, 625 insertions, 298 deletions
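For context, this revert swaps between two shell idioms for checking writes. A minimal sketch of both, assuming a collection handle named coll (the loop bound and document shape are illustrative only, not taken from any one test):

    // Write-command era (added by SERVER-13741, removed by this revert):
    // the bulk API returns a result object that assert.writeOK() inspects.
    var bulk = coll.initializeUnorderedBulkOp();
    for (var i = 0; i < 1000; i++) {
        bulk.insert({ _id: i });
    }
    assert.writeOK(bulk.execute());

    // Legacy era (restored by this revert): writes are fire-and-forget, so the
    // test issues an explicit getLastError round trip to surface any failure.
    for (var i = 0; i < 1000; i++) {
        coll.insert({ _id: i });
    }
    assert.eq(null, coll.getDB().getLastError());

Both idioms appear throughout the diff below.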
diff --git a/jstests/noPassthroughWithMongod/autosplit_heuristics.js b/jstests/noPassthroughWithMongod/autosplit_heuristics.js
index ee1d28b5eda..33649617126 100644
--- a/jstests/noPassthroughWithMongod/autosplit_heuristics.js
+++ b/jstests/noPassthroughWithMongod/autosplit_heuristics.js
@@ -60,11 +60,15 @@ printjson({ chunkSizeBytes : chunkSizeBytes, totalInserts : totalInserts });
// Insert enough docs to trigger splits into all chunks
-var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < totalInserts; i++) {
-    bulk.insert({ _id : i % numChunks + (i / totalInserts) });
+    coll.insert({ _id : i % numChunks + (i / totalInserts) });
+    if ( i % ( numChunks * 1000 ) == 0 ) {
+        print( "Inserted " + i + " docs, " +
+               ( i * approxSize / numChunks ) + " bytes per chunk." );
+    }
}
-assert.writeOK(bulk.execute());
+
+assert.eq(null, coll.getDB().getLastError());
jsTest.log("Inserts completed...");
diff --git a/jstests/noPassthroughWithMongod/background.js b/jstests/noPassthroughWithMongod/background.js
index 188027a029b..d1d0047988a 100644
--- a/jstests/noPassthroughWithMongod/background.js
+++ b/jstests/noPassthroughWithMongod/background.js
@@ -7,41 +7,45 @@ t.drop();
var a = new Mongo( db.getMongo().host ).getDB( db.getName() );
-var bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 100000; i++ ) {
-    bulk.insert({ y: 'aaaaaaaaaaaa', i: i });
-    if( i % 10000 == 0 ) {
-        assert.writeOK(bulk.execute());
-        bulk = t.initializeUnorderedBulkOp();
-        print(i);
-    }
+    t.insert({y:'aaaaaaaaaaaa',i:i});
+    if( i % 10000 == 0 ) {
+        db.getLastError();
+        print(i);
+    }
}
+//db.getLastError();
+
// start bg indexing
a.system.indexes.insert({ns:"test.bg1", key:{i:1}, name:"i_1", background:true});
// add more data
-bulk = t.initializeUnorderedBulkOp();
+
for( var i = 0; i < 100000; i++ ) {
-    bulk.insert({ i: i });
-    if( i % 10000 == 0 ) {
-        printjson( db.currentOp() );
-        assert.writeOK(bulk.execute());
-        bulk = t.initializeUnorderedBulkOp();
-        print(i);
-    }
+    t.insert({i:i});
+    if( i % 10000 == 0 ) {
+        printjson( db.currentOp() );
+        db.getLastError();
+        print(i);
+    }
}
-assert.writeOK(bulk.execute());
+printjson( db.getLastErrorObj() );
printjson( db.currentOp() );
-for( var i = 0; i < 40; i++ ) {
-    if( db.currentOp().inprog.length == 0 )
-        break;
-    print("waiting");
-    sleep(1000);
+for( var i = 0; i < 40; i++ ) {
+    if( db.currentOp().inprog.length == 0 )
+        break;
+    print("waiting");
+    sleep(1000);
}
+printjson( a.getLastErrorObj() );
+
var idx = t.getIndexes();
+// print("indexes:");
+// printjson(idx);
+
assert( idx[1].key.i == 1 );
diff --git a/jstests/noPassthroughWithMongod/balance_repl.js b/jstests/noPassthroughWithMongod/balance_repl.js
index c5818ea19b0..610af04767b 100644
--- a/jstests/noPassthroughWithMongod/balance_repl.js
+++ b/jstests/noPassthroughWithMongod/balance_repl.js
@@ -5,11 +5,10 @@ s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true, _noslee
db = s.getDB( "test" );
-var bulk = db.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 2100; i++) {
-    bulk.insert({ _id: i, x: i });
+for ( i=0; i<2100; i++ ) {
+    db.foo.insert( { _id : i , x : i } );
}
-assert.writeOK(bulk.execute());
+db.getLastError();
serverName = s.getServerName( "test" )
other = s.config.shards.findOne( { _id : { $ne : serverName } } );
diff --git a/jstests/noPassthroughWithMongod/balance_tags1.js b/jstests/noPassthroughWithMongod/balance_tags1.js
index 1122380d7bc..945f0526b17 100644
--- a/jstests/noPassthroughWithMongod/balance_tags1.js
+++ b/jstests/noPassthroughWithMongod/balance_tags1.js
@@ -3,11 +3,10 @@ s = new ShardingTest( "balance_tags1" , 3 , 1 , 1 , { sync:true, chunksize : 1 ,
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false, _nosleep: true } } , true );
db = s.getDB( "test" );
-var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<21; i++ ) {
-    bulk.insert({ _id: i, x: i });
+    db.foo.insert( { _id : i , x : i } );
}
-assert.writeOK(bulk.execute());
+db.getLastError();
s.adminCommand( { enablesharding : "test" } )
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
diff --git a/jstests/noPassthroughWithMongod/balance_tags2.js b/jstests/noPassthroughWithMongod/balance_tags2.js
index 6d0ed6ea7ca..55ad3dc5a97 100644
--- a/jstests/noPassthroughWithMongod/balance_tags2.js
+++ b/jstests/noPassthroughWithMongod/balance_tags2.js
@@ -6,11 +6,10 @@ s = new ShardingTest( "balance_tags2" , 3 , 1 , 1 ,
s.config.settings.save({ _id: "balancer", _nosleep: true});
db = s.getDB( "test" );
-var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<21; i++ ) {
-    bulk.insert({ _id: i, x: i });
+    db.foo.insert( { _id : i , x : i } );
}
-assert.writeOK(bulk.execute());
+db.getLastError();
// enable sharding, shard, and stop balancer
sh.enableSharding("test");
diff --git a/jstests/noPassthroughWithMongod/btreedel.js b/jstests/noPassthroughWithMongod/btreedel.js
index 89af6aa7d5d..824eb3e63a6 100644
--- a/jstests/noPassthroughWithMongod/btreedel.js
+++ b/jstests/noPassthroughWithMongod/btreedel.js
@@ -3,11 +3,9 @@ t = db.foo;
t.remove({});
-var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 1000000; i++) {
- bulk.insert({ _id: i, x: 'a b' });
+ t.insert({ _id: i, x: 'a b' });
}
-assert.writeOK(bulk.execute());
print("1 insert done count: " + t.count());
diff --git a/jstests/noPassthroughWithMongod/bulk_shard_insert.js b/jstests/noPassthroughWithMongod/bulk_shard_insert.js
index 74810a7c668..d9cd25a635e 100644
--- a/jstests/noPassthroughWithMongod/bulk_shard_insert.js
+++ b/jstests/noPassthroughWithMongod/bulk_shard_insert.js
@@ -46,7 +46,12 @@ while( docsInserted < numDocs ){
        bulk.push({ hi : "there", at : docsInserted, i : i, x : x })
    }
-    assert.writeOK(coll.insert( bulk ));
+    coll.insert( bulk )
+    var result = db.getLastError( 1 )
+    if( result != null ){
+        printjson( result )
+        throw result
+    }
    if( Math.floor( docsInserted / 10000 ) != Math.floor( ( docsInserted + currBulkSize ) / 10000 ) ){
        print( "Inserted " + (docsInserted + currBulkSize) + " documents." )
diff --git a/jstests/noPassthroughWithMongod/capped4.js b/jstests/noPassthroughWithMongod/capped4.js
index be02e3c6ae1..27d138c16ce 100644
--- a/jstests/noPassthroughWithMongod/capped4.js
+++ b/jstests/noPassthroughWithMongod/capped4.js
@@ -22,8 +22,8 @@ assert( !d.hasNext(), "C" );
assert( t.find().sort( { i : 1 } ).hint( { i : 1 } ).toArray().length > 10, "D" );
assert( t.findOne( { i : i - 1 } ), "E" );
-var res = assert.writeError(t.remove( { i : i - 1 } ));
-assert( res.getWriteError().errmsg.indexOf( "capped" ) >= 0, "F" );
+t.remove( { i : i - 1 } );
+assert( db.getLastError().indexOf( "capped" ) >= 0, "F" );
assert( t.validate().valid, "G" );
diff --git a/jstests/noPassthroughWithMongod/fsync2.js b/jstests/noPassthroughWithMongod/fsync2.js
index 7080837a99b..bdf956f30f2 100644
--- a/jstests/noPassthroughWithMongod/fsync2.js
+++ b/jstests/noPassthroughWithMongod/fsync2.js
@@ -41,7 +41,9 @@ function doTest() {
    //assert.eq(1, m.getDB(db.getName()).fsync2.count());
    assert( m.getDB("admin").$cmd.sys.unlock.findOne().ok );
-
+
+    db.getLastError();
+
    assert.eq( 2, db.fsync2.count() );
}
diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
index 084b839cabc..0161eccb4ac 100644
--- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js
+++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
@@ -17,14 +17,15 @@ centers = []
bounds = []
for( var s = 0; s < scale.length; s++ ){
-    for ( var i = 0; i < radius.length; i++ ) {
-        radii.push( radius[i] * scale[s] )
-    }
+    for ( var i = 0; i < radius.length; i++ ) {
+        radii.push( radius[i] * scale[s] )
+    }
+
+    for ( var j = 0; j < center.length; j++ ) {
+        centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] )
+        bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] )
+    }
-    for ( var j = 0; j < center.length; j++ ) {
-        centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] )
-        bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] )
-    }
}
radius = radii
@@ -33,74 +34,75 @@ bound = bounds
for ( var b = 0; b < bits.length; b++ ) {
-    printjson( radius )
-    printjson( centers )
-
-    for ( var i = 0; i < radius.length; i++ ) {
-        for ( var j = 0; j < center.length; j++ ) {
-            printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
-
-            t.drop()
-
-            // Make sure our numbers are precise enough for this test
-            if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
-                continue;
-
-            t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
-            t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
-            t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
-            t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
-            t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
-            t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
-            t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
-            t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
-            t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
-
-            var res = t.ensureIndex({ loc: "2d" },
-                                    { max: bound[j][1],
-                                      min : bound[j][0],
-                                      bits : bits[b] });
-
-            // ensureIndex fails when this iteration inserted coordinates that are out of bounds.
-            // These are invalid cases, so we skip them.
-            if (!res.ok) continue;
-
-            print( "DOING WITHIN QUERY ")
-            r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
-
-            assert.eq( 5, r.count() );
-
-            // FIXME: surely code like this belongs in utils.js.
-            a = r.toArray();
-            x = [];
-            for ( k in a )
-                x.push( a[k]["_id"] )
-            x.sort()
-            assert.eq( [ 1, 2, 3, 4, 5 ], x );
-
-            print( " DOING NEAR QUERY ")
-            //printjson( center[j] )
-            r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } )
-            assert.eq( 5, r.count() );
-
-            print( " DOING DIST QUERY ")
-
-            a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results
-            assert.eq( 5, a.length );
-
-            var distance = 0;
-            for( var k = 0; k < a.length; k++ ){
-                assert.gte( a[k].dis, distance );
-
-            }
-
-            r = t.find({ loc: { $within: { $box: [ [ center[j][0] - radius[i],
-                                                     center[j][1] - radius[i] ],
-                                                   [ center[j][0] + radius[i],
-                                                     center[j][1] + radius[i] ]]}}},
-                       { _id: 1 } );
-            assert.eq( 9, r.count() );
-
-        }
-    }
-}
+
+
+    printjson( radius )
+    printjson( centers )
+
+    for ( var i = 0; i < radius.length; i++ ) {
+        for ( var j = 0; j < center.length; j++ ) {
+
+            printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
+
+            t.drop()
+
+            // Make sure our numbers are precise enough for this test
+            if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
+                continue;
+
+            t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
+            t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
+            t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
+            t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
+            t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
+            t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
+            t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
+            t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
+            t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
+
+            t.ensureIndex( { loc : "2d" }, { max : bound[j][1], min : bound[j][0], bits : bits[b] } );
+
+            if( db.getLastError() ) continue;
+
+            print( "DOING WITHIN QUERY ")
+            r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
+
+            //printjson( r.toArray() );
+
+            assert.eq( 5, r.count() );
+
+            // FIXME: surely code like this belongs in utils.js.
+            a = r.toArray();
+            x = [];
+            for ( k in a )
+                x.push( a[k]["_id"] )
+            x.sort()
+            assert.eq( [ 1, 2, 3, 4, 5 ], x );
+
+            print( " DOING NEAR QUERY ")
+            //printjson( center[j] )
+            r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } )
+            assert.eq( 5, r.count() );
+
+            print( " DOING DIST QUERY ")
+
+            a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results
+            assert.eq( 5, a.length );
+
+            //printjson( a );
+
+            var distance = 0;
+            for( var k = 0; k < a.length; k++ ){
+                //print( a[k].dis )
+                //print( distance )
+                assert.gte( a[k].dis, distance );
+                //printjson( a[k].obj )
+                //print( distance = a[k].dis );
+            }
+
+            r = t.find( { loc : { $within : { $box : [ [ center[j][0] - radius[i], center[j][1] - radius[i] ], [ center[j][0] + radius[i], center[j][1] + radius[i] ] ] } } }, { _id : 1 } )
+            assert.eq( 9, r.count() );
+
+        }
+    }
+}
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js
index bc7935fa7a6..ac4065158bf 100644
--- a/jstests/noPassthroughWithMongod/geo_mnypts.js
+++ b/jstests/noPassthroughWithMongod/geo_mnypts.js
@@ -6,12 +6,10 @@ coll.drop()
var totalPts = 500 * 1000
// Add points in a 100x100 grid
-var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < totalPts; i++ ){
    var ii = i % 10000
-    bulk.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] });
+    coll.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] })
}
-assert.writeOK(bulk.execute());
coll.ensureIndex({ loc : "2d" })
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 5b19b2b2080..25bf0269ccc 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -15,14 +15,12 @@ if ( bi.indexOf( "erh2" ) >= 0 ){
if ( shouldRun ) {
    num = 0;
-    var bulk = t.initializeUnorderedBulkOp();
    for ( x = -180; x < 180; x += .5 ){
        for ( y = -180; y < 180; y += .5 ){
            o = { _id : num++ , loc : [ x , y ] };
-            bulk.insert( o );
+            t.save( o );
        }
    }
-    assert.writeOK(bulk.execute());
    var numTests = 31;
    for( var n = 0; n < numTests; n++ ){
diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js
index 84e7342e051..79d0d93fc9b 100644
--- a/jstests/noPassthroughWithMongod/index_check10.js
+++ b/jstests/noPassthroughWithMongod/index_check10.js
@@ -104,30 +104,25 @@ function doIt( indexVersion ) {
        }
    }
-    var bulk = t.initializeUnorderedBulkOp();
    for( var i = 0; i < 10000; ++i ) {
-        bulk.insert( obj() );
+        t.save( obj() );
    }
-    assert.writeOK(bulk.execute());
    t.ensureIndex( idx , { v : indexVersion } );
    check();
-    bulk = t.initializeUnorderedBulkOp();
    for( var i = 0; i < 10000; ++i ) {
        if ( Random.rand() > 0.9 ) {
-            bulk.insert( obj() );
+            t.save( obj() );
        } else {
-            bulk.find( obj() ).remove(); // improve
+            t.remove( obj() ); // improve
        }
        if( Random.rand() > 0.999 ) {
            print( i );
-            assert.writeOK(bulk.execute());
            check();
-            bulk = t.initializeUnorderedBulkOp();
        }
    }
-    assert.writeOK(bulk.execute());
+
    check();
}
diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js
index fd1b1d5eaa1..8a50471940b 100644
--- a/jstests/noPassthroughWithMongod/index_check9.js
+++ b/jstests/noPassthroughWithMongod/index_check9.js
@@ -106,32 +106,25 @@ function check() {
    assert.eq( c3.length, count );
}
-var bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 10000; ++i ) {
-    bulk.insert( obj() );
+    t.save( obj() );
    if( Random.rand() > 0.999 ) {
        print( i );
-        assert.writeOK(bulk.execute());
        check();
-        bulk = t.initializeUnorderedBulkOp();
    }
}
-bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 100000; ++i ) {
    if ( Random.rand() > 0.9 ) {
-        bulk.insert( obj() );
+        t.save( obj() );
    } else {
-        bulk.find( obj() ).remove(); // improve
+        t.remove( obj() ); // improve
    }
    if( Random.rand() > 0.999 ) {
        print( i );
-        assert.writeOK(bulk.execute());
        check();
-        bulk = t.initializeUnorderedBulkOp();
    }
}
-assert.writeOK(bulk.execute());
check();
@@ -139,4 +132,4 @@ check();
for( var z = 0; z < 5; ++z ) {
    doIt();
-}
+}
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/index_hammer1.js b/jstests/noPassthroughWithMongod/index_hammer1.js
index 675a2f8db7c..87fd3820f66 100644
--- a/jstests/noPassthroughWithMongod/index_hammer1.js
+++ b/jstests/noPassthroughWithMongod/index_hammer1.js
@@ -2,10 +2,9 @@ t = db.index_hammer1;
t.drop();
-var bulk = t.initializeUnorderedBulkOp();
for ( i=0; i<10000; i++ )
-    bulk.insert({ x: i, y: i });
-assert.writeOK(bulk.execute());
+    t.insert( { x : i , y : i } );
+db.getLastError();
ops = []
diff --git a/jstests/noPassthroughWithMongod/index_killop.js b/jstests/noPassthroughWithMongod/index_killop.js
index f897f6a80de..b022e31f3b8 100644
--- a/jstests/noPassthroughWithMongod/index_killop.js
+++ b/jstests/noPassthroughWithMongod/index_killop.js
@@ -5,11 +5,10 @@ t.drop();
// Insert a large number of documents, enough to ensure that an index build on these documents will
// be interrupted before complete.
-var bulk = t.initializeUnorderedBulkOp();
for( i = 0; i < 1e6; ++i ) {
-    bulk.insert({ a: i });
+    t.save( { a:i } );
}
-assert.writeOK(bulk.execute());
+db.getLastError();
function debug( x ) {
    // printjson( x );
@@ -24,7 +23,7 @@ function getIndexBuildOpId() {
        // Identify the index build as an insert into the 'test.system.indexes'
        // namespace.  It is assumed that no other clients are concurrently
        // accessing the 'test' database.
-        if ( op.op == 'query' && 'createIndexes' in op.query ) {
+        if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) {
            debug( op.opid );
            indexBuildOpId = op.opid;
        }
@@ -34,8 +33,9 @@ function getIndexBuildOpId() {
/** Test that building an index with @param 'options' can be aborted using killop. */
function testAbortIndexBuild( options ) {
-    var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_killop; \
-                                        coll.createIndex({ a: 1 }, ' + tojson(options) + ');');
+
+    // Create an index asynchronously by using a new connection.
+    new Mongo( db.getMongo().host ).getCollection( t.toString() ).createIndex( { a:1 }, options );
    // When the index build starts, find its op id.
    assert.soon( function() { return ( opId = getIndexBuildOpId() ) != -1; } );
@@ -44,8 +44,6 @@ function testAbortIndexBuild( options ) {
    // Wait for the index build to stop.
    assert.soon( function() { return getIndexBuildOpId() == -1; } );
-    createIdx();
-
    // Check that no new index has been created.  This verifies that the index build was aborted
    // rather than successfully completed.
    assert.eq( [ { _id:1 } ], t.getIndexKeys() );
diff --git a/jstests/noPassthroughWithMongod/index_multi.js b/jstests/noPassthroughWithMongod/index_multi.js
index e4c38632bcf..ac259455d36 100644
--- a/jstests/noPassthroughWithMongod/index_multi.js
+++ b/jstests/noPassthroughWithMongod/index_multi.js
@@ -4,9 +4,8 @@ Random.setRandomSeed();
var coll = db.index_multi;
-var bulk = coll.initializeUnorderedBulkOp();
print("Populate the collection with random data");
-for (var i = 0; i < 1e4; i++) {
+for (var i=0;i<1e4; i++) {
    var doc = {"_id" : i};
    for (var j=0; j<100; j++) {
@@ -23,54 +22,52 @@ for (var i = 0; i < 1e4; i++) {
        }
    }
-    bulk.insert(doc);
+    if (i%1000 == 0) {
+        print("inserted "+i);
+    }
+
+    coll.insert(doc);
}
-assert.writeOK(bulk.execute());
// Array of all index specs
var specs = [];
var multikey = [];
-var indexJobs = [];
print("Create 3 triple indexes");
-for (var i = 90; i < 93; i++) {
+for (var i=90; i<93; i++) {
    var spec = {};
    spec["field"+i] = 1;
    spec["field"+(i+1)] = 1;
    spec["field"+(i+2)] = 1;
-    indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + "," +
-                                      "{ background: true });" +
-                                      "db.results.insert(db.runCommand({ getlasterror: 1 }));"));
+    startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});"
+                       +"db.results.insert(db.runCommand({getlasterror:1}));");
    specs.push(spec);
    multikey.push(i % 10 == 0 || (i+1) % 10 == 0 || (i+2) % 10 == 0);
}
print("Create 30 compound indexes");
-for (var i = 30; i < 90; i += 2) {
+for (var i=30; i<90; i+=2) {
    var spec = {};
    spec["field"+i] = 1;
    spec["field"+(i+1)] = 1;
-    indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " +
-                                      "{ background: true });" +
-                                      "db.results.insert(db.runCommand({ getlasterror: 1 }));"));
+    startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});"
+                       +"db.results.insert(db.runCommand({getlasterror:1}));");
    specs.push(spec);
    multikey.push(i % 10 == 0 || (i+1) % 10 == 0);
}
print("Create 30 indexes");
-for (var i = 0; i < 30; i++) {
+for (var i=0; i<30; i++) {
    var spec = {};
    spec["field"+i] = 1;
-    indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " +
-                                      "{ background: true });" +
-                                      "db.results.insert(db.runCommand({ getlasterror: 1 }));"));
+    startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});"
+                       +"db.results.insert(db.runCommand({getlasterror:1}));");
    specs.push(spec);
    multikey.push(i % 10 == 0);
}
print("Do some sets and unsets");
-bulk = coll.initializeUnorderedBulkOp();
-for (i = 0; i < 1e4; i++) {
+for (i=0; i<1e4; i++) {
    var criteria = {_id: Random.randInt(1e5)};
    var mod = {};
    if (Random.rand() < .5) {
@@ -82,23 +79,31 @@ for (i = 0; i < 1e4; i++) {
        mod['$unset']['field'+Random.randInt(100)] = true;
    }
-    bulk.find(criteria).update(mod);
+    coll.update(criteria, mod);
}
-assert.writeOK(bulk.execute());
-
-indexJobs.forEach(function(join) {
-    join();
-});
printjson(db.results.find().toArray());
printjson(coll.getIndexes());
print("Make sure we end up with 64 indexes");
-for (var i in specs) {
-    print("trying to hint on "+tojson(specs[i]));
-    var explain = coll.find().hint(specs[i]).explain();
-    assert.eq(multikey[i], explain.isMultiKey, tojson(explain));
-}
+assert.soon(
+    function() {
+        for (var i in specs) {
+            print("trying to hint on "+tojson(specs[i]));
+            try {
+                var explain = coll.find().hint(specs[i]).explain();
+                printjson(explain);
+                assert.eq(multikey[i], explain.isMultiKey);
+            } catch (x) {
+                print(x+", hinting on "+tojson(specs[i]));
+                return false;
+            }
+        }
+        return true;
+    },
+    "wait for all indexes to be built",
+    120000
+);
print("SUCCESS!");
diff --git a/jstests/noPassthroughWithMongod/index_retry.js b/jstests/noPassthroughWithMongod/index_retry.js
index 7c79e75af5f..d0465476144 100644
--- a/jstests/noPassthroughWithMongod/index_retry.js
+++ b/jstests/noPassthroughWithMongod/index_retry.js
@@ -12,14 +12,13 @@ t.drop();
// Insert a large number of documents, enough to ensure that an index build on these documents can
// be interrupted before complete.
-var bulk = t.initializeUnorderedBulkOp();
for (i = 0; i < 5e5; ++i) {
-    bulk.insert({ a: i });
+    t.save( { a:i } );
    if (i % 10000 == 0) {
        print("i: " + i);
    }
}
-assert.writeOK(bulk.execute());
+test.getLastError();
function debug(x) {
    printjson(x);
@@ -37,15 +36,14 @@ function indexBuildInProgress() {
        // Identify the index build as an insert into the 'test.system.indexes'
        // namespace.  It is assumed that no other clients are concurrently
        // accessing the 'test' database.
-        if ( op.op == 'query' && 'createIndexes' in op.query ) {
+        if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) {
            debug(op.opid);
-            var idxSpec = op.query.indexes[0];
            // SERVER-4295 Make sure the index details are there
            // we can't assert these things, since there is a race in reporting
            // but we won't count if they aren't
-            if ( "a_1" == idxSpec.name &&
-                 1 == idxSpec.key.a &&
-                 idxSpec.background ) {
+            if ( "a_1" == op.insert.name &&
+                 1 == op.insert.key.a &&
+                 op.insert.background ) {
                indexBuildOpId = op.opid;
            }
        }
@@ -55,9 +53,10 @@
}
function abortDuringIndexBuild(options) {
-    var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_retry; \
-                                        coll.createIndex({ a: 1 }, { background: true });',
-                                       ports[0]);
+
+    // Create an index asynchronously by using a new connection.
+    new Mongo(test.getMongo().host).getCollection(t.toString()).createIndex(
+        { a:1 }, { background:true } );
    // Wait for the index build to start.
    var times = 0;
@@ -69,7 +68,6 @@
    print("killing the mongod");
    stopMongod(ports[0], /* signal */ 9);
-    createIdx();
}
abortDuringIndexBuild();
diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js
index db4493df017..9e754b747ef 100644
--- a/jstests/noPassthroughWithMongod/indexbg_drop.js
+++ b/jstests/noPassthroughWithMongod/indexbg_drop.js
@@ -42,11 +42,9 @@ var dc = {dropIndexes: collection, index: "i_1"};
// set up collections
masterDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
-var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
for( i = 0; i < size; ++i ) {
-    bulk.insert({ i: Random.rand() });
+    masterDB.getCollection(collection).save( {i: Random.rand()} );
}
-assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing for test of: " + tojson(dc));
// Add another index to be sure the drop command works.
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
index f6f1d426161..09c75056ca2 100644
--- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js
+++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
@@ -66,11 +66,9 @@ for (var idx = 0; idx < dropAction.length; idx++) {
    // set up collections
    masterDB.dropDatabase();
    jsTest.log("creating test data " + size + " documents");
-    var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
    for(var i = 0; i < size; ++i ) {
-        bulk.insert({ i: i });
+        masterDB.getCollection(collection).save( {i:i} );
    }
-    assert.writeOK(bulk.execute());
    jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc));
    masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} );
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
index a3b2c26f59e..01d3b23a07c 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
@@ -38,11 +38,9 @@ var secondDB = second.getDB('bgIndexSec');
var size = 500000;
jsTest.log("creating test data " + size + " documents");
-var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
for(var i = 0; i < size; ++i) {
-    bulk.insert({ i: i });
+    masterDB.jstests_bgsec.save( {i:i} );
}
-assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing");
masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
index 1ea53e6aac2..38cced11bb9 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
@@ -64,11 +64,9 @@
    var size = 500000;
    jsTest.log("creating test data " + size + " documents");
-    var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
    for( i = 0; i < size; ++i ) {
-        bulk.insert({ i : i });
+        masterDB.jstests_bgsec.save( {i:i} );
    }
-    assert.writeOK(bulk.execute());
    jsTest.log("Starting background indexing");
    masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
@@ -84,7 +82,8 @@
    // Make sure a journal flush for the oplog occurs, by doing a local journaled write to the
    // secondary
-    assert.writeOK(second.getDB('local').foo.insert({ a: 1 }, { writeConcern: { j: true }}));
+    second.getDB('local').foo.insert({a:1});
+    second.getDB('local').runCommand( { getLastError: 1, j: true } );
    // restart secondary and reconnect
    jsTest.log("Restarting secondary");
diff --git a/jstests/noPassthroughWithMongod/large_chunk.js b/jstests/noPassthroughWithMongod/large_chunk.js
index 12f0c48fdcd..2e648084947 100644
--- a/jstests/noPassthroughWithMongod/large_chunk.js
+++ b/jstests/noPassthroughWithMongod/large_chunk.js
@@ -20,12 +20,11 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 400 * 1024 * 1024 ) ){
-    bulk.insert({ _id: num++, s: bigString });
+    db.foo.insert( { _id : num++ , s : bigString } );
    inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+db.getLastError();
// Turn on sharding on the 'test.foo' collection and generate a large chunk
s.adminCommand( { enablesharding : "test" } );
diff --git a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
index 1ff024fcb03..fd7ec8c68e7 100644
--- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
+++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
@@ -15,14 +15,14 @@ var coll = db.getCollection("mrInput");
//
var expectedOutColl = [];
-var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10; i++) {
for (var j = 1; j < 50; j++) {
- bulk.insert({ idx: i, j: j });
+ coll.insert({idx: i, j: j});
}
expectedOutColl.push ({ _id: i, value: j - 1 });
}
-assert.writeOK(bulk.execute());
+
+assertGLEOK(db.getLastErrorObj());
function mapFn() { emit(this.idx, 1); };
function reduceFn(key, values) { return Array.sum(values); };
@@ -41,4 +41,4 @@ assert.eq(out.counts.emit, 490, "emit count is wrong");
// changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else
// must have changed with when intermediate reduces occur (see mongo::mr::State::checkSize).
//
-assert.eq(out.counts.reduce, 14, "reduce count is wrong");
+assert.eq(out.counts.reduce, 14, "reduce count is wrong");
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/mr_shard_version.js b/jstests/noPassthroughWithMongod/mr_shard_version.js
index c011e7700e9..47fd99ea30e 100644
--- a/jstests/noPassthroughWithMongod/mr_shard_version.js
+++ b/jstests/noPassthroughWithMongod/mr_shard_version.js
@@ -11,12 +11,11 @@ var numDocs = 500000
var numKeys = 1000
var numTests = 3
-var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < numDocs; i++ ){
-    bulk.insert({ _id: i, key: "" + ( i % numKeys ), value: i % numKeys });
+    coll.insert({ _id : i, key : "" + ( i % numKeys ), value : i % numKeys })
}
-assert.writeOK(bulk.execute());
+assert.eq( null, coll.getDB().getLastError() )
assert.eq( numDocs, coll.find().itcount() )
var halfId = coll.find().itcount() / 2
diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js
index 6f6c196510d..7aa55564640 100644
--- a/jstests/noPassthroughWithMongod/no_balance_collection.js
+++ b/jstests/noPassthroughWithMongod/no_balance_collection.js
@@ -68,11 +68,9 @@ sh.waitForBalancer(true)
// Make sure auto-migrates on insert don't move chunks
var lastMigration = sh._lastMigration( collB )
-var bulk = collB.initializeUnorderedBulkOp();
for( var i = 0; i < 1000000; i++ ){
-    bulk.insert({ _id: i, hello: "world" });
+    collB.insert({ _id : i, hello : "world" })
}
-assert.writeOK(bulk.execute());
printjson( lastMigration )
printjson( sh._lastMigration( collB ) )
@@ -80,4 +78,4 @@ printjson( sh._lastMigration( collB ) )
if( lastMigration == null ) assert.eq( null, sh._lastMigration( collB ) )
else assert.eq( lastMigration.time, sh._lastMigration( collB ).time )
-st.stop()
+st.stop()
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/opcounters_legacy.js b/jstests/noPassthroughWithMongod/opcounters_legacy.js
new file mode 100644
index 00000000000..7db520a109f
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/opcounters_legacy.js
@@ -0,0 +1,173 @@
+// Test that opcounters get incremented properly.
+
+// Remember the global 'db' var
+var lastDB = db;
+var mongo = new Mongo(db.getMongo().host);
+mongo.writeMode = function() { return "legacy"; }
+db = mongo.getDB(db.toString());
+
+var t = db.opcounters;
+var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg);
+var opCounters;
+
+//
+// 1. Insert.
+//
+// - mongod, single insert:
+//     counted as 1 op if successful, else 0
+// - mongod, bulk insert of N with continueOnError=true:
+//     counted as N ops, regardless of errors
+// - mongod, bulk insert of N with continueOnError=false:
+//     counted as K ops, where K is number of docs successfully inserted
+//
+// - mongos
+//     count ops attempted like insert commands
+//
+
+t.drop();
+
+// Single insert, no error.
+opCounters = db.serverStatus().opcounters;
+t.insert({_id:0});
+assert(!db.getLastError());
+assert.eq(opCounters.insert + 1, db.serverStatus().opcounters.insert);
+
+// Bulk insert, no error.
+opCounters = db.serverStatus().opcounters;
+t.insert([{_id:1},{_id:2}])
+assert(!db.getLastError());
+assert.eq(opCounters.insert + 2, db.serverStatus().opcounters.insert);
+
+// Single insert, with error.
+opCounters = db.serverStatus().opcounters;
+t.insert({_id:0})
+print( db.getLastError() )
+assert(db.getLastError());
+assert.eq(opCounters.insert + (isMongos ? 1 : 0), db.serverStatus().opcounters.insert);
+
+// Bulk insert, with error, continueOnError=false.
+opCounters = db.serverStatus().opcounters;
+t.insert([{_id:3},{_id:3},{_id:4}])
+assert(db.getLastError());
+assert.eq(opCounters.insert + (isMongos ? 2 : 1), db.serverStatus().opcounters.insert);
+
+// Bulk insert, with error, continueOnError=true.
+var continueOnErrorFlag = 1;
+opCounters = db.serverStatus().opcounters;
+t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag)
+assert(db.getLastError());
+assert.eq(opCounters.insert + 3, db.serverStatus().opcounters.insert);
+
+//
+// 2. Update.
+//
+// - counted as 1 op, regardless of errors
+//
+
+t.drop();
+t.insert({_id:0});
+
+// Update, no error.
+opCounters = db.serverStatus().opcounters;
+t.update({_id:0}, {$set:{a:1}});
+assert(!db.getLastError());
+assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update);
+
+// Update, with error.
+opCounters = db.serverStatus().opcounters;
+t.update({_id:0}, {$set:{_id:1}});
+assert(db.getLastError());
+assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update);
+
+//
+// 3. Delete.
+//
+// - counted as 1 op, regardless of errors
+//
+
+t.drop();
+t.insert([{_id:0},{_id:1}]);
+
+// Delete, no error.
+opCounters = db.serverStatus().opcounters;
+t.remove({_id:0});
+assert(!db.getLastError());
+assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete);
+
+// Delete, with error.
+opCounters = db.serverStatus().opcounters;
+t.remove({_id:{$invalidOp:1}});
+assert(db.getLastError());
+assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete);
+
+//
+// 4. Query.
+//
+// - mongod: counted as 1 op, regardless of errors
+// - mongos: counted as 1 op if successful, else 0
+//
+
+t.drop();
+t.insert({_id:0});
+
+// Query, no error.
+opCounters = db.serverStatus().opcounters;
+t.findOne();
+assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query);
+
+// Query, with error.
+opCounters = db.serverStatus().opcounters;
+assert.throws(function() { t.findOne({_id:{$invalidOp:1}}) });
+assert.eq(opCounters.query + (isMongos ? 0 : 1), db.serverStatus().opcounters.query);
+
+//
+// 5. Getmore.
+//
+// - counted as 1 op per getmore issued, regardless of errors
+//
+
+t.drop();
+t.insert([{_id:0},{_id:1},{_id:2}]);
+
+// Getmore, no error.
+opCounters = db.serverStatus().opcounters;
+t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore
+assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query);
+assert.eq(opCounters.getmore + 1, db.serverStatus().opcounters.getmore);
+
+// Getmore, with error (TODO implement when SERVER-5813 is resolved).
+
+//
+// 6. Command.
+//
+// - unrecognized commands not counted
+// - recognized commands counted as 1 op, regardless of errors
+// - some (recognized) commands can suppress command counting (i.e. aren't counted as commands)
+//
+
+t.drop();
+t.insert({_id:0})
+
+// Command, recognized, no error.
+opCounters = db.serverStatus().opcounters;
+assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
+
+// Command, recognized, with error.
+opCounters = db.serverStatus().opcounters;
+res = t.runCommand("count", {query:{$invalidOp:1}});
+assert.eq(0, res.ok);
+assert.eq(opCounters.command + 2,
+          db.serverStatus().opcounters.command); // "serverStatus", "count" counted
+
+// Command, unrecognized.
+opCounters = db.serverStatus().opcounters;
+res = t.runCommand("command that doesn't exist");
+assert.eq(0, res.ok);
+//assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
+// TODO Replace below with above when SERVER-9038 is resolved (mongos counts unrecognized commands)
+assert.eq(opCounters.command + (isMongos ? 2 : 1), db.serverStatus().opcounters.command);
+
+// Command, recognized, counting suppressed (TODO implement when SERVER-9038 is resolved).
+
+// Restore 'db' var
+db = lastDB;
diff --git a/jstests/noPassthroughWithMongod/opcounters_write_cmd.js b/jstests/noPassthroughWithMongod/opcounters_write_cmd.js
new file mode 100644
index 00000000000..47a1bc63515
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/opcounters_write_cmd.js
@@ -0,0 +1,166 @@
+// Test that opcounters get incremented properly.
+
+var mongo = new Mongo(db.getMongo().host);
+mongo.forceWriteMode("commands");
+var newdb = mongo.getDB(db.toString());
+
+var t = newdb.opcounters;
+var isMongos = ("isdbgrid" == newdb.runCommand("ismaster").msg);
+var opCounters;
+var res;
+
+assert(t.getDB().getMongo().useWriteCommands(), "test is not running with write commands")
+
+//
+// Count ops attempted in write commands in mongod and mongos
+//
+
+//
+// 1. Insert.
+//
+// - unordered insert of N:
+//     counted as N ops, regardless of errors
+// - ordered insert of N:
+//     counted as K + 1 ops, where K is number of docs successfully inserted,
+//     adding the failed attempt
+//
+
+t.drop();
+
+// Single insert, no error.
+opCounters = newdb.serverStatus().opcounters;
+res = t.insert({_id:0});
+assert.writeOK(res);
+assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert);
+
+// Bulk insert, no error.
+opCounters = newdb.serverStatus().opcounters;
+res = t.insert([{_id:1},{_id:2}])
+assert.writeOK(res);
+assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert);
+
+// Single insert, with error.
+opCounters = newdb.serverStatus().opcounters;
+res = t.insert({_id:0})
+assert.writeError(res);
+assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert);
+
+// Bulk insert, with error, ordered.
+opCounters = newdb.serverStatus().opcounters;
+res = t.insert([{_id:3},{_id:3},{_id:4}])
+assert.writeError(res);
+assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert);
+
+// Bulk insert, with error, unordered.
+var continueOnErrorFlag = 1;
+opCounters = newdb.serverStatus().opcounters;
+res = t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag)
+assert.writeError(res);
+assert.eq(opCounters.insert + 3, newdb.serverStatus().opcounters.insert);
+
+//
+// 2. Update.
+//
+
+t.drop();
+t.insert({_id:0});
+
+// Update, no error.
+opCounters = newdb.serverStatus().opcounters;
+res = t.update({_id:0}, {$set:{a:1}});
+assert.writeOK(res);
+assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update);
+
+// Update, with error.
+opCounters = newdb.serverStatus().opcounters;
+res = t.update({_id:0}, {$set:{_id:1}});
+assert.writeError(res);
+assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update);
+
+//
+// 3. Delete.
+//
+
+t.drop();
+t.insert([{_id:0},{_id:1}]);
+
+// Delete, no error.
+opCounters = newdb.serverStatus().opcounters;
+res = t.remove({_id:0});
+assert.writeOK(res);
+assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete);
+
+// Delete, with error.
+opCounters = newdb.serverStatus().opcounters;
+res = t.remove({_id:{$invalidOp:1}});
+assert.writeError(res);
+assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete);
+
+//
+// 4. Query.
+//
+// - mongod: counted as 1 op, regardless of errors
+// - mongos: counted as 1 op if successful, else 0
+//
+
+t.drop();
+t.insert({_id:0});
+
+// Query, no error.
+opCounters = newdb.serverStatus().opcounters;
+t.findOne();
+assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query);
+
+// Query, with error.
+opCounters = newdb.serverStatus().opcounters;
+assert.throws(function() { t.findOne({_id:{$invalidOp:1}}) });
+assert.eq(opCounters.query + (isMongos ? 0 : 1), newdb.serverStatus().opcounters.query);
+
+//
+// 5. Getmore.
+//
+// - counted as 1 op per getmore issued, regardless of errors
+//
+
+t.drop();
+t.insert([{_id:0},{_id:1},{_id:2}]);
+
+// Getmore, no error.
+opCounters = newdb.serverStatus().opcounters;
+t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore
+assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query);
+assert.eq(opCounters.getmore + 1, newdb.serverStatus().opcounters.getmore);
+
+// Getmore, with error (TODO implement when SERVER-5813 is resolved).
+
+//
+// 6. Command.
+//
+// - unrecognized commands not counted
+// - recognized commands counted as 1 op, regardless of errors
+// - some (recognized) commands can suppress command counting (i.e. aren't counted as commands)
+//
+
+t.drop();
+t.insert({_id:0})
+
+// Command, recognized, no error.
+opCounters = newdb.serverStatus().opcounters;
+assert.eq(opCounters.command + 1, newdb.serverStatus().opcounters.command); // "serverStatus" counted
+
+// Command, recognized, with error.
+opCounters = newdb.serverStatus().opcounters;
+res = t.runCommand("count", {query:{$invalidOp:1}});
+assert.eq(0, res.ok);
+assert.eq(opCounters.command + 2,
+          newdb.serverStatus().opcounters.command); // "serverStatus", "count" counted
+
+// Command, unrecognized.
+opCounters = newdb.serverStatus().opcounters;
+res = t.runCommand("command that doesn't exist");
+assert.eq(0, res.ok);
+//assert.eq(opCounters.command + 1, newdb.serverStatus().opcounters.command); // "serverStatus" counted
+// TODO Replace below with above when SERVER-9038 is resolved (mongos counts unrecognized commands)
+assert.eq(opCounters.command + (isMongos ? 2 : 1), newdb.serverStatus().opcounters.command);
+
+// Command, recognized, counting suppressed (TODO implement when SERVER-9038 is resolved).
diff --git a/jstests/noPassthroughWithMongod/parallel_collection_scan.js b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
index 44e5d361e45..d745f088376 100644
--- a/jstests/noPassthroughWithMongod/parallel_collection_scan.js
+++ b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
@@ -6,11 +6,10 @@ s = "";
while ( s.length < 10000 )
    s += ".";
-var bulk = t.initializeUnorderedBulkOp();
for ( i = 0; i < 8000; i++ ) {
-    bulk.insert({ x: i, s: s });
+    t.insert( { x : i, s : s } );
}
-assert.writeOK(bulk.execute());
+
function iterateSliced() {
    var res = t.runCommand( "parallelCollectionScan", { numCursors : 3 } );
diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js
index 7492e36c5b9..e7dfe9bfff1 100644
--- a/jstests/noPassthroughWithMongod/remove9.js
+++ b/jstests/noPassthroughWithMongod/remove9.js
@@ -5,7 +5,8 @@ js = "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i}
pid = startMongoProgramNoConnect( "mongo" , "--eval" , js , db ? db.getMongo().host : null );
for( var i = 0; i < 10000; ++i ) {
-    assert.writeOK(t.remove( { i: Random.randInt( 10000 )} ));
+    t.remove( {i:Random.randInt( 10000 )} );
+    assert.automsg( "!db.getLastError()" );
}
-stopMongoProgramByPid( pid );
+stopMongoProgramByPid( pid );
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/sharding_balance1.js b/jstests/noPassthroughWithMongod/sharding_balance1.js
index 7f3892ce8f3..e0c36f6cea5 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance1.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance1.js
@@ -15,13 +15,12 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
-    bulk.insert({ _id: num++, s: bigString });
+    db.foo.insert( { _id : num++ , s : bigString } );
    inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance2.js b/jstests/noPassthroughWithMongod/sharding_balance2.js
index c3e2e825ba3..519f88807a2 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance2.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance2.js
@@ -26,12 +26,12 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 40 * 1024 * 1024 ) ){
-    bulk.insert({ _id: num++, s: bigString });
+    db.foo.insert( { _id : num++ , s : bigString } );
    inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+
+db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance3.js b/jstests/noPassthroughWithMongod/sharding_balance3.js
index 59f4136d44c..5e85a694716 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance3.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance3.js
@@ -16,13 +16,12 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 40 * 1024 * 1024 ) ){
-    bulk.insert({ _id: num++, s: bigString });
+    db.foo.insert( { _id : num++ , s : bigString } );
    inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance4.js b/jstests/noPassthroughWithMongod/sharding_balance4.js
index c2a3d744964..f1c27afa0bb 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance4.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance4.js
@@ -35,12 +35,12 @@ counts = {}
//
-function doUpdate( bulk, includeString, optionalId ){
+function doUpdate( includeString, optionalId ){
    var up = { $inc : { x : 1 } }
    if ( includeString )
        up["$set"] = { s : bigString };
    var myid = optionalId == undefined ? Random.randInt( N ) : optionalId
-    bulk.find({ _id : myid }).upsert().update( up );
+    db.foo.update( { _id : myid } , up , true );
    counts[myid] = ( counts[myid] ? counts[myid] : 0 ) + 1;
    return myid;
@@ -48,15 +48,14 @@ function doUpdate( includeString, optionalId ){
// Initially update all documents from 1 to N, otherwise later checks can fail because no document
// previously existed
-var bulk = db.foo.initializeUnorderedBulkOp();
for ( i = 0; i < N; i++ ){
-    doUpdate( bulk, true, i );
+    doUpdate( true, i )
}
for ( i=0; i<N*9; i++ ){
-    doUpdate( bulk, false );
+    doUpdate( false )
}
-assert.writeOK(bulk.execute());
+db.getLastError();
for ( var i=0; i<50; i++ ){
    s.printChunks( "test.foo" )
@@ -110,15 +109,25 @@ function check( msg , dontAssert ){
function diff1(){
    jsTest.log("Running diff1...")
+
+    var myid = doUpdate( false )
+    var le = db.getLastErrorCmd();
+
+    if ( le.err )
+        print( "ELIOT ELIOT : " + tojson( le ) + "\t" + myid );
+
+    if ( ! le.updatedExisting || le.n != 1 ) {
+        print( "going to assert for id: " + myid + " correct count is: " + counts[myid] +
+               " db says count is: " + tojson(db.foo.findOne( { _id : myid } )) );
+    }
+
+    assert( le.updatedExisting , "GLE diff myid: " + myid + " 1: " + tojson(le) )
+    assert.eq( 1 , le.n , "GLE diff myid: " + myid + " 2: " + tojson(le) )
-    bulk = db.foo.initializeUnorderedBulkOp();
-    var myid = doUpdate( bulk, false );
-    var res = assert.writeOK(bulk.execute());
-    assert.eq( 1, res.nModified,
-               "diff myid: " + myid + " 2: " + res.toString() + "\n" +
-               " correct count is: " + counts[myid] +
-               " db says count is: " + tojson(db.foo.findOne({ _id: myid })) );
+    if ( Math.random() > .99 ){
+        db.getLastError()
+        check( "random late check" ); // SERVER-1430
+    }
    var x = s.chunkCounts( "foo" )
    if ( Math.random() > .999 )
diff --git a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js
index e3728817744..41bf9bf03c5 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js
@@ -14,14 +14,12 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
-
-var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
-    bulk.insert({ _id: Math.random(), s: bigString });
+    db.foo.insert( { _id : Math.random() , s : bigString } );
    inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
index 32278c089f3..414b6d57925 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
@@ -22,11 +22,11 @@ for( var i = 0; i < nsq; i++ ) data += data
dataObj = {}
for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data
-var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < 40; i++ ) {
-    bulk.insert({ data: dataObj });
+    if(i != 0 && i % 10 == 0) printjson( coll.stats() )
+    coll.save({ data : dataObj })
}
-assert.writeOK(bulk.execute());
+db.getLastError();
assert.eq( 40 , coll.count() , "prep1" );
@@ -46,9 +46,9 @@ assert.soon(
// On *extremely* slow or variable systems, we've seen migrations fail in the critical section and
// kill the server.  Do an explicit check for this.  SERVER-8781
// TODO: Remove once we can better specify what systems to run what tests on.
-    try {
-        assert.commandWorked(shardA.getDB("admin").runCommand({ ping: 1 }));
-        assert.commandWorked(shardB.getDB("admin").runCommand({ ping: 1 }));
+    try {
+        assert.eq(null, shardA.getDB("admin").getLastError());
+        assert.eq(null, shardB.getDB("admin").getLastError());
    } catch(e) {
        print("An error occurred contacting a shard during balancing," +
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
index c4484356dd4..2948dbef3f9 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
@@ -23,11 +23,11 @@ numDocs = 20 * docsPerChunk
print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs )
-var bulk = t.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++){
-    bulk.insert({ _id: i, s: bigString });
+for ( i=0; i<numDocs; i++ ){
+    t.insert( { _id : i , s : bigString } );
}
-assert.writeOK(bulk.execute());
+
+db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
index 8132e33c5d0..6b2e7faa56b 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
@@ -40,14 +40,15 @@ coll.insert({ _id : -2, d : data15PlusMB });
coll.insert({ _id : -1, d : data15PlusMB });
// Docs of assorted sizes
-assert.writeOK(coll.insert({ _id : 0, d : "x" }));
-assert.writeOK(coll.insert({ _id : 1, d : data15PlusMB }));
-assert.writeOK(coll.insert({ _id : 2, d : "x" }));
-assert.writeOK(coll.insert({ _id : 3, d : data15MB }));
-assert.writeOK(coll.insert({ _id : 4, d : "x" }));
-assert.writeOK(coll.insert({ _id : 5, d : data1MB }));
-assert.writeOK(coll.insert({ _id : 6, d : "x" }));
-
+coll.insert({ _id : 0, d : "x" });
+coll.insert({ _id : 1, d : data15PlusMB });
+coll.insert({ _id : 2, d : "x" });
+coll.insert({ _id : 3, d : data15MB });
+coll.insert({ _id : 4, d : "x" });
+coll.insert({ _id : 5, d : data1MB });
+coll.insert({ _id : 6, d : "x" });
+
+assert.eq( null, coll.getDB().getLastError() );
assert.eq( 9, coll.find().itcount() );
jsTest.log( "Starting migration..." );
diff --git a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js b/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
index 0d8af3a1ebe..9c4d73d5a2c 100644
--- a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
+++ b/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
@@ -6,14 +6,12 @@ s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
db = s.getDB( "test" );
-var bulk = db.foo.initializeUnorderedBulkOp();
-var bulk2 = db.bar.initializeUnorderedBulkOp();
for ( i=0; i<100; i++ ) {
-    bulk.insert({ _id: i, x: i });
-    bulk2.insert({ _id: i, x: i });
+    db.foo.insert( { _id : i , x : i } )
+    db.bar.insert( { _id : i , x : i } )
}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
+
+db.getLastError();
sh.splitAt( "test.foo" , { _id : 50 } )
diff --git a/jstests/noPassthroughWithMongod/sharding_rs1.js b/jstests/noPassthroughWithMongod/sharding_rs1.js
index 53a1f5e5dda..d79605ad177 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs1.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs1.js
@@ -15,13 +15,12 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
-    bulk.insert({ _id: num++, s: bigString, x: Math.random() });
+    db.foo.insert( { _id : num++ , s : bigString , x : Math.random() } );
    inserted += bigString.length;
}
-assert.writeOK(bulk.execute());
+db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_rs2.js b/jstests/noPassthroughWithMongod/sharding_rs2.js
index 1a0fe612d70..b577bf82ba9 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs2.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs2.js
@@ -92,12 +92,11 @@ assert.lte( before.query + 10 , after.query , "B3" )
db.foo.ensureIndex( { x : 1 } )
-var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<100; i++ ){
    if ( i == 17 ) continue;
-    bulk.insert({ x: i });
+    db.foo.insert( { x : i } )
}
-assert.writeOK(bulk.execute({ w: 3 }));
+db.getLastError( 3 , 10000 );
// Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
// replication for this and future tests to pass
diff --git a/jstests/noPassthroughWithMongod/ttl1.js b/jstests/noPassthroughWithMongod/ttl1.js
index 3ce494bc2ef..cba4d035b05 100644
--- a/jstests/noPassthroughWithMongod/ttl1.js
+++ b/jstests/noPassthroughWithMongod/ttl1.js
@@ -35,6 +35,7 @@ t.insert( { x : true } ) //non-date value
t.insert( { x : "yo" } ) //non-date value
t.insert( { x : 3 } ) //non-date value
t.insert( { x : /foo/ } ) //non-date value
+db.getLastError();
assert.eq( 30 , t.count() );
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index 5646ce22a39..3b251dfa8a9 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -29,11 +29,9 @@ masterdb.createCollection(mastercol.getName(), {usePowerOf2Sizes: false});
// create new collection. insert 24 docs, aged at one-hour intervalss
now = (new Date()).getTime();
-var bulk = mastercol.initializeUnorderedBulkOp();
-for ( i=0; i<24; i++ ) {
-    bulk.insert({ x: new Date( now - ( 3600 * 1000 * i )) });
-}
-assert.writeOK(bulk.execute());
+for ( i=0; i<24; i++ )
+    mastercol.insert( { x : new Date( now - ( 3600 * 1000 * i ) ) } );
+masterdb.getLastError();
rt.awaitReplication();
assert.eq( 24 , mastercol.count() , "docs not inserted on primary" );
assert.eq( 24 , slave1col.count() , "docs not inserted on secondary" );
@@ -50,7 +48,8 @@ assert.eq( 0 , slave1col.stats().userFlags , "userFlags not 0 on secondary");
// create TTL index, wait for TTL monitor to kick in, then check that
// userFlags get set to 1, and correct number of docs age out
-assert.commandWorked(mastercol.ensureIndex({ x: 1 }, { expireAfterSeconds: 20000 }));
+mastercol.ensureIndex( { x : 1 } , { expireAfterSeconds : 20000 } );
+masterdb.getLastError();
rt.awaitReplication();
sleep(70*1000); // TTL monitor runs every 60 seconds, so wait 70
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
index 15e72b66a81..99c8681a144 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
@@ -17,7 +17,8 @@ var primeSystemReplset = function() {
    print("create a TTL collection");
    var testDB = conn.getDB("test");
-    assert.commandWorked(testDB.foo.ensureIndex({ x: 1 }, { expireAfterSeconds: 2 }));
+    testDB.foo.ensureIndex({x:1}, {expireAfterSeconds : 2});
+    testDB.getLastError();
};
var restartWithConfig = function() {
@@ -37,7 +38,8 @@ var restartWithConfig = function() {
var restartWithoutConfig = function() {
    var localDB = conn.getDB("local");
-    assert.writeOK(localDB.system.replset.remove({}));
+    localDB.system.replset.remove({});
+    localDB.getLastError();
    stopMongod(runner.port(), 15);
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index 2c524d8d788..d5aa45e269a 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -20,12 +20,11 @@ s.adminCommand( { shardcollection : ns , key: { _id : 1 } } );
// insert 24 docs, with timestamps at one hour intervals
var now = (new Date()).getTime();
-var bulk = t.initializeUnorderedBulkOp();
-for (var i = 0; i < 24; i++) {
+for ( i=0; i<24; i++ ){
    var past = new Date( now - ( 3600 * 1000 * i ) );
-    bulk.insert({ _id: i, x: past });
+    t.insert( {_id : i , x : past } );
}
-assert.writeOK(bulk.execute());
+s.getDB( dbname ).getLastError();
assert.eq( t.count() , 24 , "initial docs not inserted");
// create the TTL index which delete anything older than ~5.5 hours