author    Randolph Tan <randolph@10gen.com>    2014-04-25 14:04:36 -0400
committer Randolph Tan <randolph@10gen.com>    2014-05-06 16:32:44 -0400
commit    87dc3ae516e1d12a632dc604710661e38ed7b3dd (patch)
tree      3a483a3d0c38ce00a7f4d7dba0e9cba7f7eba5f3 /jstests/noPassthroughWithMongod
parent    6b945ec15c61f6bd4bfbaf382624d886ec8441d2 (diff)
SERVER-13741 Migrate remaining tests to use write commands
Diffstat (limited to 'jstests/noPassthroughWithMongod')
-rw-r--r--  jstests/noPassthroughWithMongod/autosplit_heuristics.js  10
-rw-r--r--  jstests/noPassthroughWithMongod/background.js  46
-rw-r--r--  jstests/noPassthroughWithMongod/balance_repl.js  7
-rw-r--r--  jstests/noPassthroughWithMongod/balance_tags1.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/balance_tags2.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/btreedel.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/bulk_shard_insert.js  7
-rw-r--r--  jstests/noPassthroughWithMongod/capped4.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/fsync2.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/geo_axis_aligned.js  158
-rw-r--r--  jstests/noPassthroughWithMongod/geo_mnypts.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/geo_polygon.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/index_check10.js  13
-rw-r--r--  jstests/noPassthroughWithMongod/index_check9.js  15
-rw-r--r--  jstests/noPassthroughWithMongod/index_hammer1.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/index_killop.js  14
-rw-r--r--  jstests/noPassthroughWithMongod/index_multi.js  65
-rw-r--r--  jstests/noPassthroughWithMongod/index_retry.js  22
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_drop.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_interrupts.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_secondary.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js  7
-rw-r--r--  jstests/noPassthroughWithMongod/large_chunk.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js  8
-rw-r--r--  jstests/noPassthroughWithMongod/mr_shard_version.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/no_balance_collection.js  6
-rw-r--r--  jstests/noPassthroughWithMongod/opcounters_legacy.js  173
-rw-r--r--  jstests/noPassthroughWithMongod/opcounters_write_cmd.js  166
-rw-r--r--  jstests/noPassthroughWithMongod/parallel_collection_scan.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/remove9.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance1.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance2.js  6
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance3.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance4.js  35
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js  6
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_migrateBigObject.js  12
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js  8
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js  17
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js  10
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_rs1.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_rs2.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/ttl1.js  1
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl.js  11
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl_maintenance.js  6
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_sharded.js  7
45 files changed, 298 insertions, 625 deletions
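
Every file in this diffstat is migrated along the same lines: fire-and-forget writes that were checked after the fact with getLastError() become unordered bulk operations whose results are asserted directly, and expected failures are read off the returned write result instead of GLE. Below is a minimal sketch of that pattern in mongo shell JavaScript; the collection name example_migration, the document shapes, and the counts are illustrative only, not part of the commit.

// Old style, removed throughout this commit: insert per document, then poll GLE.
//   for (var i = 0; i < 1000; i++) { coll.insert({ _id: i }); }
//   assert.eq(null, db.getLastError());

var coll = db.example_migration;
coll.drop();

// New style: queue the writes in an unordered bulk op and assert on the result.
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
    bulk.insert({ _id: i });
}
assert.writeOK(bulk.execute());

// Expected failures are now inspected on the write result rather than via GLE
// (compare the capped4.js hunk below). Assumes the duplicate-key error message
// still contains the word "duplicate".
var res = assert.writeError(coll.insert({ _id: 0 }));
assert(res.getWriteError().errmsg.indexOf("duplicate") >= 0);

The bulk API also batches the queued operations into as few write commands as possible (up to 1000 operations per command in 2.6), which is why the long insert loops below no longer need periodic getLastError() checkpoints.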
diff --git a/jstests/noPassthroughWithMongod/autosplit_heuristics.js b/jstests/noPassthroughWithMongod/autosplit_heuristics.js
index 33649617126..ee1d28b5eda 100644
--- a/jstests/noPassthroughWithMongod/autosplit_heuristics.js
+++ b/jstests/noPassthroughWithMongod/autosplit_heuristics.js
@@ -60,15 +60,11 @@ printjson({ chunkSizeBytes : chunkSizeBytes,
totalInserts : totalInserts });
// Insert enough docs to trigger splits into all chunks
+var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < totalInserts; i++) {
- coll.insert({ _id : i % numChunks + (i / totalInserts) });
- if ( i % ( numChunks * 1000 ) == 0 ) {
- print( "Inserted " + i + " docs, " +
- ( i * approxSize / numChunks ) + " bytes per chunk." );
- }
+ bulk.insert({ _id : i % numChunks + (i / totalInserts) });
}
-
-assert.eq(null, coll.getDB().getLastError());
+assert.writeOK(bulk.execute());
jsTest.log("Inserts completed...");
diff --git a/jstests/noPassthroughWithMongod/background.js b/jstests/noPassthroughWithMongod/background.js
index d1d0047988a..188027a029b 100644
--- a/jstests/noPassthroughWithMongod/background.js
+++ b/jstests/noPassthroughWithMongod/background.js
@@ -7,45 +7,41 @@ t.drop();
var a = new Mongo( db.getMongo().host ).getDB( db.getName() );
+var bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 100000; i++ ) {
- t.insert({y:'aaaaaaaaaaaa',i:i});
- if( i % 10000 == 0 ) {
- db.getLastError();
- print(i);
- }
+ bulk.insert({ y: 'aaaaaaaaaaaa', i: i });
+ if( i % 10000 == 0 ) {
+ assert.writeOK(bulk.execute());
+ bulk = t.initializeUnorderedBulkOp();
+ print(i);
+ }
}
-//db.getLastError();
-
// start bg indexing
a.system.indexes.insert({ns:"test.bg1", key:{i:1}, name:"i_1", background:true});
// add more data
-
+bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 100000; i++ ) {
- t.insert({i:i});
- if( i % 10000 == 0 ) {
- printjson( db.currentOp() );
- db.getLastError();
- print(i);
- }
+ bulk.insert({ i: i });
+ if( i % 10000 == 0 ) {
+ printjson( db.currentOp() );
+ assert.writeOK(bulk.execute());
+ bulk = t.initializeUnorderedBulkOp();
+ print(i);
+ }
}
-printjson( db.getLastErrorObj() );
+assert.writeOK(bulk.execute());
printjson( db.currentOp() );
-for( var i = 0; i < 40; i++ ) {
- if( db.currentOp().inprog.length == 0 )
- break;
- print("waiting");
- sleep(1000);
+for( var i = 0; i < 40; i++ ) {
+ if( db.currentOp().inprog.length == 0 )
+ break;
+ print("waiting");
+ sleep(1000);
}
-printjson( a.getLastErrorObj() );
-
var idx = t.getIndexes();
-// print("indexes:");
-// printjson(idx);
-
assert( idx[1].key.i == 1 );
diff --git a/jstests/noPassthroughWithMongod/balance_repl.js b/jstests/noPassthroughWithMongod/balance_repl.js
index 610af04767b..c5818ea19b0 100644
--- a/jstests/noPassthroughWithMongod/balance_repl.js
+++ b/jstests/noPassthroughWithMongod/balance_repl.js
@@ -5,10 +5,11 @@ s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true, _noslee
db = s.getDB( "test" );
-for ( i=0; i<2100; i++ ) {
- db.foo.insert( { _id : i , x : i } );
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 2100; i++) {
+ bulk.insert({ _id: i, x: i });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
serverName = s.getServerName( "test" )
other = s.config.shards.findOne( { _id : { $ne : serverName } } );
diff --git a/jstests/noPassthroughWithMongod/balance_tags1.js b/jstests/noPassthroughWithMongod/balance_tags1.js
index 945f0526b17..1122380d7bc 100644
--- a/jstests/noPassthroughWithMongod/balance_tags1.js
+++ b/jstests/noPassthroughWithMongod/balance_tags1.js
@@ -3,10 +3,11 @@ s = new ShardingTest( "balance_tags1" , 3 , 1 , 1 , { sync:true, chunksize : 1 ,
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false, _nosleep: true } } , true );
db = s.getDB( "test" );
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<21; i++ ) {
- db.foo.insert( { _id : i , x : i } );
+ bulk.insert({ _id: i, x: i });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
s.adminCommand( { enablesharding : "test" } )
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
diff --git a/jstests/noPassthroughWithMongod/balance_tags2.js b/jstests/noPassthroughWithMongod/balance_tags2.js
index 55ad3dc5a97..6d0ed6ea7ca 100644
--- a/jstests/noPassthroughWithMongod/balance_tags2.js
+++ b/jstests/noPassthroughWithMongod/balance_tags2.js
@@ -6,10 +6,11 @@ s = new ShardingTest( "balance_tags2" , 3 , 1 , 1 ,
s.config.settings.save({ _id: "balancer", _nosleep: true});
db = s.getDB( "test" );
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<21; i++ ) {
- db.foo.insert( { _id : i , x : i } );
+ bulk.insert({ _id: i, x: i });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
// enable sharding, shard, and stop balancer
sh.enableSharding("test");
diff --git a/jstests/noPassthroughWithMongod/btreedel.js b/jstests/noPassthroughWithMongod/btreedel.js
index 824eb3e63a6..89af6aa7d5d 100644
--- a/jstests/noPassthroughWithMongod/btreedel.js
+++ b/jstests/noPassthroughWithMongod/btreedel.js
@@ -3,9 +3,11 @@
t = db.foo;
t.remove({});
+var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 1000000; i++) {
- t.insert({ _id: i, x: 'a b' });
+ bulk.insert({ _id: i, x: 'a b' });
}
+assert.writeOK(bulk.execute());
print("1 insert done count: " + t.count());
diff --git a/jstests/noPassthroughWithMongod/bulk_shard_insert.js b/jstests/noPassthroughWithMongod/bulk_shard_insert.js
index d9cd25a635e..74810a7c668 100644
--- a/jstests/noPassthroughWithMongod/bulk_shard_insert.js
+++ b/jstests/noPassthroughWithMongod/bulk_shard_insert.js
@@ -46,12 +46,7 @@ while( docsInserted < numDocs ){
bulk.push({ hi : "there", at : docsInserted, i : i, x : x })
}
- coll.insert( bulk )
- var result = db.getLastError( 1 )
- if( result != null ){
- printjson( result )
- throw result
- }
+ assert.writeOK(coll.insert( bulk ));
if( Math.floor( docsInserted / 10000 ) != Math.floor( ( docsInserted + currBulkSize ) / 10000 ) ){
print( "Inserted " + (docsInserted + currBulkSize) + " documents." )
diff --git a/jstests/noPassthroughWithMongod/capped4.js b/jstests/noPassthroughWithMongod/capped4.js
index 27d138c16ce..be02e3c6ae1 100644
--- a/jstests/noPassthroughWithMongod/capped4.js
+++ b/jstests/noPassthroughWithMongod/capped4.js
@@ -22,8 +22,8 @@ assert( !d.hasNext(), "C" );
assert( t.find().sort( { i : 1 } ).hint( { i : 1 } ).toArray().length > 10, "D" );
assert( t.findOne( { i : i - 1 } ), "E" );
-t.remove( { i : i - 1 } );
-assert( db.getLastError().indexOf( "capped" ) >= 0, "F" );
+var res = assert.writeError(t.remove( { i : i - 1 } ));
+assert( res.getWriteError().errmsg.indexOf( "capped" ) >= 0, "F" );
assert( t.validate().valid, "G" );
diff --git a/jstests/noPassthroughWithMongod/fsync2.js b/jstests/noPassthroughWithMongod/fsync2.js
index bdf956f30f2..7080837a99b 100644
--- a/jstests/noPassthroughWithMongod/fsync2.js
+++ b/jstests/noPassthroughWithMongod/fsync2.js
@@ -41,9 +41,7 @@ function doTest() {
//assert.eq(1, m.getDB(db.getName()).fsync2.count());
assert( m.getDB("admin").$cmd.sys.unlock.findOne().ok );
-
- db.getLastError();
-
+
assert.eq( 2, db.fsync2.count() );
}
diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
index 0161eccb4ac..084b839cabc 100644
--- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js
+++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
@@ -17,15 +17,14 @@ centers = []
bounds = []
for( var s = 0; s < scale.length; s++ ){
- for ( var i = 0; i < radius.length; i++ ) {
- radii.push( radius[i] * scale[s] )
- }
-
- for ( var j = 0; j < center.length; j++ ) {
- centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] )
- bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] )
- }
+ for ( var i = 0; i < radius.length; i++ ) {
+ radii.push( radius[i] * scale[s] )
+ }
+ for ( var j = 0; j < center.length; j++ ) {
+ centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] )
+ bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] )
+ }
}
radius = radii
@@ -34,75 +33,74 @@ bound = bounds
for ( var b = 0; b < bits.length; b++ ) {
-
-
- printjson( radius )
- printjson( centers )
-
- for ( var i = 0; i < radius.length; i++ ) {
- for ( var j = 0; j < center.length; j++ ) {
-
- printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
-
- t.drop()
-
- // Make sure our numbers are precise enough for this test
- if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
- continue;
-
- t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
- t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
- t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
- t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
- t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
- t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
-
- t.ensureIndex( { loc : "2d" }, { max : bound[j][1], min : bound[j][0], bits : bits[b] } );
-
- if( db.getLastError() ) continue;
-
- print( "DOING WITHIN QUERY ")
- r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
-
- //printjson( r.toArray() );
-
- assert.eq( 5, r.count() );
-
- // FIXME: surely code like this belongs in utils.js.
- a = r.toArray();
- x = [];
- for ( k in a )
- x.push( a[k]["_id"] )
- x.sort()
- assert.eq( [ 1, 2, 3, 4, 5 ], x );
-
- print( " DOING NEAR QUERY ")
- //printjson( center[j] )
- r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } )
- assert.eq( 5, r.count() );
-
- print( " DOING DIST QUERY ")
-
- a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results
- assert.eq( 5, a.length );
-
- //printjson( a );
-
- var distance = 0;
- for( var k = 0; k < a.length; k++ ){
- //print( a[k].dis )
- //print( distance )
- assert.gte( a[k].dis, distance );
- //printjson( a[k].obj )
- //print( distance = a[k].dis );
- }
-
- r = t.find( { loc : { $within : { $box : [ [ center[j][0] - radius[i], center[j][1] - radius[i] ], [ center[j][0] + radius[i], center[j][1] + radius[i] ] ] } } }, { _id : 1 } )
- assert.eq( 9, r.count() );
-
- }
- }
-}
\ No newline at end of file
+ printjson( radius )
+ printjson( centers )
+
+ for ( var i = 0; i < radius.length; i++ ) {
+ for ( var j = 0; j < center.length; j++ ) {
+ printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
+
+ t.drop()
+
+ // Make sure our numbers are precise enough for this test
+ if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
+ continue;
+
+ t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
+ t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
+ t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
+ t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
+ t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
+ t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
+
+ var res = t.ensureIndex({ loc: "2d" },
+ { max: bound[j][1],
+ min : bound[j][0],
+ bits : bits[b] });
+
+ // ensureIndex fails when this iteration inserted coordinates that are out of bounds.
+ // These are invalid cases, so we skip them.
+ if (!res.ok) continue;
+
+ print( "DOING WITHIN QUERY ")
+ r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
+
+ assert.eq( 5, r.count() );
+
+ // FIXME: surely code like this belongs in utils.js.
+ a = r.toArray();
+ x = [];
+ for ( k in a )
+ x.push( a[k]["_id"] )
+ x.sort()
+ assert.eq( [ 1, 2, 3, 4, 5 ], x );
+
+ print( " DOING NEAR QUERY ")
+ //printjson( center[j] )
+ r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } )
+ assert.eq( 5, r.count() );
+
+ print( " DOING DIST QUERY ")
+
+ a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results
+ assert.eq( 5, a.length );
+
+ var distance = 0;
+ for( var k = 0; k < a.length; k++ ){
+ assert.gte( a[k].dis, distance );
+
+ }
+
+ r = t.find({ loc: { $within: { $box: [ [ center[j][0] - radius[i],
+ center[j][1] - radius[i] ],
+ [ center[j][0] + radius[i],
+ center[j][1] + radius[i] ]]}}},
+ { _id: 1 } );
+ assert.eq( 9, r.count() );
+
+ }
+ }
+}
diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js
index ac4065158bf..bc7935fa7a6 100644
--- a/jstests/noPassthroughWithMongod/geo_mnypts.js
+++ b/jstests/noPassthroughWithMongod/geo_mnypts.js
@@ -6,10 +6,12 @@ coll.drop()
var totalPts = 500 * 1000
// Add points in a 100x100 grid
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < totalPts; i++ ){
var ii = i % 10000
- coll.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] })
+ bulk.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] });
}
+assert.writeOK(bulk.execute());
coll.ensureIndex({ loc : "2d" })
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 25bf0269ccc..5b19b2b2080 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -15,12 +15,14 @@ if ( bi.indexOf( "erh2" ) >= 0 ){
if ( shouldRun ) {
num = 0;
+ var bulk = t.initializeUnorderedBulkOp();
for ( x = -180; x < 180; x += .5 ){
for ( y = -180; y < 180; y += .5 ){
o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
+ bulk.insert( o );
}
}
+ assert.writeOK(bulk.execute());
var numTests = 31;
for( var n = 0; n < numTests; n++ ){
diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js
index 79d0d93fc9b..84e7342e051 100644
--- a/jstests/noPassthroughWithMongod/index_check10.js
+++ b/jstests/noPassthroughWithMongod/index_check10.js
@@ -104,25 +104,30 @@ function doIt( indexVersion ) {
}
}
+ var bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 10000; ++i ) {
- t.save( obj() );
+ bulk.insert( obj() );
}
+ assert.writeOK(bulk.execute());
t.ensureIndex( idx , { v : indexVersion } );
check();
+ bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 10000; ++i ) {
if ( Random.rand() > 0.9 ) {
- t.save( obj() );
+ bulk.insert( obj() );
} else {
- t.remove( obj() ); // improve
+ bulk.find( obj() ).remove(); // improve
}
if( Random.rand() > 0.999 ) {
print( i );
+ assert.writeOK(bulk.execute());
check();
+ bulk = t.initializeUnorderedBulkOp();
}
}
-
+ assert.writeOK(bulk.execute());
check();
}
diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js
index 8a50471940b..fd1b1d5eaa1 100644
--- a/jstests/noPassthroughWithMongod/index_check9.js
+++ b/jstests/noPassthroughWithMongod/index_check9.js
@@ -106,25 +106,32 @@ function check() {
assert.eq( c3.length, count );
}
+var bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 10000; ++i ) {
- t.save( obj() );
+ bulk.insert( obj() );
if( Random.rand() > 0.999 ) {
print( i );
+ assert.writeOK(bulk.execute());
check();
+ bulk = t.initializeUnorderedBulkOp();
}
}
+bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 100000; ++i ) {
if ( Random.rand() > 0.9 ) {
- t.save( obj() );
+ bulk.insert( obj() );
} else {
- t.remove( obj() ); // improve
+ bulk.find( obj() ).remove(); // improve
}
if( Random.rand() > 0.999 ) {
print( i );
+ assert.writeOK(bulk.execute());
check();
+ bulk = t.initializeUnorderedBulkOp();
}
}
+assert.writeOK(bulk.execute());
check();
@@ -132,4 +139,4 @@ check();
for( var z = 0; z < 5; ++z ) {
doIt();
-}
\ No newline at end of file
+}
diff --git a/jstests/noPassthroughWithMongod/index_hammer1.js b/jstests/noPassthroughWithMongod/index_hammer1.js
index 87fd3820f66..675a2f8db7c 100644
--- a/jstests/noPassthroughWithMongod/index_hammer1.js
+++ b/jstests/noPassthroughWithMongod/index_hammer1.js
@@ -2,9 +2,10 @@
t = db.index_hammer1;
t.drop();
+var bulk = t.initializeUnorderedBulkOp();
for ( i=0; i<10000; i++ )
- t.insert( { x : i , y : i } );
-db.getLastError();
+ bulk.insert({ x: i, y: i });
+assert.writeOK(bulk.execute());
ops = []
diff --git a/jstests/noPassthroughWithMongod/index_killop.js b/jstests/noPassthroughWithMongod/index_killop.js
index b022e31f3b8..f897f6a80de 100644
--- a/jstests/noPassthroughWithMongod/index_killop.js
+++ b/jstests/noPassthroughWithMongod/index_killop.js
@@ -5,10 +5,11 @@ t.drop();
// Insert a large number of documents, enough to ensure that an index build on these documents will
// be interrupted before complete.
+var bulk = t.initializeUnorderedBulkOp();
for( i = 0; i < 1e6; ++i ) {
- t.save( { a:i } );
+ bulk.insert({ a: i });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
function debug( x ) {
// printjson( x );
@@ -23,7 +24,7 @@ function getIndexBuildOpId() {
// Identify the index build as an insert into the 'test.system.indexes'
// namespace. It is assumed that no other clients are concurrently
// accessing the 'test' database.
- if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) {
+ if ( op.op == 'query' && 'createIndexes' in op.query ) {
debug( op.opid );
indexBuildOpId = op.opid;
}
@@ -33,9 +34,8 @@ function getIndexBuildOpId() {
/** Test that building an index with @param 'options' can be aborted using killop. */
function testAbortIndexBuild( options ) {
-
- // Create an index asynchronously by using a new connection.
- new Mongo( db.getMongo().host ).getCollection( t.toString() ).createIndex( { a:1 }, options );
+ var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_killop; \
+ coll.createIndex({ a: 1 }, ' + tojson(options) + ');');
// When the index build starts, find its op id.
assert.soon( function() { return ( opId = getIndexBuildOpId() ) != -1; } );
@@ -44,6 +44,8 @@ function testAbortIndexBuild( options ) {
// Wait for the index build to stop.
assert.soon( function() { return getIndexBuildOpId() == -1; } );
+ createIdx();
+
// Check that no new index has been created. This verifies that the index build was aborted
// rather than successfully completed.
assert.eq( [ { _id:1 } ], t.getIndexKeys() );
diff --git a/jstests/noPassthroughWithMongod/index_multi.js b/jstests/noPassthroughWithMongod/index_multi.js
index ac259455d36..e4c38632bcf 100644
--- a/jstests/noPassthroughWithMongod/index_multi.js
+++ b/jstests/noPassthroughWithMongod/index_multi.js
@@ -4,8 +4,9 @@ Random.setRandomSeed();
var coll = db.index_multi;
+var bulk = coll.initializeUnorderedBulkOp();
print("Populate the collection with random data");
-for (var i=0;i<1e4; i++) {
+for (var i = 0; i < 1e4; i++) {
var doc = {"_id" : i};
for (var j=0; j<100; j++) {
@@ -22,52 +23,54 @@ for (var i=0;i<1e4; i++) {
}
}
- if (i%1000 == 0) {
- print("inserted "+i);
- }
-
- coll.insert(doc);
+ bulk.insert(doc);
}
+assert.writeOK(bulk.execute());
// Array of all index specs
var specs = [];
var multikey = [];
+var indexJobs = [];
print("Create 3 triple indexes");
-for (var i=90; i<93; i++) {
+for (var i = 90; i < 93; i++) {
var spec = {};
spec["field"+i] = 1;
spec["field"+(i+1)] = 1;
spec["field"+(i+2)] = 1;
- startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});"
- +"db.results.insert(db.runCommand({getlasterror:1}));");
+ indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + "," +
+ "{ background: true });" +
+ "db.results.insert(db.runCommand({ getlasterror: 1 }));"));
specs.push(spec);
multikey.push(i % 10 == 0 || (i+1) % 10 == 0 || (i+2) % 10 == 0);
}
print("Create 30 compound indexes");
-for (var i=30; i<90; i+=2) {
+for (var i = 30; i < 90; i += 2) {
var spec = {};
spec["field"+i] = 1;
spec["field"+(i+1)] = 1;
- startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});"
- +"db.results.insert(db.runCommand({getlasterror:1}));");
+ indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " +
+ "{ background: true });" +
+ "db.results.insert(db.runCommand({ getlasterror: 1 }));"));
specs.push(spec);
multikey.push(i % 10 == 0 || (i+1) % 10 == 0);
}
print("Create 30 indexes");
-for (var i=0; i<30; i++) {
+for (var i = 0; i < 30; i++) {
var spec = {};
spec["field"+i] = 1;
- startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});"
- +"db.results.insert(db.runCommand({getlasterror:1}));");
+ indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " +
+ "{ background: true });" +
+ "db.results.insert(db.runCommand({ getlasterror: 1 }));"));
specs.push(spec);
multikey.push(i % 10 == 0);
}
print("Do some sets and unsets");
-for (i=0; i<1e4; i++) {
+bulk = coll.initializeUnorderedBulkOp();
+for (i = 0; i < 1e4; i++) {
var criteria = {_id: Random.randInt(1e5)};
var mod = {};
if (Random.rand() < .5) {
@@ -79,31 +82,23 @@ for (i=0; i<1e4; i++) {
mod['$unset']['field'+Random.randInt(100)] = true;
}
- coll.update(criteria, mod);
+ bulk.find(criteria).update(mod);
}
+assert.writeOK(bulk.execute());
+
+indexJobs.forEach(function(join) {
+ join();
+});
printjson(db.results.find().toArray());
printjson(coll.getIndexes());
print("Make sure we end up with 64 indexes");
-assert.soon(
- function() {
- for (var i in specs) {
- print("trying to hint on "+tojson(specs[i]));
- try {
- var explain = coll.find().hint(specs[i]).explain();
- printjson(explain);
- assert.eq(multikey[i], explain.isMultiKey);
- } catch (x) {
- print(x+", hinting on "+tojson(specs[i]));
- return false;
- }
- }
- return true;
- },
- "wait for all indexes to be built",
- 120000
-);
+for (var i in specs) {
+ print("trying to hint on "+tojson(specs[i]));
+ var explain = coll.find().hint(specs[i]).explain();
+ assert.eq(multikey[i], explain.isMultiKey, tojson(explain));
+}
print("SUCCESS!");
diff --git a/jstests/noPassthroughWithMongod/index_retry.js b/jstests/noPassthroughWithMongod/index_retry.js
index d0465476144..7c79e75af5f 100644
--- a/jstests/noPassthroughWithMongod/index_retry.js
+++ b/jstests/noPassthroughWithMongod/index_retry.js
@@ -12,13 +12,14 @@ t.drop();
// Insert a large number of documents, enough to ensure that an index build on these documents can
// be interrupted before complete.
+var bulk = t.initializeUnorderedBulkOp();
for (i = 0; i < 5e5; ++i) {
- t.save( { a:i } );
+ bulk.insert({ a: i });
if (i % 10000 == 0) {
print("i: " + i);
}
}
-test.getLastError();
+assert.writeOK(bulk.execute());
function debug(x) {
printjson(x);
@@ -36,14 +37,15 @@ function indexBuildInProgress() {
// Identify the index build as an insert into the 'test.system.indexes'
// namespace. It is assumed that no other clients are concurrently
// accessing the 'test' database.
- if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) {
+ if ( op.op == 'query' && 'createIndexes' in op.query ) {
debug(op.opid);
+ var idxSpec = op.query.indexes[0];
// SERVER-4295 Make sure the index details are there
// we can't assert these things, since there is a race in reporting
// but we won't count if they aren't
- if ( "a_1" == op.insert.name &&
- 1 == op.insert.key.a &&
- op.insert.background ) {
+ if ( "a_1" == idxSpec.name &&
+ 1 == idxSpec.key.a &&
+ idxSpec.background ) {
indexBuildOpId = op.opid;
}
}
@@ -53,10 +55,9 @@ function indexBuildInProgress() {
}
function abortDuringIndexBuild(options) {
-
- // Create an index asynchronously by using a new connection.
- new Mongo(test.getMongo().host).getCollection(t.toString()).createIndex(
- { a:1 }, { background:true } );
+ var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_retry; \
+ coll.createIndex({ a: 1 }, { background: true });',
+ ports[0]);
// Wait for the index build to start.
var times = 0;
@@ -68,6 +69,7 @@ function abortDuringIndexBuild(options) {
print("killing the mongod");
stopMongod(ports[0], /* signal */ 9);
+ createIdx();
}
abortDuringIndexBuild();
diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js
index 9e754b747ef..db4493df017 100644
--- a/jstests/noPassthroughWithMongod/indexbg_drop.js
+++ b/jstests/noPassthroughWithMongod/indexbg_drop.js
@@ -42,9 +42,11 @@ var dc = {dropIndexes: collection, index: "i_1"};
// set up collections
masterDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
+var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
for( i = 0; i < size; ++i ) {
- masterDB.getCollection(collection).save( {i: Random.rand()} );
+ bulk.insert({ i: Random.rand() });
}
+assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing for test of: " + tojson(dc));
// Add another index to be sure the drop command works.
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
index 09c75056ca2..f6f1d426161 100644
--- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js
+++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
@@ -66,9 +66,11 @@ for (var idx = 0; idx < dropAction.length; idx++) {
// set up collections
masterDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
+ var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
for(var i = 0; i < size; ++i ) {
- masterDB.getCollection(collection).save( {i:i} );
+ bulk.insert({ i: i });
}
+ assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc));
masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} );
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
index 01d3b23a07c..a3b2c26f59e 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
@@ -38,9 +38,11 @@ var secondDB = second.getDB('bgIndexSec');
var size = 500000;
jsTest.log("creating test data " + size + " documents");
+var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
for(var i = 0; i < size; ++i) {
- masterDB.jstests_bgsec.save( {i:i} );
+ bulk.insert({ i: i });
}
+assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing");
masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
index 38cced11bb9..1ea53e6aac2 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
@@ -64,9 +64,11 @@
var size = 500000;
jsTest.log("creating test data " + size + " documents");
+ var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
for( i = 0; i < size; ++i ) {
- masterDB.jstests_bgsec.save( {i:i} );
+ bulk.insert({ i : i });
}
+ assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing");
masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
@@ -82,8 +84,7 @@
// Make sure a journal flush for the oplog occurs, by doing a local journaled write to the
// secondary
- second.getDB('local').foo.insert({a:1});
- second.getDB('local').runCommand( { getLastError: 1, j: true } );
+ assert.writeOK(second.getDB('local').foo.insert({ a: 1 }, { writeConcern: { j: true }}));
// restart secondary and reconnect
jsTest.log("Restarting secondary");
diff --git a/jstests/noPassthroughWithMongod/large_chunk.js b/jstests/noPassthroughWithMongod/large_chunk.js
index 2e648084947..12f0c48fdcd 100644
--- a/jstests/noPassthroughWithMongod/large_chunk.js
+++ b/jstests/noPassthroughWithMongod/large_chunk.js
@@ -20,11 +20,12 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 400 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ bulk.insert({ _id: num++, s: bigString });
inserted += bigString.length;
}
-db.getLastError();
+assert.writeOK(bulk.execute());
// Turn on sharding on the 'test.foo' collection and generate a large chunk
s.adminCommand( { enablesharding : "test" } );
diff --git a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
index fd7ec8c68e7..1ff024fcb03 100644
--- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
+++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
@@ -15,14 +15,14 @@ var coll = db.getCollection("mrInput");
//
var expectedOutColl = [];
+var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10; i++) {
for (var j = 1; j < 50; j++) {
- coll.insert({idx: i, j: j});
+ bulk.insert({ idx: i, j: j });
}
expectedOutColl.push ({ _id: i, value: j - 1 });
}
-
-assertGLEOK(db.getLastErrorObj());
+assert.writeOK(bulk.execute());
function mapFn() { emit(this.idx, 1); };
function reduceFn(key, values) { return Array.sum(values); };
@@ -41,4 +41,4 @@ assert.eq(out.counts.emit, 490, "emit count is wrong");
// changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else
// must have changed with when intermediate reduces occur (see mongo::mr::State::checkSize).
//
-assert.eq(out.counts.reduce, 14, "reduce count is wrong");
\ No newline at end of file
+assert.eq(out.counts.reduce, 14, "reduce count is wrong");
diff --git a/jstests/noPassthroughWithMongod/mr_shard_version.js b/jstests/noPassthroughWithMongod/mr_shard_version.js
index 47fd99ea30e..c011e7700e9 100644
--- a/jstests/noPassthroughWithMongod/mr_shard_version.js
+++ b/jstests/noPassthroughWithMongod/mr_shard_version.js
@@ -11,11 +11,12 @@ var numDocs = 500000
var numKeys = 1000
var numTests = 3
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < numDocs; i++ ){
- coll.insert({ _id : i, key : "" + ( i % numKeys ), value : i % numKeys })
+ bulk.insert({ _id: i, key: "" + ( i % numKeys ), value: i % numKeys });
}
+assert.writeOK(bulk.execute());
-assert.eq( null, coll.getDB().getLastError() )
assert.eq( numDocs, coll.find().itcount() )
var halfId = coll.find().itcount() / 2
diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js
index 7aa55564640..6f6c196510d 100644
--- a/jstests/noPassthroughWithMongod/no_balance_collection.js
+++ b/jstests/noPassthroughWithMongod/no_balance_collection.js
@@ -68,9 +68,11 @@ sh.waitForBalancer(true)
// Make sure auto-migrates on insert don't move chunks
var lastMigration = sh._lastMigration( collB )
+var bulk = collB.initializeUnorderedBulkOp();
for( var i = 0; i < 1000000; i++ ){
- collB.insert({ _id : i, hello : "world" })
+ bulk.insert({ _id: i, hello: "world" });
}
+assert.writeOK(bulk.execute());
printjson( lastMigration )
printjson( sh._lastMigration( collB ) )
@@ -78,4 +80,4 @@ printjson( sh._lastMigration( collB ) )
if( lastMigration == null ) assert.eq( null, sh._lastMigration( collB ) )
else assert.eq( lastMigration.time, sh._lastMigration( collB ).time )
-st.stop()
\ No newline at end of file
+st.stop()
diff --git a/jstests/noPassthroughWithMongod/opcounters_legacy.js b/jstests/noPassthroughWithMongod/opcounters_legacy.js
deleted file mode 100644
index 7db520a109f..00000000000
--- a/jstests/noPassthroughWithMongod/opcounters_legacy.js
+++ /dev/null
@@ -1,173 +0,0 @@
-// Test that opcounters get incremented properly.
-
-// Remember the global 'db' var
-var lastDB = db;
-var mongo = new Mongo(db.getMongo().host);
-mongo.writeMode = function() { return "legacy"; }
-db = mongo.getDB(db.toString());
-
-var t = db.opcounters;
-var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg);
-var opCounters;
-
-//
-// 1. Insert.
-//
-// - mongod, single insert:
-// counted as 1 op if successful, else 0
-// - mongod, bulk insert of N with continueOnError=true:
-// counted as N ops, regardless of errors
-// - mongod, bulk insert of N with continueOnError=false:
-// counted as K ops, where K is number of docs successfully inserted
-//
-// - mongos
-// count ops attempted like insert commands
-//
-
-t.drop();
-
-// Single insert, no error.
-opCounters = db.serverStatus().opcounters;
-t.insert({_id:0});
-assert(!db.getLastError());
-assert.eq(opCounters.insert + 1, db.serverStatus().opcounters.insert);
-
-// Bulk insert, no error.
-opCounters = db.serverStatus().opcounters;
-t.insert([{_id:1},{_id:2}])
-assert(!db.getLastError());
-assert.eq(opCounters.insert + 2, db.serverStatus().opcounters.insert);
-
-// Single insert, with error.
-opCounters = db.serverStatus().opcounters;
-t.insert({_id:0})
-print( db.getLastError() )
-assert(db.getLastError());
-assert.eq(opCounters.insert + (isMongos ? 1 : 0), db.serverStatus().opcounters.insert);
-
-// Bulk insert, with error, continueOnError=false.
-opCounters = db.serverStatus().opcounters;
-t.insert([{_id:3},{_id:3},{_id:4}])
-assert(db.getLastError());
-assert.eq(opCounters.insert + (isMongos ? 2 : 1), db.serverStatus().opcounters.insert);
-
-// Bulk insert, with error, continueOnError=true.
-var continueOnErrorFlag = 1;
-opCounters = db.serverStatus().opcounters;
-t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag)
-assert(db.getLastError());
-assert.eq(opCounters.insert + 3, db.serverStatus().opcounters.insert);
-
-//
-// 2. Update.
-//
-// - counted as 1 op, regardless of errors
-//
-
-t.drop();
-t.insert({_id:0});
-
-// Update, no error.
-opCounters = db.serverStatus().opcounters;
-t.update({_id:0}, {$set:{a:1}});
-assert(!db.getLastError());
-assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update);
-
-// Update, with error.
-opCounters = db.serverStatus().opcounters;
-t.update({_id:0}, {$set:{_id:1}});
-assert(db.getLastError());
-assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update);
-
-//
-// 3. Delete.
-//
-// - counted as 1 op, regardless of errors
-//
-
-t.drop();
-t.insert([{_id:0},{_id:1}]);
-
-// Delete, no error.
-opCounters = db.serverStatus().opcounters;
-t.remove({_id:0});
-assert(!db.getLastError());
-assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete);
-
-// Delete, with error.
-opCounters = db.serverStatus().opcounters;
-t.remove({_id:{$invalidOp:1}});
-assert(db.getLastError());
-assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete);
-
-//
-// 4. Query.
-//
-// - mongod: counted as 1 op, regardless of errors
-// - mongos: counted as 1 op if successful, else 0
-//
-
-t.drop();
-t.insert({_id:0});
-
-// Query, no error.
-opCounters = db.serverStatus().opcounters;
-t.findOne();
-assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query);
-
-// Query, with error.
-opCounters = db.serverStatus().opcounters;
-assert.throws(function() { t.findOne({_id:{$invalidOp:1}}) });
-assert.eq(opCounters.query + (isMongos ? 0 : 1), db.serverStatus().opcounters.query);
-
-//
-// 5. Getmore.
-//
-// - counted as 1 op per getmore issued, regardless of errors
-//
-
-t.drop();
-t.insert([{_id:0},{_id:1},{_id:2}]);
-
-// Getmore, no error.
-opCounters = db.serverStatus().opcounters;
-t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore
-assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query);
-assert.eq(opCounters.getmore + 1, db.serverStatus().opcounters.getmore);
-
-// Getmore, with error (TODO implement when SERVER-5813 is resolved).
-
-//
-// 6. Command.
-//
-// - unrecognized commands not counted
-// - recognized commands counted as 1 op, regardless of errors
-// - some (recognized) commands can suppress command counting (i.e. aren't counted as commands)
-//
-
-t.drop();
-t.insert({_id:0})
-
-// Command, recognized, no error.
-opCounters = db.serverStatus().opcounters;
-assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
-
-// Command, recognized, with error.
-opCounters = db.serverStatus().opcounters;
-res = t.runCommand("count", {query:{$invalidOp:1}});
-assert.eq(0, res.ok);
-assert.eq(opCounters.command + 2,
- db.serverStatus().opcounters.command); // "serverStatus", "count" counted
-
-// Command, unrecognized.
-opCounters = db.serverStatus().opcounters;
-res = t.runCommand("command that doesn't exist");
-assert.eq(0, res.ok);
-//assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
-// TODO Replace below with above when SERVER-9038 is resolved (mongos counts unrecognized commands)
-assert.eq(opCounters.command + (isMongos ? 2 : 1), db.serverStatus().opcounters.command);
-
-// Command, recognized, counting suppressed (TODO implement when SERVER-9038 is resolved).
-
-// Restore 'db' var
-db = lastDB;
diff --git a/jstests/noPassthroughWithMongod/opcounters_write_cmd.js b/jstests/noPassthroughWithMongod/opcounters_write_cmd.js
deleted file mode 100644
index 47a1bc63515..00000000000
--- a/jstests/noPassthroughWithMongod/opcounters_write_cmd.js
+++ /dev/null
@@ -1,166 +0,0 @@
-// Test that opcounters get incremented properly.
-
-var mongo = new Mongo(db.getMongo().host);
-mongo.forceWriteMode("commands");
-var newdb = mongo.getDB(db.toString());
-
-var t = newdb.opcounters;
-var isMongos = ("isdbgrid" == newdb.runCommand("ismaster").msg);
-var opCounters;
-var res;
-
-assert(t.getDB().getMongo().useWriteCommands(), "test is not running with write commands")
-
-//
-// Count ops attempted in write commands in mongod and mongos
-//
-
-//
-// 1. Insert.
-//
-// - unordered insert of N:
-// counted as N ops, regardless of errors
-// - ordered insert of N:
-// counted as K + 1 ops, where K is number of docs successfully inserted,
-// adding the failed attempt
-//
-
-t.drop();
-
-// Single insert, no error.
-opCounters = newdb.serverStatus().opcounters;
-res = t.insert({_id:0});
-assert.writeOK(res);
-assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert);
-
-// Bulk insert, no error.
-opCounters = newdb.serverStatus().opcounters;
-res = t.insert([{_id:1},{_id:2}])
-assert.writeOK(res);
-assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert);
-
-// Single insert, with error.
-opCounters = newdb.serverStatus().opcounters;
-res = t.insert({_id:0})
-assert.writeError(res);
-assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert);
-
-// Bulk insert, with error, ordered.
-opCounters = newdb.serverStatus().opcounters;
-res = t.insert([{_id:3},{_id:3},{_id:4}])
-assert.writeError(res);
-assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert);
-
-// Bulk insert, with error, unordered.
-var continueOnErrorFlag = 1;
-opCounters = newdb.serverStatus().opcounters;
-res = t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag)
-assert.writeError(res);
-assert.eq(opCounters.insert + 3, newdb.serverStatus().opcounters.insert);
-
-//
-// 2. Update.
-//
-
-t.drop();
-t.insert({_id:0});
-
-// Update, no error.
-opCounters = newdb.serverStatus().opcounters;
-res = t.update({_id:0}, {$set:{a:1}});
-assert.writeOK(res);
-assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update);
-
-// Update, with error.
-opCounters = newdb.serverStatus().opcounters;
-res = t.update({_id:0}, {$set:{_id:1}});
-assert.writeError(res);
-assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update);
-
-//
-// 3. Delete.
-//
-
-t.drop();
-t.insert([{_id:0},{_id:1}]);
-
-// Delete, no error.
-opCounters = newdb.serverStatus().opcounters;
-res = t.remove({_id:0});
-assert.writeOK(res);
-assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete);
-
-// Delete, with error.
-opCounters = newdb.serverStatus().opcounters;
-res = t.remove({_id:{$invalidOp:1}});
-assert.writeError(res);
-assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete);
-
-//
-// 4. Query.
-//
-// - mongod: counted as 1 op, regardless of errors
-// - mongos: counted as 1 op if successful, else 0
-//
-
-t.drop();
-t.insert({_id:0});
-
-// Query, no error.
-opCounters = newdb.serverStatus().opcounters;
-t.findOne();
-assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query);
-
-// Query, with error.
-opCounters = newdb.serverStatus().opcounters;
-assert.throws(function() { t.findOne({_id:{$invalidOp:1}}) });
-assert.eq(opCounters.query + (isMongos ? 0 : 1), newdb.serverStatus().opcounters.query);
-
-//
-// 5. Getmore.
-//
-// - counted as 1 op per getmore issued, regardless of errors
-//
-
-t.drop();
-t.insert([{_id:0},{_id:1},{_id:2}]);
-
-// Getmore, no error.
-opCounters = newdb.serverStatus().opcounters;
-t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore
-assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query);
-assert.eq(opCounters.getmore + 1, newdb.serverStatus().opcounters.getmore);
-
-// Getmore, with error (TODO implement when SERVER-5813 is resolved).
-
-//
-// 6. Command.
-//
-// - unrecognized commands not counted
-// - recognized commands counted as 1 op, regardless of errors
-// - some (recognized) commands can suppress command counting (i.e. aren't counted as commands)
-//
-
-t.drop();
-t.insert({_id:0})
-
-// Command, recognized, no error.
-opCounters = newdb.serverStatus().opcounters;
-assert.eq(opCounters.command + 1, newdb.serverStatus().opcounters.command); // "serverStatus" counted
-
-// Command, recognized, with error.
-opCounters = newdb.serverStatus().opcounters;
-res = t.runCommand("count", {query:{$invalidOp:1}});
-assert.eq(0, res.ok);
-assert.eq(opCounters.command + 2,
- newdb.serverStatus().opcounters.command); // "serverStatus", "count" counted
-
-// Command, unrecognized.
-opCounters = newdb.serverStatus().opcounters;
-res = t.runCommand("command that doesn't exist");
-assert.eq(0, res.ok);
-//assert.eq(opCounters.command + 1, newdb.serverStatus().opcounters.command); // "serverStatus" counted
-// TODO Replace below with above when SERVER-9038 is resolved (mongos counts unrecognized commands)
-assert.eq(opCounters.command + (isMongos ? 2 : 1), newdb.serverStatus().opcounters.command);
-
-// Command, recognized, counting suppressed (TODO implement when SERVER-9038 is resolved).
diff --git a/jstests/noPassthroughWithMongod/parallel_collection_scan.js b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
index d745f088376..44e5d361e45 100644
--- a/jstests/noPassthroughWithMongod/parallel_collection_scan.js
+++ b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
@@ -6,10 +6,11 @@ s = "";
while ( s.length < 10000 )
s += ".";
+var bulk = t.initializeUnorderedBulkOp();
for ( i = 0; i < 8000; i++ ) {
- t.insert( { x : i, s : s } );
+ bulk.insert({ x: i, s: s });
}
-
+assert.writeOK(bulk.execute());
function iterateSliced() {
var res = t.runCommand( "parallelCollectionScan", { numCursors : 3 } );
diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js
index e7dfe9bfff1..7492e36c5b9 100644
--- a/jstests/noPassthroughWithMongod/remove9.js
+++ b/jstests/noPassthroughWithMongod/remove9.js
@@ -5,8 +5,7 @@ js = "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i}
pid = startMongoProgramNoConnect( "mongo" , "--eval" , js , db ? db.getMongo().host : null );
for( var i = 0; i < 10000; ++i ) {
- t.remove( {i:Random.randInt( 10000 )} );
- assert.automsg( "!db.getLastError()" );
+ assert.writeOK(t.remove( { i: Random.randInt( 10000 )} ));
}
-stopMongoProgramByPid( pid );
\ No newline at end of file
+stopMongoProgramByPid( pid );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance1.js b/jstests/noPassthroughWithMongod/sharding_balance1.js
index e0c36f6cea5..7f3892ce8f3 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance1.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance1.js
@@ -15,12 +15,13 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ bulk.insert({ _id: num++, s: bigString });
inserted += bigString.length;
}
+assert.writeOK(bulk.execute());
-db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance2.js b/jstests/noPassthroughWithMongod/sharding_balance2.js
index 519f88807a2..c3e2e825ba3 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance2.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance2.js
@@ -26,12 +26,12 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 40 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ bulk.insert({ _id: num++, s: bigString });
inserted += bigString.length;
}
-
-db.getLastError();
+assert.writeOK(bulk.execute());
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance3.js b/jstests/noPassthroughWithMongod/sharding_balance3.js
index 5e85a694716..59f4136d44c 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance3.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance3.js
@@ -16,12 +16,13 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 40 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ bulk.insert({ _id: num++, s: bigString });
inserted += bigString.length;
}
+assert.writeOK(bulk.execute());
-db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance4.js b/jstests/noPassthroughWithMongod/sharding_balance4.js
index f1c27afa0bb..c2a3d744964 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance4.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance4.js
@@ -35,12 +35,12 @@ counts = {}
//
-function doUpdate( includeString, optionalId ){
+function doUpdate( bulk, includeString, optionalId ){
var up = { $inc : { x : 1 } }
if ( includeString )
up["$set"] = { s : bigString };
var myid = optionalId == undefined ? Random.randInt( N ) : optionalId
- db.foo.update( { _id : myid } , up , true );
+ bulk.find({ _id : myid }).upsert().update( up );
counts[myid] = ( counts[myid] ? counts[myid] : 0 ) + 1;
return myid;
@@ -48,14 +48,15 @@ function doUpdate( includeString, optionalId ){
// Initially update all documents from 1 to N, otherwise later checks can fail because no document
// previously existed
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i = 0; i < N; i++ ){
- doUpdate( true, i )
+ doUpdate( bulk, true, i );
}
for ( i=0; i<N*9; i++ ){
- doUpdate( false )
+ doUpdate( bulk, false );
}
-db.getLastError();
+assert.writeOK(bulk.execute());
for ( var i=0; i<50; i++ ){
s.printChunks( "test.foo" )
@@ -109,25 +110,15 @@ function check( msg , dontAssert ){
function diff1(){
jsTest.log("Running diff1...")
-
- var myid = doUpdate( false )
- var le = db.getLastErrorCmd();
-
- if ( le.err )
- print( "ELIOT ELIOT : " + tojson( le ) + "\t" + myid );
-
- if ( ! le.updatedExisting || le.n != 1 ) {
- print( "going to assert for id: " + myid + " correct count is: " + counts[myid] + " db says count is: " + tojson(db.foo.findOne( { _id : myid } )) );
- }
-
- assert( le.updatedExisting , "GLE diff myid: " + myid + " 1: " + tojson(le) )
- assert.eq( 1 , le.n , "GLE diff myid: " + myid + " 2: " + tojson(le) )
+ bulk = db.foo.initializeUnorderedBulkOp();
+ var myid = doUpdate( bulk, false );
+ var res = assert.writeOK(bulk.execute());
- if ( Math.random() > .99 ){
- db.getLastError()
- check( "random late check" ); // SERVER-1430
- }
+ assert.eq( 1, res.nModified,
+ "diff myid: " + myid + " 2: " + res.toString() + "\n" +
+ " correct count is: " + counts[myid] +
+ " db says count is: " + tojson(db.foo.findOne({ _id: myid })) );
var x = s.chunkCounts( "foo" )
if ( Math.random() > .999 )
diff --git a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js
index 41bf9bf03c5..e3728817744 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js
@@ -14,12 +14,14 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
- db.foo.insert( { _id : Math.random() , s : bigString } );
+ bulk.insert({ _id: Math.random(), s: bigString });
inserted += bigString.length;
}
+assert.writeOK(bulk.execute());
-db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
index 414b6d57925..32278c089f3 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
@@ -22,11 +22,11 @@ for( var i = 0; i < nsq; i++ ) data += data
dataObj = {}
for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < 40; i++ ) {
- if(i != 0 && i % 10 == 0) printjson( coll.stats() )
- coll.save({ data : dataObj })
+ bulk.insert({ data: dataObj });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
assert.eq( 40 , coll.count() , "prep1" );
@@ -46,9 +46,9 @@ assert.soon(
// On *extremely* slow or variable systems, we've seen migrations fail in the critical section and
// kill the server. Do an explicit check for this. SERVER-8781
// TODO: Remove once we can better specify what systems to run what tests on.
- try {
- assert.eq(null, shardA.getDB("admin").getLastError());
- assert.eq(null, shardB.getDB("admin").getLastError());
+ try {
+ assert.commandWorked(shardA.getDB("admin").runCommand({ ping: 1 }));
+ assert.commandWorked(shardB.getDB("admin").runCommand({ ping: 1 }));
}
catch(e) {
print("An error occurred contacting a shard during balancing," +
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
index 2948dbef3f9..c4484356dd4 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
@@ -23,11 +23,11 @@ numDocs = 20 * docsPerChunk
print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs )
-for ( i=0; i<numDocs; i++ ){
- t.insert( { _id : i , s : bigString } );
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++){
+ bulk.insert({ _id: i, s: bigString });
}
-
-db.getLastError();
+assert.writeOK(bulk.execute());
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
index 6b2e7faa56b..8132e33c5d0 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
@@ -40,15 +40,14 @@ coll.insert({ _id : -2, d : data15PlusMB });
coll.insert({ _id : -1, d : data15PlusMB });
// Docs of assorted sizes
-coll.insert({ _id : 0, d : "x" });
-coll.insert({ _id : 1, d : data15PlusMB });
-coll.insert({ _id : 2, d : "x" });
-coll.insert({ _id : 3, d : data15MB });
-coll.insert({ _id : 4, d : "x" });
-coll.insert({ _id : 5, d : data1MB });
-coll.insert({ _id : 6, d : "x" });
-
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(coll.insert({ _id : 0, d : "x" }));
+assert.writeOK(coll.insert({ _id : 1, d : data15PlusMB }));
+assert.writeOK(coll.insert({ _id : 2, d : "x" }));
+assert.writeOK(coll.insert({ _id : 3, d : data15MB }));
+assert.writeOK(coll.insert({ _id : 4, d : "x" }));
+assert.writeOK(coll.insert({ _id : 5, d : data1MB }));
+assert.writeOK(coll.insert({ _id : 6, d : "x" }));
+
assert.eq( 9, coll.find().itcount() );
jsTest.log( "Starting migration..." );
diff --git a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js b/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
index 9c4d73d5a2c..0d8af3a1ebe 100644
--- a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
+++ b/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
@@ -6,12 +6,14 @@ s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
db = s.getDB( "test" );
+var bulk = db.foo.initializeUnorderedBulkOp();
+var bulk2 = db.bar.initializeUnorderedBulkOp();
for ( i=0; i<100; i++ ) {
- db.foo.insert( { _id : i , x : i } )
- db.bar.insert( { _id : i , x : i } )
+ bulk.insert({ _id: i, x: i });
+ bulk2.insert({ _id: i, x: i });
}
-
-db.getLastError();
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
sh.splitAt( "test.foo" , { _id : 50 } )
diff --git a/jstests/noPassthroughWithMongod/sharding_rs1.js b/jstests/noPassthroughWithMongod/sharding_rs1.js
index d79605ad177..53a1f5e5dda 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs1.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs1.js
@@ -15,12 +15,13 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString , x : Math.random() } );
+ bulk.insert({ _id: num++, s: bigString, x: Math.random() });
inserted += bigString.length;
}
+assert.writeOK(bulk.execute());
-db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_rs2.js b/jstests/noPassthroughWithMongod/sharding_rs2.js
index b577bf82ba9..1a0fe612d70 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs2.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs2.js
@@ -92,11 +92,12 @@ assert.lte( before.query + 10 , after.query , "B3" )
db.foo.ensureIndex( { x : 1 } )
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<100; i++ ){
if ( i == 17 ) continue;
- db.foo.insert( { x : i } )
+ bulk.insert({ x: i });
}
-db.getLastError( 3 , 10000 );
+assert.writeOK(bulk.execute({ w: 3 }));
// Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
// replication for this and future tests to pass
diff --git a/jstests/noPassthroughWithMongod/ttl1.js b/jstests/noPassthroughWithMongod/ttl1.js
index cba4d035b05..3ce494bc2ef 100644
--- a/jstests/noPassthroughWithMongod/ttl1.js
+++ b/jstests/noPassthroughWithMongod/ttl1.js
@@ -35,7 +35,6 @@ t.insert( { x : true } ) //non-date value
t.insert( { x : "yo" } ) //non-date value
t.insert( { x : 3 } ) //non-date value
t.insert( { x : /foo/ } ) //non-date value
-db.getLastError();
assert.eq( 30 , t.count() );
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index 3b251dfa8a9..5646ce22a39 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -29,9 +29,11 @@ masterdb.createCollection(mastercol.getName(), {usePowerOf2Sizes: false});
// create new collection. insert 24 docs, aged at one-hour intervals
now = (new Date()).getTime();
-for ( i=0; i<24; i++ )
- mastercol.insert( { x : new Date( now - ( 3600 * 1000 * i ) ) } );
-masterdb.getLastError();
+var bulk = mastercol.initializeUnorderedBulkOp();
+for ( i=0; i<24; i++ ) {
+ bulk.insert({ x: new Date( now - ( 3600 * 1000 * i )) });
+}
+assert.writeOK(bulk.execute());
rt.awaitReplication();
assert.eq( 24 , mastercol.count() , "docs not inserted on primary" );
assert.eq( 24 , slave1col.count() , "docs not inserted on secondary" );
@@ -48,8 +50,7 @@ assert.eq( 0 , slave1col.stats().userFlags , "userFlags not 0 on secondary");
// create TTL index, wait for TTL monitor to kick in, then check that
// userFlags get set to 1, and correct number of docs age out
-mastercol.ensureIndex( { x : 1 } , { expireAfterSeconds : 20000 } );
-masterdb.getLastError();
+assert.commandWorked(mastercol.ensureIndex({ x: 1 }, { expireAfterSeconds: 20000 }));
rt.awaitReplication();
sleep(70*1000); // TTL monitor runs every 60 seconds, so wait 70
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
index 99c8681a144..15e72b66a81 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
@@ -17,8 +17,7 @@ var primeSystemReplset = function() {
print("create a TTL collection");
var testDB = conn.getDB("test");
- testDB.foo.ensureIndex({x:1}, {expireAfterSeconds : 2});
- testDB.getLastError();
+ assert.commandWorked(testDB.foo.ensureIndex({ x: 1 }, { expireAfterSeconds: 2 }));
};
var restartWithConfig = function() {
@@ -38,8 +37,7 @@ var restartWithConfig = function() {
var restartWithoutConfig = function() {
var localDB = conn.getDB("local");
- localDB.system.replset.remove({});
- localDB.getLastError();
+ assert.writeOK(localDB.system.replset.remove({}));
stopMongod(runner.port(), 15);
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index d5aa45e269a..2c524d8d788 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -20,11 +20,12 @@ s.adminCommand( { shardcollection : ns , key: { _id : 1 } } );
// insert 24 docs, with timestamps at one hour intervals
var now = (new Date()).getTime();
-for ( i=0; i<24; i++ ){
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < 24; i++) {
var past = new Date( now - ( 3600 * 1000 * i ) );
- t.insert( {_id : i , x : past } );
+ bulk.insert({ _id: i, x: past });
}
-s.getDB( dbname ).getLastError();
+assert.writeOK(bulk.execute());
assert.eq( t.count() , 24 , "initial docs not inserted");
// create the TTL index which deletes anything older than ~5.5 hours