| author | David Storch <david.storch@10gen.com> | 2014-05-06 19:00:56 -0400 |
|---|---|---|
| committer | David Storch <david.storch@10gen.com> | 2014-05-06 19:00:56 -0400 |
| commit | 72380726608df663a85bee24d69a20ed2ca8287d (patch) | |
| tree | 735b7724ddc814fdf385d754bd7921975b5de491 | |
| parent | 3061ab54eb2cc642a279becfca0b93f5e17db117 (diff) | |
| download | mongo-72380726608df663a85bee24d69a20ed2ca8287d.tar.gz | |
Revert "SERVER-13741 Migrate remaining tests to use write commands"
This reverts commit 87dc3ae516e1d12a632dc604710661e38ed7b3dd.
118 files changed, 1359 insertions, 1196 deletions
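The bulk of this revert swaps the write-command-style assertions that SERVER-13741 introduced back to legacy getLastError() checks in the jstests, and changes smoke.py's default `--shell-write-mode` from "commands" back to "legacy". A minimal sketch of the two shell idioms the diff toggles between (the collection name `db.foo` is illustrative, not taken from the patch):

```javascript
// Write-commands style (what SERVER-13741 introduced, now being reverted):
// insert() returns a WriteResult that can be checked directly.
var res = db.foo.insert({ x: 1 });
assert.writeOK(res);          // throws if the write reported an error
assert(!res.hasWriteError()); // per-write error details live on the result object

// Legacy style (what this revert restores):
// the write is fire-and-forget; errors are fetched afterwards with getLastError().
db.foo.save({ x: 1 });
assert(!db.getLastError());        // null/empty means the previous write succeeded
printjson(db.getLastErrorObj());   // full GLE document, including n and err
```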
diff --git a/buildscripts/smoke.py b/buildscripts/smoke.py index 0573c66b936..cb9bf52f5d3 100755 --- a/buildscripts/smoke.py +++ b/buildscripts/smoke.py @@ -444,11 +444,14 @@ def skipTest(path): return False -legacyWriteRE = re.compile(r"jstests[/\\]multiVersion") +forceCommandsForDirs = ["aggregation", "auth", "core", "parallel", "replsets", "sharding"] +# look for jstests and one of the above suites separated by either posix or windows slashes +forceCommandsRE = re.compile(r"jstests[/\\](%s)" % ('|'.join(forceCommandsForDirs))) def setShellWriteModeForTest(path, argv): swm = shell_write_mode - if legacyWriteRE.search(path): - swm = "legacy" + if swm == "legacy": # change when the default changes to "commands" + if use_write_commands or forceCommandsRE.search(path): + swm = "commands" argv += ["--writeMode", swm] def runTest(test, result): @@ -1205,7 +1208,7 @@ def main(): parser.add_option('--use-write-commands', dest='use_write_commands', default=False, action='store_true', help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default') - parser.add_option('--shell-write-mode', dest='shell_write_mode', default="commands", + parser.add_option('--shell-write-mode', dest='shell_write_mode', default="legacy", help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default:legacy)') global tests diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js index 8a00aaa4ae5..d318d6467b3 100644 --- a/jstests/auth/lib/commands_lib.js +++ b/jstests/auth/lib/commands_lib.js @@ -1770,6 +1770,7 @@ var authCommandsLib = { command: {renameCollection: firstDbName + ".x", to: secondDbName + ".y"}, setup: function (db) { db.getSisterDB(firstDbName).x.save( {} ); + db.getSisterDB(firstDbName).getLastError(); db.getSisterDB(adminDbName).runCommand({movePrimary: firstDbName, to: shard0name}); db.getSisterDB(adminDbName).runCommand({movePrimary: secondDbName, to: shard0name}); }, diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js index f236a9d4bc7..deaab67a969 100644 --- a/jstests/disk/diskfull.js +++ b/jstests/disk/diskfull.js @@ -22,16 +22,16 @@ if ( doIt ) { m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1", '--nojournal' ); d = m.getDB( "diskfulltest" ); c = d.getCollection( "diskfulltest" ); - assert.writeError(c.insert( { a: 6 } )); - + c.save( { a: 6 } ); + assert(d.getLastError().length ); + printjson( d.getLastErrorObj() ); assert.soon( function() { c.save( { a : 6 } ); return rawMongoProgramOutput().match( /file allocation failure/ ); }, "didn't see 'file allocation failure'" ); - res = assert.writeError(c.insert({ a: 6 })); - var errmsg = res.getWriteError().errmsg; - assert.eq(errmsg, "Can't take a write lock while out of disk space"); // every following fail + c.save( { a: 6 } ); + assert.eq(d.getLastError(), "Can't take a write lock while out of disk space"); // every following fail sleep( 3000 ); diff --git a/jstests/disk/killall.js b/jstests/disk/killall.js index 3be9f530780..a46a3588241 100644 --- a/jstests/disk/killall.js +++ b/jstests/disk/killall.js @@ -21,7 +21,8 @@ var mongod = startMongod( "--port", port, "--dbpath", dbpath, "--nohttpinterface var db = mongod.getDB( "test" ); var collection = db.getCollection( baseName ); -assert.writeOK(collection.insert({})); +collection.save( {} ); +assert( ! db.getLastError() ); s1 = startParallelShell( "db." 
+ baseName + ".count( { $where: function() { while( 1 ) { ; } } } )", port ); // HACK(schwerin): startParallelShell's return value should allow you to block until the command has diff --git a/jstests/disk/preallocate_directoryperdb.js b/jstests/disk/preallocate_directoryperdb.js index 38f55866f60..8b3d551b5d4 100644 --- a/jstests/disk/preallocate_directoryperdb.js +++ b/jstests/disk/preallocate_directoryperdb.js @@ -22,15 +22,14 @@ function checkDb2DirAbsent() { var m = startMongod( "--smallfiles", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" ); db = m.getDB( baseName ); db2 = m.getDB( baseName2 ); -var bulk = db[ baseName ].initializeUnorderedBulkOp(); -var bulk2 = db2[ baseName2 ].initializeUnorderedBulkOp(); -var big = new Array( 5000 ).toString(); +c = db[ baseName ]; +c2 = db2[ baseName2 ]; +big = new Array( 5000 ).toString(); for( var i = 0; i < 3000; ++i ) { - bulk.insert({ b:big }); - bulk2.insert({ b:big }); + c.save( { b:big } ); + c2.save( { b:big } ); + db.getLastError(); } -assert.writeOK(bulk.execute()); -assert.writeOK(bulk2.execute()); // Due to our write pattern, we expect db2's .3 file to be queued up in the file // allocator behind db's .3 file at the time db2 is dropped. This will @@ -44,7 +43,8 @@ db.dropDatabase(); // Try writing a new database, to ensure file allocator is still working. db3 = m.getDB( baseName3 ); c3 = db[ baseName3 ]; -assert.writeOK(c3.insert( {} )); +c3.save( {} ); +assert( !db3.getLastError() ); assert.eq( 1, c3.count() ); checkDb2DirAbsent(); diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js index e8476072ff1..aa8963c6263 100644 --- a/jstests/disk/quota.js +++ b/jstests/disk/quota.js @@ -11,11 +11,10 @@ db = m.getDB( baseName ); big = new Array( 10000 ).toString(); // Insert documents until quota is exhausted. -var coll = db[ baseName ]; -var res = coll.insert({ b: big }); -while( !res.hasWriteError() ) { - res = coll.insert({ b: big }); +while( !db.getLastError() ) { + db[ baseName ].save( {b:big} ); } +printjson( db.getLastError() ); dotTwoDataFile = baseName + ".2"; files = listFiles( dbpath ); @@ -28,7 +27,8 @@ dotTwoDataFile = "local" + ".2"; // Check that quota does not apply to local db, and a .2 file can be created. l = m.getDB( "local" )[ baseName ]; for( i = 0; i < 10000; ++i ) { - assert.writeOK(l.insert({ b: big })); + l.save( {b:big} ); + assert( !db.getLastError() ); dotTwoFound = false; if ( i % 100 != 0 ) { continue; diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js index cdeda1a2a23..a5f07abec8a 100644 --- a/jstests/disk/quota2.js +++ b/jstests/disk/quota2.js @@ -13,12 +13,12 @@ db = m.getDB( baseName ); big = new Array( 10000 ).toString(); // Insert documents until quota is exhausted. -var coll = db[ baseName ]; -var res = coll.insert({ b: big }); -while( !res.hasWriteError() ) { - res = coll.insert({ b: big }); +while( !db.getLastError() ) { + db[ baseName ].save( {b:big} ); } +db.resetError(); + // Trigger allocation of an additional file for a 'special' namespace. for( n = 0; !db.getLastError(); ++n ) { db.createCollection( '' + n ); @@ -27,10 +27,10 @@ for( n = 0; !db.getLastError(); ++n ) { // Check that new docs are saved in the .0 file. 
for( i = 0; i < n; ++i ) { c = db[ ''+i ]; - res = c.insert({ b: big }); - if( !res.hasWriteError() ) { - assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file ); + c.save( {b:big} ); + if( !db.getLastError() ) { + assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file ); } } -} +}
\ No newline at end of file diff --git a/jstests/dur/a_quick.js b/jstests/dur/a_quick.js index bbec8af6939..ab36f91327e 100755 --- a/jstests/dur/a_quick.js +++ b/jstests/dur/a_quick.js @@ -62,7 +62,8 @@ tst.log("start mongod without dur"); var conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur"); tst.log("without dur work"); var d = conn.getDB("test"); -assert.writeOK(d.foo.insert({ _id: 123 })); +d.foo.insert({ _id:123 }); +d.getLastError(); tst.log("stop without dur"); stopMongod(30000); @@ -71,7 +72,8 @@ tst.log("start mongod with dur"); conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--durOptions", 8); tst.log("with dur work"); d = conn.getDB("test"); -assert.writeOK(d.foo.insert({ _id: 123 })); +d.foo.insert({ _id: 123 }); +d.getLastError(); // wait // we could actually do getlasterror fsync:1 now, but maybe this is agood // as it will assure that commits happen on a timely basis. a bunch of the other dur/*js diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js index 8c3864e8118..3d7119ab134 100644 --- a/jstests/dur/closeall.js +++ b/jstests/dur/closeall.js @@ -30,24 +30,26 @@ function f(variant, quickCommits, paranoid) { print("closeall.js run test"); print("wait for initial sync to finish") // SERVER-4852 - assert.writeOK(db1.foo.insert({}, { writeConcern: { w: 2 }})); - assert.writeOK(db1.foo.remove({}, { writeConcern: { w: 2 }})); + db1.foo.insert({}); + err = db1.getLastErrorObj(2); + printjson(err) + assert.isnull(err.err); + db1.foo.remove({}); + err = db1.getLastErrorObj(2); + printjson(err) + assert.isnull(err.err); print("initial sync done") - var writeOps = startParallelShell('var coll = db.getSiblingDB("closealltest").foo; \ - var bulk = coll.initializeUnorderedBulkOp(); \ - for( var i = 0; i < ' + N + '; i++ ) { \ - bulk.insert({ x: 1 }); \ - if ( i % 7 == 0 ) \ - bulk.insert({ x: 99, y: 2 }); \ - if ( i % 49 == 0 ) \ - bulk.find({ x: 99 }).update( \ - { a: 1, b: 2, c: 3, d: 4 }); \ - if( i == 800 ) \ - coll.ensureIndex({ x: 1 }); \ - }', 30001); - - for( var i = 0; i < N; i++ ) { + for( var i = 0; i < N; i++ ) { + db1.foo.insert({x:1}); // this does wait for a return code so we will get some parallelism + if( i % 7 == 0 ) + db1.foo.insert({x:99, y:2}); + if( i % 49 == 0 ) + db1.foo.update({ x: 99 }, { a: 1, b: 2, c: 3, d: 4 }); + if (i % 100 == 0) + db1.foo.find(); + if( i == 800 ) + db1.foo.ensureIndex({ x: 1 }); var res = null; try { if( variant == 1 ) @@ -59,6 +61,7 @@ function f(variant, quickCommits, paranoid) { res = db2.adminCommand("closeAllDatabases"); } catch (e) { + sleep(5000); // sleeping a little makes console output order prettier print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i); try { print("getlasterror:"); @@ -71,6 +74,8 @@ function f(variant, quickCommits, paranoid) { print("got another exception : " + e); } print("\n\n\n"); + // sleep a little to capture possible mongod output? 
+ sleep(2000); throw e; } assert( res.ok, "closeAllDatabases res.ok=false"); @@ -82,8 +87,6 @@ function f(variant, quickCommits, paranoid) { print("closeall.js shutting down servers"); stopMongod(30002); stopMongod(30001); - - writeOps(); } // Skip this test on 32-bit Windows (unfixable failures in MapViewOfFileEx) diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js index a604439424d..a1efba5595d 100644 --- a/jstests/dur/diskfull.js +++ b/jstests/dur/diskfull.js @@ -50,12 +50,15 @@ function work() { log("work"); try { var d = conn.getDB("test"); - var big = new Array( 5000 ).toString(); - var bulk = d.foo.initializeUnorderedBulkOp(); + + big = new Array( 5000 ).toString(); for( i = 0; i < 10000; ++i ) { - bulk.insert({ _id: i, b: big }); + d.foo.insert( { _id:i, b:big } ); } - assert.writeOK(bulk.execute()); + + gle = d.getLastError(); + if ( gle ) + throw gle; } catch ( e ) { print( e ); raise( e ); diff --git a/jstests/dur/dropdb.js b/jstests/dur/dropdb.js index 54de6bdd7f2..4fb94cc7d1e 100644 --- a/jstests/dur/dropdb.js +++ b/jstests/dur/dropdb.js @@ -62,8 +62,10 @@ function work() { d.dropDatabase(); + d.foo.insert({ _id: 100 }); + // assure writes applied in case we kill -9 on return from this function - assert.writeOK(d.foo.insert({ _id: 100 }, { writeConcern: { fsync: 1 }})); + assert(d.runCommand({ getlasterror: 1, fsync: 1 }).ok, "getlasterror not ok"); } function verify() { diff --git a/jstests/dur/dur1.js b/jstests/dur/dur1.js index 0aecaaac21c..cb4495aea52 100755 --- a/jstests/dur/dur1.js +++ b/jstests/dur/dur1.js @@ -64,6 +64,12 @@ function work() { // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); +// d.a.update({ _id: 4 }, { $inc: { x: 1} }); +// d.a.reIndex(); + + // assure writes applied in case we kill -9 on return from this function + d.getLastError();
+
log("endwork");
return d; } diff --git a/jstests/dur/dur1_tool.js b/jstests/dur/dur1_tool.js index adee933fdb4..fdfe05236f4 100755 --- a/jstests/dur/dur1_tool.js +++ b/jstests/dur/dur1_tool.js @@ -63,6 +63,13 @@ function work() { // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); + +// d.a.update({ _id: 4 }, { $inc: { x: 1} }); +// d.a.reIndex(); + + // assure writes applied in case we kill -9 on return from this function + d.getLastError();
+
log("endwork");
return d; } diff --git a/jstests/dur/indexbg2.js b/jstests/dur/indexbg2.js index d239d4eaa44..a7484f0a561 100644 --- a/jstests/dur/indexbg2.js +++ b/jstests/dur/indexbg2.js @@ -15,5 +15,5 @@ for( var i = 1000; i < 2000; ++i ) { t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} ); t.remove( {_id:i} ); } -assert.writeOK(t.insert({ _id: 2000, a: 'abcd', b: 'bcde', x: 'four score and seven years ago' })); - +t.insert( {_id:2000,a:'abcd',b:'bcde',x:'four score and seven years ago'} ); +assert( !t.getDB().getLastError() ); diff --git a/jstests/dur/manyRestart.js b/jstests/dur/manyRestart.js index 5a68afdecbb..f434278ca9a 100755 --- a/jstests/dur/manyRestart.js +++ b/jstests/dur/manyRestart.js @@ -63,6 +63,12 @@ function work() { // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); + +// d.a.update({ _id: 4 }, { $inc: { x: 1} }); +// d.a.reIndex(); + + // assure writes applied in case we kill -9 on return from this function + d.getLastError(); log("endwork"); return d; } diff --git a/jstests/dur/md5.js b/jstests/dur/md5.js index 1b4ec43340e..1773091186a 100644 --- a/jstests/dur/md5.js +++ b/jstests/dur/md5.js @@ -29,6 +29,13 @@ function work() { // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); + + // d.a.update({ _id: 4 }, { $inc: { x: 1} }); + // d.a.reIndex(); + + // assure writes applied in case we kill -9 on return from this function + d.getLastError(); + log("endwork"); } diff --git a/jstests/dur/oplog.js b/jstests/dur/oplog.js index 8ded3c980a9..cd7d7c5e6ef 100755 --- a/jstests/dur/oplog.js +++ b/jstests/dur/oplog.js @@ -81,6 +81,9 @@ function work() { d.foo.insert({ _id: 6, q: "aaaaa", b: big, z: 3 }); d.foo.update({ _id: 5 }, { $set: { z: 99} }); + // assure writes applied in case we kill -9 on return from this function + d.getLastError(); + log("endwork"); verify(); diff --git a/jstests/gle/block2.js b/jstests/gle/block2.js deleted file mode 100644 index 142d51519b2..00000000000 --- a/jstests/gle/block2.js +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Basic gle testing for master/slave environment. Write command version also - * available at jstests/repl. - */ - -var rt = new ReplTest( "block1" ); - -var m = rt.start( true ); -var s = rt.start( false ); - -if (m.writeMode() == 'commands') { - jsTest.log('Skipping test since commands mode is already tested in repl/'); -} -else { - - function setup(){ - - dbm = m.getDB( "foo" ); - dbs = s.getDB( "foo" ); - - tm = dbm.bar; - ts = dbs.bar; - } - setup(); - - function check( msg ){ - assert.eq( tm.count() , ts.count() , "check: " + msg ); - } - - function worked( w , wtimeout ){ - var gle = dbm.getLastError( w , wtimeout ); - if (gle != null) { - printjson(gle); - } - return gle == null; - } - - check( "A" ); - - tm.save( { x : 1 } ); - assert( worked( 2 ) , "B" ); - - tm.save( { x : 2 } ); - assert( worked( 2 , 3000 ) , "C" ); - - rt.stop( false ); - tm.save( { x : 3 } ); - assert.eq( 3 , tm.count() , "D1" ); - assert( ! 
worked( 2 , 3000 ) , "D2" ); - - s = rt.start( false ); - setup(); - assert( worked( 2 , 30000 ) , "E" ); - -} - -rt.stop(); - diff --git a/jstests/gle/sync1.js b/jstests/gle/sync1.js deleted file mode 100644 index 83d26d1e71f..00000000000 --- a/jstests/gle/sync1.js +++ /dev/null @@ -1,57 +0,0 @@ -// TODO: remove test after we deprecate SyncClusterConnection - -var test = new SyncCCTest( "sync1" ); - -if (test.conn.writeMode() == 'commands') { - jsTest.log('Skipping test not compatible with write commands'); -} -else { - - db = test.conn.getDB( "test" ) - t = db.sync1 - t.save( { x : 1 } ) - assert.eq( 1 , t.find().itcount() , "A1" ); - assert.eq( 1 , t.find().count() , "A2" ); - t.save( { x : 2 } ) - assert.eq( 2 , t.find().itcount() , "A3" ); - assert.eq( 2 , t.find().count() , "A4" ); - - test.checkHashes( "test" , "A3" ); - - test.tempKill(); - assert.throws( function(){ t.save( { x : 3 } ); } , null , "B1" ); - // It's ok even for some of the mongod to be unreachable for read-only cmd - assert.eq( 2, t.find({}).count() ); - // It's NOT ok for some of the mongod to be unreachable for write cmd - assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); }); - assert.eq( 2 , t.find().itcount() , "B2" ); - test.tempStart(); - test.checkHashes( "test" , "B3" ); - - // Trying killing the second mongod - test.tempKill( 1 ); - assert.throws( function(){ t.save( { x : 3 } ); } ); - // It's ok even for some of the mongod to be unreachable for read-only cmd - assert.eq( 2, t.find({}).count() ); - // It's NOT ok for some of the mongod to be unreachable for write cmd - assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); }); - assert.eq( 2 , t.find().itcount() ); - test.tempStart( 1 ); - - assert.eq( 2 , t.find().itcount() , "C1" ); - assert.soon( function(){ - try { - t.remove( { x : 1 } ) - return true; - } - catch ( e ){ - print( e ); - } - return false; - } ) - t.find().forEach( printjson ) - assert.eq( 1 , t.find().itcount() , "C2" ); - - test.stop(); - -} diff --git a/jstests/gle/sync4.js b/jstests/gle/sync4.js deleted file mode 100644 index b6b1a777856..00000000000 --- a/jstests/gle/sync4.js +++ /dev/null @@ -1,26 +0,0 @@ -// TODO: remove test after we deprecate SyncClusterConnection - -test = new SyncCCTest( "sync4" ) - -if (test.conn.writeMode() == 'commands') { - jsTest.log('Skipping test not compatible with write commands'); -} -else { - - db = test.conn.getDB( "test" ) - t = db.sync4 - - for ( i=0; i<1000; i++ ){ - t.insert( { _id : i , x : "asdasdsdasdas" } ) - } - db.getLastError(); - - test.checkHashes( "test" , "A0" ); - assert.eq( 1000 , t.find().count() , "A1" ) - assert.eq( 1000 , t.find().itcount() , "A2" ) - assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A2" ) - - test.stop(); - -} - diff --git a/jstests/gle/sync8.js b/jstests/gle/sync8.js deleted file mode 100644 index 81404785ac3..00000000000 --- a/jstests/gle/sync8.js +++ /dev/null @@ -1,21 +0,0 @@ -// TODO: remove test after we deprecate SyncClusterConnection - -// Test for SERVER-11492 - make sure that upserting a new document reports n:1 in GLE - -var test = new SyncCCTest( "sync1" ); - -if (test.conn.writeMode() == 'commands') { - jsTest.log('Skipping test not compatible with write commands'); -} -else { - var db = test.conn.getDB( "test" ); - var t = db.sync8; - t.remove({}); - - t.update({_id:1}, {$set:{a:1}}, true); - var le = db.getLastErrorObj(); - assert.eq(1, le.n); - - test.stop(); - -} diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js index 
248f5e49a6c..60cb7733f5d 100644 --- a/jstests/libs/geo_near_random.js +++ b/jstests/libs/geo_near_random.js @@ -12,16 +12,16 @@ GeoNearRandomTest = function(name) { GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){ - if(!indexBounds){ - scale = scale || 1; // scale is good for staying away from edges - return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale]; - } - else{ - var range = indexBounds.max - indexBounds.min; - var eps = Math.pow(2, -40); - // Go very close to the borders but not quite there. - return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min]; - } + if(!indexBounds){ + scale = scale || 1; // scale is good for staying away from edges + return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale]; + } + else{ + var range = indexBounds.max - indexBounds.min; + var eps = Math.pow(2, -40); + // Go very close to the borders but not quite there. + return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min]; + } } @@ -29,29 +29,27 @@ GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) { assert.eq(this.nPts, 0, "insertPoints already called"); this.nPts = nPts; - var bulk = this.t.initializeUnorderedBulkOp(); for (var i=0; i<nPts; i++){ - bulk.insert({ _id: i, loc: this.mkPt(scale, indexBounds) }); + this.t.insert({_id: i, loc: this.mkPt(scale, indexBounds)}); } - assert.writeOK(bulk.execute()); if(!indexBounds) - this.t.ensureIndex({loc: '2d'}); + this.t.ensureIndex({loc: '2d'}); else - this.t.ensureIndex({loc: '2d'}, indexBounds) + this.t.ensureIndex({loc: '2d'}, indexBounds) } GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) { for (var i=0; i < short.length; i++){ - - var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0] - var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1] - var dS = short[i].obj ? short[i].dis : 1 - - var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0] - var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1] - var dL = long[i].obj ? long[i].dis : 1 - + + var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0] + var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1] + var dS = short[i].obj ? short[i].dis : 1 + + var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0] + var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1] + var dL = long[i].obj ? 
long[i].dis : 1 + assert.eq([xS, yS, dS], [xL, yL, dL]); } } diff --git a/jstests/misc/biginsert.js b/jstests/misc/biginsert.js new file mode 100755 index 00000000000..ebbdc18ba3e --- /dev/null +++ b/jstests/misc/biginsert.js @@ -0,0 +1,18 @@ +o = "xxxxxxxxxxxxxxxxxxx"; +o = o + o; +o + o; +o = o + o; +o = o + o; +o = o + o; + +var B = 40000; +var last = new Date(); +for (i = 0; i < 30000000; i++) { + db.foo.insert({ o: o }); + if (i % B == 0) { + var n = new Date(); + print(i); + print("per sec: " + B*1000 / (n - last)); + last = n; + } +} diff --git a/jstests/noPassthrough/disk_reuse1.js b/jstests/noPassthrough/disk_reuse1.js index c208dcefb1f..249985edd1a 100644 --- a/jstests/noPassthrough/disk_reuse1.js +++ b/jstests/noPassthrough/disk_reuse1.js @@ -16,36 +16,31 @@ while ( s.length < 1024 ) state = {} -var bulk = t.initializeUnorderedBulkOp(); -for (var i = 0; i < N; i++) { - bulk.insert({ _id: i, s: s }); -} -assert.writeOK(bulk.execute()); +for ( i=0; i<N; i++ ) + t.insert( { _id : i , s : s } ); orig = t.stats(); t.remove({}); -bulk = t.initializeUnorderedBulkOp(); -for (i = 0; i < N; i++) { - bulk.insert({ _id: i, s: s }); -} -assert.writeOK(bulk.execute()); +for ( i=0; i<N; i++ ) + t.insert( { _id : i , s : s } ); assert.eq( orig.storageSize , t.stats().storageSize , "A" ) -for (j = 0; j < 100; j++){ - for (i = 0; i < N; i++){ - bulk = t.initializeUnorderedBulkOp(); +for ( j=0; j<100; j++ ){ + for ( i=0; i<N; i++ ){ var r = Math.random(); if ( r > .5 ) - bulk.find({ _id: i }).remove(); + t.remove( { _id : i } ) else - bulk.find({ _id: i }).upsert().updateOne({ _id: i, s: s }); + t.insert( { _id : i , s : s } ) } - assert.writeOK(bulk.execute()); + //printjson( t.stats() ); + assert.eq( orig.storageSize , t.stats().storageSize , "B" + j ) } + test.stop(); diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js index 9d9203242d3..ffeb26a2606 100644 --- a/jstests/noPassthrough/geo_full.js +++ b/jstests/noPassthrough/geo_full.js @@ -22,89 +22,96 @@ testServer = new SlowWeeklyMongod( "geo_full" ) db = testServer.getDB( "test" ); var randEnvironment = function(){ - - // Normal earth environment - if( Random.rand() < 0.5 ){ - return { max : 180, - min : -180, - bits : Math.floor( Random.rand() * 32 ) + 1, - earth : true, - bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) }; - } - - var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ] - var scale = scales[ Math.floor( Random.rand() * scales.length ) ] - var offset = Random.rand() * scale - + + // Normal earth environment + if( Random.rand() < 0.5 ){ + return { max : 180, + min : -180, + bits : Math.floor( Random.rand() * 32 ) + 1, + earth : true, + bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) } + } + + var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ] + var scale = scales[ Math.floor( Random.rand() * scales.length ) ] + var offset = Random.rand() * scale + var max = Random.rand() * scale + offset - var min = - Random.rand() * scale + offset - var bits = Math.floor( Random.rand() * 32 ) + 1 - var bits = Math.floor( Random.rand() * 32 ) + 1 - var range = max - min + var min = - Random.rand() * scale + offset + var bits = Math.floor( Random.rand() * 32 ) + 1 + var bits = Math.floor( Random.rand() * 32 ) + 1 + var range = max - min var bucketSize = range / ( 4 * 1024 * 1024 * 1024 ) - - return { max : max, - min : min, - bits : bits, - earth : false, - bucketSize : bucketSize } -}; + + return { max : max, + min : min, + bits : bits, + earth : false, + bucketSize : 
bucketSize } + +} var randPoint = function( env, query ) { - - if( query && Random.rand() > 0.5 ) - return query.exact - - if( env.earth ) - return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ] - - var range = env.max - env.min - return [ Random.rand() * range + env.min, Random.rand() * range + env.min ]; + + if( query && Random.rand() > 0.5 ) + return query.exact + + if( env.earth ) + return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ] + + var range = env.max - env.min + return [ Random.rand() * range + env.min, Random.rand() * range + env.min ]; } var randLocType = function( loc, wrapIn ){ - return randLocTypes( [ loc ], wrapIn )[0] + return randLocTypes( [ loc ], wrapIn )[0] } var randLocTypes = function( locs, wrapIn ) { - - var rLocs = [] - - for( var i = 0; i < locs.length; i++ ){ + + var rLocs = [] + + for( var i = 0; i < locs.length; i++ ){ rLocs.push( locs[i] ) - } - - if( wrapIn ){ - var wrappedLocs = [] - for( var i = 0; i < rLocs.length; i++ ){ - var wrapper = {} - wrapper[wrapIn] = rLocs[i] - wrappedLocs.push( wrapper ) - } - - return wrappedLocs - } - - return rLocs -}; + // {x:1, y:1} \ne [1,1]. + //if( Random.rand() < 0.5 ) + //rLocs.push( { x : locs[i][0], y : locs[i][1] } ) + //else + } + + if( wrapIn ){ + var wrappedLocs = [] + for( var i = 0; i < rLocs.length; i++ ){ + var wrapper = {} + wrapper[wrapIn] = rLocs[i] + wrappedLocs.push( wrapper ) + } + + return wrappedLocs + } + + return rLocs + +} var randDataType = function() { - var scales = [ 1, 10, 100, 1000, 10000 ] - var docScale = scales[ Math.floor( Random.rand() * scales.length ) ] - var locScale = scales[ Math.floor( Random.rand() * scales.length ) ] - - var numDocs = 40000 - var maxLocs = 40000 - // Make sure we don't blow past our test resources - while( numDocs * maxLocs > 40000 ){ - numDocs = Math.floor( Random.rand() * docScale ) + 1 - maxLocs = Math.floor( Random.rand() * locScale ) + 1 - } - - return { numDocs : numDocs, - maxLocs : maxLocs } -}; + var scales = [ 1, 10, 100, 1000, 10000 ] + var docScale = scales[ Math.floor( Random.rand() * scales.length ) ] + var locScale = scales[ Math.floor( Random.rand() * scales.length ) ] + + var numDocs = 40000 + var maxLocs = 40000 + // Make sure we don't blow past our test resources + while( numDocs * maxLocs > 40000 ){ + numDocs = Math.floor( Random.rand() * docScale ) + 1 + maxLocs = Math.floor( Random.rand() * locScale ) + 1 + } + + return { numDocs : numDocs, + maxLocs : maxLocs } + +} function deg2rad(arg) { return arg * Math.PI / 180.0; } function rad2deg(arg) { return arg * 180.0 / Math.PI; } @@ -133,181 +140,194 @@ function pointIsOK(startPoint, radius, env) { } var randQuery = function( env ) { - var center = randPoint( env ) - - var sphereRadius = -1 - var sphereCenter = null - if( env.earth ){ - // Get a start point that doesn't require wrapping - // TODO: Are we a bit too aggressive with wrapping issues? - var i - for( i = 0; i < 5; i++ ){ + + var center = randPoint( env ) + + var sphereRadius = -1 + var sphereCenter = null + if( env.earth ){ + // Get a start point that doesn't require wrapping + // TODO: Are we a bit too aggressive with wrapping issues? 
+ var i + for( i = 0; i < 5; i++ ){ sphereRadius = Random.rand() * 45 * Math.PI / 180 sphereCenter = randPoint( env ) if (pointIsOK(sphereCenter, sphereRadius, env)) { break; } - } - if( i == 5 ) sphereRadius = -1; - - } - - var box = [ randPoint( env ), randPoint( env ) ] - - var boxPoly = [[ box[0][0], box[0][1] ], - [ box[0][0], box[1][1] ], - [ box[1][0], box[1][1] ], - [ box[1][0], box[0][1] ] ] - - if( box[0][0] > box[1][0] ){ - var swap = box[0][0] - box[0][0] = box[1][0] - box[1][0] = swap - } - - if( box[0][1] > box[1][1] ){ - var swap = box[0][1] - box[0][1] = box[1][1] - box[1][1] = swap - } + /* + var t = db.testSphere; t.drop(); t.ensureIndex({ loc : "2d" }, env ) + try{ t.find({ loc : { $within : { $centerSphere : [ sphereCenter, sphereRadius ] } } } ).count(); var err; if( err = db.getLastError() ) throw err; } + catch(e) { print( e ); continue } + print( " Radius " + sphereRadius + " and center " + sphereCenter + " ok ! ") + break; + */ + } + if( i == 5 ) sphereRadius = -1; + + } + + var box = [ randPoint( env ), randPoint( env ) ] + + var boxPoly = [[ box[0][0], box[0][1] ], + [ box[0][0], box[1][1] ], + [ box[1][0], box[1][1] ], + [ box[1][0], box[0][1] ] ] + + if( box[0][0] > box[1][0] ){ + var swap = box[0][0] + box[0][0] = box[1][0] + box[1][0] = swap + } + + if( box[0][1] > box[1][1] ){ + var swap = box[0][1] + box[0][1] = box[1][1] + box[1][1] = swap + } + + return { center : center, + radius : box[1][0] - box[0][0], + exact : randPoint( env ), + sphereCenter : sphereCenter, + sphereRadius : sphereRadius, + box : box, + boxPoly : boxPoly } + +} - return { center : center, - radius : box[1][0] - box[0][0], - exact : randPoint( env ), - sphereCenter : sphereCenter, - sphereRadius : sphereRadius, - box : box, - boxPoly : boxPoly } -}; var resultTypes = { "exact" : function( loc ){ - return query.exact[0] == loc[0] && query.exact[1] == loc[1] + return query.exact[0] == loc[0] && query.exact[1] == loc[1] }, "center" : function( loc ){ - return Geo.distance( query.center, loc ) <= query.radius + return Geo.distance( query.center, loc ) <= query.radius }, "box" : function( loc ){ - return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] && - loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1] - -}, + return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] && + loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1] + +}, "sphere" : function( loc ){ - return ( query.sphereRadius >= 0 ? - ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false ); -}, + return ( query.sphereRadius >= 0 ? ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false ) +}, "poly" : function( loc ){ - return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] && - loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]; + return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] && + loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1] }} var queryResults = function( locs, query, results ){ - - if( ! 
results["center"] ){ - for( var type in resultTypes ){ - results[type] = { - docsIn : 0, - docsOut : 0, - locsIn : 0, - locsOut : 0 - } - } - } - - var indResults = {} - for( var type in resultTypes ){ - indResults[type] = { - docIn : false, - locsIn : 0, - locsOut : 0 - } - } - - for( var type in resultTypes ){ - - var docIn = false - for( var i = 0; i < locs.length; i++ ){ - if( resultTypes[type]( locs[i] ) ){ - results[type].locsIn++ - indResults[type].locsIn++ - indResults[type].docIn = true - } - else{ - results[type].locsOut++ - indResults[type].locsOut++ - } - } - if( indResults[type].docIn ) results[type].docsIn++ - else results[type].docsOut++ - - } - - return indResults + + if( ! results["center"] ){ + for( var type in resultTypes ){ + results[type] = { + docsIn : 0, + docsOut : 0, + locsIn : 0, + locsOut : 0 + } + } + } + + var indResults = {} + for( var type in resultTypes ){ + indResults[type] = { + docIn : false, + locsIn : 0, + locsOut : 0 + } + } + + for( var type in resultTypes ){ + + var docIn = false + for( var i = 0; i < locs.length; i++ ){ + if( resultTypes[type]( locs[i] ) ){ + results[type].locsIn++ + indResults[type].locsIn++ + indResults[type].docIn = true + } + else{ + results[type].locsOut++ + indResults[type].locsOut++ + } + } + if( indResults[type].docIn ) results[type].docsIn++ + else results[type].docsOut++ + + } + + return indResults + } var randQueryAdditions = function( doc, indResults ){ - - for( var type in resultTypes ){ - var choice = Random.rand() - if( Random.rand() < 0.25 ) - doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } ) - else if( Random.rand() < 0.5 ) - doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } ) - else if( Random.rand() < 0.75 ) - doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] ) - else - doc[type] = ( indResults[type].docIn ? [{ docIn: [ "yes" ] }] : [{ docIn: [ "no" ] }]); - } + + for( var type in resultTypes ){ + var choice = Random.rand() + if( Random.rand() < 0.25 ) + doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } ) + else if( Random.rand() < 0.5 ) + doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } ) + else if( Random.rand() < 0.75 ) + doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] ) + else + doc[type] = ( indResults[type].docIn ? 
[ { docIn : [ "yes" ] } ] : [ { docIn : [ "no" ] } ] ) + } + } var randIndexAdditions = function( indexDoc ){ - - for( var type in resultTypes ){ - - if( Random.rand() < 0.5 ) continue; - - var choice = Random.rand() - if( Random.rand() < 0.5 ) - indexDoc[type] = 1 - else - indexDoc[type + ".docIn"] = 1; - } -}; + + for( var type in resultTypes ){ + + if( Random.rand() < 0.5 ) continue; + + var choice = Random.rand() + if( Random.rand() < 0.5 ) + indexDoc[type] = 1 + else + indexDoc[type + ".docIn"] = 1 + + } + +} var randYesQuery = function(){ - - var choice = Math.floor( Random.rand() * 7 ) - if( choice == 0 ) - return { $ne : "no" } - else if( choice == 1 ) - return "yes" - else if( choice == 2 ) - return /^yes/ - else if( choice == 3 ) - return { $in : [ "good", "yes", "ok" ] } - else if( choice == 4 ) - return { $exists : true } - else if( choice == 5 ) - return { $nin : [ "bad", "no", "not ok" ] } - else if( choice == 6 ) - return { $not : /^no/ } + + var choice = Math.floor( Random.rand() * 7 ) + if( choice == 0 ) + return { $ne : "no" } + else if( choice == 1 ) + return "yes" + else if( choice == 2 ) + return /^yes/ + else if( choice == 3 ) + return { $in : [ "good", "yes", "ok" ] } + else if( choice == 4 ) + return { $exists : true } + else if( choice == 5 ) + return { $nin : [ "bad", "no", "not ok" ] } + else if( choice == 6 ) + return { $not : /^no/ } } var locArray = function( loc ){ - if( loc.x ) return [ loc.x, loc.y ] - if( ! loc.length ) return [ loc[0], loc[1] ] - return loc + if( loc.x ) return [ loc.x, loc.y ] + if( ! loc.length ) return [ loc[0], loc[1] ] + return loc } var locsArray = function( locs ){ - if( locs.loc ){ - arr = [] - for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) ) - return arr - } - else{ - arr = [] - for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) ) - return arr - } + if( locs.loc ){ + arr = [] + for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) ) + return arr + } + else{ + arr = [] + for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) ) + return arr + } } var minBoxSize = function( env, box ){ @@ -315,16 +335,16 @@ var minBoxSize = function( env, box ){ } var minBucketScale = function( env, box ){ - + if( box.length && box[0].length ) box = [ box[0][0] - box[1][0], box[0][1] - box[1][1] ] - + if( box.length ) box = Math.max( box[0], box[1] ) - + print( box ) print( env.bucketSize ) - + return Math.ceil( Math.log( box / env.bucketSize ) / Math.log( 2 ) ) } @@ -332,114 +352,119 @@ var minBucketScale = function( env, box ){ // TODO: Add spherical $uniqueDocs tests var numTests = 100 -// Our seed will change every time this is run, but +// Our seed will change every time this is run, but // each individual test will be reproducible given // that seed and test number var seed = new Date().getTime() //seed = 175 + 288 + 12 for ( var test = 0; test < numTests; test++ ) { - - Random.srand( seed + test ); - //Random.srand( 42240 ) - //Random.srand( 7344 ) - var t = db.testAllGeo - t.drop() - - print( "Generating test environment #" + test ) - var env = randEnvironment() - //env.bits = 11 - var query = randQuery( env ) - var data = randDataType() - //data.numDocs = 5; data.maxLocs = 1; - var paddingSize = Math.floor( Random.rand() * 10 + 1 ) - var results = {} - var totalPoints = 0 - print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " ) - - var bulk = t.initializeUnorderedBulkOp(); - for ( var i = 0; i < 
data.numDocs; i++ ) { - var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 ) - totalPoints += numLocs - - var multiPoint = [] - for ( var p = 0; p < numLocs; p++ ) { - var point = randPoint( env, query ) - multiPoint.push( point ) - } - - var indResults = queryResults( multiPoint, query, results ) - - var doc - // Nest the keys differently - if( Random.rand() < 0.5 ) - doc = { locs : { loc : randLocTypes( multiPoint ) } } - else - doc = { locs : randLocTypes( multiPoint, "loc" ) } - - randQueryAdditions( doc, indResults ) - - doc._id = i - bulk.insert( doc ); - } - assert.writeOK(bulk.execute()); - - var indexDoc = { "locs.loc" : "2d" }; - randIndexAdditions( indexDoc ); - t.ensureIndex( indexDoc, env ); - assert.isnull( db.getLastError() ); - - var padding = "x" - for( var i = 0; i < paddingSize; i++ ) padding = padding + padding - - print( padding ) - - printjson( { seed : seed, - test: test, - env : env, - query : query, - data : data, - results : results, - paddingSize : paddingSize } ) - - // exact - print( "Exact query..." ) - assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() ) - - // $center - print( "Center query..." ) - print( "Min box : " + minBoxSize( env, query.radius ) ) - assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() ) - - print( "Center query update..." ) - var res = t.update({ "locs.loc": { $within: { $center: [ query.center, query.radius ], - $uniqueDocs: true }}, - "center.docIn": randYesQuery() }, - { $set: { centerPaddingA: padding }}, false, true); - assert.eq( results.center.docsIn, res.nModified ); - - if( query.sphereRadius >= 0 ){ - - print( "Center sphere query...") - // $centerSphere - assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() ) - - print( "Center sphere query update..." ) - res = t.update({ "locs.loc": { $within: { - $centerSphere: [ query.sphereCenter, query.sphereRadius ], - $uniqueDocs: true } }, - "sphere.docIn" : randYesQuery() }, - { $set: { spherePaddingA: padding } }, false, true); - assert.eq( results.sphere.docsIn, res.nModified ); - } - - // $box - print( "Box query..." ) - assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() ) - - // $polygon - print( "Polygon query..." 
) - assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() ) + + Random.srand( seed + test ); + //Random.srand( 42240 ) + //Random.srand( 7344 ) + var t = db.testAllGeo + t.drop() + + print( "Generating test environment #" + test ) + var env = randEnvironment() + //env.bits = 11 + var query = randQuery( env ) + var data = randDataType() + //data.numDocs = 5; data.maxLocs = 1; + var paddingSize = Math.floor( Random.rand() * 10 + 1 ) + var results = {} + var totalPoints = 0 + print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " ) + + // Index after a random number of docs added + var indexIt = Math.floor( Random.rand() * data.numDocs ) + + for ( var i = 0; i < data.numDocs; i++ ) { + + if( indexIt == i ){ + var indexDoc = { "locs.loc" : "2d" } + randIndexAdditions( indexDoc ) + + // printjson( indexDoc ) + + t.ensureIndex( indexDoc, env ) + assert.isnull( db.getLastError() ) + } + + var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 ) + totalPoints += numLocs + + var multiPoint = [] + for ( var p = 0; p < numLocs; p++ ) { + var point = randPoint( env, query ) + multiPoint.push( point ) + } + + var indResults = queryResults( multiPoint, query, results ) + + var doc + // Nest the keys differently + if( Random.rand() < 0.5 ) + doc = { locs : { loc : randLocTypes( multiPoint ) } } + else + doc = { locs : randLocTypes( multiPoint, "loc" ) } + + randQueryAdditions( doc, indResults ) + + //printjson( doc ) + doc._id = i + t.insert( doc ) + + } + + var padding = "x" + for( var i = 0; i < paddingSize; i++ ) padding = padding + padding + + print( padding ) + + printjson( { seed : seed, + test: test, + env : env, + query : query, + data : data, + results : results, + paddingSize : paddingSize } ) + + // exact + print( "Exact query..." ) + assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() ) + + // $center + print( "Center query..." ) + print( "Min box : " + minBoxSize( env, query.radius ) ) + assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() ) + + print( "Center query update..." ) + // printjson( t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).toArray() ) + t.update( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : true } }, "center.docIn" : randYesQuery() }, { $set : { "centerPaddingA" : padding } }, false, true ) + assert.eq( results.center.docsIn, t.getDB().getLastErrorObj().n ) + + if( query.sphereRadius >= 0 ){ + + print( "Center sphere query...") + // $centerSphere + assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() ) + + print( "Center sphere query update..." 
) + // printjson( t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).toArray() ) + t.update( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ], $uniqueDocs : true } }, "sphere.docIn" : randYesQuery() }, { $set : { "spherePaddingA" : padding } }, false, true ) + assert.eq( results.sphere.docsIn, t.getDB().getLastErrorObj().n ) + + } + + // $box + print( "Box query..." ) + assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() ) + + // $polygon + print( "Polygon query..." ) + assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() ) // $near, $nearSphere and geoNear results have a default document limit of 100. var defaultDocLimit = 100; @@ -481,12 +506,12 @@ for ( var test = 0; test < numTests; test++ ) { var num = Math.min( 2* defaultDocLimit, 2 * results.center.docsIn); - var output = db.runCommand( { - geoNear : "testAllGeo", - near : query.center, - maxDistance : query.radius , - includeLocs : true, - num : num } ).results + var output = db.runCommand( { + geoNear : "testAllGeo", + near : query.center, + maxDistance : query.radius , + includeLocs : true, + num : num } ).results assert.eq( Math.min( num, results.center.docsIn ), output.length, @@ -495,36 +520,40 @@ for ( var test = 0; test < numTests; test++ ) { "; radius: " + query.radius + "; docs: " + results.center.docsIn + "; locs: " + results.center.locsIn ) - var distance = 0; - for ( var i = 0; i < output.length; i++ ) { - var retDistance = output[i].dis - var retLoc = locArray( output[i].loc ) - - var arrLocs = locsArray( output[i].obj.locs ) - - assert.contains( retLoc, arrLocs ) - - var distInObj = false - for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) { - var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] ) - distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 ) - } - - assert( distInObj ) - assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 ) - assert.lte( retDistance, query.radius ) - assert.gte( retDistance, distance ) - distance = retDistance - } - - } - - // $polygon + var distance = 0; + for ( var i = 0; i < output.length; i++ ) { + var retDistance = output[i].dis + var retLoc = locArray( output[i].loc ) + + // print( "Dist from : " + results[i].loc + " to " + startPoint + " is " + // + retDistance + " vs " + radius ) + + var arrLocs = locsArray( output[i].obj.locs ) + + assert.contains( retLoc, arrLocs ) + + // printjson( arrLocs ) + + var distInObj = false + for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) { + var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] ) + distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 ) + } + + assert( distInObj ) + assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 ) + assert.lte( retDistance, query.radius ) + assert.gte( retDistance, distance ) + distance = retDistance + } + + } + + // $polygon print( "Polygon remove..." 
) - res = t.remove({ "locs.loc": { $within: { $polygon: query.boxPoly }}, - "poly.docIn": randYesQuery() }); - assert.eq( results.poly.docsIn, res.nRemoved ); - + t.remove( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ) + assert.eq( results.poly.docsIn, t.getDB().getLastErrorObj().n ) + } diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js index 7c5e23d4b97..53d33da4f29 100644 --- a/jstests/noPassthrough/geo_mnypts_plus_fields.js +++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js @@ -12,8 +12,7 @@ for( var fields = 1; fields < maxFields; fields++ ){ coll.drop() var totalPts = 500 * 1000 - - var bulk = coll.initializeUnorderedBulkOp(); + // Add points in a 100x100 grid for( var i = 0; i < totalPts; i++ ){ var ii = i % 10000 @@ -38,11 +37,10 @@ for( var fields = 1; fields < maxFields; fields++ ){ doc[ "field" + j ] = field } - - bulk.insert( doc ); + + coll.insert( doc ) } - assert.writeOK(bulk.execute()); - + // Create the query for the additional fields queryFields = {} for( var j = 0; j < fields; j++ ){ diff --git a/jstests/gle/gle_after_split_failure_during_migration.js b/jstests/noPassthrough/gle_after_split_failure_during_migration.js index 9d0a6a9ca2a..9d0a6a9ca2a 100644 --- a/jstests/gle/gle_after_split_failure_during_migration.js +++ b/jstests/noPassthrough/gle_after_split_failure_during_migration.js diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js index 1a7ba4b3f4c..d2c3ccac7e4 100644 --- a/jstests/noPassthrough/indexbg1.js +++ b/jstests/noPassthrough/indexbg1.js @@ -35,11 +35,10 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m t = db[ baseName ]; t.drop(); - var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp(); for( i = 0; i < size; ++i ) { - bulk.insert({ i: i }); + db.jstests_indexbg1.save( {i:i} ); } - assert.writeOK(bulk.execute()); + db.getLastError(); assert.eq( size, t.count() ); doParallel( fullName + ".ensureIndex( {i:1}, {background:true} )" ); @@ -63,16 +62,25 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m assert( ex.nscanned < 1000 , "took too long to find 100: " + tojson( ex ) ); - assert.writeOK(t.remove({ i: 40 }, true )); // table scan - assert.writeOK(t.update({ i: 10 }, { i :-10 })); // should scan 10 + t.remove( {i:40}, true ); // table scan + assert( !db.getLastError() ); + + t.update( {i:10}, {i:-10} ); // should scan 10 + assert( !db.getLastError() ); id = t.find().hint( {$natural:-1} ).next()._id; - assert.writeOK(t.update({ _id: id }, { i: -2 } )); - assert.writeOK(t.save({ i: -50 })); - assert.writeOK(t.save({ i: size + 2 })); + t.update( {_id:id}, {i:-2} ); + assert( !db.getLastError() ); + + t.save( {i:-50} ); + assert( !db.getLastError() ); + + t.save( {i:size+2} ); + assert( !db.getLastError() ); assert.eq( size + 1, t.count() ); + assert( !db.getLastError() ); print( "finished with checks" ); } catch( e ) { @@ -105,10 +113,10 @@ assert.eq( 1, t.count( {i:-2} ) ); assert.eq( 1, t.count( {i:-50} ) ); assert.eq( 1, t.count( {i:size+2} ) ); assert.eq( 0, t.count( {i:40} ) ); +assert( !db.getLastError() ); print("about to drop index"); t.dropIndex( {i:1} ); -var gle = db.getLastError(); -printjson( gle ); -assert( !gle ); +printjson( db.getLastError() ); +assert( !db.getLastError() ); testServer.stop(); diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js index fcdac89956d..0b5edc79aa2 100644 --- 
a/jstests/noPassthrough/indexbg2.js +++ b/jstests/noPassthrough/indexbg2.js @@ -49,7 +49,9 @@ doTest = function(dropDups) { // wait for indexing to start assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test." + baseName }) }, "no index created", 30000, 50); t.save({ i: 0, n: true }); + //printjson(db.getLastError()); t.save({ i: size - 1, n: true }); + //printjson(db.getLastError()); } catch (e) { // only a failure if we're still indexing // wait for parallel status to update to reflect indexing status diff --git a/jstests/noPassthrough/query_yield1.js b/jstests/noPassthrough/query_yield1.js index 7c168c1e208..624215f8c45 100644 --- a/jstests/noPassthrough/query_yield1.js +++ b/jstests/noPassthrough/query_yield1.js @@ -14,11 +14,9 @@ q = function(){ var x=this.n; for ( var i=0; i<250; i++ ){ x = x * 2; } return f while ( true ){ function fill(){ - var bulk = t.initializeUnorderedBulkOp(); for ( ; i<N; i++ ){ - bulk.insert({ _id: i, n: 1 }); + t.insert( { _id : i , n : 1 } ) } - assert.writeOK(bulk.execute()); } function timeQuery(){ @@ -60,7 +58,7 @@ num = 0; start = new Date(); biggestMe = 0; while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){ - var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); }); + var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); db.getLastError(); } ) var x = db.currentOp() if ( num++ == 0 ){ @@ -86,4 +84,4 @@ assert.eq( 0 , x.inprog.length , "weird 2" ); testServer.stop(); -} +}
\ No newline at end of file diff --git a/jstests/noPassthrough/query_yield2.js b/jstests/noPassthrough/query_yield2.js index b2262d6b357..71ce4535aa6 100644 --- a/jstests/noPassthrough/query_yield2.js +++ b/jstests/noPassthrough/query_yield2.js @@ -26,11 +26,9 @@ print( "Shell ==== Creating test.query_yield2 collection ..." ); print( "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete" ); while ( true ){ function fill() { - var bulk = t.initializeUnorderedBulkOp(); for ( ; i < N; ++i ) { - bulk.insert({ _id: i , n: 1 }); + t.insert( { _id : i , n : 1 } ) } - assert.writeOK(bulk.execute()); } function timeQuery() { return Date.timeFunc( @@ -102,7 +100,7 @@ while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ) { if ( num == 0 ) { print( "Shell ==== Starting loop " + num + ", inserting 1 document" ); } - insertTime = Date.timeFunc( function() { t.insert({ x: 1 } ); }); + insertTime = Date.timeFunc( function() { t.insert( { x : 1 } ); db.getLastError(); } ); currentOp = db.currentOp(); len = currentOp.inprog.length; print( "Shell ==== Time to insert document " + num + " was " + insertTime + " ms, db.currentOp().inprog.length is " + len ); @@ -135,4 +133,4 @@ if ( len != 0 ) { print( "Shell ==== Test completed successfully, shutting down server" ); testServer.stop(); -} +}
\ No newline at end of file diff --git a/jstests/noPassthrough/repair2.js b/jstests/noPassthrough/repair2.js index e80a3edf02b..6f57ac0d45f 100644 --- a/jstests/noPassthrough/repair2.js +++ b/jstests/noPassthrough/repair2.js @@ -9,8 +9,8 @@ t = testServer.getDB( baseName )[ baseName ]; t.drop(); function protect( f ) { - try { - f(); + try { + f(); } catch( e ) { printjson( e ); } @@ -19,17 +19,16 @@ function protect( f ) { s = startParallelShell( "db = db.getSisterDB( '" + baseName + "'); for( i = 0; i < 10; ++i ) { db.repairDatabase(); sleep( 5000 ); }" ); for( i = 0; i < 30; ++i ) { - var bulk = t.initializeUnorderedBulkOp(); - for( j = 0; j < 5000; ++j ) { - bulk.insert({ _id: j } ); - } - for( j = 0; j < 5000; ++j ) { - bulk.find({ _id: j }).remove(); + for( j = 0; j < 5000; ++j ) { + protect( function() { t.insert( {_id:j} ); } ); } - assert.writeOK(bulk.execute()); - assert.eq( 0, t.count() ); + for( j = 0; j < 5000; ++j ) { + protect( function() { t.remove( {_id:j} ); } ); + } + + assert.eq( 0, t.count() ); } diff --git a/jstests/noPassthrough/sync1.js b/jstests/noPassthrough/sync1.js new file mode 100644 index 00000000000..490d2a53c5a --- /dev/null +++ b/jstests/noPassthrough/sync1.js @@ -0,0 +1,49 @@ + +test = new SyncCCTest( "sync1" ) + +db = test.conn.getDB( "test" ) +t = db.sync1 +t.save( { x : 1 } ) +assert.eq( 1 , t.find().itcount() , "A1" ); +assert.eq( 1 , t.find().count() , "A2" ); +t.save( { x : 2 } ) +assert.eq( 2 , t.find().itcount() , "A3" ); +assert.eq( 2 , t.find().count() , "A4" ); + +test.checkHashes( "test" , "A3" ); + +test.tempKill(); +assert.throws( function(){ t.save( { x : 3 } ); } , null , "B1" ); +// It's ok even for some of the mongod to be unreachable for read-only cmd +assert.eq( 2, t.find({}).count() ); +// It's NOT ok for some of the mongod to be unreachable for write cmd +assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); }); +assert.eq( 2 , t.find().itcount() , "B2" ); +test.tempStart(); +test.checkHashes( "test" , "B3" ); + +// Trying killing the second mongod +test.tempKill( 1 ); +assert.throws( function(){ t.save( { x : 3 } ); } ); +// It's ok even for some of the mongod to be unreachable for read-only cmd +assert.eq( 2, t.find({}).count() ); +// It's NOT ok for some of the mongod to be unreachable for write cmd +assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); }); +assert.eq( 2 , t.find().itcount() ); +test.tempStart( 1 ); + +assert.eq( 2 , t.find().itcount() , "C1" ); +assert.soon( function(){ + try { + t.remove( { x : 1 } ) + return true; + } + catch ( e ){ + print( e ); + } + return false; +} ) +t.find().forEach( printjson ) +assert.eq( 1 , t.find().itcount() , "C2" ); + +test.stop(); diff --git a/jstests/noPassthrough/sync4.js b/jstests/noPassthrough/sync4.js new file mode 100644 index 00000000000..6733f07089d --- /dev/null +++ b/jstests/noPassthrough/sync4.js @@ -0,0 +1,19 @@ + +test = new SyncCCTest( "sync4" ) + +db = test.conn.getDB( "test" ) +t = db.sync4 + +for ( i=0; i<1000; i++ ){ + t.insert( { _id : i , x : "asdasdsdasdas" } ) +} +db.getLastError(); + +test.checkHashes( "test" , "A0" ); +assert.eq( 1000 , t.find().count() , "A1" ) +assert.eq( 1000 , t.find().itcount() , "A2" ) +assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A2" ) + + + +test.stop(); diff --git a/jstests/noPassthrough/sync8.js b/jstests/noPassthrough/sync8.js new file mode 100644 index 00000000000..241ad655569 --- /dev/null +++ b/jstests/noPassthrough/sync8.js @@ -0,0 +1,13 @@ +// Test for SERVER-11492 - make sure that 
upserting a new document reports n:1 in GLE + +var test = new SyncCCTest( "sync1" ); + +var db = test.conn.getDB( "test" ); +var t = db.sync8; +t.remove({}); + +t.update({_id:1}, {$set:{a:1}}, true); +var le = db.getLastErrorObj(); +assert.eq(1, le.n); + +test.stop(); diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js index c164ba67694..d1f1bcb518b 100644 --- a/jstests/noPassthrough/update_server-5552.js +++ b/jstests/noPassthrough/update_server-5552.js @@ -1,3 +1,5 @@ + + load( "jstests/libs/slow_weekly_util.js" ) testServer = new SlowWeeklyMongod( "update_server-5552" ) db = testServer.getDB( "test" ); @@ -7,11 +9,9 @@ t.drop() N = 10000; -var bulk = t.initializeUnorderedBulkOp(); -for ( i=0; i<N; i++ ) { - bulk.insert({ _id: i, x: 1 }); -} -assert.writeOK(bulk.execute()); +for ( i=0; i<N; i++ ) + t.insert( { _id : i , x : 1 } ) +db.getLastError(); join = startParallelShell( "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );" ) diff --git a/jstests/noPassthrough/update_yield1.js b/jstests/noPassthrough/update_yield1.js index 98437414600..db684a6d6eb 100644 --- a/jstests/noPassthrough/update_yield1.js +++ b/jstests/noPassthrough/update_yield1.js @@ -12,17 +12,16 @@ var i = 0; while ( true ){ var fill = function() { - var bulk = t.initializeUnorderedBulkOp(); for ( ; i<N; i++ ){ - bulk.insert({ _id: i, n: 1 }); + t.insert( { _id : i , n : 1 } ); } - assert.writeOK(bulk.execute()); }; var timeUpdate = function() { return Date.timeFunc( function(){ t.update( {} , { $inc : { n : 1 } } , false , true ); + var r = db.getLastErrorObj(); } ); }; @@ -49,7 +48,7 @@ function haveInProgressUpdate() { // --- test 1 -var join = startParallelShell( "db.update_yield1.update( {}, { $inc: { n: 1 }}, false, true );" ); +var join = startParallelShell( "db.update_yield1.update( {} , { $inc : { n : 1 } } , false , true ); db.getLastError()" ); assert.soon(haveInProgressUpdate, "never doing update"); var num = 0; diff --git a/jstests/noPassthroughWithMongod/autosplit_heuristics.js b/jstests/noPassthroughWithMongod/autosplit_heuristics.js index ee1d28b5eda..33649617126 100644 --- a/jstests/noPassthroughWithMongod/autosplit_heuristics.js +++ b/jstests/noPassthroughWithMongod/autosplit_heuristics.js @@ -60,11 +60,15 @@ printjson({ chunkSizeBytes : chunkSizeBytes, totalInserts : totalInserts }); // Insert enough docs to trigger splits into all chunks -var bulk = coll.initializeUnorderedBulkOp(); for (var i = 0; i < totalInserts; i++) { - bulk.insert({ _id : i % numChunks + (i / totalInserts) }); + coll.insert({ _id : i % numChunks + (i / totalInserts) }); + if ( i % ( numChunks * 1000 ) == 0 ) { + print( "Inserted " + i + " docs, " + + ( i * approxSize / numChunks ) + " bytes per chunk." 
); + } } -assert.writeOK(bulk.execute()); + +assert.eq(null, coll.getDB().getLastError()); jsTest.log("Inserts completed..."); diff --git a/jstests/noPassthroughWithMongod/background.js b/jstests/noPassthroughWithMongod/background.js index 188027a029b..d1d0047988a 100644 --- a/jstests/noPassthroughWithMongod/background.js +++ b/jstests/noPassthroughWithMongod/background.js @@ -7,41 +7,45 @@ t.drop(); var a = new Mongo( db.getMongo().host ).getDB( db.getName() ); -var bulk = t.initializeUnorderedBulkOp(); for( var i = 0; i < 100000; i++ ) { - bulk.insert({ y: 'aaaaaaaaaaaa', i: i }); - if( i % 10000 == 0 ) { - assert.writeOK(bulk.execute()); - bulk = t.initializeUnorderedBulkOp(); - print(i); - } + t.insert({y:'aaaaaaaaaaaa',i:i}); + if( i % 10000 == 0 ) { + db.getLastError(); + print(i); + } } +//db.getLastError(); + // start bg indexing a.system.indexes.insert({ns:"test.bg1", key:{i:1}, name:"i_1", background:true}); // add more data -bulk = t.initializeUnorderedBulkOp(); + for( var i = 0; i < 100000; i++ ) { - bulk.insert({ i: i }); - if( i % 10000 == 0 ) { - printjson( db.currentOp() ); - assert.writeOK(bulk.execute()); - bulk = t.initializeUnorderedBulkOp(); - print(i); - } + t.insert({i:i}); + if( i % 10000 == 0 ) { + printjson( db.currentOp() ); + db.getLastError(); + print(i); + } } -assert.writeOK(bulk.execute()); +printjson( db.getLastErrorObj() ); printjson( db.currentOp() ); -for( var i = 0; i < 40; i++ ) { - if( db.currentOp().inprog.length == 0 ) - break; - print("waiting"); - sleep(1000); +for( var i = 0; i < 40; i++ ) { + if( db.currentOp().inprog.length == 0 ) + break; + print("waiting"); + sleep(1000); } +printjson( a.getLastErrorObj() ); + var idx = t.getIndexes(); +// print("indexes:"); +// printjson(idx); + assert( idx[1].key.i == 1 ); diff --git a/jstests/noPassthroughWithMongod/balance_repl.js b/jstests/noPassthroughWithMongod/balance_repl.js index c5818ea19b0..610af04767b 100644 --- a/jstests/noPassthroughWithMongod/balance_repl.js +++ b/jstests/noPassthroughWithMongod/balance_repl.js @@ -5,11 +5,10 @@ s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true, _noslee db = s.getDB( "test" ); -var bulk = db.foo.initializeUnorderedBulkOp(); -for (var i = 0; i < 2100; i++) { - bulk.insert({ _id: i, x: i }); +for ( i=0; i<2100; i++ ) { + db.foo.insert( { _id : i , x : i } ); } -assert.writeOK(bulk.execute()); +db.getLastError(); serverName = s.getServerName( "test" ) other = s.config.shards.findOne( { _id : { $ne : serverName } } ); diff --git a/jstests/noPassthroughWithMongod/balance_tags1.js b/jstests/noPassthroughWithMongod/balance_tags1.js index 1122380d7bc..945f0526b17 100644 --- a/jstests/noPassthroughWithMongod/balance_tags1.js +++ b/jstests/noPassthroughWithMongod/balance_tags1.js @@ -3,11 +3,10 @@ s = new ShardingTest( "balance_tags1" , 3 , 1 , 1 , { sync:true, chunksize : 1 , s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false, _nosleep: true } } , true ); db = s.getDB( "test" ); -var bulk = db.foo.initializeUnorderedBulkOp(); for ( i=0; i<21; i++ ) { - bulk.insert({ _id: i, x: i }); + db.foo.insert( { _id : i , x : i } ); } -assert.writeOK(bulk.execute()); +db.getLastError(); s.adminCommand( { enablesharding : "test" } ) s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); diff --git a/jstests/noPassthroughWithMongod/balance_tags2.js b/jstests/noPassthroughWithMongod/balance_tags2.js index 6d0ed6ea7ca..55ad3dc5a97 100644 --- a/jstests/noPassthroughWithMongod/balance_tags2.js +++ 
b/jstests/noPassthroughWithMongod/balance_tags2.js @@ -6,11 +6,10 @@ s = new ShardingTest( "balance_tags2" , 3 , 1 , 1 , s.config.settings.save({ _id: "balancer", _nosleep: true}); db = s.getDB( "test" ); -var bulk = db.foo.initializeUnorderedBulkOp(); for ( i=0; i<21; i++ ) { - bulk.insert({ _id: i, x: i }); + db.foo.insert( { _id : i , x : i } ); } -assert.writeOK(bulk.execute()); +db.getLastError(); // enable sharding, shard, and stop balancer sh.enableSharding("test"); diff --git a/jstests/noPassthroughWithMongod/btreedel.js b/jstests/noPassthroughWithMongod/btreedel.js index 89af6aa7d5d..824eb3e63a6 100644 --- a/jstests/noPassthroughWithMongod/btreedel.js +++ b/jstests/noPassthroughWithMongod/btreedel.js @@ -3,11 +3,9 @@ t = db.foo;
t.remove({});
-var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 1000000; i++) {
- bulk.insert({ _id: i, x: 'a b' });
+ t.insert({ _id: i, x: 'a b' });
}
-assert.writeOK(bulk.execute());
print("1 insert done count: " + t.count());
diff --git a/jstests/noPassthroughWithMongod/bulk_shard_insert.js b/jstests/noPassthroughWithMongod/bulk_shard_insert.js index 74810a7c668..d9cd25a635e 100644 --- a/jstests/noPassthroughWithMongod/bulk_shard_insert.js +++ b/jstests/noPassthroughWithMongod/bulk_shard_insert.js @@ -46,7 +46,12 @@ while( docsInserted < numDocs ){ bulk.push({ hi : "there", at : docsInserted, i : i, x : x }) } - assert.writeOK(coll.insert( bulk )); + coll.insert( bulk ) + var result = db.getLastError( 1 ) + if( result != null ){ + printjson( result ) + throw result + } if( Math.floor( docsInserted / 10000 ) != Math.floor( ( docsInserted + currBulkSize ) / 10000 ) ){ print( "Inserted " + (docsInserted + currBulkSize) + " documents." ) diff --git a/jstests/noPassthroughWithMongod/capped4.js b/jstests/noPassthroughWithMongod/capped4.js index be02e3c6ae1..27d138c16ce 100644 --- a/jstests/noPassthroughWithMongod/capped4.js +++ b/jstests/noPassthroughWithMongod/capped4.js @@ -22,8 +22,8 @@ assert( !d.hasNext(), "C" ); assert( t.find().sort( { i : 1 } ).hint( { i : 1 } ).toArray().length > 10, "D" ); assert( t.findOne( { i : i - 1 } ), "E" ); -var res = assert.writeError(t.remove( { i : i - 1 } )); -assert( res.getWriteError().errmsg.indexOf( "capped" ) >= 0, "F" ); +t.remove( { i : i - 1 } ); +assert( db.getLastError().indexOf( "capped" ) >= 0, "F" ); assert( t.validate().valid, "G" );
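For the error path, the capped4.js change just above shows the same contrast: under write commands the failure is attached to the returned WriteResult, whereas in legacy mode it only surfaces through getLastError. A rough sketch, assuming a pre-existing capped collection named "cappedDemo" and a shell running in the corresponding write mode:

// Write-command mode: the WriteResult carries the error.
var res = db.cappedDemo.remove({ i: 5 });             // removes are rejected on capped collections
assert(res.hasWriteError(), "expected a write error");
assert(res.getWriteError().errmsg.indexOf("capped") >= 0);

// Legacy mode: the remove itself reports nothing; check getLastError afterwards.
db.cappedDemo.remove({ i: 5 });
assert(db.getLastError().indexOf("capped") >= 0);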
diff --git a/jstests/noPassthroughWithMongod/fsync2.js b/jstests/noPassthroughWithMongod/fsync2.js index 7080837a99b..bdf956f30f2 100644 --- a/jstests/noPassthroughWithMongod/fsync2.js +++ b/jstests/noPassthroughWithMongod/fsync2.js @@ -41,7 +41,9 @@ function doTest() { //assert.eq(1, m.getDB(db.getName()).fsync2.count()); assert( m.getDB("admin").$cmd.sys.unlock.findOne().ok ); - + + db.getLastError(); + assert.eq( 2, db.fsync2.count() ); } diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js index 084b839cabc..0161eccb4ac 100644 --- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js +++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js @@ -17,14 +17,15 @@ centers = [] bounds = [] for( var s = 0; s < scale.length; s++ ){ - for ( var i = 0; i < radius.length; i++ ) { - radii.push( radius[i] * scale[s] ) - } + for ( var i = 0; i < radius.length; i++ ) { + radii.push( radius[i] * scale[s] ) + } + + for ( var j = 0; j < center.length; j++ ) { + centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] ) + bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] ) + } - for ( var j = 0; j < center.length; j++ ) { - centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] ) - bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] ) - } } radius = radii @@ -33,74 +34,75 @@ bound = bounds for ( var b = 0; b < bits.length; b++ ) { - printjson( radius ) - printjson( centers ) - - for ( var i = 0; i < radius.length; i++ ) { - for ( var j = 0; j < center.length; j++ ) { - printjson( { center : center[j], radius : radius[i], bits : bits[b] } ); - - t.drop() - - // Make sure our numbers are precise enough for this test - if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) ) - continue; - - t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } ); - t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } ); - t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } ); - t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } ); - t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } ); - t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } ); - t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } ); - t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } ); - t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } ); - - var res = t.ensureIndex({ loc: "2d" }, - { max: bound[j][1], - min : bound[j][0], - bits : bits[b] }); - - // ensureIndex fails when this iteration inserted coordinates that are out of bounds. - // These are invalid cases, so we skip them. - if (!res.ok) continue; - - print( "DOING WITHIN QUERY ") - r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } ); - - assert.eq( 5, r.count() ); - - // FIXME: surely code like this belongs in utils.js. 
- a = r.toArray(); - x = []; - for ( k in a ) - x.push( a[k]["_id"] ) - x.sort() - assert.eq( [ 1, 2, 3, 4, 5 ], x ); - - print( " DOING NEAR QUERY ") - //printjson( center[j] ) - r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } ) - assert.eq( 5, r.count() ); - - print( " DOING DIST QUERY ") - - a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results - assert.eq( 5, a.length ); - - var distance = 0; - for( var k = 0; k < a.length; k++ ){ - assert.gte( a[k].dis, distance ); - - } - - r = t.find({ loc: { $within: { $box: [ [ center[j][0] - radius[i], - center[j][1] - radius[i] ], - [ center[j][0] + radius[i], - center[j][1] + radius[i] ]]}}}, - { _id: 1 } ); - assert.eq( 9, r.count() ); - - } - } -} + + + printjson( radius ) + printjson( centers ) + + for ( var i = 0; i < radius.length; i++ ) { + for ( var j = 0; j < center.length; j++ ) { + + printjson( { center : center[j], radius : radius[i], bits : bits[b] } ); + + t.drop() + + // Make sure our numbers are precise enough for this test + if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) ) + continue; + + t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } ); + t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } ); + t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } ); + t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } ); + t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } ); + t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } ); + t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } ); + t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } ); + t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } ); + + t.ensureIndex( { loc : "2d" }, { max : bound[j][1], min : bound[j][0], bits : bits[b] } ); + + if( db.getLastError() ) continue; + + print( "DOING WITHIN QUERY ") + r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } ); + + //printjson( r.toArray() ); + + assert.eq( 5, r.count() ); + + // FIXME: surely code like this belongs in utils.js. + a = r.toArray(); + x = []; + for ( k in a ) + x.push( a[k]["_id"] ) + x.sort() + assert.eq( [ 1, 2, 3, 4, 5 ], x ); + + print( " DOING NEAR QUERY ") + //printjson( center[j] ) + r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } ) + assert.eq( 5, r.count() ); + + print( " DOING DIST QUERY ") + + a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results + assert.eq( 5, a.length ); + + //printjson( a ); + + var distance = 0; + for( var k = 0; k < a.length; k++ ){ + //print( a[k].dis ) + //print( distance ) + assert.gte( a[k].dis, distance ); + //printjson( a[k].obj ) + //print( distance = a[k].dis ); + } + + r = t.find( { loc : { $within : { $box : [ [ center[j][0] - radius[i], center[j][1] - radius[i] ], [ center[j][0] + radius[i], center[j][1] + radius[i] ] ] } } }, { _id : 1 } ) + assert.eq( 9, r.count() ); + + } + } +}
\ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js index bc7935fa7a6..ac4065158bf 100644 --- a/jstests/noPassthroughWithMongod/geo_mnypts.js +++ b/jstests/noPassthroughWithMongod/geo_mnypts.js @@ -6,12 +6,10 @@ coll.drop() var totalPts = 500 * 1000 // Add points in a 100x100 grid -var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < totalPts; i++ ){ var ii = i % 10000 - bulk.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] }); + coll.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] }) } -assert.writeOK(bulk.execute()); coll.ensureIndex({ loc : "2d" }) diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js index 5b19b2b2080..25bf0269ccc 100644 --- a/jstests/noPassthroughWithMongod/geo_polygon.js +++ b/jstests/noPassthroughWithMongod/geo_polygon.js @@ -15,14 +15,12 @@ if ( bi.indexOf( "erh2" ) >= 0 ){ if ( shouldRun ) { num = 0; - var bulk = t.initializeUnorderedBulkOp(); for ( x = -180; x < 180; x += .5 ){ for ( y = -180; y < 180; y += .5 ){ o = { _id : num++ , loc : [ x , y ] }; - bulk.insert( o ); + t.save( o ); } } - assert.writeOK(bulk.execute()); var numTests = 31; for( var n = 0; n < numTests; n++ ){ diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js index 84e7342e051..79d0d93fc9b 100644 --- a/jstests/noPassthroughWithMongod/index_check10.js +++ b/jstests/noPassthroughWithMongod/index_check10.js @@ -104,30 +104,25 @@ function doIt( indexVersion ) { } } - var bulk = t.initializeUnorderedBulkOp(); for( var i = 0; i < 10000; ++i ) { - bulk.insert( obj() ); + t.save( obj() ); } - assert.writeOK(bulk.execute()); t.ensureIndex( idx , { v : indexVersion } ); check(); - bulk = t.initializeUnorderedBulkOp(); for( var i = 0; i < 10000; ++i ) { if ( Random.rand() > 0.9 ) { - bulk.insert( obj() ); + t.save( obj() ); } else { - bulk.find( obj() ).remove(); // improve + t.remove( obj() ); // improve } if( Random.rand() > 0.999 ) { print( i ); - assert.writeOK(bulk.execute()); check(); - bulk = t.initializeUnorderedBulkOp(); } } - assert.writeOK(bulk.execute()); + check(); } diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js index fd1b1d5eaa1..8a50471940b 100644 --- a/jstests/noPassthroughWithMongod/index_check9.js +++ b/jstests/noPassthroughWithMongod/index_check9.js @@ -106,32 +106,25 @@ function check() { assert.eq( c3.length, count ); } -var bulk = t.initializeUnorderedBulkOp(); for( var i = 0; i < 10000; ++i ) { - bulk.insert( obj() ); + t.save( obj() ); if( Random.rand() > 0.999 ) { print( i ); - assert.writeOK(bulk.execute()); check(); - bulk = t.initializeUnorderedBulkOp(); } } -bulk = t.initializeUnorderedBulkOp(); for( var i = 0; i < 100000; ++i ) { if ( Random.rand() > 0.9 ) { - bulk.insert( obj() ); + t.save( obj() ); } else { - bulk.find( obj() ).remove(); // improve + t.remove( obj() ); // improve } if( Random.rand() > 0.999 ) { print( i ); - assert.writeOK(bulk.execute()); check(); - bulk = t.initializeUnorderedBulkOp(); } } -assert.writeOK(bulk.execute()); check(); @@ -139,4 +132,4 @@ check(); for( var z = 0; z < 5; ++z ) { doIt(); -} +}
\ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/index_hammer1.js b/jstests/noPassthroughWithMongod/index_hammer1.js index 675a2f8db7c..87fd3820f66 100644 --- a/jstests/noPassthroughWithMongod/index_hammer1.js +++ b/jstests/noPassthroughWithMongod/index_hammer1.js @@ -2,10 +2,9 @@ t = db.index_hammer1; t.drop(); -var bulk = t.initializeUnorderedBulkOp(); for ( i=0; i<10000; i++ ) - bulk.insert({ x: i, y: i }); -assert.writeOK(bulk.execute()); + t.insert( { x : i , y : i } ); +db.getLastError(); ops = [] diff --git a/jstests/noPassthroughWithMongod/index_killop.js b/jstests/noPassthroughWithMongod/index_killop.js index f897f6a80de..b022e31f3b8 100644 --- a/jstests/noPassthroughWithMongod/index_killop.js +++ b/jstests/noPassthroughWithMongod/index_killop.js @@ -5,11 +5,10 @@ t.drop(); // Insert a large number of documents, enough to ensure that an index build on these documents will // be interrupted before complete. -var bulk = t.initializeUnorderedBulkOp(); for( i = 0; i < 1e6; ++i ) { - bulk.insert({ a: i }); + t.save( { a:i } ); } -assert.writeOK(bulk.execute()); +db.getLastError(); function debug( x ) { // printjson( x ); @@ -24,7 +23,7 @@ function getIndexBuildOpId() { // Identify the index build as an insert into the 'test.system.indexes' // namespace. It is assumed that no other clients are concurrently // accessing the 'test' database. - if ( op.op == 'query' && 'createIndexes' in op.query ) { + if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) { debug( op.opid ); indexBuildOpId = op.opid; } @@ -34,8 +33,9 @@ function getIndexBuildOpId() { /** Test that building an index with @param 'options' can be aborted using killop. */ function testAbortIndexBuild( options ) { - var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_killop; \ - coll.createIndex({ a: 1 }, ' + tojson(options) + ');'); + + // Create an index asynchronously by using a new connection. + new Mongo( db.getMongo().host ).getCollection( t.toString() ).createIndex( { a:1 }, options ); // When the index build starts, find its op id. assert.soon( function() { return ( opId = getIndexBuildOpId() ) != -1; } ); @@ -44,8 +44,6 @@ function testAbortIndexBuild( options ) { // Wait for the index build to stop. assert.soon( function() { return getIndexBuildOpId() == -1; } ); - createIdx(); - // Check that no new index has been created. This verifies that the index build was aborted // rather than successfully completed. 
assert.eq( [ { _id:1 } ], t.getIndexKeys() ); diff --git a/jstests/noPassthroughWithMongod/index_multi.js b/jstests/noPassthroughWithMongod/index_multi.js index e4c38632bcf..ac259455d36 100644 --- a/jstests/noPassthroughWithMongod/index_multi.js +++ b/jstests/noPassthroughWithMongod/index_multi.js @@ -4,9 +4,8 @@ Random.setRandomSeed(); var coll = db.index_multi; -var bulk = coll.initializeUnorderedBulkOp(); print("Populate the collection with random data"); -for (var i = 0; i < 1e4; i++) { +for (var i=0;i<1e4; i++) { var doc = {"_id" : i}; for (var j=0; j<100; j++) { @@ -23,54 +22,52 @@ for (var i = 0; i < 1e4; i++) { } } - bulk.insert(doc); + if (i%1000 == 0) { + print("inserted "+i); + } + + coll.insert(doc); } -assert.writeOK(bulk.execute()); // Array of all index specs var specs = []; var multikey = []; -var indexJobs = []; print("Create 3 triple indexes"); -for (var i = 90; i < 93; i++) { +for (var i=90; i<93; i++) { var spec = {}; spec["field"+i] = 1; spec["field"+(i+1)] = 1; spec["field"+(i+2)] = 1; - indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + "," + - "{ background: true });" + - "db.results.insert(db.runCommand({ getlasterror: 1 }));")); + startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});" + +"db.results.insert(db.runCommand({getlasterror:1}));"); specs.push(spec); multikey.push(i % 10 == 0 || (i+1) % 10 == 0 || (i+2) % 10 == 0); } print("Create 30 compound indexes"); -for (var i = 30; i < 90; i += 2) { +for (var i=30; i<90; i+=2) { var spec = {}; spec["field"+i] = 1; spec["field"+(i+1)] = 1; - indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " + - "{ background: true });" + - "db.results.insert(db.runCommand({ getlasterror: 1 }));")); + startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});" + +"db.results.insert(db.runCommand({getlasterror:1}));"); specs.push(spec); multikey.push(i % 10 == 0 || (i+1) % 10 == 0); } print("Create 30 indexes"); -for (var i = 0; i < 30; i++) { +for (var i=0; i<30; i++) { var spec = {}; spec["field"+i] = 1; - indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " + - "{ background: true });" + - "db.results.insert(db.runCommand({ getlasterror: 1 }));")); + startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});" + +"db.results.insert(db.runCommand({getlasterror:1}));"); specs.push(spec); multikey.push(i % 10 == 0); } print("Do some sets and unsets"); -bulk = coll.initializeUnorderedBulkOp(); -for (i = 0; i < 1e4; i++) { +for (i=0; i<1e4; i++) { var criteria = {_id: Random.randInt(1e5)}; var mod = {}; if (Random.rand() < .5) { @@ -82,23 +79,31 @@ for (i = 0; i < 1e4; i++) { mod['$unset']['field'+Random.randInt(100)] = true; } - bulk.find(criteria).update(mod); + coll.update(criteria, mod); } -assert.writeOK(bulk.execute()); - -indexJobs.forEach(function(join) { - join(); -}); printjson(db.results.find().toArray()); printjson(coll.getIndexes()); print("Make sure we end up with 64 indexes"); -for (var i in specs) { - print("trying to hint on "+tojson(specs[i])); - var explain = coll.find().hint(specs[i]).explain(); - assert.eq(multikey[i], explain.isMultiKey, tojson(explain)); -} +assert.soon( + function() { + for (var i in specs) { + print("trying to hint on "+tojson(specs[i])); + try { + var explain = coll.find().hint(specs[i]).explain(); + printjson(explain); + assert.eq(multikey[i], explain.isMultiKey); + } catch (x) { + print(x+", hinting on 
"+tojson(specs[i])); + return false; + } + } + return true; + }, + "wait for all indexes to be built", + 120000 +); print("SUCCESS!"); diff --git a/jstests/noPassthroughWithMongod/index_retry.js b/jstests/noPassthroughWithMongod/index_retry.js index 7c79e75af5f..d0465476144 100644 --- a/jstests/noPassthroughWithMongod/index_retry.js +++ b/jstests/noPassthroughWithMongod/index_retry.js @@ -12,14 +12,13 @@ t.drop(); // Insert a large number of documents, enough to ensure that an index build on these documents can // be interrupted before complete. -var bulk = t.initializeUnorderedBulkOp(); for (i = 0; i < 5e5; ++i) { - bulk.insert({ a: i }); + t.save( { a:i } ); if (i % 10000 == 0) { print("i: " + i); } } -assert.writeOK(bulk.execute()); +test.getLastError(); function debug(x) { printjson(x); @@ -37,15 +36,14 @@ function indexBuildInProgress() { // Identify the index build as an insert into the 'test.system.indexes' // namespace. It is assumed that no other clients are concurrently // accessing the 'test' database. - if ( op.op == 'query' && 'createIndexes' in op.query ) { + if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) { debug(op.opid); - var idxSpec = op.query.indexes[0]; // SERVER-4295 Make sure the index details are there // we can't assert these things, since there is a race in reporting // but we won't count if they aren't - if ( "a_1" == idxSpec.name && - 1 == idxSpec.key.a && - idxSpec.background ) { + if ( "a_1" == op.insert.name && + 1 == op.insert.key.a && + op.insert.background ) { indexBuildOpId = op.opid; } } @@ -55,9 +53,10 @@ function indexBuildInProgress() { } function abortDuringIndexBuild(options) { - var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_retry; \ - coll.createIndex({ a: 1 }, { background: true });', - ports[0]); + + // Create an index asynchronously by using a new connection. + new Mongo(test.getMongo().host).getCollection(t.toString()).createIndex( + { a:1 }, { background:true } ); // Wait for the index build to start. var times = 0; @@ -69,7 +68,6 @@ function abortDuringIndexBuild(options) { print("killing the mongod"); stopMongod(ports[0], /* signal */ 9); - createIdx(); } abortDuringIndexBuild(); diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js index db4493df017..9e754b747ef 100644 --- a/jstests/noPassthroughWithMongod/indexbg_drop.js +++ b/jstests/noPassthroughWithMongod/indexbg_drop.js @@ -42,11 +42,9 @@ var dc = {dropIndexes: collection, index: "i_1"}; // set up collections masterDB.dropDatabase(); jsTest.log("creating test data " + size + " documents"); -var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp(); for( i = 0; i < size; ++i ) { - bulk.insert({ i: Random.rand() }); + masterDB.getCollection(collection).save( {i: Random.rand()} ); } -assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing for test of: " + tojson(dc)); // Add another index to be sure the drop command works. 
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js index f6f1d426161..09c75056ca2 100644 --- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js +++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js @@ -66,11 +66,9 @@ for (var idx = 0; idx < dropAction.length; idx++) { // set up collections masterDB.dropDatabase(); jsTest.log("creating test data " + size + " documents"); - var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp(); for(var i = 0; i < size; ++i ) { - bulk.insert({ i: i }); + masterDB.getCollection(collection).save( {i:i} ); } - assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc)); masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} ); diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js index a3b2c26f59e..01d3b23a07c 100644 --- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js +++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js @@ -38,11 +38,9 @@ var secondDB = second.getDB('bgIndexSec'); var size = 500000; jsTest.log("creating test data " + size + " documents"); -var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp(); for(var i = 0; i < size; ++i) { - bulk.insert({ i: i }); + masterDB.jstests_bgsec.save( {i:i} ); } -assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing"); masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} ); diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js index 1ea53e6aac2..38cced11bb9 100644 --- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js +++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js @@ -64,11 +64,9 @@ var size = 500000; jsTest.log("creating test data " + size + " documents"); - var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp(); for( i = 0; i < size; ++i ) { - bulk.insert({ i : i }); + masterDB.jstests_bgsec.save( {i:i} ); } - assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing"); masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} ); @@ -84,7 +82,8 @@ // Make sure a journal flush for the oplog occurs, by doing a local journaled write to the // secondary - assert.writeOK(second.getDB('local').foo.insert({ a: 1 }, { writeConcern: { j: true }})); + second.getDB('local').foo.insert({a:1}); + second.getDB('local').runCommand( { getLastError: 1, j: true } ); // restart secondary and reconnect jsTest.log("Restarting secondary"); diff --git a/jstests/noPassthroughWithMongod/large_chunk.js b/jstests/noPassthroughWithMongod/large_chunk.js index 12f0c48fdcd..2e648084947 100644 --- a/jstests/noPassthroughWithMongod/large_chunk.js +++ b/jstests/noPassthroughWithMongod/large_chunk.js @@ -20,12 +20,11 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; -var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 400 * 1024 * 1024 ) ){ - bulk.insert({ _id: num++, s: bigString }); + db.foo.insert( { _id : num++ , s : bigString } ); inserted += bigString.length; } -assert.writeOK(bulk.execute()); +db.getLastError(); // Turn on sharding on the 'test.foo' collection and generate a large chunk s.adminCommand( { enablesharding : "test" } ); diff --git 
a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js index 1ff024fcb03..fd7ec8c68e7 100644 --- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js +++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js @@ -15,14 +15,14 @@ var coll = db.getCollection("mrInput"); //
var expectedOutColl = [];
-var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10; i++) {
for (var j = 1; j < 50; j++) {
- bulk.insert({ idx: i, j: j });
+ coll.insert({idx: i, j: j});
}
expectedOutColl.push ({ _id: i, value: j - 1 });
}
-assert.writeOK(bulk.execute());
+
+assertGLEOK(db.getLastErrorObj());
function mapFn() { emit(this.idx, 1); };
function reduceFn(key, values) { return Array.sum(values); };
@@ -41,4 +41,4 @@ assert.eq(out.counts.emit, 490, "emit count is wrong"); // changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else
// must have changed about when intermediate reduces occur (see mongo::mr::State::checkSize).
//
-assert.eq(out.counts.reduce, 14, "reduce count is wrong");
+assert.eq(out.counts.reduce, 14, "reduce count is wrong");
\ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/mr_shard_version.js b/jstests/noPassthroughWithMongod/mr_shard_version.js index c011e7700e9..47fd99ea30e 100644 --- a/jstests/noPassthroughWithMongod/mr_shard_version.js +++ b/jstests/noPassthroughWithMongod/mr_shard_version.js @@ -11,12 +11,11 @@ var numDocs = 500000 var numKeys = 1000 var numTests = 3 -var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < numDocs; i++ ){ - bulk.insert({ _id: i, key: "" + ( i % numKeys ), value: i % numKeys }); + coll.insert({ _id : i, key : "" + ( i % numKeys ), value : i % numKeys }) } -assert.writeOK(bulk.execute()); +assert.eq( null, coll.getDB().getLastError() ) assert.eq( numDocs, coll.find().itcount() ) var halfId = coll.find().itcount() / 2 diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js index 6f6c196510d..7aa55564640 100644 --- a/jstests/noPassthroughWithMongod/no_balance_collection.js +++ b/jstests/noPassthroughWithMongod/no_balance_collection.js @@ -68,11 +68,9 @@ sh.waitForBalancer(true) // Make sure auto-migrates on insert don't move chunks var lastMigration = sh._lastMigration( collB ) -var bulk = collB.initializeUnorderedBulkOp(); for( var i = 0; i < 1000000; i++ ){ - bulk.insert({ _id: i, hello: "world" }); + collB.insert({ _id : i, hello : "world" }) } -assert.writeOK(bulk.execute()); printjson( lastMigration ) printjson( sh._lastMigration( collB ) ) @@ -80,4 +78,4 @@ printjson( sh._lastMigration( collB ) ) if( lastMigration == null ) assert.eq( null, sh._lastMigration( collB ) ) else assert.eq( lastMigration.time, sh._lastMigration( collB ).time ) -st.stop() +st.stop()
\ No newline at end of file diff --git a/jstests/gle/opcounters_legacy.js b/jstests/noPassthroughWithMongod/opcounters_legacy.js index 52e18c48643..7db520a109f 100644 --- a/jstests/gle/opcounters_legacy.js +++ b/jstests/noPassthroughWithMongod/opcounters_legacy.js @@ -1,5 +1,4 @@ // Test that opcounters get incremented properly. -// Write command version also available at jstests/core. // Remember the global 'db' var var lastDB = db; diff --git a/jstests/core/opcounters_write_cmd.js b/jstests/noPassthroughWithMongod/opcounters_write_cmd.js index 88a5c65b4c3..47a1bc63515 100644 --- a/jstests/core/opcounters_write_cmd.js +++ b/jstests/noPassthroughWithMongod/opcounters_write_cmd.js @@ -1,5 +1,4 @@ // Test that opcounters get incremented properly. -// Legacy write mode test also available at jstests/gle. var mongo = new Mongo(db.getMongo().host); mongo.forceWriteMode("commands"); diff --git a/jstests/noPassthroughWithMongod/parallel_collection_scan.js b/jstests/noPassthroughWithMongod/parallel_collection_scan.js index 44e5d361e45..d745f088376 100644 --- a/jstests/noPassthroughWithMongod/parallel_collection_scan.js +++ b/jstests/noPassthroughWithMongod/parallel_collection_scan.js @@ -6,11 +6,10 @@ s = ""; while ( s.length < 10000 ) s += "."; -var bulk = t.initializeUnorderedBulkOp(); for ( i = 0; i < 8000; i++ ) { - bulk.insert({ x: i, s: s }); + t.insert( { x : i, s : s } ); } -assert.writeOK(bulk.execute()); + function iterateSliced() { var res = t.runCommand( "parallelCollectionScan", { numCursors : 3 } ); diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js index 7492e36c5b9..e7dfe9bfff1 100644 --- a/jstests/noPassthroughWithMongod/remove9.js +++ b/jstests/noPassthroughWithMongod/remove9.js @@ -5,7 +5,8 @@ js = "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i} pid = startMongoProgramNoConnect( "mongo" , "--eval" , js , db ? db.getMongo().host : null ); for( var i = 0; i < 10000; ++i ) { - assert.writeOK(t.remove( { i: Random.randInt( 10000 )} )); + t.remove( {i:Random.randInt( 10000 )} ); + assert.automsg( "!db.getLastError()" ); } -stopMongoProgramByPid( pid ); +stopMongoProgramByPid( pid );
\ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/sharding_balance1.js b/jstests/noPassthroughWithMongod/sharding_balance1.js index 7f3892ce8f3..e0c36f6cea5 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance1.js +++ b/jstests/noPassthroughWithMongod/sharding_balance1.js @@ -15,13 +15,12 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; -var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 20 * 1024 * 1024 ) ){ - bulk.insert({ _id: num++, s: bigString }); + db.foo.insert( { _id : num++ , s : bigString } ); inserted += bigString.length; } -assert.writeOK(bulk.execute()); +db.getLastError(); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); assert.lt( 20 , s.config.chunks.count() , "setup2" ); diff --git a/jstests/noPassthroughWithMongod/sharding_balance2.js b/jstests/noPassthroughWithMongod/sharding_balance2.js index c3e2e825ba3..519f88807a2 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance2.js +++ b/jstests/noPassthroughWithMongod/sharding_balance2.js @@ -26,12 +26,12 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; -var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 40 * 1024 * 1024 ) ){ - bulk.insert({ _id: num++, s: bigString }); + db.foo.insert( { _id : num++ , s : bigString } ); inserted += bigString.length; } -assert.writeOK(bulk.execute()); + +db.getLastError(); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); assert.lt( 20 , s.config.chunks.count() , "setup2" ); diff --git a/jstests/noPassthroughWithMongod/sharding_balance3.js b/jstests/noPassthroughWithMongod/sharding_balance3.js index 59f4136d44c..5e85a694716 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance3.js +++ b/jstests/noPassthroughWithMongod/sharding_balance3.js @@ -16,13 +16,12 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; -var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 40 * 1024 * 1024 ) ){ - bulk.insert({ _id: num++, s: bigString }); + db.foo.insert( { _id : num++ , s : bigString } ); inserted += bigString.length; } -assert.writeOK(bulk.execute()); +db.getLastError(); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); assert.lt( 20 , s.config.chunks.count() , "setup2" ); diff --git a/jstests/noPassthroughWithMongod/sharding_balance4.js b/jstests/noPassthroughWithMongod/sharding_balance4.js index c2a3d744964..f1c27afa0bb 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance4.js +++ b/jstests/noPassthroughWithMongod/sharding_balance4.js @@ -35,12 +35,12 @@ counts = {} // -function doUpdate( bulk, includeString, optionalId ){ +function doUpdate( includeString, optionalId ){ var up = { $inc : { x : 1 } } if ( includeString ) up["$set"] = { s : bigString }; var myid = optionalId == undefined ? Random.randInt( N ) : optionalId - bulk.find({ _id : myid }).upsert().update( up ); + db.foo.update( { _id : myid } , up , true ); counts[myid] = ( counts[myid] ? 
counts[myid] : 0 ) + 1; return myid; @@ -48,15 +48,14 @@ function doUpdate( bulk, includeString, optionalId ){ // Initially update all documents from 1 to N, otherwise later checks can fail because no document // previously existed -var bulk = db.foo.initializeUnorderedBulkOp(); for ( i = 0; i < N; i++ ){ - doUpdate( bulk, true, i ); + doUpdate( true, i ) } for ( i=0; i<N*9; i++ ){ - doUpdate( bulk, false ); + doUpdate( false ) } -assert.writeOK(bulk.execute()); +db.getLastError(); for ( var i=0; i<50; i++ ){ s.printChunks( "test.foo" ) @@ -110,15 +109,25 @@ function check( msg , dontAssert ){ function diff1(){ jsTest.log("Running diff1...") + + var myid = doUpdate( false ) + var le = db.getLastErrorCmd(); + + if ( le.err ) + print( "ELIOT ELIOT : " + tojson( le ) + "\t" + myid ); + + if ( ! le.updatedExisting || le.n != 1 ) { + print( "going to assert for id: " + myid + " correct count is: " + counts[myid] + " db says count is: " + tojson(db.foo.findOne( { _id : myid } )) ); + } + + assert( le.updatedExisting , "GLE diff myid: " + myid + " 1: " + tojson(le) ) + assert.eq( 1 , le.n , "GLE diff myid: " + myid + " 2: " + tojson(le) ) - bulk = db.foo.initializeUnorderedBulkOp(); - var myid = doUpdate( bulk, false ); - var res = assert.writeOK(bulk.execute()); - assert.eq( 1, res.nModified, - "diff myid: " + myid + " 2: " + res.toString() + "\n" + - " correct count is: " + counts[myid] + - " db says count is: " + tojson(db.foo.findOne({ _id: myid })) ); + if ( Math.random() > .99 ){ + db.getLastError() + check( "random late check" ); // SERVER-1430 + } var x = s.chunkCounts( "foo" ) if ( Math.random() > .999 ) diff --git a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js index e3728817744..41bf9bf03c5 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js +++ b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js @@ -14,14 +14,12 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; - -var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 20 * 1024 * 1024 ) ){ - bulk.insert({ _id: Math.random(), s: bigString }); + db.foo.insert( { _id : Math.random() , s : bigString } ); inserted += bigString.length; } -assert.writeOK(bulk.execute()); +db.getLastError(); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); assert.lt( 20 , s.config.chunks.count() , "setup2" ); diff --git a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js index 32278c089f3..414b6d57925 100644 --- a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js +++ b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js @@ -22,11 +22,11 @@ for( var i = 0; i < nsq; i++ ) data += data dataObj = {} for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data -var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < 40; i++ ) { - bulk.insert({ data: dataObj }); + if(i != 0 && i % 10 == 0) printjson( coll.stats() ) + coll.save({ data : dataObj }) } -assert.writeOK(bulk.execute()); +db.getLastError(); assert.eq( 40 , coll.count() , "prep1" ); @@ -46,9 +46,9 @@ assert.soon( // On *extremely* slow or variable systems, we've seen migrations fail in the critical section and // kill the server. Do an explicit check for this. SERVER-8781 // TODO: Remove once we can better specify what systems to run what tests on. 
- try { - assert.commandWorked(shardA.getDB("admin").runCommand({ ping: 1 })); - assert.commandWorked(shardB.getDB("admin").runCommand({ ping: 1 })); + try { + assert.eq(null, shardA.getDB("admin").getLastError()); + assert.eq(null, shardB.getDB("admin").getLastError()); } catch(e) { print("An error occurred contacting a shard during balancing," + diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js index c4484356dd4..2948dbef3f9 100644 --- a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js +++ b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js @@ -23,11 +23,11 @@ numDocs = 20 * docsPerChunk print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs ) -var bulk = t.initializeUnorderedBulkOp(); -for (var i = 0; i < numDocs; i++){ - bulk.insert({ _id: i, s: bigString }); +for ( i=0; i<numDocs; i++ ){ + t.insert( { _id : i , s : bigString } ); } -assert.writeOK(bulk.execute()); + +db.getLastError(); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js index 8132e33c5d0..6b2e7faa56b 100644 --- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js +++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js @@ -40,14 +40,15 @@ coll.insert({ _id : -2, d : data15PlusMB }); coll.insert({ _id : -1, d : data15PlusMB }); // Docs of assorted sizes -assert.writeOK(coll.insert({ _id : 0, d : "x" })); -assert.writeOK(coll.insert({ _id : 1, d : data15PlusMB })); -assert.writeOK(coll.insert({ _id : 2, d : "x" })); -assert.writeOK(coll.insert({ _id : 3, d : data15MB })); -assert.writeOK(coll.insert({ _id : 4, d : "x" })); -assert.writeOK(coll.insert({ _id : 5, d : data1MB })); -assert.writeOK(coll.insert({ _id : 6, d : "x" })); - +coll.insert({ _id : 0, d : "x" }); +coll.insert({ _id : 1, d : data15PlusMB }); +coll.insert({ _id : 2, d : "x" }); +coll.insert({ _id : 3, d : data15MB }); +coll.insert({ _id : 4, d : "x" }); +coll.insert({ _id : 5, d : data1MB }); +coll.insert({ _id : 6, d : "x" }); + +assert.eq( null, coll.getDB().getLastError() ); assert.eq( 9, coll.find().itcount() ); jsTest.log( "Starting migration..." 
); diff --git a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js b/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js index 0d8af3a1ebe..9c4d73d5a2c 100644 --- a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js +++ b/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js @@ -6,14 +6,12 @@ s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); db = s.getDB( "test" ); -var bulk = db.foo.initializeUnorderedBulkOp(); -var bulk2 = db.bar.initializeUnorderedBulkOp(); for ( i=0; i<100; i++ ) { - bulk.insert({ _id: i, x: i }); - bulk2.insert({ _id: i, x: i }); + db.foo.insert( { _id : i , x : i } ) + db.bar.insert( { _id : i , x : i } ) } -assert.writeOK(bulk.execute()); -assert.writeOK(bulk2.execute()); + +db.getLastError(); sh.splitAt( "test.foo" , { _id : 50 } ) diff --git a/jstests/noPassthroughWithMongod/sharding_rs1.js b/jstests/noPassthroughWithMongod/sharding_rs1.js index 53a1f5e5dda..d79605ad177 100644 --- a/jstests/noPassthroughWithMongod/sharding_rs1.js +++ b/jstests/noPassthroughWithMongod/sharding_rs1.js @@ -15,13 +15,12 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; -var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 20 * 1024 * 1024 ) ){ - bulk.insert({ _id: num++, s: bigString, x: Math.random() }); + db.foo.insert( { _id : num++ , s : bigString , x : Math.random() } ); inserted += bigString.length; } -assert.writeOK(bulk.execute()); +db.getLastError(); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); assert.lt( 20 , s.config.chunks.count() , "setup2" ); diff --git a/jstests/noPassthroughWithMongod/sharding_rs2.js b/jstests/noPassthroughWithMongod/sharding_rs2.js index 1a0fe612d70..b577bf82ba9 100644 --- a/jstests/noPassthroughWithMongod/sharding_rs2.js +++ b/jstests/noPassthroughWithMongod/sharding_rs2.js @@ -92,12 +92,11 @@ assert.lte( before.query + 10 , after.query , "B3" ) db.foo.ensureIndex( { x : 1 } ) -var bulk = db.foo.initializeUnorderedBulkOp(); for ( i=0; i<100; i++ ){ if ( i == 17 ) continue; - bulk.insert({ x: i }); + db.foo.insert( { x : i } ) } -assert.writeOK(bulk.execute({ w: 3 })); +db.getLastError( 3 , 10000 ); // Counts pass the options of the connection - which is slaveOk'd, so we need to wait for // replication for this and future tests to pass diff --git a/jstests/noPassthroughWithMongod/ttl1.js b/jstests/noPassthroughWithMongod/ttl1.js index 3ce494bc2ef..cba4d035b05 100644 --- a/jstests/noPassthroughWithMongod/ttl1.js +++ b/jstests/noPassthroughWithMongod/ttl1.js @@ -35,6 +35,7 @@ t.insert( { x : true } ) //non-date value t.insert( { x : "yo" } ) //non-date value t.insert( { x : 3 } ) //non-date value t.insert( { x : /foo/ } ) //non-date value +db.getLastError(); assert.eq( 30 , t.count() ); diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js index 5646ce22a39..3b251dfa8a9 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl.js +++ b/jstests/noPassthroughWithMongod/ttl_repl.js @@ -29,11 +29,9 @@ masterdb.createCollection(mastercol.getName(), {usePowerOf2Sizes: false}); // create new collection. 
insert 24 docs, aged at one-hour intervalss now = (new Date()).getTime(); -var bulk = mastercol.initializeUnorderedBulkOp(); -for ( i=0; i<24; i++ ) { - bulk.insert({ x: new Date( now - ( 3600 * 1000 * i )) }); -} -assert.writeOK(bulk.execute()); +for ( i=0; i<24; i++ ) + mastercol.insert( { x : new Date( now - ( 3600 * 1000 * i ) ) } ); +masterdb.getLastError(); rt.awaitReplication(); assert.eq( 24 , mastercol.count() , "docs not inserted on primary" ); assert.eq( 24 , slave1col.count() , "docs not inserted on secondary" ); @@ -50,7 +48,8 @@ assert.eq( 0 , slave1col.stats().userFlags , "userFlags not 0 on secondary"); // create TTL index, wait for TTL monitor to kick in, then check that // userFlags get set to 1, and correct number of docs age out -assert.commandWorked(mastercol.ensureIndex({ x: 1 }, { expireAfterSeconds: 20000 })); +mastercol.ensureIndex( { x : 1 } , { expireAfterSeconds : 20000 } ); +masterdb.getLastError(); rt.awaitReplication(); sleep(70*1000); // TTL monitor runs every 60 seconds, so wait 70 diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js index 15e72b66a81..99c8681a144 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js +++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js @@ -17,7 +17,8 @@ var primeSystemReplset = function() { print("create a TTL collection"); var testDB = conn.getDB("test"); - assert.commandWorked(testDB.foo.ensureIndex({ x: 1 }, { expireAfterSeconds: 2 })); + testDB.foo.ensureIndex({x:1}, {expireAfterSeconds : 2}); + testDB.getLastError(); }; var restartWithConfig = function() { @@ -37,7 +38,8 @@ var restartWithConfig = function() { var restartWithoutConfig = function() { var localDB = conn.getDB("local"); - assert.writeOK(localDB.system.replset.remove({})); + localDB.system.replset.remove({}); + localDB.getLastError(); stopMongod(runner.port(), 15); diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js index 2c524d8d788..d5aa45e269a 100644 --- a/jstests/noPassthroughWithMongod/ttl_sharded.js +++ b/jstests/noPassthroughWithMongod/ttl_sharded.js @@ -20,12 +20,11 @@ s.adminCommand( { shardcollection : ns , key: { _id : 1 } } ); // insert 24 docs, with timestamps at one hour intervals var now = (new Date()).getTime(); -var bulk = t.initializeUnorderedBulkOp(); -for (var i = 0; i < 24; i++) { +for ( i=0; i<24; i++ ){ var past = new Date( now - ( 3600 * 1000 * i ) ); - bulk.insert({ _id: i, x: past }); + t.insert( {_id : i , x : past } ); } -assert.writeOK(bulk.execute()); +s.getDB( dbname ).getLastError(); assert.eq( t.count() , 24 , "initial docs not inserted"); // create the TTL index which delete anything older than ~5.5 hours diff --git a/jstests/quota/quota1.js b/jstests/quota/quota1.js new file mode 100644 index 00000000000..f062686b099 --- /dev/null +++ b/jstests/quota/quota1.js @@ -0,0 +1,51 @@ +t = db.quota1; + +print( "starting quota1.a" ); + +// Test that db.eval() times out if quotas are enabled +assert.throws( + function(z){ + db.eval( + function(){ + db.quota1a.save( { a : 1 } ); + var a = 5; + while ( true ){ + a += 2; + } + } + ) + } +); + +print( "done quota1.a" ); + +//print( "starting quota1.b" ); +//assert.throws( +// function(z){ +// db.eval( +// function(){ +// db.quota1b.save( { a : 1 } ); +// var a = 5; +// assert( sleep( 150000 ) ); +// } +// ) +// } +//); +//print( "done quota1.b" ); +// +//print( "starting quota1.c" ); +//assert.throws( +// function(z){ +// db.eval( 
+// function(){ +// db.quota1c.save( { a : 1 } ); +// var a = 1; +// while ( true ){ +// a += 1; +// assert( sleep( 1000 ) ); +// } +// } +// ) +// } +//); +//print( "done quota1.c" ); diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js index a2ec3ceb52f..ccde8874fbd 100644 --- a/jstests/repl/basic1.js +++ b/jstests/repl/basic1.js @@ -25,7 +25,7 @@ function check( note ){ sleep( 200 ); } lastOpLogEntry = m.getDB("local").oplog.$main.find({op:{$ne:"n"}}).sort({$natural:-1}).limit(-1).next(); - note = note + tojson(am.a.find().toArray()) + " != " + tojson(as.a.find().toArray()) + note = note + tojson(am.a.find().toArray()) + " != " + tojson(as.a.find().toArray()) + "last oplog:" + tojson(lastOpLogEntry); assert.eq( x.md5 , y.md5 , note ); } @@ -44,8 +44,9 @@ check( "C" ); // ----- check features ------- // map/reduce -assert.writeOK(am.mr.insert({ tags: [ "a" ]})); -assert.writeOK(am.mr.insert({ tags: [ "a", "b" ]})); +am.mr.insert( { tags : [ "a" ] } ) +am.mr.insert( { tags : [ "a" , "b" ] } ) +am.getLastError(); check( "mr setup" ); m = function(){ @@ -86,19 +87,22 @@ block(); checkNumCollections( "MR4" ); -var t = am.rpos; -var writeOption = { writeConcern: { w: 2, wtimeout: 3000 }}; -t.insert({ _id: 1, a: [{ n: "a", c: 1 }, { n: "b", c: 1 }, { n: "c", c: 1 }], b: [ 1, 2, 3 ]}, - writeOption); + +t = am.rpos; +t.insert( { _id : 1 , a : [ { n : "a" , c : 1 } , { n : "b" , c : 1 } , { n : "c" , c : 1 } ] , b : [ 1 , 2 , 3 ] } ) +block(); check( "after pos 1 " ); -t.update({ "a.n": "b" }, { $inc: { "a.$.c": 1 }}, writeOption); +t.update( { "a.n" : "b" } , { $inc : { "a.$.c" : 1 } } ) +block(); check( "after pos 2 " ); -t.update({ b: 2 }, { $inc: { "b.$": 1 }}, writeOption); +t.update( { "b" : 2 } , { $inc : { "b.$" : 1 } } ) +block(); check( "after pos 3 " ); -t.update({ b: 3 }, { $set: { "b.$": 17 }}, writeOption); +t.update( { "b" : 3} , { $set : { "b.$" : 17 } } ) +block(); check( "after pos 4 " ); @@ -108,17 +112,23 @@ printjson( as.rpos.findOne() ) //am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().limit(10).sort( { $natural : -1 } ).forEach( printjson ) t = am.b; -var updateOption = { upsert: true, multi: false, writeConcern: { w: 2, wtimeout: 3000 }}; -t.update({ _id: "fun" }, { $inc: { "a.b.c.x": 6743 }}, updateOption); +t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 6743} } , true, false) +block() check( "b 1" ); -t.update({ _id: "fun" }, { $inc: { "a.b.c.x": 5 }}, updateOption); +t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 5} } , true, false) +block() check( "b 2" ); -t.update({ _id: "fun" }, { $inc: { "a.b.c.x": 100, "a.b.c.y": 911 }}, updateOption); +t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 100, "a.b.c.y" : 911} } , true, false) +block() assert.eq( { _id : "fun" , a : { b : { c : { x : 6848 , y : 911 } } } } , as.b.findOne() , "b 3" ); +//printjson( t.findOne() ) +//printjson( as.b.findOne() ) +//am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().sort( { $natural : -1 } ).limit(3).forEach( printjson ) check( "b 4" ); + // lots of indexes am.lotOfIndexes.insert( { x : 1 } ) @@ -126,8 +136,10 @@ for ( i=0; i<200; i++ ){ var idx = {} idx["x"+i] = 1; am.lotOfIndexes.ensureIndex( idx ); + am.getLastError() } + assert.soon( function(){ return am.lotOfIndexes.getIndexes().length == as.lotOfIndexes.getIndexes().length; } , "lots of indexes a" ) assert.eq( am.lotOfIndexes.getIndexes().length , as.lotOfIndexes.getIndexes().length , "lots of indexes b" ) @@ -142,8 +154,9 @@ assert.soon( function(){ z = as.mu1.findOne(); printjson( z ); 
return friendlyEq // profiling - this sould be last am.setProfilingLevel( 2 ) -am.foo.insert({ x: 1 }, writeOption); +am.foo.insert( { x : 1 } ) am.foo.findOne() +block(); assert.eq( 2 , am.system.profile.count() , "P1" ) assert.eq( 0 , as.system.profile.count() , "P2" ) diff --git a/jstests/repl/block1.js b/jstests/repl/block1.js index ef36c3efb34..e358ba39705 100644 --- a/jstests/repl/block1.js +++ b/jstests/repl/block1.js @@ -11,7 +11,8 @@ tm = dbm.bar; ts = dbs.bar; for ( var i=0; i<1000; i++ ){ - tm.insert({ _id: i }, { writeConcern: { w: 2 }}); + tm.insert( { _id : i } ); + dbm.runCommand( { getlasterror : 1 , w : 2 } ) assert.eq( i + 1 , ts.count() , "A" + i ); assert.eq( i + 1 , tm.count() , "B" + i ); } diff --git a/jstests/repl/block2.js b/jstests/repl/block2.js index fc35b2774c4..64e52b8a94f 100644 --- a/jstests/repl/block2.js +++ b/jstests/repl/block2.js @@ -18,14 +18,33 @@ function check( msg ){ assert.eq( tm.count() , ts.count() , "check: " + msg ); } +function worked( w , wtimeout ){ + var gle = dbm.getLastError( w , wtimeout ); + if (gle != null) { + printjson(gle); + } + return gle == null; +} + check( "A" ); -assert.writeOK(tm.insert({ x: 1 }, { writeConcern: { w: 2 }})); -assert.writeOK(tm.insert({ x: 2 }, { writeConcern: { w: 2, wtimeout: 3000 }})); +tm.save( { x : 1 } ); +assert( worked( 2 ) , "B" ); + +tm.save( { x : 2 } ); +assert( worked( 2 , 3000 ) , "C" ) rt.stop( false ); -assert.writeError(tm.insert({ x: 3 }, { writeConcern: { w: 2, wtimeout: 3000 }})); +tm.save( { x : 3 } ) assert.eq( 3 , tm.count() , "D1" ); +assert( ! worked( 2 , 3000 ) , "D2" ) + +s = rt.start( false ) +setup(); +assert( worked( 2 , 30000 ) , "E" ) rt.stop(); + + + diff --git a/jstests/repl/drop_dups.js b/jstests/repl/drop_dups.js index bd3d2820108..1b151cfb71f 100644 --- a/jstests/repl/drop_dups.js +++ b/jstests/repl/drop_dups.js @@ -4,7 +4,9 @@ var rt = new ReplTest( "drop_dups" ); m = rt.start( true ); s = rt.start( false ); -var writeOption = { writeConcern: { w: 2, wtimeout: 3000 }}; +function block(){ + am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } ) +} am = m.getDB( "foo" ); as = s.getDB( "foo" ); @@ -14,18 +16,20 @@ function run( createInBackground ) { collName = "foo" + ( createInBackground ? 
"B" : "F" ); am[collName].drop(); - am.blah.insert({ x: 1 }, writeOption); + am.blah.insert( { x : 1 } ) assert.soon( function(){ + block(); return as.blah.findOne(); } ); - - var bulk = am[collName].initializeUnorderedBulkOp(); - for (var i = 0; i < 10; i++) { - bulk.insert({ _id: i, x: Math.floor( i / 2 ) }); + + + for ( i=0; i<10; i++ ) { + am[collName].insert( { _id : i , x : Math.floor( i / 2 ) } ) } - assert.writeOK(bulk.execute({ w: 2, wtimeout: 3000 })); - + + block(); + am.runCommand( { "godinsert" : collName , obj : { _id : 100 , x : 20 } } ); am.runCommand( { "godinsert" : collName , obj : { _id : 101 , x : 20 } } ); @@ -39,8 +43,8 @@ function run( createInBackground ) { } am[collName].ensureIndex( { x : 1 } , { unique : true , dropDups : true , background : createInBackground } ); - am.blah.insert({ x: 1 }, writeOption); - + am.blah.insert( { x : 1 } ) + block(); assert.eq( 2 , am[collName].getIndexKeys().length , "A1 : " + createInBackground ) if (!createInBackground) { assert.eq( 2 , as[collName].getIndexKeys().length , "A2 : " + createInBackground ) diff --git a/jstests/repl/master1.js b/jstests/repl/master1.js index 49b3416d202..93bfaf7862c 100644 --- a/jstests/repl/master1.js +++ b/jstests/repl/master1.js @@ -43,9 +43,11 @@ m.getDB( "local" ).runCommand( {godinsert:"oplog.$main", obj:op} ); rt.stop( true ); m = rt.start( true, null, true ); assert.eq( op.ts.i, lastop().ts.i ); +am().save( {} ); +// The above write should cause the server to terminate assert.throws(function() { - am().save( {} ); // triggers fassert because ofclock skew + am().findOne(); }); assert.neq(0, rt.stop( true )); // fasserted diff --git a/jstests/repl/mastermaster1.js b/jstests/repl/mastermaster1.js new file mode 100644 index 00000000000..97fdc149b56 --- /dev/null +++ b/jstests/repl/mastermaster1.js @@ -0,0 +1,55 @@ +// basic testing of master/master + + +ports = allocatePorts( 2 ) + +left = startMongodTest( ports[0] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[1] } ) + +x = left.getDB( "admin" ).runCommand( "ismaster" ) +assert( x.ismaster , "left: " + tojson( x ) ) + +right = startMongodTest( ports[1] , "mastermaster1right" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } ) + +x = right.getDB( "admin" ).runCommand( "ismaster" ) +assert( x.ismaster , "right: " + tojson( x ) ) + +print( "check 1" ) + + +ldb = left.getDB( "test" ) +rdb = right.getDB( "test" ) + +print( "check 2" ) + +ldb.foo.insert( { _id : 1 , x : "eliot" } ) +result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } ); +printjson(result); +rdb.foo.insert( { _id : 2 , x : "sara" } ) +result = rdb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } ) +printjson(result); + +print( "check 3" ) + +print( "left" ) +ldb.foo.find().forEach( printjsononeline ) +print( "right" ) +rdb.foo.find().forEach( printjsononeline ) + +print( "oplog" ) + +rdb.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().forEach( printjsononeline ) + +/* +assert.eq( 2 , ldb.foo.count() , "B1" ) +assert.eq( 2 , rdb.foo.count() , "B2" ) +*/ + +print( "going to stop everything" ) + +for ( var i=0; i<ports.length; i++ ){ + stopMongod( ports[i] ); +} + +print( "yay" ) + + diff --git a/jstests/repl/mod_move.js b/jstests/repl/mod_move.js index 66cc00b39e5..d39e747b833 100644 --- a/jstests/repl/mod_move.js +++ b/jstests/repl/mod_move.js @@ -6,6 +6,10 @@ var rt = new ReplTest( "mod_move" ); m = rt.start( true , { oplogSize : 50 } ); +function block(){ + am.runCommand( 
{ getlasterror : 1 , w : 2 , wtimeout : 3000 } ) +} + am = m.getDB( "foo" ); function check( note ){ @@ -27,30 +31,39 @@ function check( note ){ BIG = 100000; N = BIG * 2; -var bulk = am.a.initializeUnorderedBulkOp(); -for (var i = 0; i < BIG; i++) { - bulk.insert({ _id: i, s: 1, x: 1 }); +s : "asdasdasdasdasdasdasdadasdadasdadasdasdas" + +for ( i=0; i<BIG; i++ ) { + am.a.insert( { _id : i , s : 1 , x : 1 } ) } -for (; i < N; i++) { - bulk.insert({ _id: i, s: 1 }); +for ( ; i<N; i++ ) { + am.a.insert( { _id : i , s : 1 } ) } -for (i = 0; i < BIG; i++) { - bulk.find({ _id: i }).remove(); +for ( i=0; i<BIG; i++ ) { + am.a.remove( { _id : i } ) } -assert.writeOK(bulk.execute()); +am.getLastError(); assert.eq( BIG , am.a.count() ) assert.eq( 1 , am.a.stats().paddingFactor , "A2" ) + // start slave s = rt.start( false ); as = s.getDB( "foo" ); -bulk = am.a.initializeUnorderedBulkOp(); -for (i = N - 1; i >= BIG; i--) { - bulk.find({ _id: i }).update({ $set: { x: 1 }}); +for ( i=N-1; i>=BIG; i-- ) { + am.a.update( { _id : i } , { $set : { x : 1 } } ) + if ( i == N ) { + am.getLastError() + assert.lt( as.a.count() , BIG , "B1" ) + print( "NOW : " + as.a.count() ) + } } -assert.writeOK(bulk.execute()); check( "B" ) rt.stop(); + + + + diff --git a/jstests/repl/repl12.js b/jstests/repl/repl12.js index 4bfaf17d5d7..8db4b75e7a9 100644 --- a/jstests/repl/repl12.js +++ b/jstests/repl/repl12.js @@ -17,6 +17,7 @@ for( i = 0; i < 3; ++i ) { m.getDB( a ).c.save( {} ); a += "a"; } +m.getDB(a).getLastError(); //print("\n\n\n DB NAMES MASTER:"); //printjson(m.getDBNames()); diff --git a/jstests/repl/repl13.js b/jstests/repl/repl13.js index 78daae24c32..e8a80966dab 100644 --- a/jstests/repl/repl13.js +++ b/jstests/repl/repl13.js @@ -12,11 +12,10 @@ m = rt.start( true ); mc = m.getDB( 'd' )[ 'c' ]; // Insert some documents with a:{} fields. -var bulk = mc.initializeUnorderedBulkOp(); -for(var i = 0; i < 100000; ++i) { - bulk.insert({ _id: i, a: {}}); +for( i = 0; i < 100000; ++i ) { + mc.save( {_id:i,a:{}} ); } -assert.writeOK(bulk.execute()); +m.getDB( 'd' ).getLastError(); s = rt.start( false ); sc = s.getDB( 'd' )[ 'c' ]; @@ -27,13 +26,11 @@ assert.soon( function() { debug( sc.count() ); return sc.count() > 0; } ); // Update documents that will be cloned last with the intent that an updated version will be cloned. // This may cause an assertion when an update that was successfully applied to the original version // of a document is replayed against an updated version of the same document. -bulk = mc.initializeUnorderedBulkOp(); for( i = 99999; i >= 90000; --i ) { // If the document is cloned as {a:1}, the {$set:{'a.b':1}} modifier will uassert. - bulk.find({ _id: i }).update({ $set: { 'a.b': 1 }}); - bulk.find({ _id: i }).update({ $set: { a: 1 }}); + mc.update( {_id:i}, {$set:{'a.b':1}} ); + mc.update( {_id:i}, {$set:{a:1}} ); } -assert.writeOK(bulk.execute()); // The initial sync completes and subsequent writes succeed, in spite of any assertions that occur // when the update operations above are replicated. 
diff --git a/jstests/repl/repl17.js b/jstests/repl/repl17.js index c7a7be35ffc..8011d974328 100644 --- a/jstests/repl/repl17.js +++ b/jstests/repl/repl17.js @@ -11,6 +11,7 @@ md = master.getDB( 'd' ); for( i = 0; i < 1000; ++i ) { md[ ''+i ].save( {} ); } +md.getLastError(); slave = rt.start( false ); sd = slave.getDB( 'd' ); diff --git a/jstests/repl/repl19.js b/jstests/repl/repl19.js index a655d522bae..71d4335014a 100644 --- a/jstests/repl/repl19.js +++ b/jstests/repl/repl19.js @@ -13,7 +13,8 @@ for( i = 0; i < 100000; ++i ) { } targetId = 1000*1000; -assert.writeOK(mc.insert({ _id: targetId, val: [ 1, 2, 3 ] })); +mc.insert( { _id:targetId, val:[ 1, 2, 3 ] } ); +master.getDB( 'd' ).getLastError(); slave = rt.start( false ); sc = slave.getDB( 'd' )[ 'c' ]; diff --git a/jstests/repl/repl20.js b/jstests/repl/repl20.js index c30ef8d6f3c..02e50f58f1f 100644 --- a/jstests/repl/repl20.js +++ b/jstests/repl/repl20.js @@ -13,7 +13,8 @@ for( i = 0; i < 100000; ++i ) { } targetId = 1000*1000; -assert.writeOK(mc.insert({ _id: targetId, val: [ 1 ] })); +mc.insert( { _id:targetId, val:[ 1 ] } ); +master.getDB( 'd' ).getLastError(); slave = rt.start( false ); sc = slave.getDB( 'd' )[ 'c' ]; diff --git a/jstests/repl/repl21.js b/jstests/repl/repl21.js index 87c0c7fdd02..a94a4b5b779 100644 --- a/jstests/repl/repl21.js +++ b/jstests/repl/repl21.js @@ -8,14 +8,13 @@ rt = new ReplTest( "repl21tests" ); master = rt.start( true ); mc = master.getDB( 'd' )[ 'c' ]; -var bulk = mc.initializeUnorderedBulkOp(); -for(var i = 0; i < 100000; ++i) { - bulk.insert({ _id: i, z: i }); +for( i = 0; i < 100000; ++i ) { + mc.insert( { _id:i, z:i } ); } targetId = 1000*1000; -bulk.insert({ _id: targetId, val: [ 1 ] }); -assert.writeOK(bulk.execute()); +mc.insert( { _id:targetId, val:[ 1 ] } ); +master.getDB( 'd' ).getLastError(); slave = rt.start( false ); sc = slave.getDB( 'd' )[ 'c' ]; @@ -37,4 +36,4 @@ assert.soon( function() { return sc.count( { _id:'sentinel' } ) > 0; } ); assert.eq( [ 1, 3 ], mc.findOne( { _id:targetId } ).val ); assert.eq( [ 1, 3 ], sc.findOne( { _id:targetId } ).val ); -} +}
\ No newline at end of file diff --git a/jstests/repl/repl5.js b/jstests/repl/repl5.js index aeba7eb1095..eda48496656 100644 --- a/jstests/repl/repl5.js +++ b/jstests/repl/repl5.js @@ -14,10 +14,8 @@ doTest = function(signal, extraOpts) { m = rt.start( true ); ma = m.getDB( "a" ).a; - var bulk = ma.initializeUnorderedBulkOp(); for( i = 0; i < 10000; ++i ) - bulk.insert({ i: i }); - assert.writeOK(bulk.execute()); + ma.save( { i:i } ); s = rt.start(false, extraOpts); soonCountAtLeast( "a", "a", 1 ); diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js index f10b6b777bc..b65085c5702 100644 --- a/jstests/replsets/auth1.js +++ b/jstests/replsets/auth1.js @@ -134,18 +134,19 @@ wait(function() { print("add some more data 1"); master.auth("bar", "baz"); -bulk = master.foo.initializeUnorderedBulkOp(); for (var i=0; i<1000; i++) { - bulk.insert({ x: i, foo: "bar" }); + master.foo.insert({x:i, foo : "bar"}); } -assert.writeOK(bulk.execute({ w: 2 })); +var result = master.runCommand({getlasterror:1, w:2, wtimeout:60000}); +printjson(result); + print("resync"); rs.restart(0, {"keyFile" : path+"key1"}); print("add some more data 2"); -bulk = master.foo.initializeUnorderedBulkOp(); +var bulk = master.foo.initializeUnorderedBulkOp(); for (var i=0; i<1000; i++) { bulk.insert({ x: i, foo: "bar" }); } diff --git a/jstests/replsets/downstream.js b/jstests/replsets/downstream.js index f50716f0340..795e6671d46 100755 --- a/jstests/replsets/downstream.js +++ b/jstests/replsets/downstream.js @@ -23,13 +23,11 @@ var conn = repset.getMaster() var db = conn.getDB('test') // Add data to it -var bulk = db.foo.initializeUnorderedBulkOp(); -for (var i = 0; i < N; i++) { - bulk.insert({ x: i, text: Text }); -} -// wait to be copied to at least one secondary (BUG hangs here) -assert.writeOK(bulk.execute({ w: 2 })); +for (var i = 0; i < N; i++) + db['foo'].insert({x: i, text: Text}) +// wait to be copied to at least one secondary (BUG hangs here) +db.getLastError(2) print('getlasterror_w2.js SUCCESS') } diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js index 20364381dd0..da8979bb34c 100644 --- a/jstests/replsets/replset2.js +++ b/jstests/replsets/replset2.js @@ -77,6 +77,7 @@ doTest = function (signal) { s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 }); assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1"); + // Test getlasterror with large insert print("replset2.js **** Try inserting many records ****") try { var bigData = new Array(2000).toString(); diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js index 074aa7b9dda..55e68768eee 100644 --- a/jstests/replsets/tags2.js +++ b/jstests/replsets/tags2.js @@ -1,4 +1,4 @@ -// Change a write concern mode from 2 to 3 servers +// Change a getLastErrorMode from 2 to 3 servers var host = getHostName(); var replTest = new ReplSetTest( {name: "rstag", nodes: 4, startPort: 31000} ); diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js index 8ffa1664526..e74c6c4bf4d 100644 --- a/jstests/sharding/movechunk_include.js +++ b/jstests/sharding/movechunk_include.js @@ -13,16 +13,16 @@ function setupMoveChunkTest(st) { str += "asdasdsdasdasdasdas"; } - var data = 0; - var num = 0; + var data = num = 0; //Insert till you get to 10MB of data - var bulk = testcoll.initializeUnorderedBulkOp(); while ( data < ( 1024 * 1024 * 10 ) ) { - bulk.insert({ _id: num++, s: str }); - data += str.length; + testcoll.insert( { _id : num++ , s : str } ) + data += str.length } - 
assert.writeOK(bulk.execute()); + + //Flush and wait + testdb.getLastError() var stats = st.chunkCounts( "foo" ) var to = "" diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js index 3fc528293c8..52ce36a83e0 100644 --- a/jstests/sharding/multi_write_target.js +++ b/jstests/sharding/multi_write_target.js @@ -25,15 +25,20 @@ st.printShardingStatus(); jsTest.log("Testing multi-update..."); // Put data on all shards -assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 })); -assert.writeOK(st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 })); +st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 }); +assert.gleOK(st.shard0.getCollection(coll.toString()).getDB().getLastErrorObj()); +st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 }); +assert.gleOK(st.shard1.getCollection(coll.toString()).getDB().getLastErrorObj()); // Data not in chunks -assert.writeOK(st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 })); +st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 }); +assert.gleOK(st.shard2.getCollection(coll.toString()).getDB().getLastErrorObj()); // Non-multi-update doesn't work without shard key -assert.writeError(coll.update({ x : 1 }, { $set : { updated : true } }, { multi : false })); +coll.update({ x : 1 }, { $set : { updated : true } }, { multi : false }); +assert.gleError(coll.getDB().getLastErrorObj()); -assert.writeOK(coll.update({ x : 1 }, { $set : { updated : true } }, { multi : true })); +coll.update({ x : 1 }, { $set : { updated : true } }, { multi : true }); +assert.gleOK(coll.getDB().getLastErrorObj()); // Ensure update goes to *all* shards assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updated : true })); @@ -41,7 +46,8 @@ assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({ updated : tr assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updated : true })); // _id update works, and goes to all shards -assert.writeOK(coll.update({ _id : 0 }, { $set : { updatedById : true } }, { multi : false })); +coll.update({ _id : 0 }, { $set : { updatedById : true } }, { multi : false }); +assert.gleOK(coll.getDB().getLastErrorObj()); // Ensure _id update goes to *all* shards assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updatedById : true })); @@ -50,9 +56,11 @@ assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updatedById jsTest.log("Testing multi-delete..."); // non-multi-delete doesn't work without shard key -assert.writeError(coll.remove({ x : 1 }, { justOne : true })); +coll.remove({ x : 1 }, { justOne : true }); +assert.gleError(coll.getDB().getLastErrorObj()); -assert.writeOK(coll.remove({ x : 1 }, { justOne : false })); +coll.remove({ x : 1 }, { justOne : false }); +assert.gleOK(coll.getDB().getLastErrorObj()); // Ensure delete goes to *all* shards assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x : 1 })); @@ -60,12 +68,16 @@ assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({ x : 1 })); assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({ x : 1 })); // Put more on all shards -assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 })); -assert.writeOK(st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 })); +st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 }); 
+assert.gleOK(st.shard0.getCollection(coll.toString()).getDB().getLastErrorObj()); +st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 }); +assert.gleOK(st.shard1.getCollection(coll.toString()).getDB().getLastErrorObj()); // Data not in chunks -assert.writeOK(st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 })); +st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 }); +assert.gleOK(st.shard2.getCollection(coll.toString()).getDB().getLastErrorObj()); -assert.writeOK(coll.remove({ _id : 0 }, { justOne : true })); +coll.remove({ _id : 0 }, { justOne : true }); +assert.gleOK(coll.getDB().getLastErrorObj()); // Ensure _id delete goes to *all* shards assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x : 1 })); diff --git a/jstests/slow1/sharding_multiple_collections.js b/jstests/slow1/sharding_multiple_collections.js index d817038ecdb..61d9911afca 100644 --- a/jstests/slow1/sharding_multiple_collections.js +++ b/jstests/slow1/sharding_multiple_collections.js @@ -12,14 +12,11 @@ S = "" while ( S.length < 500 ) S += "123123312312"; -var bulk = db.foo.initializeUnorderedBulkOp(); -var bulk2 = db.bar.initializeUnorderedBulkOp(); for ( i=0; i<N; i++ ){ - bulk.insert({ _id: i, s: S }); - bulk2.insert({ _id: i, s: S, s2: S }); + db.foo.insert( { _id : i , s : S } ) + db.bar.insert( { _id : i , s : S , s2 : S } ) + db.getLastError() } -assert.writeOK(bulk.execute()); -assert.writeOK(bulk2.execute()); db.printShardingStatus() diff --git a/jstests/slow2/32bit.js b/jstests/slow2/32bit.js index a149ea3e8dd..d80cc7821c3 100755 --- a/jstests/slow2/32bit.js +++ b/jstests/slow2/32bit.js @@ -7,97 +7,97 @@ if (forceSeedToBe) function f() {
seed = forceSeedToBe || Math.random();
-
+
pass = 1;
var mydb = db.getSisterDB( "test_32bit" );
mydb.dropDatabase();
while( 1 ) {
- if( pass >= 2 )
- break;
+ if( pass >= 2 )
+ break;
print("32bit.js PASS #" + pass);
pass++;
-
+
t = mydb.colltest_32bit;
print("seed=" + seed);
-
+
t.insert({x:1});
t.ensureIndex({a:1});
t.ensureIndex({b:1}, true);
t.ensureIndex({x:1});
if( Math.random() < 0.3 )
- t.ensureIndex({c:1});
+ t.ensureIndex({c:1});
t.ensureIndex({d:1});
t.ensureIndex({e:1});
t.ensureIndex({f:1});
-
+
big = 'a b';
big = big + big;
k = big;
big = big + big;
big = big + big;
big = big + big;
-
+
a = 0;
c = 'kkk';
var start = new Date();
- while( 1 ) {
- b = Math.random(seed);
- d = c + -a;
+ while( 1 ) {
+ b = Math.random(seed);
+ d = c + -a;
f = Math.random(seed) + a;
a++;
- cc = big;
- if( Math.random(seed) < .1 )
- cc = null;
- var res = t.insert({ a: a, b: b, c: cc, d: d, f: f });
- if( Math.random(seed) < 0.01 ) {
- if (res.hasWriteError()) {
- // Presumably we have mmap error on 32 bit. try a few more manipulations
- // attempting to break things.
- t.insert({a:33,b:44,c:55,d:66,f:66});
- t.insert({a:33,b:44000,c:55,d:66});
- t.insert({a:33,b:440000,c:55});
- t.insert({a:33,b:4400000});
- t.update({a:20},{'$set':{c:'abc'}});
- t.update({a:21},{'$set':{c:'aadsfbc'}});
- t.update({a:22},{'$set':{c:'c'}});
- t.update({a:23},{'$set':{b:cc}});
- t.remove({a:22});
- break;
- }
-
- t.remove({a:a});
- t.remove({b:Math.random(seed)});
- t.insert({e:1});
- t.insert({f:'aaaaaaaaaa'});
-
+ cc = big;
+ if( Math.random(seed) < .1 )
+ cc = null;
+ t.insert({a:a,b:b,c:cc,d:d,f:f});
+ if( Math.random(seed) < 0.01 ) {
+
+ if( mydb.getLastError() ) {
+ /* presumably we have mmap error on 32 bit. try a few more manipulations attempting to break things */
+ t.insert({a:33,b:44,c:55,d:66,f:66});
+ t.insert({a:33,b:44000,c:55,d:66});
+ t.insert({a:33,b:440000,c:55});
+ t.insert({a:33,b:4400000});
+ t.update({a:20},{'$set':{c:'abc'}});
+ t.update({a:21},{'$set':{c:'aadsfbc'}});
+ t.update({a:22},{'$set':{c:'c'}});
+ t.update({a:23},{'$set':{b:cc}});
+ t.remove({a:22});
+ break;
+ }
+
+ t.remove({a:a});
+ t.remove({b:Math.random(seed)});
+ t.insert({e:1});
+ t.insert({f:'aaaaaaaaaa'});
+
if( Math.random() < 0.00001 ) { print("remove cc"); t.remove({c:cc}); }
if( Math.random() < 0.0001 ) { print("update cc"); t.update({c:cc},{'$set':{c:1}},false,true); }
if( Math.random() < 0.00001 ) { print("remove e"); t.remove({e:1}); }
- }
- if (a == 20000 ) {
- var delta_ms = (new Date())-start;
- // 2MM / 20000 = 100. 1000ms/sec.
- var eta_secs = delta_ms * (100 / 1000);
- print("32bit.js eta_secs:" + eta_secs);
- if( eta_secs > 1000 ) {
- print("32bit.js machine is slow, stopping early. a:" + a);
- mydb.dropDatabase();
- return;
- }
- }
- if( a % 100000 == 0 ) {
- print(a);
- // on 64 bit we won't error out, so artificially stop. on 32 bit we will hit
- // mmap limit ~1.6MM but may vary by a factor of 2x by platform
- if( a >= 2200000 ) {
+ }
+ if (a == 20000 ) {
+ var delta_ms = (new Date())-start;
+ // 2MM / 20000 = 100. 1000ms/sec.
+ var eta_secs = delta_ms * (100 / 1000);
+ print("32bit.js eta_secs:" + eta_secs);
+ if( eta_secs > 1000 ) {
+ print("32bit.js machine is slow, stopping early. a:" + a);
+ mydb.dropDatabase();
+ return;
+ }
+ }
+ if( a % 100000 == 0 ) {
+ print(a);
+ // on 64 bit we won't error out, so artificially stop. on 32 bit we will hit mmap limit ~1.6MM but may
+ // vary by a factor of 2x by platform
+ if( a >= 2200000 ) {
mydb.dropDatabase();
- return;
- }
+ return;
+ }
}
- }
+ }
print("count: " + t.count());
var res = t.validate();
@@ -105,11 +105,11 @@ function f() {
print("32bit.js FAIL validating");
print(res.result);
printjson(res);
- //mydb.dropDatabase();
- throw "fail validating 32bit.js";
+ //mydb.dropDatabase();
+ throw "fail validating 32bit.js";
}
- mydb.dropDatabase();
+ mydb.dropDatabase();
}
print("32bit.js SUCCESS");
diff --git a/jstests/slow2/conc_update.js b/jstests/slow2/conc_update.js index 4ee5bd22ca7..0d778df047e 100644 --- a/jstests/slow2/conc_update.js +++ b/jstests/slow2/conc_update.js @@ -6,42 +6,46 @@ db.dropDatabase(); NRECORDS=3*1024*1024 print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)") -var bulk = db.conc.initializeUnorderedBulkOp(); -for (var i = 0; i < NRECORDS; i++) { - bulk.insert({ x: i }); +for (i=0; i<(NRECORDS); i++) { + db.conc.insert({x:i}) + if ((i%(1024*1024))==0) + print("loaded " + i/(1024*1024) + " mibi-records") } -assert.writeOK(bulk.execute()); print("making an index (this will take a while)") db.conc.ensureIndex({x:1}) var c1=db.conc.count({x:{$lt:NRECORDS}}) -updater = startParallelShell("db = db.getSisterDB('concurrency');\ - db.concflag.insert({ inprog: true });\ - sleep(20);\ - assert.writeOK(db.conc.update({}, \ - { $inc: { x: " + NRECORDS + "}}, false, true)); \ - assert.writeOK(db.concflag.update({}, { inprog: false }));"); +updater=startParallelShell("db=db.getSisterDB('concurrency');\ + db.concflag.insert( {inprog:true} );\ + sleep(20);\ + db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\ + e=db.getLastError();\ + print('update error: '+ e);\ + db.concflag.update({},{inprog:false});\ + assert.eq(e, null, 'update failed');"); -assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } , +assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } , "wait for fork" , 30000 , 1 ); querycount=0; decrements=0; misses=0 -assert.soon(function(){ - c2=db.conc.count({x:{$lt:NRECORDS}}) - print(c2) - querycount++; - if (c2<c1) - decrements++; - else - misses++; - c1 = c2; - return ! db.concflag.findOne().inprog; -}, "update never finished" , 2 * 60 * 60 * 1000 , 10 ); +assert.soon( + function(){ + c2=db.conc.count({x:{$lt:NRECORDS}}) + print(c2) + querycount++; + if (c2<c1) + decrements++; + else + misses++; + c1 = c2; + return ! 
db.concflag.findOne().inprog; + } , + "update never finished" , 2 * 60 * 60 * 1000 , 10 ); print(querycount + " queries, " + decrements + " decrements, " + misses + " misses"); diff --git a/jstests/slow2/dur_big_atomic_update.js b/jstests/slow2/dur_big_atomic_update.js index b8d3ba60be1..18a7c4a68f2 100644 --- a/jstests/slow2/dur_big_atomic_update.js +++ b/jstests/slow2/dur_big_atomic_update.js @@ -8,29 +8,26 @@ conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--durOption d = conn.getDB("test"); d.foo.drop(); -var bulk = d.foo.initializeUnorderedBulkOp(); -for (var i = 0; i < 1024; i++){ - bulk.insert({ _id: i }); +for (var i=0; i<1024; i++){ + d.foo.insert({_id:i}); } -assert.writeOK(bulk.execute()); big_string = 'xxxxxxxxxxxxxxxx'; while (big_string.length < 1024*1024) { big_string += big_string; } -var res = assert.writeOK(d.foo.update({ $atomic: 1 }, - { $set: { big_string: big_string }}, - false, true /* multi */ )); -assert.eq(1024, res.nModified); +d.foo.update({$atomic:1}, {$set: {big_string: big_string}}, false, /*multi*/true); +err = d.getLastErrorObj(); + +assert(err.err == null); +assert(err.n == 1024); d.dropDatabase(); -bulk = d.foo.initializeUnorderedBulkOp(); -for (var i = 0; i < 1024; i++){ - bulk.insert({ _id: i }); +for (var i=0; i<1024; i++){ + d.foo.insert({_id:i}); } -assert.writeOK(bulk.execute()); // Do it again but in a db.eval d.eval( diff --git a/jstests/slow2/mr_during_migrate.js b/jstests/slow2/mr_during_migrate.js index 6ab9754c4f6..53ffd10fbec 100644 --- a/jstests/slow2/mr_during_migrate.js +++ b/jstests/slow2/mr_during_migrate.js @@ -17,13 +17,11 @@ var dataSize = 1024 // bytes, must be power of 2 var data = "x" while( data.length < dataSize ) data += data -var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < numDocs; i++ ){ - bulk.insert({ _id: i, data: data }); + coll.insert({ _id : i, data : data }) } -assert.writeOK(bulk.execute()); - // Make sure everything got inserted +assert.eq( null, coll.getDB().getLastError() ) assert.eq( numDocs, coll.find().itcount() ) diff --git a/jstests/slow2/replsets_killop.js b/jstests/slow2/replsets_killop.js index e035ae3cd40..3d3ee51f709 100644 --- a/jstests/slow2/replsets_killop.js +++ b/jstests/slow2/replsets_killop.js @@ -16,11 +16,11 @@ assert.soon( function() { return secondary.getDB( 'test' ).test.count() == 1; } // Start a parallel shell to insert new documents on the primary. inserter = startParallelShell( - 'var bulk = db.test.initializeUnorderedBulkOp(); \ - for( i = 1; i < ' + numDocs + '; ++i ) { \ - bulk.insert({ a: i }); \ - } \ - bulk.execute();' + 'for( i = 1; i < ' + numDocs + '; ++i ) { \ + db.test.save( { a:i } ); \ + sleep( 1 ); \ + } \ + db.getLastError();' ); // Periodically kill replication get mores. diff --git a/jstests/slow2/replsets_prefetch_stress.js b/jstests/slow2/replsets_prefetch_stress.js index 4273cb594fd..dcd523dd3a4 100644 --- a/jstests/slow2/replsets_prefetch_stress.js +++ b/jstests/slow2/replsets_prefetch_stress.js @@ -8,7 +8,8 @@ replTest.initiate(); var master = replTest.getMaster(); c = master.getDB( 'd' )[ 'c' ]; -assert.writeOK(c.insert({ _id: 0 })); +c.insert( { _id:0 } ); +master.getDB( 'd' ).getLastError(); replTest.awaitReplication(); // Create a:1 index. @@ -21,11 +22,10 @@ for( i = 0; i < 10000; ++i ) { } // Insert documents with multikey values. 
-var bulk = c.initializeUnorderedBulkOp(); for( i = 0; i < 1000; ++i ) { - bulk.insert({ a: multikeyValues }); + c.insert( { a:multikeyValues } ); } -assert.writeOK(bulk.execute()); +master.getDB( 'd' ).getLastError(); replTest.awaitReplication(300000); // Check document counts on all nodes. On error a node might go down or fail to sync all data, see diff --git a/jstests/slow2/rollback4.js b/jstests/slow2/rollback4.js index 7d511568ed9..4929e3777ff 100644 --- a/jstests/slow2/rollback4.js +++ b/jstests/slow2/rollback4.js @@ -31,11 +31,12 @@ replTest.awaitReplication(); // Insert into master var big = { b:new Array( 1000 ).toString() }; -var bulk = master.getDB('db').c.initializeUnorderedBulkOp(); for( var i = 0; i < 1000000; ++i ) { - bulk.insert( big ); + if ( i % 10000 == 0 ) { + print( i ); + } + master.getDB( 'db' ).c.insert( big ); } -assert.writeOK(bulk.execute()); // Stop master replTest.stop( 0 ); diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js index c4a0f97968e..faf5b777284 100644 --- a/jstests/ssl/libs/ssl_helpers.js +++ b/jstests/ssl/libs/ssl_helpers.js @@ -65,14 +65,13 @@ function mixedShardTest(options1, options2, shouldSucceed) { r = st.adminCommand({ shardCollection : "test.col" , key : { _id : 1 } }); assert.eq(r, true, "error sharding collection for this configuration"); - // Test mongos talking to shards + // Test mongos talking to shards var bigstr = Array(1024*1024).join("#"); - var bulk = db1.col.initializeUnorderedBulkOp(); for(var i = 0; i < 128; i++){ - bulk.insert({ _id: i, string: bigstr }); + db1.col.insert({_id:i, string:bigstr}); } - assert.writeOK(bulk.execute()); + db1.getLastError(); assert.eq(128, db1.col.count(), "error retrieving documents from cluster"); // Test shards talking to each other diff --git a/jstests/ssl/sharding_with_x509.js b/jstests/ssl/sharding_with_x509.js index 245c10e9855..75150a60548 100644 --- a/jstests/ssl/sharding_with_x509.js +++ b/jstests/ssl/sharding_with_x509.js @@ -29,33 +29,33 @@ coll.ensureIndex({ insert : 1 }) print( "starting insertion phase" ) // Insert a bunch of data -var toInsert = 2000; -var bulk = coll.initializedUnorderedBulkOp(); +var toInsert = 2000 for( var i = 0; i < toInsert; i++ ){ - bulk.insert({ my: "test", data: "to", insert: i }); + coll.insert({ my : "test", data : "to", insert : i }) } -assert.writeOK(bulk.execute()); + +assert.eq( coll.getDB().getLastError(), null ) print( "starting updating phase" ) // Update a bunch of data -var toUpdate = toInsert; -bulk = coll.initializedUnorderedBulkOp(); +var toUpdate = toInsert for( var i = 0; i < toUpdate; i++ ){ - var id = coll.findOne({ insert : i })._id; - bulk.update({ insert : i, _id : id }, { $inc : { counter : 1 } }); + var id = coll.findOne({ insert : i })._id + coll.update({ insert : i, _id : id }, { $inc : { counter : 1 } }) } -assert.writeOK(bulk.execute()); + +assert.eq( coll.getDB().getLastError(), null ) print( "starting deletion" ) // Remove a bunch of data -var toDelete = toInsert / 2; -bulk = coll.initializedUnorderedBulkOp(); +var toDelete = toInsert / 2 for( var i = 0; i < toDelete; i++ ){ - bulk.remove({ insert : i }); + coll.remove({ insert : i }) } -assert.writeOK(bulk.execute()); + +assert.eq( coll.getDB().getLastError(), null ) // Make sure the right amount of data is there assert.eq( coll.find().count(), toInsert / 2 ) diff --git a/jstests/tool/dumpfilename1.js b/jstests/tool/dumpfilename1.js index 3cb2a26c6e4..84dae683a16 100644 --- a/jstests/tool/dumpfilename1.js +++ b/jstests/tool/dumpfilename1.js @@ -7,8 
+7,9 @@ t = new ToolTest( "dumpfilename1" ); t.startDB( "foo" ); c = t.db; -assert.writeOK(c.getCollection("df/").insert({ a: 3 })); -assert.writeOK(c.getCollection("df").insert({ a: 2 })); +c.getCollection("df/").insert({a:3}); +c.getCollection("df").insert({a:2}); +t.db.getLastError(); // Ensure data is written before dumping it through a spawned process. t.runTool( "dump" , "--out" , t.ext ); diff --git a/jstests/tool/dumprestoreWithNoOptions.js b/jstests/tool/dumprestoreWithNoOptions.js index 60bcc4b70f2..5a866260bbc 100644 --- a/jstests/tool/dumprestoreWithNoOptions.js +++ b/jstests/tool/dumprestoreWithNoOptions.js @@ -30,7 +30,8 @@ for ( var opt in options ) { assert.eq(options[opt], cappedOptions[opt], 'invalid option:' + tojson(options) + " " + tojson(cappedOptions)); } -assert.writeOK(db.capped.insert({ x: 1 })); +db.capped.insert({ x: 1 }); +db.getLastError() // Full dump/restore @@ -57,7 +58,8 @@ var cappedOptions = db.capped.exists().options; for ( var opt in options ) { assert.eq(options[opt], cappedOptions[opt], 'invalid option') } -assert.writeOK(db.capped.insert({ x: 1 })); +db.capped.insert({ x: 1 }); +db.getLastError() dumppath = t.ext + "noOptionsSingleDump/"; mkdir(dumppath); @@ -85,8 +87,8 @@ var cappedOptions = db.capped.exists().options; for ( var opt in options ) { assert.eq(options[opt], cappedOptions[opt], 'invalid option') } - -assert.writeOK(db.capped.insert({ x: 1 })); +db.capped.insert({ x: 1 }); +db.getLastError() dumppath = t.ext + "noOptionsSingleColDump/"; mkdir(dumppath); diff --git a/jstests/tool/dumprestore_auth3.js b/jstests/tool/dumprestore_auth3.js index 591e7fb0900..11867a8fce4 100644 --- a/jstests/tool/dumprestore_auth3.js +++ b/jstests/tool/dumprestore_auth3.js @@ -71,7 +71,8 @@ function runTest(shutdownServer) { // Re-create user data db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}); db.createRole({role: 'role', roles: [], privileges:[]}); - assert.writeOK(db.system.users.insert({user:'dbuser', pwd: 'pwd', roles: ['readWrite']})); + db.system.users.insert({user:'dbuser', pwd: 'pwd', roles: ['readWrite']}); + assert.gleSuccess(db); assert.eq(1, db.bar.findOne().a); assert.eq(1, db.getUsers().length, "didn't create user"); diff --git a/jstests/tool/exportimport_bigarray.js b/jstests/tool/exportimport_bigarray.js index dbdd2a9a6d7..43a209b8453 100644 --- a/jstests/tool/exportimport_bigarray.js +++ b/jstests/tool/exportimport_bigarray.js @@ -21,11 +21,16 @@ print('Number of documents to exceed maximum BSON size: ' + numDocs) print('About to insert ' + numDocs + ' documents into ' + exportimport_db.getName() + '.' 
+ src.getName()); var i; -var bulk = src.initializeUnorderedBulkOp(); for (i = 0; i < numDocs; ++i) { - bulk.insert({ x: bigString }); + src.insert({ x : bigString }); +} +var lastError = exportimport_db.getLastError(); +if (lastError == null) { + print('Finished inserting ' + numDocs + ' documents'); +} +else { + doassert('Insertion failed: ' + lastError); } -assert.writeOK(bulk.execute()); data = 'data/exportimport_array_test.json'; diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js index f3535c6441e..ea48167c332 100644 --- a/src/mongo/shell/replsettest.js +++ b/src/mongo/shell/replsettest.js @@ -958,11 +958,10 @@ ReplSetTest.prototype.overflow = function( secondaries ){ while (count != prevCount) { print("ReplSetTest overflow inserting 10000"); - var bulk = overflowColl.initializeUnorderedBulkOp(); + for (var i = 0; i < 10000; i++) { - bulk.insert({ overflow : "value" }); + overflowColl.insert({ overflow : "value" }); } - bulk.execute(); prevCount = count; this.awaitReplication(); |
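Several of the reverted hunks (for example the replsettest.js overflow() helper and the slow2/ssl tests above) drop the shell's unordered bulk API in favour of one legacy insert per document followed by getLastError. A small sketch of that bulk API as the pre-revert code used it, again illustrative only, with a hypothetical collection name and assuming a 2.6+ shell connected to a running mongod:

// Hypothetical scratch collection; not part of the reverted tests.
var coll = db.bulkDemo;
coll.drop();

// Batch the inserts through one unordered bulk operation instead of issuing
// a legacy insert (plus getLastError) per document.
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
    bulk.insert({ _id: i, x: i });
}
// execute() sends the batch and returns a BulkWriteResult; a write concern
// document can be passed here instead of a separate getlasterror command.
assert.writeOK(bulk.execute({ w: 1 }));
assert.eq(1000, coll.count());

// Updates and removes are queued against a find() filter on the bulk builder.
bulk = coll.initializeUnorderedBulkOp();
bulk.find({ _id: 0 }).update({ $set: { x: -1 } });
bulk.find({ _id: 999 }).remove();
assert.writeOK(bulk.execute());
assert.eq(999, coll.count());
assert.eq(-1, coll.findOne({ _id: 0 }).x);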