From 0dabee8227d445a18fa5e8e49b2be60ba2a0beef Mon Sep 17 00:00:00 2001 From: Randolph Tan Date: Wed, 14 May 2014 14:11:11 -0400 Subject: SERVER-13741 Migrate remaining tests to use write commands --- jstests/auth/lib/commands_lib.js | 1 - jstests/core/opcounters_write_cmd.js | 167 +++++ jstests/disk/diskfull.js | 10 +- jstests/disk/killall.js | 3 +- jstests/disk/preallocate_directoryperdb.js | 16 +- jstests/disk/quota.js | 10 +- jstests/disk/quota2.js | 16 +- jstests/dur/a_quick.js | 6 +- jstests/dur/closeall.js | 39 +- jstests/dur/diskfull.js | 11 +- jstests/dur/dropdb.js | 4 +- jstests/dur/dur1.js | 6 - jstests/dur/dur1_tool.js | 7 - jstests/dur/indexbg2.js | 4 +- jstests/dur/manyRestart.js | 6 - jstests/dur/md5.js | 7 - jstests/dur/oplog.js | 3 - jstests/gle/block2.js | 58 ++ .../gle_after_split_failure_during_migration.js | 136 ++++ jstests/gle/opcounters_legacy.js | 174 +++++ jstests/gle/sync1.js | 57 ++ jstests/gle/sync4.js | 26 + jstests/gle/sync8.js | 21 + jstests/libs/geo_near_random.js | 46 +- jstests/misc/biginsert.js | 18 - jstests/noPassthrough/disk_reuse1.js | 27 +- jstests/noPassthrough/geo_full.js | 739 ++++++++++----------- jstests/noPassthrough/geo_mnypts_plus_fields.js | 10 +- .../gle_after_split_failure_during_migration.js | 136 ---- jstests/noPassthrough/indexbg1.js | 30 +- jstests/noPassthrough/indexbg2.js | 2 - jstests/noPassthrough/query_yield1.js | 8 +- jstests/noPassthrough/query_yield2.js | 8 +- jstests/noPassthrough/repair2.js | 19 +- jstests/noPassthrough/sync1.js | 49 -- jstests/noPassthrough/sync4.js | 19 - jstests/noPassthrough/sync8.js | 13 - jstests/noPassthrough/update_server-5552.js | 10 +- jstests/noPassthrough/update_yield1.js | 7 +- .../autosplit_heuristics.js | 10 +- jstests/noPassthroughWithMongod/background.js | 46 +- jstests/noPassthroughWithMongod/balance_repl.js | 7 +- jstests/noPassthroughWithMongod/balance_tags1.js | 5 +- jstests/noPassthroughWithMongod/balance_tags2.js | 5 +- jstests/noPassthroughWithMongod/btreedel.js | 4 +- .../noPassthroughWithMongod/bulk_shard_insert.js | 7 +- jstests/noPassthroughWithMongod/capped4.js | 4 +- jstests/noPassthroughWithMongod/fsync2.js | 4 +- .../noPassthroughWithMongod/geo_axis_aligned.js | 158 +++-- jstests/noPassthroughWithMongod/geo_mnypts.js | 4 +- jstests/noPassthroughWithMongod/geo_polygon.js | 4 +- jstests/noPassthroughWithMongod/index_check10.js | 13 +- jstests/noPassthroughWithMongod/index_check9.js | 15 +- jstests/noPassthroughWithMongod/index_hammer1.js | 5 +- jstests/noPassthroughWithMongod/index_killop.js | 14 +- jstests/noPassthroughWithMongod/index_multi.js | 65 +- jstests/noPassthroughWithMongod/index_retry.js | 22 +- jstests/noPassthroughWithMongod/indexbg_drop.js | 4 +- .../noPassthroughWithMongod/indexbg_interrupts.js | 4 +- .../indexbg_restart_secondary.js | 4 +- .../indexbg_restart_sigkill_secondary_noretry.js | 7 +- jstests/noPassthroughWithMongod/large_chunk.js | 5 +- .../mapreduce_intermediate_reduce.js | 8 +- .../noPassthroughWithMongod/mr_shard_version.js | 5 +- .../no_balance_collection.js | 6 +- .../noPassthroughWithMongod/opcounters_legacy.js | 173 ----- .../opcounters_write_cmd.js | 166 ----- .../parallel_collection_scan.js | 5 +- jstests/noPassthroughWithMongod/remove9.js | 5 +- .../noPassthroughWithMongod/sharding_balance1.js | 5 +- .../noPassthroughWithMongod/sharding_balance2.js | 6 +- .../noPassthroughWithMongod/sharding_balance3.js | 5 +- .../noPassthroughWithMongod/sharding_balance4.js | 35 +- .../sharding_balance_randomorder1.js | 6 +- .../sharding_migrateBigObject.js 
| 12 +- .../sharding_migrate_cursor1.js | 8 +- .../sharding_migrate_large_docs.js | 17 +- .../sharding_multiple_ns_rs.js | 10 +- jstests/noPassthroughWithMongod/sharding_rs1.js | 5 +- jstests/noPassthroughWithMongod/sharding_rs2.js | 5 +- jstests/noPassthroughWithMongod/ttl1.js | 1 - jstests/noPassthroughWithMongod/ttl_repl.js | 11 +- .../ttl_repl_maintenance.js | 6 +- jstests/noPassthroughWithMongod/ttl_sharded.js | 7 +- jstests/quota/quota1.js | 51 -- jstests/repl/basic1.js | 43 +- jstests/repl/block1.js | 3 +- jstests/repl/block2.js | 25 +- jstests/repl/drop_dups.js | 24 +- jstests/repl/master1.js | 4 +- jstests/repl/mastermaster1.js | 55 -- jstests/repl/mod_move.js | 37 +- jstests/repl/repl12.js | 1 - jstests/repl/repl13.js | 13 +- jstests/repl/repl17.js | 1 - jstests/repl/repl19.js | 3 +- jstests/repl/repl20.js | 3 +- jstests/repl/repl21.js | 11 +- jstests/repl/repl5.js | 4 +- jstests/replsets/auth1.js | 9 +- jstests/replsets/downstream.js | 10 +- jstests/replsets/replset2.js | 1 - jstests/replsets/tags2.js | 2 +- jstests/sharding/movechunk_include.js | 12 +- jstests/sharding/multi_write_target.js | 36 +- jstests/slow1/sharding_multiple_collections.js | 9 +- jstests/slow2/32bit.js | 118 ++-- jstests/slow2/conc_update.js | 48 +- jstests/slow2/dur_big_atomic_update.js | 21 +- jstests/slow2/mr_during_migrate.js | 6 +- jstests/slow2/replsets_killop.js | 10 +- jstests/slow2/replsets_prefetch_stress.js | 8 +- jstests/slow2/rollback4.js | 7 +- jstests/ssl/libs/ssl_helpers.js | 7 +- jstests/ssl/sharding_with_x509.js | 26 +- jstests/tool/dumpfilename1.js | 5 +- jstests/tool/dumprestoreWithNoOptions.js | 10 +- jstests/tool/dumprestore_auth3.js | 3 +- jstests/tool/exportimport_bigarray.js | 11 +- 119 files changed, 1664 insertions(+), 1825 deletions(-) create mode 100644 jstests/core/opcounters_write_cmd.js create mode 100644 jstests/gle/block2.js create mode 100644 jstests/gle/gle_after_split_failure_during_migration.js create mode 100644 jstests/gle/opcounters_legacy.js create mode 100644 jstests/gle/sync1.js create mode 100644 jstests/gle/sync4.js create mode 100644 jstests/gle/sync8.js delete mode 100755 jstests/misc/biginsert.js delete mode 100644 jstests/noPassthrough/gle_after_split_failure_during_migration.js delete mode 100644 jstests/noPassthrough/sync1.js delete mode 100644 jstests/noPassthrough/sync4.js delete mode 100644 jstests/noPassthrough/sync8.js delete mode 100644 jstests/noPassthroughWithMongod/opcounters_legacy.js delete mode 100644 jstests/noPassthroughWithMongod/opcounters_write_cmd.js delete mode 100644 jstests/quota/quota1.js delete mode 100644 jstests/repl/mastermaster1.js (limited to 'jstests') diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js index d318d6467b3..8a00aaa4ae5 100644 --- a/jstests/auth/lib/commands_lib.js +++ b/jstests/auth/lib/commands_lib.js @@ -1770,7 +1770,6 @@ var authCommandsLib = { command: {renameCollection: firstDbName + ".x", to: secondDbName + ".y"}, setup: function (db) { db.getSisterDB(firstDbName).x.save( {} ); - db.getSisterDB(firstDbName).getLastError(); db.getSisterDB(adminDbName).runCommand({movePrimary: firstDbName, to: shard0name}); db.getSisterDB(adminDbName).runCommand({movePrimary: secondDbName, to: shard0name}); }, diff --git a/jstests/core/opcounters_write_cmd.js b/jstests/core/opcounters_write_cmd.js new file mode 100644 index 00000000000..88a5c65b4c3 --- /dev/null +++ b/jstests/core/opcounters_write_cmd.js @@ -0,0 +1,167 @@ +// Test that opcounters get incremented properly. 
+// Legacy write mode test also available at jstests/gle. + +var mongo = new Mongo(db.getMongo().host); +mongo.forceWriteMode("commands"); +var newdb = mongo.getDB(db.toString()); + +var t = newdb.opcounters; +var isMongos = ("isdbgrid" == newdb.runCommand("ismaster").msg); +var opCounters; +var res; + +assert(t.getDB().getMongo().useWriteCommands(), "test is not running with write commands") + +// +// Count ops attempted in write commands in mongod and mongos +// + +// +// 1. Insert. +// +// - unordered insert of N: +// counted as N ops, regardless of errors +// - ordered insert of N: +// counted as K + 1 ops, where K is number of docs successfully inserted, +// adding the failed attempt +// + +t.drop(); + +// Single insert, no error. +opCounters = newdb.serverStatus().opcounters; +res = t.insert({_id:0}); +assert.writeOK(res); +assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert); + +// Bulk insert, no error. +opCounters = newdb.serverStatus().opcounters; +res = t.insert([{_id:1},{_id:2}]) +assert.writeOK(res); +assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert); + +// Single insert, with error. +opCounters = newdb.serverStatus().opcounters; +res = t.insert({_id:0}) +assert.writeError(res); +assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert); + +// Bulk insert, with error, ordered. +opCounters = newdb.serverStatus().opcounters; +res = t.insert([{_id:3},{_id:3},{_id:4}]) +assert.writeError(res); +assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert); + +// Bulk insert, with error, unordered. +var continueOnErrorFlag = 1; +opCounters = newdb.serverStatus().opcounters; +res = t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag) +assert.writeError(res); +assert.eq(opCounters.insert + 3, newdb.serverStatus().opcounters.insert); + +// +// 2. Update. +// + +t.drop(); +t.insert({_id:0}); + +// Update, no error. +opCounters = newdb.serverStatus().opcounters; +res = t.update({_id:0}, {$set:{a:1}}); +assert.writeOK(res); +assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update); + +// Update, with error. +opCounters = newdb.serverStatus().opcounters; +res = t.update({_id:0}, {$set:{_id:1}}); +assert.writeError(res); +assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update); + +// +// 3. Delete. +// + +t.drop(); +t.insert([{_id:0},{_id:1}]); + +// Delete, no error. +opCounters = newdb.serverStatus().opcounters; +res = t.remove({_id:0}); +assert.writeOK(res); +assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete); + +// Delete, with error. +opCounters = newdb.serverStatus().opcounters; +res = t.remove({_id:{$invalidOp:1}}); +assert.writeError(res); +assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete); + +// +// 4. Query. +// +// - mongod: counted as 1 op, regardless of errors +// - mongos: counted as 1 op if successful, else 0 +// + +t.drop(); +t.insert({_id:0}); + +// Query, no error. +opCounters = newdb.serverStatus().opcounters; +t.findOne(); +assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query); + +// Query, with error. +opCounters = newdb.serverStatus().opcounters; +assert.throws(function() { t.findOne({_id:{$invalidOp:1}}) }); +assert.eq(opCounters.query + (isMongos ? 0 : 1), newdb.serverStatus().opcounters.query); + +// +// 5. Getmore. +// +// - counted as 1 op per getmore issued, regardless of errors +// + +t.drop(); +t.insert([{_id:0},{_id:1},{_id:2}]); + +// Getmore, no error. 
+opCounters = newdb.serverStatus().opcounters; +t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore +assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query); +assert.eq(opCounters.getmore + 1, newdb.serverStatus().opcounters.getmore); + +// Getmore, with error (TODO implement when SERVER-5813 is resolved). + +// +// 6. Command. +// +// - unrecognized commands not counted +// - recognized commands counted as 1 op, regardless of errors +// - some (recognized) commands can suppress command counting (i.e. aren't counted as commands) +// + +t.drop(); +t.insert({_id:0}) + +// Command, recognized, no error. +opCounters = newdb.serverStatus().opcounters; +assert.eq(opCounters.command + 1, newdb.serverStatus().opcounters.command); // "serverStatus" counted + +// Command, recognized, with error. +opCounters = newdb.serverStatus().opcounters; +res = t.runCommand("count", {query:{$invalidOp:1}}); +assert.eq(0, res.ok); +assert.eq(opCounters.command + 2, + newdb.serverStatus().opcounters.command); // "serverStatus", "count" counted + +// Command, unrecognized. +opCounters = newdb.serverStatus().opcounters; +res = t.runCommand("command that doesn't exist"); +assert.eq(0, res.ok); +//assert.eq(opCounters.command + 1, newdb.serverStatus().opcounters.command); // "serverStatus" counted +// TODO Replace below with above when SERVER-9038 is resolved (mongos counts unrecognized commands) +assert.eq(opCounters.command + (isMongos ? 2 : 1), newdb.serverStatus().opcounters.command); + +// Command, recognized, counting suppressed (TODO implement when SERVER-9038 is resolved). diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js index deaab67a969..f236a9d4bc7 100644 --- a/jstests/disk/diskfull.js +++ b/jstests/disk/diskfull.js @@ -22,16 +22,16 @@ if ( doIt ) { m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1", '--nojournal' ); d = m.getDB( "diskfulltest" ); c = d.getCollection( "diskfulltest" ); - c.save( { a: 6 } ); - assert(d.getLastError().length ); - printjson( d.getLastErrorObj() ); + assert.writeError(c.insert( { a: 6 } )); + assert.soon( function() { c.save( { a : 6 } ); return rawMongoProgramOutput().match( /file allocation failure/ ); }, "didn't see 'file allocation failure'" ); - c.save( { a: 6 } ); - assert.eq(d.getLastError(), "Can't take a write lock while out of disk space"); // every following fail + res = assert.writeError(c.insert({ a: 6 })); + var errmsg = res.getWriteError().errmsg; + assert.eq(errmsg, "Can't take a write lock while out of disk space"); // every following fail sleep( 3000 ); diff --git a/jstests/disk/killall.js b/jstests/disk/killall.js index a46a3588241..3be9f530780 100644 --- a/jstests/disk/killall.js +++ b/jstests/disk/killall.js @@ -21,8 +21,7 @@ var mongod = startMongod( "--port", port, "--dbpath", dbpath, "--nohttpinterface var db = mongod.getDB( "test" ); var collection = db.getCollection( baseName ); -collection.save( {} ); -assert( ! db.getLastError() ); +assert.writeOK(collection.insert({})); s1 = startParallelShell( "db." 
+ baseName + ".count( { $where: function() { while( 1 ) { ; } } } )", port ); // HACK(schwerin): startParallelShell's return value should allow you to block until the command has diff --git a/jstests/disk/preallocate_directoryperdb.js b/jstests/disk/preallocate_directoryperdb.js index 8b3d551b5d4..38f55866f60 100644 --- a/jstests/disk/preallocate_directoryperdb.js +++ b/jstests/disk/preallocate_directoryperdb.js @@ -22,14 +22,15 @@ function checkDb2DirAbsent() { var m = startMongod( "--smallfiles", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" ); db = m.getDB( baseName ); db2 = m.getDB( baseName2 ); -c = db[ baseName ]; -c2 = db2[ baseName2 ]; -big = new Array( 5000 ).toString(); +var bulk = db[ baseName ].initializeUnorderedBulkOp(); +var bulk2 = db2[ baseName2 ].initializeUnorderedBulkOp(); +var big = new Array( 5000 ).toString(); for( var i = 0; i < 3000; ++i ) { - c.save( { b:big } ); - c2.save( { b:big } ); - db.getLastError(); + bulk.insert({ b:big }); + bulk2.insert({ b:big }); } +assert.writeOK(bulk.execute()); +assert.writeOK(bulk2.execute()); // Due to our write pattern, we expect db2's .3 file to be queued up in the file // allocator behind db's .3 file at the time db2 is dropped. This will @@ -43,8 +44,7 @@ db.dropDatabase(); // Try writing a new database, to ensure file allocator is still working. db3 = m.getDB( baseName3 ); c3 = db[ baseName3 ]; -c3.save( {} ); -assert( !db3.getLastError() ); +assert.writeOK(c3.insert( {} )); assert.eq( 1, c3.count() ); checkDb2DirAbsent(); diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js index aa8963c6263..e8476072ff1 100644 --- a/jstests/disk/quota.js +++ b/jstests/disk/quota.js @@ -11,10 +11,11 @@ db = m.getDB( baseName ); big = new Array( 10000 ).toString(); // Insert documents until quota is exhausted. -while( !db.getLastError() ) { - db[ baseName ].save( {b:big} ); +var coll = db[ baseName ]; +var res = coll.insert({ b: big }); +while( !res.hasWriteError() ) { + res = coll.insert({ b: big }); } -printjson( db.getLastError() ); dotTwoDataFile = baseName + ".2"; files = listFiles( dbpath ); @@ -27,8 +28,7 @@ dotTwoDataFile = "local" + ".2"; // Check that quota does not apply to local db, and a .2 file can be created. l = m.getDB( "local" )[ baseName ]; for( i = 0; i < 10000; ++i ) { - l.save( {b:big} ); - assert( !db.getLastError() ); + assert.writeOK(l.insert({ b: big })); dotTwoFound = false; if ( i % 100 != 0 ) { continue; diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js index a5f07abec8a..cdeda1a2a23 100644 --- a/jstests/disk/quota2.js +++ b/jstests/disk/quota2.js @@ -13,12 +13,12 @@ db = m.getDB( baseName ); big = new Array( 10000 ).toString(); // Insert documents until quota is exhausted. -while( !db.getLastError() ) { - db[ baseName ].save( {b:big} ); +var coll = db[ baseName ]; +var res = coll.insert({ b: big }); +while( !res.hasWriteError() ) { + res = coll.insert({ b: big }); } -db.resetError(); - // Trigger allocation of an additional file for a 'special' namespace. for( n = 0; !db.getLastError(); ++n ) { db.createCollection( '' + n ); @@ -27,10 +27,10 @@ for( n = 0; !db.getLastError(); ++n ) { // Check that new docs are saved in the .0 file. 
for( i = 0; i < n; ++i ) { c = db[ ''+i ]; - c.save( {b:big} ); - if( !db.getLastError() ) { - assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file ); + res = c.insert({ b: big }); + if( !res.hasWriteError() ) { + assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file ); } } -} \ No newline at end of file +} diff --git a/jstests/dur/a_quick.js b/jstests/dur/a_quick.js index ab36f91327e..bbec8af6939 100755 --- a/jstests/dur/a_quick.js +++ b/jstests/dur/a_quick.js @@ -62,8 +62,7 @@ tst.log("start mongod without dur"); var conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur"); tst.log("without dur work"); var d = conn.getDB("test"); -d.foo.insert({ _id:123 }); -d.getLastError(); +assert.writeOK(d.foo.insert({ _id: 123 })); tst.log("stop without dur"); stopMongod(30000); @@ -72,8 +71,7 @@ tst.log("start mongod with dur"); conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--durOptions", 8); tst.log("with dur work"); d = conn.getDB("test"); -d.foo.insert({ _id: 123 }); -d.getLastError(); // wait +assert.writeOK(d.foo.insert({ _id: 123 })); // we could actually do getlasterror fsync:1 now, but maybe this is agood // as it will assure that commits happen on a timely basis. a bunch of the other dur/*js diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js index 3d7119ab134..8c3864e8118 100644 --- a/jstests/dur/closeall.js +++ b/jstests/dur/closeall.js @@ -30,26 +30,24 @@ function f(variant, quickCommits, paranoid) { print("closeall.js run test"); print("wait for initial sync to finish") // SERVER-4852 - db1.foo.insert({}); - err = db1.getLastErrorObj(2); - printjson(err) - assert.isnull(err.err); - db1.foo.remove({}); - err = db1.getLastErrorObj(2); - printjson(err) - assert.isnull(err.err); + assert.writeOK(db1.foo.insert({}, { writeConcern: { w: 2 }})); + assert.writeOK(db1.foo.remove({}, { writeConcern: { w: 2 }})); print("initial sync done") - for( var i = 0; i < N; i++ ) { - db1.foo.insert({x:1}); // this does wait for a return code so we will get some parallelism - if( i % 7 == 0 ) - db1.foo.insert({x:99, y:2}); - if( i % 49 == 0 ) - db1.foo.update({ x: 99 }, { a: 1, b: 2, c: 3, d: 4 }); - if (i % 100 == 0) - db1.foo.find(); - if( i == 800 ) - db1.foo.ensureIndex({ x: 1 }); + var writeOps = startParallelShell('var coll = db.getSiblingDB("closealltest").foo; \ + var bulk = coll.initializeUnorderedBulkOp(); \ + for( var i = 0; i < ' + N + '; i++ ) { \ + bulk.insert({ x: 1 }); \ + if ( i % 7 == 0 ) \ + bulk.insert({ x: 99, y: 2 }); \ + if ( i % 49 == 0 ) \ + bulk.find({ x: 99 }).update( \ + { a: 1, b: 2, c: 3, d: 4 }); \ + if( i == 800 ) \ + coll.ensureIndex({ x: 1 }); \ + }', 30001); + + for( var i = 0; i < N; i++ ) { var res = null; try { if( variant == 1 ) @@ -61,7 +59,6 @@ function f(variant, quickCommits, paranoid) { res = db2.adminCommand("closeAllDatabases"); } catch (e) { - sleep(5000); // sleeping a little makes console output order prettier print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i); try { print("getlasterror:"); @@ -74,8 +71,6 @@ function f(variant, quickCommits, paranoid) { print("got another exception : " + e); } print("\n\n\n"); - // sleep a little to capture possible mongod output? 
- sleep(2000); throw e; } assert( res.ok, "closeAllDatabases res.ok=false"); @@ -87,6 +82,8 @@ function f(variant, quickCommits, paranoid) { print("closeall.js shutting down servers"); stopMongod(30002); stopMongod(30001); + + writeOps(); } // Skip this test on 32-bit Windows (unfixable failures in MapViewOfFileEx) diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js index a1efba5595d..a604439424d 100644 --- a/jstests/dur/diskfull.js +++ b/jstests/dur/diskfull.js @@ -50,15 +50,12 @@ function work() { log("work"); try { var d = conn.getDB("test"); - - big = new Array( 5000 ).toString(); + var big = new Array( 5000 ).toString(); + var bulk = d.foo.initializeUnorderedBulkOp(); for( i = 0; i < 10000; ++i ) { - d.foo.insert( { _id:i, b:big } ); + bulk.insert({ _id: i, b: big }); } - - gle = d.getLastError(); - if ( gle ) - throw gle; + assert.writeOK(bulk.execute()); } catch ( e ) { print( e ); raise( e ); diff --git a/jstests/dur/dropdb.js b/jstests/dur/dropdb.js index 4fb94cc7d1e..54de6bdd7f2 100644 --- a/jstests/dur/dropdb.js +++ b/jstests/dur/dropdb.js @@ -62,10 +62,8 @@ function work() { d.dropDatabase(); - d.foo.insert({ _id: 100 }); - // assure writes applied in case we kill -9 on return from this function - assert(d.runCommand({ getlasterror: 1, fsync: 1 }).ok, "getlasterror not ok"); + assert.writeOK(d.foo.insert({ _id: 100 }, { writeConcern: { fsync: 1 }})); } function verify() { diff --git a/jstests/dur/dur1.js b/jstests/dur/dur1.js index cb4495aea52..0aecaaac21c 100755 --- a/jstests/dur/dur1.js +++ b/jstests/dur/dur1.js @@ -64,12 +64,6 @@ function work() { // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); -// d.a.update({ _id: 4 }, { $inc: { x: 1} }); -// d.a.reIndex(); - - // assure writes applied in case we kill -9 on return from this function - d.getLastError(); - log("endwork"); return d; } diff --git a/jstests/dur/dur1_tool.js b/jstests/dur/dur1_tool.js index fdfe05236f4..adee933fdb4 100755 --- a/jstests/dur/dur1_tool.js +++ b/jstests/dur/dur1_tool.js @@ -63,13 +63,6 @@ function work() { // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); - -// d.a.update({ _id: 4 }, { $inc: { x: 1} }); -// d.a.reIndex(); - - // assure writes applied in case we kill -9 on return from this function - d.getLastError(); - log("endwork"); return d; } diff --git a/jstests/dur/indexbg2.js b/jstests/dur/indexbg2.js index a7484f0a561..d239d4eaa44 100644 --- a/jstests/dur/indexbg2.js +++ b/jstests/dur/indexbg2.js @@ -15,5 +15,5 @@ for( var i = 1000; i < 2000; ++i ) { t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} ); t.remove( {_id:i} ); } -t.insert( {_id:2000,a:'abcd',b:'bcde',x:'four score and seven years ago'} ); -assert( !t.getDB().getLastError() ); +assert.writeOK(t.insert({ _id: 2000, a: 'abcd', b: 'bcde', x: 'four score and seven years ago' })); + diff --git a/jstests/dur/manyRestart.js b/jstests/dur/manyRestart.js index f434278ca9a..5a68afdecbb 100755 --- a/jstests/dur/manyRestart.js +++ b/jstests/dur/manyRestart.js @@ -63,12 +63,6 @@ function work() { // try building an index. 
however, be careful as object id's in system.indexes would vary, so we do it manually: d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); - -// d.a.update({ _id: 4 }, { $inc: { x: 1} }); -// d.a.reIndex(); - - // assure writes applied in case we kill -9 on return from this function - d.getLastError(); log("endwork"); return d; } diff --git a/jstests/dur/md5.js b/jstests/dur/md5.js index 1773091186a..1b4ec43340e 100644 --- a/jstests/dur/md5.js +++ b/jstests/dur/md5.js @@ -29,13 +29,6 @@ function work() { // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); - - // d.a.update({ _id: 4 }, { $inc: { x: 1} }); - // d.a.reIndex(); - - // assure writes applied in case we kill -9 on return from this function - d.getLastError(); - log("endwork"); } diff --git a/jstests/dur/oplog.js b/jstests/dur/oplog.js index cd7d7c5e6ef..8ded3c980a9 100755 --- a/jstests/dur/oplog.js +++ b/jstests/dur/oplog.js @@ -81,9 +81,6 @@ function work() { d.foo.insert({ _id: 6, q: "aaaaa", b: big, z: 3 }); d.foo.update({ _id: 5 }, { $set: { z: 99} }); - // assure writes applied in case we kill -9 on return from this function - d.getLastError(); - log("endwork"); verify(); diff --git a/jstests/gle/block2.js b/jstests/gle/block2.js new file mode 100644 index 00000000000..142d51519b2 --- /dev/null +++ b/jstests/gle/block2.js @@ -0,0 +1,58 @@ +/** + * Basic gle testing for master/slave environment. Write command version also + * available at jstests/repl. + */ + +var rt = new ReplTest( "block1" ); + +var m = rt.start( true ); +var s = rt.start( false ); + +if (m.writeMode() == 'commands') { + jsTest.log('Skipping test since commands mode is already tested in repl/'); +} +else { + + function setup(){ + + dbm = m.getDB( "foo" ); + dbs = s.getDB( "foo" ); + + tm = dbm.bar; + ts = dbs.bar; + } + setup(); + + function check( msg ){ + assert.eq( tm.count() , ts.count() , "check: " + msg ); + } + + function worked( w , wtimeout ){ + var gle = dbm.getLastError( w , wtimeout ); + if (gle != null) { + printjson(gle); + } + return gle == null; + } + + check( "A" ); + + tm.save( { x : 1 } ); + assert( worked( 2 ) , "B" ); + + tm.save( { x : 2 } ); + assert( worked( 2 , 3000 ) , "C" ); + + rt.stop( false ); + tm.save( { x : 3 } ); + assert.eq( 3 , tm.count() , "D1" ); + assert( ! worked( 2 , 3000 ) , "D2" ); + + s = rt.start( false ); + setup(); + assert( worked( 2 , 30000 ) , "E" ); + +} + +rt.stop(); + diff --git a/jstests/gle/gle_after_split_failure_during_migration.js b/jstests/gle/gle_after_split_failure_during_migration.js new file mode 100644 index 00000000000..9d0a6a9ca2a --- /dev/null +++ b/jstests/gle/gle_after_split_failure_during_migration.js @@ -0,0 +1,136 @@ +/** + * SERVER-4987 This test tries to check the getLastError call will still use + * the same connection even if a split chunk triggered while doing inserts + * failed (cause by StaleConfigException). + * + * TODO: SERVER-5175 + * This test relies on the corresponding delays inside (1) WriteBackListener::run + * and (2) ShardStrategy::_insert and (3) receivedInsert from instance.cpp + * to make the bug easier to manifest. + * + * The purpose of (1) is to make the writebacks slower so the failed inserts won't + * be reapplied on time. 
+ * + * The purpose of (2) is to make it easier for the moveChunk command from the other + * mongos to interleave in between the moment the insert has set its shard version and + * when in tries to autosplit (Note: it should be long enough to allow the moveChunk + * to actually complete before it tries to proceed to autosplit). + * + * The purpose of (3) is to make sure that the insert won't get applied to the + * shard right away so when a different connection is used to do the getLastError, + * the write will still not be applied. + */ +function testGleAfterSplitDuringMigration(){ + var st = new ShardingTest({ shards: 2, verbose: 2, mongos: 2, + other: { chunksize: 1 }}); + + // Stop the balancer to prevent it from contending with the distributed lock. + st.stopBalancer(); + + var DB_NAME = jsTest.name(); + var COLL_NAME = "coll"; + + var mongos = st.s0; + var confDB = mongos.getDB( "config" ); + var coll = mongos.getCollection( DB_NAME + "." + COLL_NAME ); + + var shardConn = st.d0; + var shardColl = shardConn.getCollection( coll.getFullName() ); + + var data = "x"; + var dataSize = 1024 * 256; // bytes, must be power of 2 + while( data.length < dataSize ) data += data; + + // Shard collection + st.shardColl( coll, { _id : 1 }, false ); + + var docID = 0; + + /** + * @return {Mongo} the connection object given the name of the shard. + */ + var getShardConn = function( shardName ) { + var shardLoc = confDB.shards.findOne({ _id: shardName }).host; + return new Mongo( shardLoc ); + }; + + /** + * Inserts documents using a direct shard connection to the max key chunk + * enough to make sure that it will trigger the auto split. + * + * variables from outer scope: docID, coll, confDB, data + */ + var primeForSplitting = function() { + var topChunk = confDB.chunks.find().sort({ max: -1 }).limit( 1 ).next(); + var shardLoc = getShardConn( topChunk.shard ); + var testColl = shardLoc.getCollection( coll.getFullName() ); + + var superSaturatedChunkSize = 1024 * 1024 * 10; // 10MB + var docsToSaturateChunkSize = superSaturatedChunkSize / dataSize; + + for ( var i = 0; i < docsToSaturateChunkSize; i++ ) { + testColl.insert({ _id: docID++, val: data }); + } + + assert.eq( null, testColl.getDB().getLastError() ); + }; + + /** + * Moves a random chunk to a new shard using a different mongos. + * + * @param tries {Number} number of retry attempts when the moveChunk command + * fails. + * + * variables from outer scope: coll, st + */ + var moveRandomChunk = function( tries ) { + var otherConfDB = st.s1.getDB( "config" ); + var chunksCursor = otherConfDB.chunks.find().sort({ max: 1 }); + var chunkCount = chunksCursor.count(); + + var randIdx = Math.floor( Math.random() * chunkCount ); + // Don't get the chunk with max/min key + randIdx = ( randIdx == chunkCount )? randIdx - 1 : randIdx; + randIdx = ( randIdx == 0 )? randIdx + 1 : randIdx; + + var chunk = chunksCursor.arrayAccess( randIdx ); + var chunkOwner = chunk.shard; + var newOwner = otherConfDB.shards.findOne({ _id: { $ne: chunkOwner }})._id; + + var result = otherConfDB.adminCommand({ moveChunk: coll.getFullName(), + find: { _id: chunk.min._id }, + to: newOwner }); + + jsTest.log( "moveChunk result: " + tojson( result )); + if( !result.ok && tries > 1 ) { + moveRandomChunk( tries - 1 ); + } + }; + + var chunks = 0; + do { + coll.insert({ _id: docID++, val: data }); + chunks = mongos.getDB( "config" ).chunks.find().count(); + } while ( chunks < 5 ); + + primeForSplitting(); + + jsTest.log( "Starting the insert that should trigger auto-split." 
); + + // TODO: SERVER-5175 Trigger delays here + coll.insert({ _id: docID++, val: data }); + moveRandomChunk( 3 ); + + // getLastError should wait for all writes to this connection. + var errObj = coll.getDB().getLastErrorObj(); + jsTest.log( "Last Error Object: " + tojson( errObj )); + + assert.eq( docID, coll.find().itcount(), "Count does not match!" ); + + jsTest.log( "Finished counting." ); + + st.stop(); +} + +testGleAfterSplitDuringMigration(); + diff --git a/jstests/gle/opcounters_legacy.js b/jstests/gle/opcounters_legacy.js new file mode 100644 index 00000000000..52e18c48643 --- /dev/null +++ b/jstests/gle/opcounters_legacy.js @@ -0,0 +1,174 @@ +// Test that opcounters get incremented properly. +// Write command version also available at jstests/core. + +// Remember the global 'db' var +var lastDB = db; +var mongo = new Mongo(db.getMongo().host); +mongo.writeMode = function() { return "legacy"; } +db = mongo.getDB(db.toString()); + +var t = db.opcounters; +var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg); +var opCounters; + +// +// 1. Insert. +// +// - mongod, single insert: +// counted as 1 op if successful, else 0 +// - mongod, bulk insert of N with continueOnError=true: +// counted as N ops, regardless of errors +// - mongod, bulk insert of N with continueOnError=false: +// counted as K ops, where K is number of docs successfully inserted +// +// - mongos +// count ops attempted like insert commands +// + +t.drop(); + +// Single insert, no error. +opCounters = db.serverStatus().opcounters; +t.insert({_id:0}); +assert(!db.getLastError()); +assert.eq(opCounters.insert + 1, db.serverStatus().opcounters.insert); + +// Bulk insert, no error. +opCounters = db.serverStatus().opcounters; +t.insert([{_id:1},{_id:2}]) +assert(!db.getLastError()); +assert.eq(opCounters.insert + 2, db.serverStatus().opcounters.insert); + +// Single insert, with error. +opCounters = db.serverStatus().opcounters; +t.insert({_id:0}) +print( db.getLastError() ) +assert(db.getLastError()); +assert.eq(opCounters.insert + (isMongos ? 1 : 0), db.serverStatus().opcounters.insert); + +// Bulk insert, with error, continueOnError=false. +opCounters = db.serverStatus().opcounters; +t.insert([{_id:3},{_id:3},{_id:4}]) +assert(db.getLastError()); +assert.eq(opCounters.insert + (isMongos ? 2 : 1), db.serverStatus().opcounters.insert); + +// Bulk insert, with error, continueOnError=true. +var continueOnErrorFlag = 1; +opCounters = db.serverStatus().opcounters; +t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag) +assert(db.getLastError()); +assert.eq(opCounters.insert + 3, db.serverStatus().opcounters.insert); + +// +// 2. Update. +// +// - counted as 1 op, regardless of errors +// + +t.drop(); +t.insert({_id:0}); + +// Update, no error. +opCounters = db.serverStatus().opcounters; +t.update({_id:0}, {$set:{a:1}}); +assert(!db.getLastError()); +assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update); + +// Update, with error. +opCounters = db.serverStatus().opcounters; +t.update({_id:0}, {$set:{_id:1}}); +assert(db.getLastError()); +assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update); + +// +// 3. Delete. +// +// - counted as 1 op, regardless of errors +// + +t.drop(); +t.insert([{_id:0},{_id:1}]); + +// Delete, no error. +opCounters = db.serverStatus().opcounters; +t.remove({_id:0}); +assert(!db.getLastError()); +assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete); + +// Delete, with error. 
+opCounters = db.serverStatus().opcounters; +t.remove({_id:{$invalidOp:1}}); +assert(db.getLastError()); +assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete); + +// +// 4. Query. +// +// - mongod: counted as 1 op, regardless of errors +// - mongos: counted as 1 op if successful, else 0 +// + +t.drop(); +t.insert({_id:0}); + +// Query, no error. +opCounters = db.serverStatus().opcounters; +t.findOne(); +assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query); + +// Query, with error. +opCounters = db.serverStatus().opcounters; +assert.throws(function() { t.findOne({_id:{$invalidOp:1}}) }); +assert.eq(opCounters.query + (isMongos ? 0 : 1), db.serverStatus().opcounters.query); + +// +// 5. Getmore. +// +// - counted as 1 op per getmore issued, regardless of errors +// + +t.drop(); +t.insert([{_id:0},{_id:1},{_id:2}]); + +// Getmore, no error. +opCounters = db.serverStatus().opcounters; +t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore +assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query); +assert.eq(opCounters.getmore + 1, db.serverStatus().opcounters.getmore); + +// Getmore, with error (TODO implement when SERVER-5813 is resolved). + +// +// 6. Command. +// +// - unrecognized commands not counted +// - recognized commands counted as 1 op, regardless of errors +// - some (recognized) commands can suppress command counting (i.e. aren't counted as commands) +// + +t.drop(); +t.insert({_id:0}) + +// Command, recognized, no error. +opCounters = db.serverStatus().opcounters; +assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted + +// Command, recognized, with error. +opCounters = db.serverStatus().opcounters; +res = t.runCommand("count", {query:{$invalidOp:1}}); +assert.eq(0, res.ok); +assert.eq(opCounters.command + 2, + db.serverStatus().opcounters.command); // "serverStatus", "count" counted + +// Command, unrecognized. +opCounters = db.serverStatus().opcounters; +res = t.runCommand("command that doesn't exist"); +assert.eq(0, res.ok); +//assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted +// TODO Replace below with above when SERVER-9038 is resolved (mongos counts unrecognized commands) +assert.eq(opCounters.command + (isMongos ? 2 : 1), db.serverStatus().opcounters.command); + +// Command, recognized, counting suppressed (TODO implement when SERVER-9038 is resolved). 
+ +// Restore 'db' var +db = lastDB; diff --git a/jstests/gle/sync1.js b/jstests/gle/sync1.js new file mode 100644 index 00000000000..83d26d1e71f --- /dev/null +++ b/jstests/gle/sync1.js @@ -0,0 +1,57 @@ +// TODO: remove test after we deprecate SyncClusterConnection + +var test = new SyncCCTest( "sync1" ); + +if (test.conn.writeMode() == 'commands') { + jsTest.log('Skipping test not compatible with write commands'); +} +else { + + db = test.conn.getDB( "test" ) + t = db.sync1 + t.save( { x : 1 } ) + assert.eq( 1 , t.find().itcount() , "A1" ); + assert.eq( 1 , t.find().count() , "A2" ); + t.save( { x : 2 } ) + assert.eq( 2 , t.find().itcount() , "A3" ); + assert.eq( 2 , t.find().count() , "A4" ); + + test.checkHashes( "test" , "A3" ); + + test.tempKill(); + assert.throws( function(){ t.save( { x : 3 } ); } , null , "B1" ); + // It's ok even for some of the mongod to be unreachable for read-only cmd + assert.eq( 2, t.find({}).count() ); + // It's NOT ok for some of the mongod to be unreachable for write cmd + assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); }); + assert.eq( 2 , t.find().itcount() , "B2" ); + test.tempStart(); + test.checkHashes( "test" , "B3" ); + + // Trying killing the second mongod + test.tempKill( 1 ); + assert.throws( function(){ t.save( { x : 3 } ); } ); + // It's ok even for some of the mongod to be unreachable for read-only cmd + assert.eq( 2, t.find({}).count() ); + // It's NOT ok for some of the mongod to be unreachable for write cmd + assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); }); + assert.eq( 2 , t.find().itcount() ); + test.tempStart( 1 ); + + assert.eq( 2 , t.find().itcount() , "C1" ); + assert.soon( function(){ + try { + t.remove( { x : 1 } ) + return true; + } + catch ( e ){ + print( e ); + } + return false; + } ) + t.find().forEach( printjson ) + assert.eq( 1 , t.find().itcount() , "C2" ); + + test.stop(); + +} diff --git a/jstests/gle/sync4.js b/jstests/gle/sync4.js new file mode 100644 index 00000000000..b6b1a777856 --- /dev/null +++ b/jstests/gle/sync4.js @@ -0,0 +1,26 @@ +// TODO: remove test after we deprecate SyncClusterConnection + +test = new SyncCCTest( "sync4" ) + +if (test.conn.writeMode() == 'commands') { + jsTest.log('Skipping test not compatible with write commands'); +} +else { + + db = test.conn.getDB( "test" ) + t = db.sync4 + + for ( i=0; i<1000; i++ ){ + t.insert( { _id : i , x : "asdasdsdasdas" } ) + } + db.getLastError(); + + test.checkHashes( "test" , "A0" ); + assert.eq( 1000 , t.find().count() , "A1" ) + assert.eq( 1000 , t.find().itcount() , "A2" ) + assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A2" ) + + test.stop(); + +} + diff --git a/jstests/gle/sync8.js b/jstests/gle/sync8.js new file mode 100644 index 00000000000..81404785ac3 --- /dev/null +++ b/jstests/gle/sync8.js @@ -0,0 +1,21 @@ +// TODO: remove test after we deprecate SyncClusterConnection + +// Test for SERVER-11492 - make sure that upserting a new document reports n:1 in GLE + +var test = new SyncCCTest( "sync1" ); + +if (test.conn.writeMode() == 'commands') { + jsTest.log('Skipping test not compatible with write commands'); +} +else { + var db = test.conn.getDB( "test" ); + var t = db.sync8; + t.remove({}); + + t.update({_id:1}, {$set:{a:1}}, true); + var le = db.getLastErrorObj(); + assert.eq(1, le.n); + + test.stop(); + +} diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js index 60cb7733f5d..248f5e49a6c 100644 --- a/jstests/libs/geo_near_random.js +++ b/jstests/libs/geo_near_random.js 
@@ -12,16 +12,16 @@ GeoNearRandomTest = function(name) { GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){ - if(!indexBounds){ - scale = scale || 1; // scale is good for staying away from edges - return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale]; - } - else{ - var range = indexBounds.max - indexBounds.min; - var eps = Math.pow(2, -40); - // Go very close to the borders but not quite there. - return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min]; - } + if(!indexBounds){ + scale = scale || 1; // scale is good for staying away from edges + return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale]; + } + else{ + var range = indexBounds.max - indexBounds.min; + var eps = Math.pow(2, -40); + // Go very close to the borders but not quite there. + return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min]; + } } @@ -29,27 +29,29 @@ GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) { assert.eq(this.nPts, 0, "insertPoints already called"); this.nPts = nPts; + var bulk = this.t.initializeUnorderedBulkOp(); for (var i=0; i .5 ) - t.remove( { _id : i } ) + bulk.find({ _id: i }).remove(); else - t.insert( { _id : i , s : s } ) + bulk.find({ _id: i }).upsert().updateOne({ _id: i, s: s }); } - //printjson( t.stats() ); - + assert.writeOK(bulk.execute()); assert.eq( orig.storageSize , t.stats().storageSize , "B" + j ) } - test.stop(); diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js index ffeb26a2606..9d9203242d3 100644 --- a/jstests/noPassthrough/geo_full.js +++ b/jstests/noPassthrough/geo_full.js @@ -22,96 +22,89 @@ testServer = new SlowWeeklyMongod( "geo_full" ) db = testServer.getDB( "test" ); var randEnvironment = function(){ - - // Normal earth environment - if( Random.rand() < 0.5 ){ - return { max : 180, - min : -180, - bits : Math.floor( Random.rand() * 32 ) + 1, - earth : true, - bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) } - } - - var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ] - var scale = scales[ Math.floor( Random.rand() * scales.length ) ] - var offset = Random.rand() * scale - + + // Normal earth environment + if( Random.rand() < 0.5 ){ + return { max : 180, + min : -180, + bits : Math.floor( Random.rand() * 32 ) + 1, + earth : true, + bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) }; + } + + var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ] + var scale = scales[ Math.floor( Random.rand() * scales.length ) ] + var offset = Random.rand() * scale + var max = Random.rand() * scale + offset - var min = - Random.rand() * scale + offset - var bits = Math.floor( Random.rand() * 32 ) + 1 - var bits = Math.floor( Random.rand() * 32 ) + 1 - var range = max - min + var min = - Random.rand() * scale + offset + var bits = Math.floor( Random.rand() * 32 ) + 1 + var bits = Math.floor( Random.rand() * 32 ) + 1 + var range = max - min var bucketSize = range / ( 4 * 1024 * 1024 * 1024 ) - - return { max : max, - min : min, - bits : bits, - earth : false, - bucketSize : bucketSize } - -} + + return { max : max, + min : min, + bits : bits, + earth : false, + bucketSize : bucketSize } +}; var randPoint = function( env, query ) { - - if( query && Random.rand() > 0.5 ) - return query.exact - - if( env.earth ) - return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ] - - 
var range = env.max - env.min - return [ Random.rand() * range + env.min, Random.rand() * range + env.min ]; + + if( query && Random.rand() > 0.5 ) + return query.exact + + if( env.earth ) + return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ] + + var range = env.max - env.min + return [ Random.rand() * range + env.min, Random.rand() * range + env.min ]; } var randLocType = function( loc, wrapIn ){ - return randLocTypes( [ loc ], wrapIn )[0] + return randLocTypes( [ loc ], wrapIn )[0] } var randLocTypes = function( locs, wrapIn ) { - - var rLocs = [] - - for( var i = 0; i < locs.length; i++ ){ + + var rLocs = [] + + for( var i = 0; i < locs.length; i++ ){ rLocs.push( locs[i] ) - // {x:1, y:1} \ne [1,1]. - //if( Random.rand() < 0.5 ) - //rLocs.push( { x : locs[i][0], y : locs[i][1] } ) - //else - } - - if( wrapIn ){ - var wrappedLocs = [] - for( var i = 0; i < rLocs.length; i++ ){ - var wrapper = {} - wrapper[wrapIn] = rLocs[i] - wrappedLocs.push( wrapper ) - } - - return wrappedLocs - } - - return rLocs - -} + } + + if( wrapIn ){ + var wrappedLocs = [] + for( var i = 0; i < rLocs.length; i++ ){ + var wrapper = {} + wrapper[wrapIn] = rLocs[i] + wrappedLocs.push( wrapper ) + } + + return wrappedLocs + } + + return rLocs +}; var randDataType = function() { - var scales = [ 1, 10, 100, 1000, 10000 ] - var docScale = scales[ Math.floor( Random.rand() * scales.length ) ] - var locScale = scales[ Math.floor( Random.rand() * scales.length ) ] - - var numDocs = 40000 - var maxLocs = 40000 - // Make sure we don't blow past our test resources - while( numDocs * maxLocs > 40000 ){ - numDocs = Math.floor( Random.rand() * docScale ) + 1 - maxLocs = Math.floor( Random.rand() * locScale ) + 1 - } - - return { numDocs : numDocs, - maxLocs : maxLocs } - -} + var scales = [ 1, 10, 100, 1000, 10000 ] + var docScale = scales[ Math.floor( Random.rand() * scales.length ) ] + var locScale = scales[ Math.floor( Random.rand() * scales.length ) ] + + var numDocs = 40000 + var maxLocs = 40000 + // Make sure we don't blow past our test resources + while( numDocs * maxLocs > 40000 ){ + numDocs = Math.floor( Random.rand() * docScale ) + 1 + maxLocs = Math.floor( Random.rand() * locScale ) + 1 + } + + return { numDocs : numDocs, + maxLocs : maxLocs } +}; function deg2rad(arg) { return arg * Math.PI / 180.0; } function rad2deg(arg) { return arg * 180.0 / Math.PI; } @@ -140,194 +133,181 @@ function pointIsOK(startPoint, radius, env) { } var randQuery = function( env ) { - - var center = randPoint( env ) - - var sphereRadius = -1 - var sphereCenter = null - if( env.earth ){ - // Get a start point that doesn't require wrapping - // TODO: Are we a bit too aggressive with wrapping issues? - var i - for( i = 0; i < 5; i++ ){ + var center = randPoint( env ) + + var sphereRadius = -1 + var sphereCenter = null + if( env.earth ){ + // Get a start point that doesn't require wrapping + // TODO: Are we a bit too aggressive with wrapping issues? + var i + for( i = 0; i < 5; i++ ){ sphereRadius = Random.rand() * 45 * Math.PI / 180 sphereCenter = randPoint( env ) if (pointIsOK(sphereCenter, sphereRadius, env)) { break; } - /* - var t = db.testSphere; t.drop(); t.ensureIndex({ loc : "2d" }, env ) - try{ t.find({ loc : { $within : { $centerSphere : [ sphereCenter, sphereRadius ] } } } ).count(); var err; if( err = db.getLastError() ) throw err; } - catch(e) { print( e ); continue } - print( " Radius " + sphereRadius + " and center " + sphereCenter + " ok ! 
") - break; - */ - } - if( i == 5 ) sphereRadius = -1; - - } - - var box = [ randPoint( env ), randPoint( env ) ] - - var boxPoly = [[ box[0][0], box[0][1] ], - [ box[0][0], box[1][1] ], - [ box[1][0], box[1][1] ], - [ box[1][0], box[0][1] ] ] - - if( box[0][0] > box[1][0] ){ - var swap = box[0][0] - box[0][0] = box[1][0] - box[1][0] = swap - } - - if( box[0][1] > box[1][1] ){ - var swap = box[0][1] - box[0][1] = box[1][1] - box[1][1] = swap - } - - return { center : center, - radius : box[1][0] - box[0][0], - exact : randPoint( env ), - sphereCenter : sphereCenter, - sphereRadius : sphereRadius, - box : box, - boxPoly : boxPoly } - -} + } + if( i == 5 ) sphereRadius = -1; + + } + + var box = [ randPoint( env ), randPoint( env ) ] + + var boxPoly = [[ box[0][0], box[0][1] ], + [ box[0][0], box[1][1] ], + [ box[1][0], box[1][1] ], + [ box[1][0], box[0][1] ] ] + + if( box[0][0] > box[1][0] ){ + var swap = box[0][0] + box[0][0] = box[1][0] + box[1][0] = swap + } + + if( box[0][1] > box[1][1] ){ + var swap = box[0][1] + box[0][1] = box[1][1] + box[1][1] = swap + } + return { center : center, + radius : box[1][0] - box[0][0], + exact : randPoint( env ), + sphereCenter : sphereCenter, + sphereRadius : sphereRadius, + box : box, + boxPoly : boxPoly } +}; var resultTypes = { "exact" : function( loc ){ - return query.exact[0] == loc[0] && query.exact[1] == loc[1] + return query.exact[0] == loc[0] && query.exact[1] == loc[1] }, "center" : function( loc ){ - return Geo.distance( query.center, loc ) <= query.radius + return Geo.distance( query.center, loc ) <= query.radius }, "box" : function( loc ){ - return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] && - loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1] - -}, + return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] && + loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1] + +}, "sphere" : function( loc ){ - return ( query.sphereRadius >= 0 ? ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false ) -}, + return ( query.sphereRadius >= 0 ? + ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false ); +}, "poly" : function( loc ){ - return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] && - loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1] + return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] && + loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]; }} var queryResults = function( locs, query, results ){ - - if( ! results["center"] ){ - for( var type in resultTypes ){ - results[type] = { - docsIn : 0, - docsOut : 0, - locsIn : 0, - locsOut : 0 - } - } - } - - var indResults = {} - for( var type in resultTypes ){ - indResults[type] = { - docIn : false, - locsIn : 0, - locsOut : 0 - } - } - - for( var type in resultTypes ){ - - var docIn = false - for( var i = 0; i < locs.length; i++ ){ - if( resultTypes[type]( locs[i] ) ){ - results[type].locsIn++ - indResults[type].locsIn++ - indResults[type].docIn = true - } - else{ - results[type].locsOut++ - indResults[type].locsOut++ - } - } - if( indResults[type].docIn ) results[type].docsIn++ - else results[type].docsOut++ - - } - - return indResults - + + if( ! 
results["center"] ){ + for( var type in resultTypes ){ + results[type] = { + docsIn : 0, + docsOut : 0, + locsIn : 0, + locsOut : 0 + } + } + } + + var indResults = {} + for( var type in resultTypes ){ + indResults[type] = { + docIn : false, + locsIn : 0, + locsOut : 0 + } + } + + for( var type in resultTypes ){ + + var docIn = false + for( var i = 0; i < locs.length; i++ ){ + if( resultTypes[type]( locs[i] ) ){ + results[type].locsIn++ + indResults[type].locsIn++ + indResults[type].docIn = true + } + else{ + results[type].locsOut++ + indResults[type].locsOut++ + } + } + if( indResults[type].docIn ) results[type].docsIn++ + else results[type].docsOut++ + + } + + return indResults } var randQueryAdditions = function( doc, indResults ){ - - for( var type in resultTypes ){ - var choice = Random.rand() - if( Random.rand() < 0.25 ) - doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } ) - else if( Random.rand() < 0.5 ) - doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } ) - else if( Random.rand() < 0.75 ) - doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] ) - else - doc[type] = ( indResults[type].docIn ? [ { docIn : [ "yes" ] } ] : [ { docIn : [ "no" ] } ] ) - } - + + for( var type in resultTypes ){ + var choice = Random.rand() + if( Random.rand() < 0.25 ) + doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } ) + else if( Random.rand() < 0.5 ) + doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } ) + else if( Random.rand() < 0.75 ) + doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] ) + else + doc[type] = ( indResults[type].docIn ? [{ docIn: [ "yes" ] }] : [{ docIn: [ "no" ] }]); + } } var randIndexAdditions = function( indexDoc ){ - - for( var type in resultTypes ){ - - if( Random.rand() < 0.5 ) continue; - - var choice = Random.rand() - if( Random.rand() < 0.5 ) - indexDoc[type] = 1 - else - indexDoc[type + ".docIn"] = 1 - - } - -} + + for( var type in resultTypes ){ + + if( Random.rand() < 0.5 ) continue; + + var choice = Random.rand() + if( Random.rand() < 0.5 ) + indexDoc[type] = 1 + else + indexDoc[type + ".docIn"] = 1; + } +}; var randYesQuery = function(){ - - var choice = Math.floor( Random.rand() * 7 ) - if( choice == 0 ) - return { $ne : "no" } - else if( choice == 1 ) - return "yes" - else if( choice == 2 ) - return /^yes/ - else if( choice == 3 ) - return { $in : [ "good", "yes", "ok" ] } - else if( choice == 4 ) - return { $exists : true } - else if( choice == 5 ) - return { $nin : [ "bad", "no", "not ok" ] } - else if( choice == 6 ) - return { $not : /^no/ } + + var choice = Math.floor( Random.rand() * 7 ) + if( choice == 0 ) + return { $ne : "no" } + else if( choice == 1 ) + return "yes" + else if( choice == 2 ) + return /^yes/ + else if( choice == 3 ) + return { $in : [ "good", "yes", "ok" ] } + else if( choice == 4 ) + return { $exists : true } + else if( choice == 5 ) + return { $nin : [ "bad", "no", "not ok" ] } + else if( choice == 6 ) + return { $not : /^no/ } } var locArray = function( loc ){ - if( loc.x ) return [ loc.x, loc.y ] - if( ! loc.length ) return [ loc[0], loc[1] ] - return loc + if( loc.x ) return [ loc.x, loc.y ] + if( ! 
loc.length ) return [ loc[0], loc[1] ] + return loc } var locsArray = function( locs ){ - if( locs.loc ){ - arr = [] - for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) ) - return arr - } - else{ - arr = [] - for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) ) - return arr - } + if( locs.loc ){ + arr = [] + for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) ) + return arr + } + else{ + arr = [] + for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) ) + return arr + } } var minBoxSize = function( env, box ){ @@ -335,16 +315,16 @@ var minBoxSize = function( env, box ){ } var minBucketScale = function( env, box ){ - + if( box.length && box[0].length ) box = [ box[0][0] - box[1][0], box[0][1] - box[1][1] ] - + if( box.length ) box = Math.max( box[0], box[1] ) - + print( box ) print( env.bucketSize ) - + return Math.ceil( Math.log( box / env.bucketSize ) / Math.log( 2 ) ) } @@ -352,119 +332,114 @@ var minBucketScale = function( env, box ){ // TODO: Add spherical $uniqueDocs tests var numTests = 100 -// Our seed will change every time this is run, but +// Our seed will change every time this is run, but // each individual test will be reproducible given // that seed and test number var seed = new Date().getTime() //seed = 175 + 288 + 12 for ( var test = 0; test < numTests; test++ ) { - - Random.srand( seed + test ); - //Random.srand( 42240 ) - //Random.srand( 7344 ) - var t = db.testAllGeo - t.drop() - - print( "Generating test environment #" + test ) - var env = randEnvironment() - //env.bits = 11 - var query = randQuery( env ) - var data = randDataType() - //data.numDocs = 5; data.maxLocs = 1; - var paddingSize = Math.floor( Random.rand() * 10 + 1 ) - var results = {} - var totalPoints = 0 - print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " ) - - // Index after a random number of docs added - var indexIt = Math.floor( Random.rand() * data.numDocs ) - - for ( var i = 0; i < data.numDocs; i++ ) { - - if( indexIt == i ){ - var indexDoc = { "locs.loc" : "2d" } - randIndexAdditions( indexDoc ) - - // printjson( indexDoc ) - - t.ensureIndex( indexDoc, env ) - assert.isnull( db.getLastError() ) - } - - var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 ) - totalPoints += numLocs - - var multiPoint = [] - for ( var p = 0; p < numLocs; p++ ) { - var point = randPoint( env, query ) - multiPoint.push( point ) - } - - var indResults = queryResults( multiPoint, query, results ) - - var doc - // Nest the keys differently - if( Random.rand() < 0.5 ) - doc = { locs : { loc : randLocTypes( multiPoint ) } } - else - doc = { locs : randLocTypes( multiPoint, "loc" ) } - - randQueryAdditions( doc, indResults ) - - //printjson( doc ) - doc._id = i - t.insert( doc ) - - } - - var padding = "x" - for( var i = 0; i < paddingSize; i++ ) padding = padding + padding - - print( padding ) - - printjson( { seed : seed, - test: test, - env : env, - query : query, - data : data, - results : results, - paddingSize : paddingSize } ) - - // exact - print( "Exact query..." ) - assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() ) - - // $center - print( "Center query..." 
) - print( "Min box : " + minBoxSize( env, query.radius ) ) - assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() ) - - print( "Center query update..." ) - // printjson( t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).toArray() ) - t.update( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : true } }, "center.docIn" : randYesQuery() }, { $set : { "centerPaddingA" : padding } }, false, true ) - assert.eq( results.center.docsIn, t.getDB().getLastErrorObj().n ) - - if( query.sphereRadius >= 0 ){ - - print( "Center sphere query...") - // $centerSphere - assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() ) - - print( "Center sphere query update..." ) - // printjson( t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).toArray() ) - t.update( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ], $uniqueDocs : true } }, "sphere.docIn" : randYesQuery() }, { $set : { "spherePaddingA" : padding } }, false, true ) - assert.eq( results.sphere.docsIn, t.getDB().getLastErrorObj().n ) - - } - - // $box - print( "Box query..." ) - assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() ) - - // $polygon - print( "Polygon query..." ) - assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() ) + + Random.srand( seed + test ); + //Random.srand( 42240 ) + //Random.srand( 7344 ) + var t = db.testAllGeo + t.drop() + + print( "Generating test environment #" + test ) + var env = randEnvironment() + //env.bits = 11 + var query = randQuery( env ) + var data = randDataType() + //data.numDocs = 5; data.maxLocs = 1; + var paddingSize = Math.floor( Random.rand() * 10 + 1 ) + var results = {} + var totalPoints = 0 + print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " ) + + var bulk = t.initializeUnorderedBulkOp(); + for ( var i = 0; i < data.numDocs; i++ ) { + var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 ) + totalPoints += numLocs + + var multiPoint = [] + for ( var p = 0; p < numLocs; p++ ) { + var point = randPoint( env, query ) + multiPoint.push( point ) + } + + var indResults = queryResults( multiPoint, query, results ) + + var doc + // Nest the keys differently + if( Random.rand() < 0.5 ) + doc = { locs : { loc : randLocTypes( multiPoint ) } } + else + doc = { locs : randLocTypes( multiPoint, "loc" ) } + + randQueryAdditions( doc, indResults ) + + doc._id = i + bulk.insert( doc ); + } + assert.writeOK(bulk.execute()); + + var indexDoc = { "locs.loc" : "2d" }; + randIndexAdditions( indexDoc ); + t.ensureIndex( indexDoc, env ); + assert.isnull( db.getLastError() ); + + var padding = "x" + for( var i = 0; i < paddingSize; i++ ) padding = padding + padding + + print( padding ) + + printjson( { seed : seed, + test: test, + env : env, + query : query, + data : data, + results : results, + paddingSize : paddingSize } ) + + // exact + print( "Exact query..." 
) + assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() ) + + // $center + print( "Center query..." ) + print( "Min box : " + minBoxSize( env, query.radius ) ) + assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() ) + + print( "Center query update..." ) + var res = t.update({ "locs.loc": { $within: { $center: [ query.center, query.radius ], + $uniqueDocs: true }}, + "center.docIn": randYesQuery() }, + { $set: { centerPaddingA: padding }}, false, true); + assert.eq( results.center.docsIn, res.nModified ); + + if( query.sphereRadius >= 0 ){ + + print( "Center sphere query...") + // $centerSphere + assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() ) + + print( "Center sphere query update..." ) + res = t.update({ "locs.loc": { $within: { + $centerSphere: [ query.sphereCenter, query.sphereRadius ], + $uniqueDocs: true } }, + "sphere.docIn" : randYesQuery() }, + { $set: { spherePaddingA: padding } }, false, true); + assert.eq( results.sphere.docsIn, res.nModified ); + } + + // $box + print( "Box query..." ) + assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() ) + + // $polygon + print( "Polygon query..." ) + assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() ) // $near, $nearSphere and geoNear results have a default document limit of 100. var defaultDocLimit = 100; @@ -506,12 +481,12 @@ for ( var test = 0; test < numTests; test++ ) { var num = Math.min( 2* defaultDocLimit, 2 * results.center.docsIn); - var output = db.runCommand( { - geoNear : "testAllGeo", - near : query.center, - maxDistance : query.radius , - includeLocs : true, - num : num } ).results + var output = db.runCommand( { + geoNear : "testAllGeo", + near : query.center, + maxDistance : query.radius , + includeLocs : true, + num : num } ).results assert.eq( Math.min( num, results.center.docsIn ), output.length, @@ -520,40 +495,36 @@ for ( var test = 0; test < numTests; test++ ) { "; radius: " + query.radius + "; docs: " + results.center.docsIn + "; locs: " + results.center.locsIn ) - var distance = 0; - for ( var i = 0; i < output.length; i++ ) { - var retDistance = output[i].dis - var retLoc = locArray( output[i].loc ) - - // print( "Dist from : " + results[i].loc + " to " + startPoint + " is " - // + retDistance + " vs " + radius ) - - var arrLocs = locsArray( output[i].obj.locs ) - - assert.contains( retLoc, arrLocs ) - - // printjson( arrLocs ) - - var distInObj = false - for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) { - var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] ) - distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 ) - } - - assert( distInObj ) - assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 ) - assert.lte( retDistance, query.radius ) - assert.gte( retDistance, distance ) - distance = retDistance - } - - } - - // $polygon + var distance = 0; + for ( var i = 0; i < output.length; i++ ) { + var retDistance = output[i].dis + var retLoc = locArray( output[i].loc ) + + var 
arrLocs = locsArray( output[i].obj.locs ) + + assert.contains( retLoc, arrLocs ) + + var distInObj = false + for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) { + var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] ) + distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 ) + } + + assert( distInObj ) + assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 ) + assert.lte( retDistance, query.radius ) + assert.gte( retDistance, distance ) + distance = retDistance + } + + } + + // $polygon print( "Polygon remove..." ) - t.remove( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ) - assert.eq( results.poly.docsIn, t.getDB().getLastErrorObj().n ) - + res = t.remove({ "locs.loc": { $within: { $polygon: query.boxPoly }}, + "poly.docIn": randYesQuery() }); + assert.eq( results.poly.docsIn, res.nRemoved ); + } diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js index 53d33da4f29..7c5e23d4b97 100644 --- a/jstests/noPassthrough/geo_mnypts_plus_fields.js +++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js @@ -12,7 +12,8 @@ for( var fields = 1; fields < maxFields; fields++ ){ coll.drop() var totalPts = 500 * 1000 - + + var bulk = coll.initializeUnorderedBulkOp(); // Add points in a 100x100 grid for( var i = 0; i < totalPts; i++ ){ var ii = i % 10000 @@ -37,10 +38,11 @@ for( var fields = 1; fields < maxFields; fields++ ){ doc[ "field" + j ] = field } - - coll.insert( doc ) + + bulk.insert( doc ); } - + assert.writeOK(bulk.execute()); + // Create the query for the additional fields queryFields = {} for( var j = 0; j < fields; j++ ){ diff --git a/jstests/noPassthrough/gle_after_split_failure_during_migration.js b/jstests/noPassthrough/gle_after_split_failure_during_migration.js deleted file mode 100644 index 9d0a6a9ca2a..00000000000 --- a/jstests/noPassthrough/gle_after_split_failure_during_migration.js +++ /dev/null @@ -1,136 +0,0 @@ -/** - * SERVER-4987 This test tries to check the getLastError call will still use - * the same connection even if a split chunk triggered while doing inserts - * failed (cause by StaleConfigException). - * - * TODO: SERVER-5175 - * This test relies on the corresponding delays inside (1) WriteBackListener::run - * and (2) ShardStrategy::_insert and (3) receivedInsert from instance.cpp - * to make the bug easier to manifest. - * - * The purpose of (1) is to make the writebacks slower so the failed inserts won't - * be reapplied on time. - * - * The purpose of (2) is to make it easier for the moveChunk command from the other - * mongos to interleave in between the moment the insert has set its shard version and - * when in tries to autosplit (Note: it should be long enough to allow the moveChunk - * to actually complete before it tries to proceed to autosplit). - * - * The purpose of (3) is to make sure that the insert won't get applied to the - * shard right away so when a different connection is used to do the getLastError, - * the write will still not be applied. - */ -function testGleAfterSplitDuringMigration(){ - var st = new ShardingTest({ shards: 2, verbose: 2, mongos: 2, - other: { chunksize: 1 }}); - - // Stop the balancer to prevent it from contending with the distributed lock. 
- st.stopBalancer(); - - var DB_NAME = jsTest.name(); - var COLL_NAME = "coll"; - - var mongos = st.s0; - var confDB = mongos.getDB( "config" ); - var coll = mongos.getCollection( DB_NAME + "." + COLL_NAME ); - - var shardConn = st.d0; - var shardColl = shardConn.getCollection( coll.getFullName() ); - - var data = "x"; - var dataSize = 1024 * 256; // bytes, must be power of 2 - while( data.length < dataSize ) data += data; - - // Shard collection - st.shardColl( coll, { _id : 1 }, false ); - - var docID = 0; - - /** - * @return {Mongo} the connection object given the name of the shard. - */ - var getShardConn = function( shardName ) { - var shardLoc = confDB.shards.findOne({ _id: shardName }).host; - return new Mongo( shardLoc ); - }; - - /** - * Inserts documents using a direct shard connection to the max key chunk - * enough to make sure that it will trigger the auto split. - * - * variables from outer scope: docID, coll, confDB, data - */ - var primeForSplitting = function() { - var topChunk = confDB.chunks.find().sort({ max: -1 }).limit( 1 ).next(); - var shardLoc = getShardConn( topChunk.shard ); - var testColl = shardLoc.getCollection( coll.getFullName() ); - - var superSaturatedChunkSize = 1024 * 1024 * 10; // 10MB - var docsToSaturateChunkSize = superSaturatedChunkSize / dataSize; - - for ( var i = 0; i < docsToSaturateChunkSize; i++ ) { - testColl.insert({ _id: docID++, val: data }); - } - - assert.eq( null, testColl.getDB().getLastError() ); - }; - - /** - * Moves a random chunk to a new shard using a different mongos. - * - * @param tries {Number} number of retry attempts when the moveChunk command - * fails. - * - * variables from outer scope: coll, st - */ - var moveRandomChunk = function( tries ) { - var otherConfDB = st.s1.getDB( "config" ); - var chunksCursor = otherConfDB.chunks.find().sort({ max: 1 }); - var chunkCount = chunksCursor.count(); - - var randIdx = Math.floor( Math.random() * chunkCount ); - // Don't get the chunk with max/min key - randIdx = ( randIdx == chunkCount )? randIdx - 1 : randIdx; - randIdx = ( randIdx == 0 )? randIdx + 1 : randIdx; - - var chunk = chunksCursor.arrayAccess( randIdx ); - var chunkOwner = chunk.shard; - var newOwner = otherConfDB.shards.findOne({ _id: { $ne: chunkOwner }})._id; - - var result = otherConfDB.adminCommand({ moveChunk: coll.getFullName(), - find: { _id: chunk.min._id }, - to: newOwner }); - - jsTest.log( "moveChunk result: " + tojson( result )); - if( !result.ok && tries > 1 ) { - moveRandomChunk( tries - 1 ); - } - }; - - var chunks = 0; - do { - coll.insert({ _id: docID++, val: data }); - chunks = mongos.getDB( "config" ).chunks.find().count(); - } while ( chunks < 5 ); - - primeForSplitting(); - - jsTest.log( "Starting the insert that should trigger auto-split." ); - - // TODO: SERVER-5175 Trigger delays here - coll.insert({ _id: docID++, val: data }); - moveRandomChunk( 3 ); - - // getLastError should wait for all writes to this connection. - var errObj = coll.getDB().getLastErrorObj(); - jsTest.log( "Last Error Object: " + tojson( errObj )); - - assert.eq( docID, coll.find().itcount(), "Count does not match!" ); - - jsTest.log( "Finished counting." 
); - - st.stop(); -} - -testGleAfterSplitDuringMigration(); - diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js index b335480d0ff..7118fc64a6c 100644 --- a/jstests/noPassthrough/indexbg1.js +++ b/jstests/noPassthrough/indexbg1.js @@ -37,10 +37,11 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m t = db[ baseName ]; t.drop(); + var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp(); for( i = 0; i < size; ++i ) { - db.jstests_indexbg1.save( {i:i} ); + bulk.insert({ i: i }); } - db.getLastError(); + assert.writeOK(bulk.execute()); assert.eq( size, t.count() ); doParallel( fullName + ".ensureIndex( {i:1}, {background:true} )" ); @@ -64,25 +65,16 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m assert( ex.nscanned < 1000 , "took too long to find 100: " + tojson( ex ) ); - t.remove( {i:40}, true ); // table scan - assert( !db.getLastError() ); - - t.update( {i:10}, {i:-10} ); // should scan 10 - assert( !db.getLastError() ); + assert.writeOK(t.remove({ i: 40 }, true )); // table scan + assert.writeOK(t.update({ i: 10 }, { i :-10 })); // should scan 10 id = t.find().hint( {$natural:-1} ).next()._id; - t.update( {_id:id}, {i:-2} ); - assert( !db.getLastError() ); - - t.save( {i:-50} ); - assert( !db.getLastError() ); - - t.save( {i:size+2} ); - assert( !db.getLastError() ); + assert.writeOK(t.update({ _id: id }, { i: -2 } )); + assert.writeOK(t.save({ i: -50 })); + assert.writeOK(t.save({ i: size + 2 })); assert.eq( size + 1, t.count() ); - assert( !db.getLastError() ); print( "finished with checks" ); } catch( e ) { @@ -115,11 +107,11 @@ assert.eq( 1, t.count( {i:-2} ) ); assert.eq( 1, t.count( {i:-50} ) ); assert.eq( 1, t.count( {i:size+2} ) ); assert.eq( 0, t.count( {i:40} ) ); -assert( !db.getLastError() ); print("about to drop index"); t.dropIndex( {i:1} ); -printjson( db.getLastError() ); -assert( !db.getLastError() ); +var gle = db.getLastError(); +printjson( gle ); +assert( !gle ); testServer.stop(); } // if(0) diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js index e89b91fa2cf..7e4560703a0 100644 --- a/jstests/noPassthrough/indexbg2.js +++ b/jstests/noPassthrough/indexbg2.js @@ -51,9 +51,7 @@ doTest = function(dropDups) { // wait for indexing to start assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test." 
+ baseName }) }, "no index created", 30000, 50); t.save({ i: 0, n: true }); - //printjson(db.getLastError()); t.save({ i: size - 1, n: true }); - //printjson(db.getLastError()); } catch (e) { // only a failure if we're still indexing // wait for parallel status to update to reflect indexing status diff --git a/jstests/noPassthrough/query_yield1.js b/jstests/noPassthrough/query_yield1.js index 624215f8c45..7c168c1e208 100644 --- a/jstests/noPassthrough/query_yield1.js +++ b/jstests/noPassthrough/query_yield1.js @@ -14,9 +14,11 @@ q = function(){ var x=this.n; for ( var i=0; i<250; i++ ){ x = x * 2; } return f while ( true ){ function fill(){ + var bulk = t.initializeUnorderedBulkOp(); for ( ; i 10, "D" ); assert( t.findOne( { i : i - 1 } ), "E" ); -t.remove( { i : i - 1 } ); -assert( db.getLastError().indexOf( "capped" ) >= 0, "F" ); +var res = assert.writeError(t.remove( { i : i - 1 } )); +assert( res.getWriteError().errmsg.indexOf( "capped" ) >= 0, "F" ); assert( t.validate().valid, "G" ); diff --git a/jstests/noPassthroughWithMongod/fsync2.js b/jstests/noPassthroughWithMongod/fsync2.js index bdf956f30f2..7080837a99b 100644 --- a/jstests/noPassthroughWithMongod/fsync2.js +++ b/jstests/noPassthroughWithMongod/fsync2.js @@ -41,9 +41,7 @@ function doTest() { //assert.eq(1, m.getDB(db.getName()).fsync2.count()); assert( m.getDB("admin").$cmd.sys.unlock.findOne().ok ); - - db.getLastError(); - + assert.eq( 2, db.fsync2.count() ); } diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js index 0161eccb4ac..084b839cabc 100644 --- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js +++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js @@ -17,15 +17,14 @@ centers = [] bounds = [] for( var s = 0; s < scale.length; s++ ){ - for ( var i = 0; i < radius.length; i++ ) { - radii.push( radius[i] * scale[s] ) - } - - for ( var j = 0; j < center.length; j++ ) { - centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] ) - bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] ) - } + for ( var i = 0; i < radius.length; i++ ) { + radii.push( radius[i] * scale[s] ) + } + for ( var j = 0; j < center.length; j++ ) { + centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] ) + bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] ) + } } radius = radii @@ -34,75 +33,74 @@ bound = bounds for ( var b = 0; b < bits.length; b++ ) { - - - printjson( radius ) - printjson( centers ) - - for ( var i = 0; i < radius.length; i++ ) { - for ( var j = 0; j < center.length; j++ ) { - - printjson( { center : center[j], radius : radius[i], bits : bits[b] } ); - - t.drop() - - // Make sure our numbers are precise enough for this test - if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) ) - continue; - - t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } ); - t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } ); - t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } ); - t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } ); - t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } ); - t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } ); - t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } ); - t.save( { "_id" : 8, "loc" : { "x" : 
center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } ); - t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } ); - - t.ensureIndex( { loc : "2d" }, { max : bound[j][1], min : bound[j][0], bits : bits[b] } ); - - if( db.getLastError() ) continue; - - print( "DOING WITHIN QUERY ") - r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } ); - - //printjson( r.toArray() ); - - assert.eq( 5, r.count() ); - - // FIXME: surely code like this belongs in utils.js. - a = r.toArray(); - x = []; - for ( k in a ) - x.push( a[k]["_id"] ) - x.sort() - assert.eq( [ 1, 2, 3, 4, 5 ], x ); - - print( " DOING NEAR QUERY ") - //printjson( center[j] ) - r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } ) - assert.eq( 5, r.count() ); - - print( " DOING DIST QUERY ") - - a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results - assert.eq( 5, a.length ); - - //printjson( a ); - - var distance = 0; - for( var k = 0; k < a.length; k++ ){ - //print( a[k].dis ) - //print( distance ) - assert.gte( a[k].dis, distance ); - //printjson( a[k].obj ) - //print( distance = a[k].dis ); - } - - r = t.find( { loc : { $within : { $box : [ [ center[j][0] - radius[i], center[j][1] - radius[i] ], [ center[j][0] + radius[i], center[j][1] + radius[i] ] ] } } }, { _id : 1 } ) - assert.eq( 9, r.count() ); - - } - } -} \ No newline at end of file + printjson( radius ) + printjson( centers ) + + for ( var i = 0; i < radius.length; i++ ) { + for ( var j = 0; j < center.length; j++ ) { + printjson( { center : center[j], radius : radius[i], bits : bits[b] } ); + + t.drop() + + // Make sure our numbers are precise enough for this test + if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) ) + continue; + + t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } ); + t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } ); + t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } ); + t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } ); + t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } ); + t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } ); + t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } ); + t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } ); + t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } ); + + var res = t.ensureIndex({ loc: "2d" }, + { max: bound[j][1], + min : bound[j][0], + bits : bits[b] }); + + // ensureIndex fails when this iteration inserted coordinates that are out of bounds. + // These are invalid cases, so we skip them. + if (!res.ok) continue; + + print( "DOING WITHIN QUERY ") + r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } ); + + assert.eq( 5, r.count() ); + + // FIXME: surely code like this belongs in utils.js. 
+ a = r.toArray(); + x = []; + for ( k in a ) + x.push( a[k]["_id"] ) + x.sort() + assert.eq( [ 1, 2, 3, 4, 5 ], x ); + + print( " DOING NEAR QUERY ") + //printjson( center[j] ) + r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } ) + assert.eq( 5, r.count() ); + + print( " DOING DIST QUERY ") + + a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results + assert.eq( 5, a.length ); + + var distance = 0; + for( var k = 0; k < a.length; k++ ){ + assert.gte( a[k].dis, distance ); + + } + + r = t.find({ loc: { $within: { $box: [ [ center[j][0] - radius[i], + center[j][1] - radius[i] ], + [ center[j][0] + radius[i], + center[j][1] + radius[i] ]]}}}, + { _id: 1 } ); + assert.eq( 9, r.count() ); + + } + } +} diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js index ac4065158bf..bc7935fa7a6 100644 --- a/jstests/noPassthroughWithMongod/geo_mnypts.js +++ b/jstests/noPassthroughWithMongod/geo_mnypts.js @@ -6,10 +6,12 @@ coll.drop() var totalPts = 500 * 1000 // Add points in a 100x100 grid +var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < totalPts; i++ ){ var ii = i % 10000 - coll.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] }) + bulk.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] }); } +assert.writeOK(bulk.execute()); coll.ensureIndex({ loc : "2d" }) diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js index 25bf0269ccc..5b19b2b2080 100644 --- a/jstests/noPassthroughWithMongod/geo_polygon.js +++ b/jstests/noPassthroughWithMongod/geo_polygon.js @@ -15,12 +15,14 @@ if ( bi.indexOf( "erh2" ) >= 0 ){ if ( shouldRun ) { num = 0; + var bulk = t.initializeUnorderedBulkOp(); for ( x = -180; x < 180; x += .5 ){ for ( y = -180; y < 180; y += .5 ){ o = { _id : num++ , loc : [ x , y ] }; - t.save( o ); + bulk.insert( o ); } } + assert.writeOK(bulk.execute()); var numTests = 31; for( var n = 0; n < numTests; n++ ){ diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js index 79d0d93fc9b..84e7342e051 100644 --- a/jstests/noPassthroughWithMongod/index_check10.js +++ b/jstests/noPassthroughWithMongod/index_check10.js @@ -104,25 +104,30 @@ function doIt( indexVersion ) { } } + var bulk = t.initializeUnorderedBulkOp(); for( var i = 0; i < 10000; ++i ) { - t.save( obj() ); + bulk.insert( obj() ); } + assert.writeOK(bulk.execute()); t.ensureIndex( idx , { v : indexVersion } ); check(); + bulk = t.initializeUnorderedBulkOp(); for( var i = 0; i < 10000; ++i ) { if ( Random.rand() > 0.9 ) { - t.save( obj() ); + bulk.insert( obj() ); } else { - t.remove( obj() ); // improve + bulk.find( obj() ).remove(); // improve } if( Random.rand() > 0.999 ) { print( i ); + assert.writeOK(bulk.execute()); check(); + bulk = t.initializeUnorderedBulkOp(); } } - + assert.writeOK(bulk.execute()); check(); } diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js index 8a50471940b..fd1b1d5eaa1 100644 --- a/jstests/noPassthroughWithMongod/index_check9.js +++ b/jstests/noPassthroughWithMongod/index_check9.js @@ -106,25 +106,32 @@ function check() { assert.eq( c3.length, count ); } +var bulk = t.initializeUnorderedBulkOp(); for( var i = 0; i < 10000; ++i ) { - t.save( obj() ); + bulk.insert( obj() ); if( Random.rand() > 0.999 ) { print( i ); + assert.writeOK(bulk.execute()); check(); + bulk = t.initializeUnorderedBulkOp(); } } 
+bulk = t.initializeUnorderedBulkOp(); for( var i = 0; i < 100000; ++i ) { if ( Random.rand() > 0.9 ) { - t.save( obj() ); + bulk.insert( obj() ); } else { - t.remove( obj() ); // improve + bulk.find( obj() ).remove(); // improve } if( Random.rand() > 0.999 ) { print( i ); + assert.writeOK(bulk.execute()); check(); + bulk = t.initializeUnorderedBulkOp(); } } +assert.writeOK(bulk.execute()); check(); @@ -132,4 +139,4 @@ check(); for( var z = 0; z < 5; ++z ) { doIt(); -} \ No newline at end of file +} diff --git a/jstests/noPassthroughWithMongod/index_hammer1.js b/jstests/noPassthroughWithMongod/index_hammer1.js index 87fd3820f66..675a2f8db7c 100644 --- a/jstests/noPassthroughWithMongod/index_hammer1.js +++ b/jstests/noPassthroughWithMongod/index_hammer1.js @@ -2,9 +2,10 @@ t = db.index_hammer1; t.drop(); +var bulk = t.initializeUnorderedBulkOp(); for ( i=0; i<10000; i++ ) - t.insert( { x : i , y : i } ); -db.getLastError(); + bulk.insert({ x: i, y: i }); +assert.writeOK(bulk.execute()); ops = [] diff --git a/jstests/noPassthroughWithMongod/index_killop.js b/jstests/noPassthroughWithMongod/index_killop.js index b022e31f3b8..f897f6a80de 100644 --- a/jstests/noPassthroughWithMongod/index_killop.js +++ b/jstests/noPassthroughWithMongod/index_killop.js @@ -5,10 +5,11 @@ t.drop(); // Insert a large number of documents, enough to ensure that an index build on these documents will // be interrupted before complete. +var bulk = t.initializeUnorderedBulkOp(); for( i = 0; i < 1e6; ++i ) { - t.save( { a:i } ); + bulk.insert({ a: i }); } -db.getLastError(); +assert.writeOK(bulk.execute()); function debug( x ) { // printjson( x ); @@ -23,7 +24,7 @@ function getIndexBuildOpId() { // Identify the index build as an insert into the 'test.system.indexes' // namespace. It is assumed that no other clients are concurrently // accessing the 'test' database. - if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) { + if ( op.op == 'query' && 'createIndexes' in op.query ) { debug( op.opid ); indexBuildOpId = op.opid; } @@ -33,9 +34,8 @@ function getIndexBuildOpId() { /** Test that building an index with @param 'options' can be aborted using killop. */ function testAbortIndexBuild( options ) { - - // Create an index asynchronously by using a new connection. - new Mongo( db.getMongo().host ).getCollection( t.toString() ).createIndex( { a:1 }, options ); + var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_killop; \ + coll.createIndex({ a: 1 }, ' + tojson(options) + ');'); // When the index build starts, find its op id. assert.soon( function() { return ( opId = getIndexBuildOpId() ) != -1; } ); @@ -44,6 +44,8 @@ function testAbortIndexBuild( options ) { // Wait for the index build to stop. assert.soon( function() { return getIndexBuildOpId() == -1; } ); + createIdx(); + // Check that no new index has been created. This verifies that the index build was aborted // rather than successfully completed. 
assert.eq( [ { _id:1 } ], t.getIndexKeys() ); diff --git a/jstests/noPassthroughWithMongod/index_multi.js b/jstests/noPassthroughWithMongod/index_multi.js index ac259455d36..e4c38632bcf 100644 --- a/jstests/noPassthroughWithMongod/index_multi.js +++ b/jstests/noPassthroughWithMongod/index_multi.js @@ -4,8 +4,9 @@ Random.setRandomSeed(); var coll = db.index_multi; +var bulk = coll.initializeUnorderedBulkOp(); print("Populate the collection with random data"); -for (var i=0;i<1e4; i++) { +for (var i = 0; i < 1e4; i++) { var doc = {"_id" : i}; for (var j=0; j<100; j++) { @@ -22,52 +23,54 @@ for (var i=0;i<1e4; i++) { } } - if (i%1000 == 0) { - print("inserted "+i); - } - - coll.insert(doc); + bulk.insert(doc); } +assert.writeOK(bulk.execute()); // Array of all index specs var specs = []; var multikey = []; +var indexJobs = []; print("Create 3 triple indexes"); -for (var i=90; i<93; i++) { +for (var i = 90; i < 93; i++) { var spec = {}; spec["field"+i] = 1; spec["field"+(i+1)] = 1; spec["field"+(i+2)] = 1; - startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});" - +"db.results.insert(db.runCommand({getlasterror:1}));"); + indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + "," + + "{ background: true });" + + "db.results.insert(db.runCommand({ getlasterror: 1 }));")); specs.push(spec); multikey.push(i % 10 == 0 || (i+1) % 10 == 0 || (i+2) % 10 == 0); } print("Create 30 compound indexes"); -for (var i=30; i<90; i+=2) { +for (var i = 30; i < 90; i += 2) { var spec = {}; spec["field"+i] = 1; spec["field"+(i+1)] = 1; - startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});" - +"db.results.insert(db.runCommand({getlasterror:1}));"); + indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " + + "{ background: true });" + + "db.results.insert(db.runCommand({ getlasterror: 1 }));")); specs.push(spec); multikey.push(i % 10 == 0 || (i+1) % 10 == 0); } print("Create 30 indexes"); -for (var i=0; i<30; i++) { +for (var i = 0; i < 30; i++) { var spec = {}; spec["field"+i] = 1; - startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});" - +"db.results.insert(db.runCommand({getlasterror:1}));"); + indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " + + "{ background: true });" + + "db.results.insert(db.runCommand({ getlasterror: 1 }));")); specs.push(spec); multikey.push(i % 10 == 0); } print("Do some sets and unsets"); -for (i=0; i<1e4; i++) { +bulk = coll.initializeUnorderedBulkOp(); +for (i = 0; i < 1e4; i++) { var criteria = {_id: Random.randInt(1e5)}; var mod = {}; if (Random.rand() < .5) { @@ -79,31 +82,23 @@ for (i=0; i<1e4; i++) { mod['$unset']['field'+Random.randInt(100)] = true; } - coll.update(criteria, mod); + bulk.find(criteria).update(mod); } +assert.writeOK(bulk.execute()); + +indexJobs.forEach(function(join) { + join(); +}); printjson(db.results.find().toArray()); printjson(coll.getIndexes()); print("Make sure we end up with 64 indexes"); -assert.soon( - function() { - for (var i in specs) { - print("trying to hint on "+tojson(specs[i])); - try { - var explain = coll.find().hint(specs[i]).explain(); - printjson(explain); - assert.eq(multikey[i], explain.isMultiKey); - } catch (x) { - print(x+", hinting on "+tojson(specs[i])); - return false; - } - } - return true; - }, - "wait for all indexes to be built", - 120000 -); +for (var i in specs) { + print("trying to hint on "+tojson(specs[i])); + var explain = 
coll.find().hint(specs[i]).explain(); + assert.eq(multikey[i], explain.isMultiKey, tojson(explain)); +} print("SUCCESS!"); diff --git a/jstests/noPassthroughWithMongod/index_retry.js b/jstests/noPassthroughWithMongod/index_retry.js index d0465476144..7c79e75af5f 100644 --- a/jstests/noPassthroughWithMongod/index_retry.js +++ b/jstests/noPassthroughWithMongod/index_retry.js @@ -12,13 +12,14 @@ t.drop(); // Insert a large number of documents, enough to ensure that an index build on these documents can // be interrupted before complete. +var bulk = t.initializeUnorderedBulkOp(); for (i = 0; i < 5e5; ++i) { - t.save( { a:i } ); + bulk.insert({ a: i }); if (i % 10000 == 0) { print("i: " + i); } } -test.getLastError(); +assert.writeOK(bulk.execute()); function debug(x) { printjson(x); @@ -36,14 +37,15 @@ function indexBuildInProgress() { // Identify the index build as an insert into the 'test.system.indexes' // namespace. It is assumed that no other clients are concurrently // accessing the 'test' database. - if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) { + if ( op.op == 'query' && 'createIndexes' in op.query ) { debug(op.opid); + var idxSpec = op.query.indexes[0]; // SERVER-4295 Make sure the index details are there // we can't assert these things, since there is a race in reporting // but we won't count if they aren't - if ( "a_1" == op.insert.name && - 1 == op.insert.key.a && - op.insert.background ) { + if ( "a_1" == idxSpec.name && + 1 == idxSpec.key.a && + idxSpec.background ) { indexBuildOpId = op.opid; } } @@ -53,10 +55,9 @@ function indexBuildInProgress() { } function abortDuringIndexBuild(options) { - - // Create an index asynchronously by using a new connection. - new Mongo(test.getMongo().host).getCollection(t.toString()).createIndex( - { a:1 }, { background:true } ); + var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_retry; \ + coll.createIndex({ a: 1 }, { background: true });', + ports[0]); // Wait for the index build to start. var times = 0; @@ -68,6 +69,7 @@ function abortDuringIndexBuild(options) { print("killing the mongod"); stopMongod(ports[0], /* signal */ 9); + createIdx(); } abortDuringIndexBuild(); diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js index 73446736d84..1739d89054a 100644 --- a/jstests/noPassthroughWithMongod/indexbg_drop.js +++ b/jstests/noPassthroughWithMongod/indexbg_drop.js @@ -45,9 +45,11 @@ var dc = {dropIndexes: collection, index: "i_1"}; // set up collections masterDB.dropDatabase(); jsTest.log("creating test data " + size + " documents"); +var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp(); for( i = 0; i < size; ++i ) { - masterDB.getCollection(collection).save( {i: Random.rand()} ); + bulk.insert({ i: Random.rand() }); } +assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing for test of: " + tojson(dc)); // Add another index to be sure the drop command works. 
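
The migration above follows one idiom throughout: buffer the writes in an unordered bulk operation and assert on the result of execute() instead of polling getLastError(). A minimal self-contained sketch of that idiom in the mongo shell (the collection name and document count here are illustrative, not taken from any one test):

    // Seed a collection and fail fast if any write is rejected.
    var coll = db.bulk_insert_sketch;              // hypothetical collection name
    coll.drop();

    var bulk = coll.initializeUnorderedBulkOp();
    for (var n = 0; n < 1000; ++n) {
        bulk.insert({ i: n });
    }
    // execute() returns a BulkWriteResult; assert.writeOK throws if it carries a write error.
    assert.writeOK(bulk.execute());
    assert.eq(1000, coll.count());

A write concern can be passed to execute() when a test needs replication guarantees, e.g. bulk.execute({ w: 2 }) as used in the auth test further down in this patch.
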
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js index 09c75056ca2..f6f1d426161 100644 --- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js +++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js @@ -66,9 +66,11 @@ for (var idx = 0; idx < dropAction.length; idx++) { // set up collections masterDB.dropDatabase(); jsTest.log("creating test data " + size + " documents"); + var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp(); for(var i = 0; i < size; ++i ) { - masterDB.getCollection(collection).save( {i:i} ); + bulk.insert({ i: i }); } + assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc)); masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} ); diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js index 01d3b23a07c..a3b2c26f59e 100644 --- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js +++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js @@ -38,9 +38,11 @@ var secondDB = second.getDB('bgIndexSec'); var size = 500000; jsTest.log("creating test data " + size + " documents"); +var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp(); for(var i = 0; i < size; ++i) { - masterDB.jstests_bgsec.save( {i:i} ); + bulk.insert({ i: i }); } +assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing"); masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} ); diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js index 7aac1265c6d..57a1e5341ad 100644 --- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js +++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js @@ -67,9 +67,11 @@ if (0) { var size = 500000; jsTest.log("creating test data " + size + " documents"); + var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp(); for( i = 0; i < size; ++i ) { - masterDB.jstests_bgsec.save( {i:i} ); + bulk.insert({ i : i }); } + assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing"); masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} ); @@ -85,8 +87,7 @@ if (0) { // Make sure a journal flush for the oplog occurs, by doing a local journaled write to the // secondary - second.getDB('local').foo.insert({a:1}); - second.getDB('local').runCommand( { getLastError: 1, j: true } ); + assert.writeOK(second.getDB('local').foo.insert({ a: 1 }, { writeConcern: { j: true }})); // restart secondary and reconnect jsTest.log("Restarting secondary"); diff --git a/jstests/noPassthroughWithMongod/large_chunk.js b/jstests/noPassthroughWithMongod/large_chunk.js index 2e648084947..12f0c48fdcd 100644 --- a/jstests/noPassthroughWithMongod/large_chunk.js +++ b/jstests/noPassthroughWithMongod/large_chunk.js @@ -20,11 +20,12 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; +var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 400 * 1024 * 1024 ) ){ - db.foo.insert( { _id : num++ , s : bigString } ); + bulk.insert({ _id: num++, s: bigString }); inserted += bigString.length; } -db.getLastError(); +assert.writeOK(bulk.execute()); // Turn on sharding on the 'test.foo' collection and generate a large chunk s.adminCommand( { enablesharding : "test" } ); diff --git 
a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js index fd7ec8c68e7..1ff024fcb03 100644 --- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js +++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js @@ -15,14 +15,14 @@ var coll = db.getCollection("mrInput"); // var expectedOutColl = []; +var bulk = coll.initializeUnorderedBulkOp(); for (var i = 0; i < 10; i++) { for (var j = 1; j < 50; j++) { - coll.insert({idx: i, j: j}); + bulk.insert({ idx: i, j: j }); } expectedOutColl.push ({ _id: i, value: j - 1 }); } - -assertGLEOK(db.getLastErrorObj()); +assert.writeOK(bulk.execute()); function mapFn() { emit(this.idx, 1); }; function reduceFn(key, values) { return Array.sum(values); }; @@ -41,4 +41,4 @@ assert.eq(out.counts.emit, 490, "emit count is wrong"); // changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else // must have changed with when intermediate reduces occur (see mongo::mr::State::checkSize). // -assert.eq(out.counts.reduce, 14, "reduce count is wrong"); \ No newline at end of file +assert.eq(out.counts.reduce, 14, "reduce count is wrong"); diff --git a/jstests/noPassthroughWithMongod/mr_shard_version.js b/jstests/noPassthroughWithMongod/mr_shard_version.js index 47fd99ea30e..c011e7700e9 100644 --- a/jstests/noPassthroughWithMongod/mr_shard_version.js +++ b/jstests/noPassthroughWithMongod/mr_shard_version.js @@ -11,11 +11,12 @@ var numDocs = 500000 var numKeys = 1000 var numTests = 3 +var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < numDocs; i++ ){ - coll.insert({ _id : i, key : "" + ( i % numKeys ), value : i % numKeys }) + bulk.insert({ _id: i, key: "" + ( i % numKeys ), value: i % numKeys }); } +assert.writeOK(bulk.execute()); -assert.eq( null, coll.getDB().getLastError() ) assert.eq( numDocs, coll.find().itcount() ) var halfId = coll.find().itcount() / 2 diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js index 7aa55564640..6f6c196510d 100644 --- a/jstests/noPassthroughWithMongod/no_balance_collection.js +++ b/jstests/noPassthroughWithMongod/no_balance_collection.js @@ -68,9 +68,11 @@ sh.waitForBalancer(true) // Make sure auto-migrates on insert don't move chunks var lastMigration = sh._lastMigration( collB ) +var bulk = collB.initializeUnorderedBulkOp(); for( var i = 0; i < 1000000; i++ ){ - collB.insert({ _id : i, hello : "world" }) + bulk.insert({ _id: i, hello: "world" }); } +assert.writeOK(bulk.execute()); printjson( lastMigration ) printjson( sh._lastMigration( collB ) ) @@ -78,4 +80,4 @@ printjson( sh._lastMigration( collB ) ) if( lastMigration == null ) assert.eq( null, sh._lastMigration( collB ) ) else assert.eq( lastMigration.time, sh._lastMigration( collB ).time ) -st.stop() \ No newline at end of file +st.stop() diff --git a/jstests/noPassthroughWithMongod/opcounters_legacy.js b/jstests/noPassthroughWithMongod/opcounters_legacy.js deleted file mode 100644 index 7db520a109f..00000000000 --- a/jstests/noPassthroughWithMongod/opcounters_legacy.js +++ /dev/null @@ -1,173 +0,0 @@ -// Test that opcounters get incremented properly. - -// Remember the global 'db' var -var lastDB = db; -var mongo = new Mongo(db.getMongo().host); -mongo.writeMode = function() { return "legacy"; } -db = mongo.getDB(db.toString()); - -var t = db.opcounters; -var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg); -var opCounters; - -// -// 1. Insert. 
-// -// - mongod, single insert: -// counted as 1 op if successful, else 0 -// - mongod, bulk insert of N with continueOnError=true: -// counted as N ops, regardless of errors -// - mongod, bulk insert of N with continueOnError=false: -// counted as K ops, where K is number of docs successfully inserted -// -// - mongos -// count ops attempted like insert commands -// - -t.drop(); - -// Single insert, no error. -opCounters = db.serverStatus().opcounters; -t.insert({_id:0}); -assert(!db.getLastError()); -assert.eq(opCounters.insert + 1, db.serverStatus().opcounters.insert); - -// Bulk insert, no error. -opCounters = db.serverStatus().opcounters; -t.insert([{_id:1},{_id:2}]) -assert(!db.getLastError()); -assert.eq(opCounters.insert + 2, db.serverStatus().opcounters.insert); - -// Single insert, with error. -opCounters = db.serverStatus().opcounters; -t.insert({_id:0}) -print( db.getLastError() ) -assert(db.getLastError()); -assert.eq(opCounters.insert + (isMongos ? 1 : 0), db.serverStatus().opcounters.insert); - -// Bulk insert, with error, continueOnError=false. -opCounters = db.serverStatus().opcounters; -t.insert([{_id:3},{_id:3},{_id:4}]) -assert(db.getLastError()); -assert.eq(opCounters.insert + (isMongos ? 2 : 1), db.serverStatus().opcounters.insert); - -// Bulk insert, with error, continueOnError=true. -var continueOnErrorFlag = 1; -opCounters = db.serverStatus().opcounters; -t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag) -assert(db.getLastError()); -assert.eq(opCounters.insert + 3, db.serverStatus().opcounters.insert); - -// -// 2. Update. -// -// - counted as 1 op, regardless of errors -// - -t.drop(); -t.insert({_id:0}); - -// Update, no error. -opCounters = db.serverStatus().opcounters; -t.update({_id:0}, {$set:{a:1}}); -assert(!db.getLastError()); -assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update); - -// Update, with error. -opCounters = db.serverStatus().opcounters; -t.update({_id:0}, {$set:{_id:1}}); -assert(db.getLastError()); -assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update); - -// -// 3. Delete. -// -// - counted as 1 op, regardless of errors -// - -t.drop(); -t.insert([{_id:0},{_id:1}]); - -// Delete, no error. -opCounters = db.serverStatus().opcounters; -t.remove({_id:0}); -assert(!db.getLastError()); -assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete); - -// Delete, with error. -opCounters = db.serverStatus().opcounters; -t.remove({_id:{$invalidOp:1}}); -assert(db.getLastError()); -assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete); - -// -// 4. Query. -// -// - mongod: counted as 1 op, regardless of errors -// - mongos: counted as 1 op if successful, else 0 -// - -t.drop(); -t.insert({_id:0}); - -// Query, no error. -opCounters = db.serverStatus().opcounters; -t.findOne(); -assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query); - -// Query, with error. -opCounters = db.serverStatus().opcounters; -assert.throws(function() { t.findOne({_id:{$invalidOp:1}}) }); -assert.eq(opCounters.query + (isMongos ? 0 : 1), db.serverStatus().opcounters.query); - -// -// 5. Getmore. -// -// - counted as 1 op per getmore issued, regardless of errors -// - -t.drop(); -t.insert([{_id:0},{_id:1},{_id:2}]); - -// Getmore, no error. 
-opCounters = db.serverStatus().opcounters; -t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore -assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query); -assert.eq(opCounters.getmore + 1, db.serverStatus().opcounters.getmore); - -// Getmore, with error (TODO implement when SERVER-5813 is resolved). - -// -// 6. Command. -// -// - unrecognized commands not counted -// - recognized commands counted as 1 op, regardless of errors -// - some (recognized) commands can suppress command counting (i.e. aren't counted as commands) -// - -t.drop(); -t.insert({_id:0}) - -// Command, recognized, no error. -opCounters = db.serverStatus().opcounters; -assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted - -// Command, recognized, with error. -opCounters = db.serverStatus().opcounters; -res = t.runCommand("count", {query:{$invalidOp:1}}); -assert.eq(0, res.ok); -assert.eq(opCounters.command + 2, - db.serverStatus().opcounters.command); // "serverStatus", "count" counted - -// Command, unrecognized. -opCounters = db.serverStatus().opcounters; -res = t.runCommand("command that doesn't exist"); -assert.eq(0, res.ok); -//assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted -// TODO Replace below with above when SERVER-9038 is resolved (mongos counts unrecognized commands) -assert.eq(opCounters.command + (isMongos ? 2 : 1), db.serverStatus().opcounters.command); - -// Command, recognized, counting suppressed (TODO implement when SERVER-9038 is resolved). - -// Restore 'db' var -db = lastDB; diff --git a/jstests/noPassthroughWithMongod/opcounters_write_cmd.js b/jstests/noPassthroughWithMongod/opcounters_write_cmd.js deleted file mode 100644 index 47a1bc63515..00000000000 --- a/jstests/noPassthroughWithMongod/opcounters_write_cmd.js +++ /dev/null @@ -1,166 +0,0 @@ -// Test that opcounters get incremented properly. - -var mongo = new Mongo(db.getMongo().host); -mongo.forceWriteMode("commands"); -var newdb = mongo.getDB(db.toString()); - -var t = newdb.opcounters; -var isMongos = ("isdbgrid" == newdb.runCommand("ismaster").msg); -var opCounters; -var res; - -assert(t.getDB().getMongo().useWriteCommands(), "test is not running with write commands") - -// -// Count ops attempted in write commands in mongod and mongos -// - -// -// 1. Insert. -// -// - unordered insert of N: -// counted as N ops, regardless of errors -// - ordered insert of N: -// counted as K + 1 ops, where K is number of docs successfully inserted, -// adding the failed attempt -// - -t.drop(); - -// Single insert, no error. -opCounters = newdb.serverStatus().opcounters; -res = t.insert({_id:0}); -assert.writeOK(res); -assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert); - -// Bulk insert, no error. -opCounters = newdb.serverStatus().opcounters; -res = t.insert([{_id:1},{_id:2}]) -assert.writeOK(res); -assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert); - -// Single insert, with error. -opCounters = newdb.serverStatus().opcounters; -res = t.insert({_id:0}) -assert.writeError(res); -assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert); - -// Bulk insert, with error, ordered. -opCounters = newdb.serverStatus().opcounters; -res = t.insert([{_id:3},{_id:3},{_id:4}]) -assert.writeError(res); -assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert); - -// Bulk insert, with error, unordered. 
-var continueOnErrorFlag = 1; -opCounters = newdb.serverStatus().opcounters; -res = t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag) -assert.writeError(res); -assert.eq(opCounters.insert + 3, newdb.serverStatus().opcounters.insert); - -// -// 2. Update. -// - -t.drop(); -t.insert({_id:0}); - -// Update, no error. -opCounters = newdb.serverStatus().opcounters; -res = t.update({_id:0}, {$set:{a:1}}); -assert.writeOK(res); -assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update); - -// Update, with error. -opCounters = newdb.serverStatus().opcounters; -res = t.update({_id:0}, {$set:{_id:1}}); -assert.writeError(res); -assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update); - -// -// 3. Delete. -// - -t.drop(); -t.insert([{_id:0},{_id:1}]); - -// Delete, no error. -opCounters = newdb.serverStatus().opcounters; -res = t.remove({_id:0}); -assert.writeOK(res); -assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete); - -// Delete, with error. -opCounters = newdb.serverStatus().opcounters; -res = t.remove({_id:{$invalidOp:1}}); -assert.writeError(res); -assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete); - -// -// 4. Query. -// -// - mongod: counted as 1 op, regardless of errors -// - mongos: counted as 1 op if successful, else 0 -// - -t.drop(); -t.insert({_id:0}); - -// Query, no error. -opCounters = newdb.serverStatus().opcounters; -t.findOne(); -assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query); - -// Query, with error. -opCounters = newdb.serverStatus().opcounters; -assert.throws(function() { t.findOne({_id:{$invalidOp:1}}) }); -assert.eq(opCounters.query + (isMongos ? 0 : 1), newdb.serverStatus().opcounters.query); - -// -// 5. Getmore. -// -// - counted as 1 op per getmore issued, regardless of errors -// - -t.drop(); -t.insert([{_id:0},{_id:1},{_id:2}]); - -// Getmore, no error. -opCounters = newdb.serverStatus().opcounters; -t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore -assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query); -assert.eq(opCounters.getmore + 1, newdb.serverStatus().opcounters.getmore); - -// Getmore, with error (TODO implement when SERVER-5813 is resolved). - -// -// 6. Command. -// -// - unrecognized commands not counted -// - recognized commands counted as 1 op, regardless of errors -// - some (recognized) commands can suppress command counting (i.e. aren't counted as commands) -// - -t.drop(); -t.insert({_id:0}) - -// Command, recognized, no error. -opCounters = newdb.serverStatus().opcounters; -assert.eq(opCounters.command + 1, newdb.serverStatus().opcounters.command); // "serverStatus" counted - -// Command, recognized, with error. -opCounters = newdb.serverStatus().opcounters; -res = t.runCommand("count", {query:{$invalidOp:1}}); -assert.eq(0, res.ok); -assert.eq(opCounters.command + 2, - newdb.serverStatus().opcounters.command); // "serverStatus", "count" counted - -// Command, unrecognized. -opCounters = newdb.serverStatus().opcounters; -res = t.runCommand("command that doesn't exist"); -assert.eq(0, res.ok); -//assert.eq(opCounters.command + 1, newdb.serverStatus().opcounters.command); // "serverStatus" counted -// TODO Replace below with above when SERVER-9038 is resolved (mongos counts unrecognized commands) -assert.eq(opCounters.command + (isMongos ? 2 : 1), newdb.serverStatus().opcounters.command); - -// Command, recognized, counting suppressed (TODO implement when SERVER-9038 is resolved). 
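
The opcounter checks in the file deleted above all reduce to one pattern: snapshot db.serverStatus().opcounters, perform a write through the write-command path, and compare the delta. A minimal sketch of that pattern, assuming a single otherwise-idle mongod (concurrent writes would skew the counters) and an illustrative collection name:

    // Open a dedicated connection that routes writes through write commands.
    var conn = new Mongo(db.getMongo().host);
    conn.forceWriteMode("commands");
    var testDB = conn.getDB(db.toString());
    var c = testDB.opcounters_sketch;              // hypothetical collection name
    c.drop();

    var before = testDB.serverStatus().opcounters;
    assert.writeOK(c.insert({ _id: 0 }));
    // One successful insert should bump the insert counter by exactly one.
    assert.eq(before.insert + 1, testDB.serverStatus().opcounters.insert);

The error-path variants follow the same shape, with assert.writeError(res) on the returned write result in place of assert.writeOK.
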
diff --git a/jstests/noPassthroughWithMongod/parallel_collection_scan.js b/jstests/noPassthroughWithMongod/parallel_collection_scan.js index d745f088376..44e5d361e45 100644 --- a/jstests/noPassthroughWithMongod/parallel_collection_scan.js +++ b/jstests/noPassthroughWithMongod/parallel_collection_scan.js @@ -6,10 +6,11 @@ s = ""; while ( s.length < 10000 ) s += "."; +var bulk = t.initializeUnorderedBulkOp(); for ( i = 0; i < 8000; i++ ) { - t.insert( { x : i, s : s } ); + bulk.insert({ x: i, s: s }); } - +assert.writeOK(bulk.execute()); function iterateSliced() { var res = t.runCommand( "parallelCollectionScan", { numCursors : 3 } ); diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js index e7dfe9bfff1..7492e36c5b9 100644 --- a/jstests/noPassthroughWithMongod/remove9.js +++ b/jstests/noPassthroughWithMongod/remove9.js @@ -5,8 +5,7 @@ js = "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i} pid = startMongoProgramNoConnect( "mongo" , "--eval" , js , db ? db.getMongo().host : null ); for( var i = 0; i < 10000; ++i ) { - t.remove( {i:Random.randInt( 10000 )} ); - assert.automsg( "!db.getLastError()" ); + assert.writeOK(t.remove( { i: Random.randInt( 10000 )} )); } -stopMongoProgramByPid( pid ); \ No newline at end of file +stopMongoProgramByPid( pid ); diff --git a/jstests/noPassthroughWithMongod/sharding_balance1.js b/jstests/noPassthroughWithMongod/sharding_balance1.js index 761dbf0f5a3..32bb8ba508e 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance1.js +++ b/jstests/noPassthroughWithMongod/sharding_balance1.js @@ -15,12 +15,13 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; +var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 20 * 1024 * 1024 ) ){ - db.foo.insert( { _id : num++ , s : bigString } ); + bulk.insert({ _id: num++, s: bigString }); inserted += bigString.length; } +assert.writeOK(bulk.execute()); -db.getLastError(); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); assert.lt( 20 , s.config.chunks.count() , "setup2" ); diff --git a/jstests/noPassthroughWithMongod/sharding_balance2.js b/jstests/noPassthroughWithMongod/sharding_balance2.js index 083494bec37..28569e2b031 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance2.js +++ b/jstests/noPassthroughWithMongod/sharding_balance2.js @@ -26,12 +26,12 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; +var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 40 * 1024 * 1024 ) ){ - db.foo.insert( { _id : num++ , s : bigString } ); + bulk.insert({ _id: num++, s: bigString }); inserted += bigString.length; } - -db.getLastError(); +assert.writeOK(bulk.execute()); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); assert.lt( 20 , s.config.chunks.count() , "setup2" ); diff --git a/jstests/noPassthroughWithMongod/sharding_balance3.js b/jstests/noPassthroughWithMongod/sharding_balance3.js index 591b869ba53..03787c756ac 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance3.js +++ b/jstests/noPassthroughWithMongod/sharding_balance3.js @@ -16,12 +16,13 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; +var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 40 * 1024 * 1024 ) ){ - db.foo.insert( { _id : num++ , s : bigString } ); + bulk.insert({ _id: num++, s: bigString }); inserted += bigString.length; } +assert.writeOK(bulk.execute()); -db.getLastError(); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); 
assert.lt( 20 , s.config.chunks.count() , "setup2" ); diff --git a/jstests/noPassthroughWithMongod/sharding_balance4.js b/jstests/noPassthroughWithMongod/sharding_balance4.js index f1c27afa0bb..c2a3d744964 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance4.js +++ b/jstests/noPassthroughWithMongod/sharding_balance4.js @@ -35,12 +35,12 @@ counts = {} // -function doUpdate( includeString, optionalId ){ +function doUpdate( bulk, includeString, optionalId ){ var up = { $inc : { x : 1 } } if ( includeString ) up["$set"] = { s : bigString }; var myid = optionalId == undefined ? Random.randInt( N ) : optionalId - db.foo.update( { _id : myid } , up , true ); + bulk.find({ _id : myid }).upsert().update( up ); counts[myid] = ( counts[myid] ? counts[myid] : 0 ) + 1; return myid; @@ -48,14 +48,15 @@ function doUpdate( includeString, optionalId ){ // Initially update all documents from 1 to N, otherwise later checks can fail because no document // previously existed +var bulk = db.foo.initializeUnorderedBulkOp(); for ( i = 0; i < N; i++ ){ - doUpdate( true, i ) + doUpdate( bulk, true, i ); } for ( i=0; i .99 ){ - db.getLastError() - check( "random late check" ); // SERVER-1430 - } + assert.eq( 1, res.nModified, + "diff myid: " + myid + " 2: " + res.toString() + "\n" + + " correct count is: " + counts[myid] + + " db says count is: " + tojson(db.foo.findOne({ _id: myid })) ); var x = s.chunkCounts( "foo" ) if ( Math.random() > .999 ) diff --git a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js index e42d29effd4..0c897ccfc72 100644 --- a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js +++ b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js @@ -14,12 +14,14 @@ while ( bigString.length < 10000 ) inserted = 0; num = 0; + +var bulk = db.foo.initializeUnorderedBulkOp(); while ( inserted < ( 20 * 1024 * 1024 ) ){ - db.foo.insert( { _id : Math.random() , s : bigString } ); + bulk.insert({ _id: Math.random(), s: bigString }); inserted += bigString.length; } +assert.writeOK(bulk.execute()); -db.getLastError(); s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); assert.lt( 20 , s.config.chunks.count() , "setup2" ); diff --git a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js index 414b6d57925..32278c089f3 100644 --- a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js +++ b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js @@ -22,11 +22,11 @@ for( var i = 0; i < nsq; i++ ) data += data dataObj = {} for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data +var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < 40; i++ ) { - if(i != 0 && i % 10 == 0) printjson( coll.stats() ) - coll.save({ data : dataObj }) + bulk.insert({ data: dataObj }); } -db.getLastError(); +assert.writeOK(bulk.execute()); assert.eq( 40 , coll.count() , "prep1" ); @@ -46,9 +46,9 @@ assert.soon( // On *extremely* slow or variable systems, we've seen migrations fail in the critical section and // kill the server. Do an explicit check for this. SERVER-8781 // TODO: Remove once we can better specify what systems to run what tests on. 
- try { - assert.eq(null, shardA.getDB("admin").getLastError()); - assert.eq(null, shardB.getDB("admin").getLastError()); + try { + assert.commandWorked(shardA.getDB("admin").runCommand({ ping: 1 })); + assert.commandWorked(shardB.getDB("admin").runCommand({ ping: 1 })); } catch(e) { print("An error occurred contacting a shard during balancing," + diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js index 2948dbef3f9..c4484356dd4 100644 --- a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js +++ b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js @@ -23,11 +23,11 @@ numDocs = 20 * docsPerChunk print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs ) -for ( i=0; i=BIG; i-- ) { - am.a.update( { _id : i } , { $set : { x : 1 } } ) - if ( i == N ) { - am.getLastError() - assert.lt( as.a.count() , BIG , "B1" ) - print( "NOW : " + as.a.count() ) - } +bulk = am.a.initializeUnorderedBulkOp(); +for (i = N - 1; i >= BIG; i--) { + bulk.find({ _id: i }).update({ $set: { x: 1 }}); } +assert.writeOK(bulk.execute()); check( "B" ) rt.stop(); - - - - diff --git a/jstests/repl/repl12.js b/jstests/repl/repl12.js index 8db4b75e7a9..4bfaf17d5d7 100644 --- a/jstests/repl/repl12.js +++ b/jstests/repl/repl12.js @@ -17,7 +17,6 @@ for( i = 0; i < 3; ++i ) { m.getDB( a ).c.save( {} ); a += "a"; } -m.getDB(a).getLastError(); //print("\n\n\n DB NAMES MASTER:"); //printjson(m.getDBNames()); diff --git a/jstests/repl/repl13.js b/jstests/repl/repl13.js index e8a80966dab..78daae24c32 100644 --- a/jstests/repl/repl13.js +++ b/jstests/repl/repl13.js @@ -12,10 +12,11 @@ m = rt.start( true ); mc = m.getDB( 'd' )[ 'c' ]; // Insert some documents with a:{} fields. -for( i = 0; i < 100000; ++i ) { - mc.save( {_id:i,a:{}} ); +var bulk = mc.initializeUnorderedBulkOp(); +for(var i = 0; i < 100000; ++i) { + bulk.insert({ _id: i, a: {}}); } -m.getDB( 'd' ).getLastError(); +assert.writeOK(bulk.execute()); s = rt.start( false ); sc = s.getDB( 'd' )[ 'c' ]; @@ -26,11 +27,13 @@ assert.soon( function() { debug( sc.count() ); return sc.count() > 0; } ); // Update documents that will be cloned last with the intent that an updated version will be cloned. // This may cause an assertion when an update that was successfully applied to the original version // of a document is replayed against an updated version of the same document. +bulk = mc.initializeUnorderedBulkOp(); for( i = 99999; i >= 90000; --i ) { // If the document is cloned as {a:1}, the {$set:{'a.b':1}} modifier will uassert. - mc.update( {_id:i}, {$set:{'a.b':1}} ); - mc.update( {_id:i}, {$set:{a:1}} ); + bulk.find({ _id: i }).update({ $set: { 'a.b': 1 }}); + bulk.find({ _id: i }).update({ $set: { a: 1 }}); } +assert.writeOK(bulk.execute()); // The initial sync completes and subsequent writes succeed, in spite of any assertions that occur // when the update operations above are replicated. 
diff --git a/jstests/repl/repl17.js b/jstests/repl/repl17.js index 8011d974328..c7a7be35ffc 100644 --- a/jstests/repl/repl17.js +++ b/jstests/repl/repl17.js @@ -11,7 +11,6 @@ md = master.getDB( 'd' ); for( i = 0; i < 1000; ++i ) { md[ ''+i ].save( {} ); } -md.getLastError(); slave = rt.start( false ); sd = slave.getDB( 'd' ); diff --git a/jstests/repl/repl19.js b/jstests/repl/repl19.js index 71d4335014a..a655d522bae 100644 --- a/jstests/repl/repl19.js +++ b/jstests/repl/repl19.js @@ -13,8 +13,7 @@ for( i = 0; i < 100000; ++i ) { } targetId = 1000*1000; -mc.insert( { _id:targetId, val:[ 1, 2, 3 ] } ); -master.getDB( 'd' ).getLastError(); +assert.writeOK(mc.insert({ _id: targetId, val: [ 1, 2, 3 ] })); slave = rt.start( false ); sc = slave.getDB( 'd' )[ 'c' ]; diff --git a/jstests/repl/repl20.js b/jstests/repl/repl20.js index 02e50f58f1f..c30ef8d6f3c 100644 --- a/jstests/repl/repl20.js +++ b/jstests/repl/repl20.js @@ -13,8 +13,7 @@ for( i = 0; i < 100000; ++i ) { } targetId = 1000*1000; -mc.insert( { _id:targetId, val:[ 1 ] } ); -master.getDB( 'd' ).getLastError(); +assert.writeOK(mc.insert({ _id: targetId, val: [ 1 ] })); slave = rt.start( false ); sc = slave.getDB( 'd' )[ 'c' ]; diff --git a/jstests/repl/repl21.js b/jstests/repl/repl21.js index a94a4b5b779..87c0c7fdd02 100644 --- a/jstests/repl/repl21.js +++ b/jstests/repl/repl21.js @@ -8,13 +8,14 @@ rt = new ReplTest( "repl21tests" ); master = rt.start( true ); mc = master.getDB( 'd' )[ 'c' ]; -for( i = 0; i < 100000; ++i ) { - mc.insert( { _id:i, z:i } ); +var bulk = mc.initializeUnorderedBulkOp(); +for(var i = 0; i < 100000; ++i) { + bulk.insert({ _id: i, z: i }); } targetId = 1000*1000; -mc.insert( { _id:targetId, val:[ 1 ] } ); -master.getDB( 'd' ).getLastError(); +bulk.insert({ _id: targetId, val: [ 1 ] }); +assert.writeOK(bulk.execute()); slave = rt.start( false ); sc = slave.getDB( 'd' )[ 'c' ]; @@ -36,4 +37,4 @@ assert.soon( function() { return sc.count( { _id:'sentinel' } ) > 0; } ); assert.eq( [ 1, 3 ], mc.findOne( { _id:targetId } ).val ); assert.eq( [ 1, 3 ], sc.findOne( { _id:targetId } ).val ); -} \ No newline at end of file +} diff --git a/jstests/repl/repl5.js b/jstests/repl/repl5.js index eda48496656..aeba7eb1095 100644 --- a/jstests/repl/repl5.js +++ b/jstests/repl/repl5.js @@ -14,8 +14,10 @@ doTest = function(signal, extraOpts) { m = rt.start( true ); ma = m.getDB( "a" ).a; + var bulk = ma.initializeUnorderedBulkOp(); for( i = 0; i < 10000; ++i ) - ma.save( { i:i } ); + bulk.insert({ i: i }); + assert.writeOK(bulk.execute()); s = rt.start(false, extraOpts); soonCountAtLeast( "a", "a", 1 ); diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js index b65085c5702..f10b6b777bc 100644 --- a/jstests/replsets/auth1.js +++ b/jstests/replsets/auth1.js @@ -134,19 +134,18 @@ wait(function() { print("add some more data 1"); master.auth("bar", "baz"); +bulk = master.foo.initializeUnorderedBulkOp(); for (var i=0; i<1000; i++) { - master.foo.insert({x:i, foo : "bar"}); + bulk.insert({ x: i, foo: "bar" }); } -var result = master.runCommand({getlasterror:1, w:2, wtimeout:60000}); -printjson(result); - +assert.writeOK(bulk.execute({ w: 2 })); print("resync"); rs.restart(0, {"keyFile" : path+"key1"}); print("add some more data 2"); -var bulk = master.foo.initializeUnorderedBulkOp(); +bulk = master.foo.initializeUnorderedBulkOp(); for (var i=0; i<1000; i++) { bulk.insert({ x: i, foo: "bar" }); } diff --git a/jstests/replsets/downstream.js b/jstests/replsets/downstream.js index 795e6671d46..f50716f0340 100755 --- 
a/jstests/replsets/downstream.js +++ b/jstests/replsets/downstream.js @@ -23,11 +23,13 @@ var conn = repset.getMaster() var db = conn.getDB('test') // Add data to it -for (var i = 0; i < N; i++) - db['foo'].insert({x: i, text: Text}) - +var bulk = db.foo.initializeUnorderedBulkOp(); +for (var i = 0; i < N; i++) { + bulk.insert({ x: i, text: Text }); +} // wait to be copied to at least one secondary (BUG hangs here) -db.getLastError(2) +assert.writeOK(bulk.execute({ w: 2 })); + print('getlasterror_w2.js SUCCESS') } diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js index da8979bb34c..20364381dd0 100644 --- a/jstests/replsets/replset2.js +++ b/jstests/replsets/replset2.js @@ -77,7 +77,6 @@ doTest = function (signal) { s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 }); assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1"); - // Test getlasterror with large insert print("replset2.js **** Try inserting many records ****") try { var bigData = new Array(2000).toString(); diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js index 55e68768eee..074aa7b9dda 100644 --- a/jstests/replsets/tags2.js +++ b/jstests/replsets/tags2.js @@ -1,4 +1,4 @@ -// Change a getLastErrorMode from 2 to 3 servers +// Change a write concern mode from 2 to 3 servers var host = getHostName(); var replTest = new ReplSetTest( {name: "rstag", nodes: 4, startPort: 31000} ); diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js index e74c6c4bf4d..8ffa1664526 100644 --- a/jstests/sharding/movechunk_include.js +++ b/jstests/sharding/movechunk_include.js @@ -13,16 +13,16 @@ function setupMoveChunkTest(st) { str += "asdasdsdasdasdasdas"; } - var data = num = 0; + var data = 0; + var num = 0; //Insert till you get to 10MB of data + var bulk = testcoll.initializeUnorderedBulkOp(); while ( data < ( 1024 * 1024 * 10 ) ) { - testcoll.insert( { _id : num++ , s : str } ) - data += str.length + bulk.insert({ _id: num++, s: str }); + data += str.length; } - - //Flush and wait - testdb.getLastError() + assert.writeOK(bulk.execute()); var stats = st.chunkCounts( "foo" ) var to = "" diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js index 52ce36a83e0..3fc528293c8 100644 --- a/jstests/sharding/multi_write_target.js +++ b/jstests/sharding/multi_write_target.js @@ -25,20 +25,15 @@ st.printShardingStatus(); jsTest.log("Testing multi-update..."); // Put data on all shards -st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 }); -assert.gleOK(st.shard0.getCollection(coll.toString()).getDB().getLastErrorObj()); -st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 }); -assert.gleOK(st.shard1.getCollection(coll.toString()).getDB().getLastErrorObj()); +assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 })); +assert.writeOK(st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 })); // Data not in chunks -st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 }); -assert.gleOK(st.shard2.getCollection(coll.toString()).getDB().getLastErrorObj()); +assert.writeOK(st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 })); // Non-multi-update doesn't work without shard key -coll.update({ x : 1 }, { $set : { updated : true } }, { multi : false }); -assert.gleError(coll.getDB().getLastErrorObj()); +assert.writeError(coll.update({ x : 1 }, { $set : { updated : true } }, { multi : false })); -coll.update({ x : 1 
}, { $set : { updated : true } }, { multi : true }); -assert.gleOK(coll.getDB().getLastErrorObj()); +assert.writeOK(coll.update({ x : 1 }, { $set : { updated : true } }, { multi : true })); // Ensure update goes to *all* shards assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updated : true })); @@ -46,8 +41,7 @@ assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({ updated : tr assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updated : true })); // _id update works, and goes to all shards -coll.update({ _id : 0 }, { $set : { updatedById : true } }, { multi : false }); -assert.gleOK(coll.getDB().getLastErrorObj()); +assert.writeOK(coll.update({ _id : 0 }, { $set : { updatedById : true } }, { multi : false })); // Ensure _id update goes to *all* shards assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updatedById : true })); @@ -56,11 +50,9 @@ assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updatedById jsTest.log("Testing multi-delete..."); // non-multi-delete doesn't work without shard key -coll.remove({ x : 1 }, { justOne : true }); -assert.gleError(coll.getDB().getLastErrorObj()); +assert.writeError(coll.remove({ x : 1 }, { justOne : true })); -coll.remove({ x : 1 }, { justOne : false }); -assert.gleOK(coll.getDB().getLastErrorObj()); +assert.writeOK(coll.remove({ x : 1 }, { justOne : false })); // Ensure delete goes to *all* shards assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x : 1 })); @@ -68,16 +60,12 @@ assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({ x : 1 })); assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({ x : 1 })); // Put more on all shards -st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 }); -assert.gleOK(st.shard0.getCollection(coll.toString()).getDB().getLastErrorObj()); -st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 }); -assert.gleOK(st.shard1.getCollection(coll.toString()).getDB().getLastErrorObj()); +assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 })); +assert.writeOK(st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 })); // Data not in chunks -st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 }); -assert.gleOK(st.shard2.getCollection(coll.toString()).getDB().getLastErrorObj()); +assert.writeOK(st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 })); -coll.remove({ _id : 0 }, { justOne : true }); -assert.gleOK(coll.getDB().getLastErrorObj()); +assert.writeOK(coll.remove({ _id : 0 }, { justOne : true })); // Ensure _id delete goes to *all* shards assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x : 1 })); diff --git a/jstests/slow1/sharding_multiple_collections.js b/jstests/slow1/sharding_multiple_collections.js index 8cd44a9f556..7c2cb1720cf 100644 --- a/jstests/slow1/sharding_multiple_collections.js +++ b/jstests/slow1/sharding_multiple_collections.js @@ -12,11 +12,14 @@ S = "" while ( S.length < 500 ) S += "123123312312"; +var bulk = db.foo.initializeUnorderedBulkOp(); +var bulk2 = db.bar.initializeUnorderedBulkOp(); for ( i=0; i= 2 ) - break; + if( pass >= 2 ) + break; print("32bit.js PASS #" + pass); pass++; - + t = mydb.colltest_32bit; print("seed=" + seed); - + t.insert({x:1}); t.ensureIndex({a:1}); t.ensureIndex({b:1}, true); t.ensureIndex({x:1}); if( Math.random() < 0.3 ) - t.ensureIndex({c:1}); + t.ensureIndex({c:1}); t.ensureIndex({d:1}); 
t.ensureIndex({e:1}); t.ensureIndex({f:1}); - + big = 'a b'; big = big + big; k = big; big = big + big; big = big + big; big = big + big; - + a = 0; c = 'kkk'; var start = new Date(); - while( 1 ) { - b = Math.random(seed); - d = c + -a; + while( 1 ) { + b = Math.random(seed); + d = c + -a; f = Math.random(seed) + a; a++; - cc = big; - if( Math.random(seed) < .1 ) - cc = null; - t.insert({a:a,b:b,c:cc,d:d,f:f}); - if( Math.random(seed) < 0.01 ) { - - if( mydb.getLastError() ) { - /* presumably we have mmap error on 32 bit. try a few more manipulations attempting to break things */ - t.insert({a:33,b:44,c:55,d:66,f:66}); - t.insert({a:33,b:44000,c:55,d:66}); - t.insert({a:33,b:440000,c:55}); - t.insert({a:33,b:4400000}); - t.update({a:20},{'$set':{c:'abc'}}); - t.update({a:21},{'$set':{c:'aadsfbc'}}); - t.update({a:22},{'$set':{c:'c'}}); - t.update({a:23},{'$set':{b:cc}}); - t.remove({a:22}); - break; - } - - t.remove({a:a}); - t.remove({b:Math.random(seed)}); - t.insert({e:1}); - t.insert({f:'aaaaaaaaaa'}); - + cc = big; + if( Math.random(seed) < .1 ) + cc = null; + var res = t.insert({ a: a, b: b, c: cc, d: d, f: f }); + if( Math.random(seed) < 0.01 ) { + if (res.hasWriteError()) { + // Presumably we have mmap error on 32 bit. try a few more manipulations + // attempting to break things. + t.insert({a:33,b:44,c:55,d:66,f:66}); + t.insert({a:33,b:44000,c:55,d:66}); + t.insert({a:33,b:440000,c:55}); + t.insert({a:33,b:4400000}); + t.update({a:20},{'$set':{c:'abc'}}); + t.update({a:21},{'$set':{c:'aadsfbc'}}); + t.update({a:22},{'$set':{c:'c'}}); + t.update({a:23},{'$set':{b:cc}}); + t.remove({a:22}); + break; + } + + t.remove({a:a}); + t.remove({b:Math.random(seed)}); + t.insert({e:1}); + t.insert({f:'aaaaaaaaaa'}); + if( Math.random() < 0.00001 ) { print("remove cc"); t.remove({c:cc}); } if( Math.random() < 0.0001 ) { print("update cc"); t.update({c:cc},{'$set':{c:1}},false,true); } if( Math.random() < 0.00001 ) { print("remove e"); t.remove({e:1}); } - } - if (a == 20000 ) { - var delta_ms = (new Date())-start; - // 2MM / 20000 = 100. 1000ms/sec. - var eta_secs = delta_ms * (100 / 1000); - print("32bit.js eta_secs:" + eta_secs); - if( eta_secs > 1000 ) { - print("32bit.js machine is slow, stopping early. a:" + a); - mydb.dropDatabase(); - return; - } - } - if( a % 100000 == 0 ) { - print(a); - // on 64 bit we won't error out, so artificially stop. on 32 bit we will hit mmap limit ~1.6MM but may - // vary by a factor of 2x by platform - if( a >= 2200000 ) { + } + if (a == 20000 ) { + var delta_ms = (new Date())-start; + // 2MM / 20000 = 100. 1000ms/sec. + var eta_secs = delta_ms * (100 / 1000); + print("32bit.js eta_secs:" + eta_secs); + if( eta_secs > 1000 ) { + print("32bit.js machine is slow, stopping early. a:" + a); + mydb.dropDatabase(); + return; + } + } + if( a % 100000 == 0 ) { + print(a); + // on 64 bit we won't error out, so artificially stop. 
on 32 bit we will hit + // mmap limit ~1.6MM but may vary by a factor of 2x by platform + if( a >= 2200000 ) { mydb.dropDatabase(); - return; - } + return; + } } - } + } print("count: " + t.count()); var res = t.validate(); @@ -105,11 +105,11 @@ function f() { print("32bit.js FAIL validating"); print(res.result); printjson(res); - //mydb.dropDatabase(); - throw "fail validating 32bit.js"; + //mydb.dropDatabase(); + throw "fail validating 32bit.js"; } - mydb.dropDatabase(); + mydb.dropDatabase(); } print("32bit.js SUCCESS"); diff --git a/jstests/slow2/conc_update.js b/jstests/slow2/conc_update.js index 0d778df047e..4ee5bd22ca7 100644 --- a/jstests/slow2/conc_update.js +++ b/jstests/slow2/conc_update.js @@ -6,46 +6,42 @@ db.dropDatabase(); NRECORDS=3*1024*1024 print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)") -for (i=0; i<(NRECORDS); i++) { - db.conc.insert({x:i}) - if ((i%(1024*1024))==0) - print("loaded " + i/(1024*1024) + " mibi-records") +var bulk = db.conc.initializeUnorderedBulkOp(); +for (var i = 0; i < NRECORDS; i++) { + bulk.insert({ x: i }); } +assert.writeOK(bulk.execute()); print("making an index (this will take a while)") db.conc.ensureIndex({x:1}) var c1=db.conc.count({x:{$lt:NRECORDS}}) -updater=startParallelShell("db=db.getSisterDB('concurrency');\ - db.concflag.insert( {inprog:true} );\ - sleep(20);\ - db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\ - e=db.getLastError();\ - print('update error: '+ e);\ - db.concflag.update({},{inprog:false});\ - assert.eq(e, null, 'update failed');"); +updater = startParallelShell("db = db.getSisterDB('concurrency');\ + db.concflag.insert({ inprog: true });\ + sleep(20);\ + assert.writeOK(db.conc.update({}, \ + { $inc: { x: " + NRECORDS + "}}, false, true)); \ + assert.writeOK(db.concflag.update({}, { inprog: false }));"); -assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } , +assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } , "wait for fork" , 30000 , 1 ); querycount=0; decrements=0; misses=0 -assert.soon( - function(){ - c2=db.conc.count({x:{$lt:NRECORDS}}) - print(c2) - querycount++; - if (c2