path: root/jstests
author     Randolph Tan <randolph@10gen.com>    2014-04-25 14:04:36 -0400
committer  Randolph Tan <randolph@10gen.com>    2014-05-06 16:32:44 -0400
commit     87dc3ae516e1d12a632dc604710661e38ed7b3dd (patch)
tree       3a483a3d0c38ce00a7f4d7dba0e9cba7f7eba5f3 /jstests
parent     6b945ec15c61f6bd4bfbaf382624d886ec8441d2 (diff)
download   mongo-87dc3ae516e1d12a632dc604710661e38ed7b3dd.tar.gz
SERVER-13741 Migrate remaining tests to use write commands
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/auth/lib/commands_lib.js | 1
-rw-r--r--  jstests/core/opcounters_write_cmd.js (renamed from jstests/noPassthroughWithMongod/opcounters_write_cmd.js) | 1
-rw-r--r--  jstests/disk/diskfull.js | 10
-rw-r--r--  jstests/disk/killall.js | 3
-rw-r--r--  jstests/disk/preallocate_directoryperdb.js | 16
-rw-r--r--  jstests/disk/quota.js | 10
-rw-r--r--  jstests/disk/quota2.js | 16
-rwxr-xr-x  jstests/dur/a_quick.js | 6
-rw-r--r--  jstests/dur/closeall.js | 39
-rw-r--r--  jstests/dur/diskfull.js | 11
-rw-r--r--  jstests/dur/dropdb.js | 4
-rwxr-xr-x  jstests/dur/dur1.js | 6
-rwxr-xr-x  jstests/dur/dur1_tool.js | 7
-rw-r--r--  jstests/dur/indexbg2.js | 4
-rwxr-xr-x  jstests/dur/manyRestart.js | 6
-rw-r--r--  jstests/dur/md5.js | 7
-rwxr-xr-x  jstests/dur/oplog.js | 3
-rw-r--r--  jstests/gle/block2.js | 58
-rw-r--r--  jstests/gle/gle_after_split_failure_during_migration.js (renamed from jstests/noPassthrough/gle_after_split_failure_during_migration.js) | 0
-rw-r--r--  jstests/gle/opcounters_legacy.js (renamed from jstests/noPassthroughWithMongod/opcounters_legacy.js) | 1
-rw-r--r--  jstests/gle/sync1.js | 57
-rw-r--r--  jstests/gle/sync4.js | 26
-rw-r--r--  jstests/gle/sync8.js | 21
-rw-r--r--  jstests/libs/geo_near_random.js | 46
-rwxr-xr-x  jstests/misc/biginsert.js | 18
-rw-r--r--  jstests/noPassthrough/disk_reuse1.js | 27
-rw-r--r--  jstests/noPassthrough/geo_full.js | 739
-rw-r--r--  jstests/noPassthrough/geo_mnypts_plus_fields.js | 10
-rw-r--r--  jstests/noPassthrough/indexbg1.js | 30
-rw-r--r--  jstests/noPassthrough/indexbg2.js | 2
-rw-r--r--  jstests/noPassthrough/query_yield1.js | 8
-rw-r--r--  jstests/noPassthrough/query_yield2.js | 8
-rw-r--r--  jstests/noPassthrough/repair2.js | 19
-rw-r--r--  jstests/noPassthrough/sync1.js | 49
-rw-r--r--  jstests/noPassthrough/sync4.js | 19
-rw-r--r--  jstests/noPassthrough/sync8.js | 13
-rw-r--r--  jstests/noPassthrough/update_server-5552.js | 10
-rw-r--r--  jstests/noPassthrough/update_yield1.js | 7
-rw-r--r--  jstests/noPassthroughWithMongod/autosplit_heuristics.js | 10
-rw-r--r--  jstests/noPassthroughWithMongod/background.js | 46
-rw-r--r--  jstests/noPassthroughWithMongod/balance_repl.js | 7
-rw-r--r--  jstests/noPassthroughWithMongod/balance_tags1.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/balance_tags2.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/btreedel.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/bulk_shard_insert.js | 7
-rw-r--r--  jstests/noPassthroughWithMongod/capped4.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/fsync2.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/geo_axis_aligned.js | 158
-rw-r--r--  jstests/noPassthroughWithMongod/geo_mnypts.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/geo_polygon.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/index_check10.js | 13
-rw-r--r--  jstests/noPassthroughWithMongod/index_check9.js | 15
-rw-r--r--  jstests/noPassthroughWithMongod/index_hammer1.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/index_killop.js | 14
-rw-r--r--  jstests/noPassthroughWithMongod/index_multi.js | 65
-rw-r--r--  jstests/noPassthroughWithMongod/index_retry.js | 22
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_drop.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_interrupts.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_secondary.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js | 7
-rw-r--r--  jstests/noPassthroughWithMongod/large_chunk.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js | 8
-rw-r--r--  jstests/noPassthroughWithMongod/mr_shard_version.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/no_balance_collection.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/parallel_collection_scan.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/remove9.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance1.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance2.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance3.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance4.js | 35
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_migrateBigObject.js | 12
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js | 8
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js | 17
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js | 10
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_rs1.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_rs2.js | 5
-rw-r--r--  jstests/noPassthroughWithMongod/ttl1.js | 1
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl.js | 11
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl_maintenance.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_sharded.js | 7
-rw-r--r--  jstests/quota/quota1.js | 51
-rw-r--r--  jstests/repl/basic1.js | 43
-rw-r--r--  jstests/repl/block1.js | 3
-rw-r--r--  jstests/repl/block2.js | 25
-rw-r--r--  jstests/repl/drop_dups.js | 24
-rw-r--r--  jstests/repl/master1.js | 4
-rw-r--r--  jstests/repl/mastermaster1.js | 55
-rw-r--r--  jstests/repl/mod_move.js | 37
-rw-r--r--  jstests/repl/repl12.js | 1
-rw-r--r--  jstests/repl/repl13.js | 13
-rw-r--r--  jstests/repl/repl17.js | 1
-rw-r--r--  jstests/repl/repl19.js | 3
-rw-r--r--  jstests/repl/repl20.js | 3
-rw-r--r--  jstests/repl/repl21.js | 11
-rw-r--r--  jstests/repl/repl5.js | 4
-rw-r--r--  jstests/replsets/auth1.js | 9
-rwxr-xr-x  jstests/replsets/downstream.js | 10
-rw-r--r--  jstests/replsets/replset2.js | 1
-rw-r--r--  jstests/replsets/tags2.js | 2
-rw-r--r--  jstests/sharding/movechunk_include.js | 12
-rw-r--r--  jstests/sharding/multi_write_target.js | 36
-rw-r--r--  jstests/slow1/sharding_multiple_collections.js | 9
-rwxr-xr-x  jstests/slow2/32bit.js | 118
-rw-r--r--  jstests/slow2/conc_update.js | 48
-rw-r--r--  jstests/slow2/dur_big_atomic_update.js | 21
-rw-r--r--  jstests/slow2/mr_during_migrate.js | 6
-rw-r--r--  jstests/slow2/replsets_killop.js | 10
-rw-r--r--  jstests/slow2/replsets_prefetch_stress.js | 8
-rw-r--r--  jstests/slow2/rollback4.js | 7
-rw-r--r--  jstests/ssl/libs/ssl_helpers.js | 7
-rw-r--r--  jstests/ssl/sharding_with_x509.js | 26
-rw-r--r--  jstests/tool/dumpfilename1.js | 5
-rw-r--r--  jstests/tool/dumprestoreWithNoOptions.js | 10
-rw-r--r--  jstests/tool/dumprestore_auth3.js | 3
-rw-r--r--  jstests/tool/exportimport_bigarray.js | 11
116 files changed, 1189 insertions, 1350 deletions
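Note on the pattern repeated throughout the hunks below: a fire-and-forget write followed by db.getLastError() becomes a write whose returned result is asserted on directly, and tight insert loops become a single bulk operation. A minimal mongo-shell sketch of the idiom (the collection db.foo and the w:2 write concern are illustrative assumptions, not taken from the patch):

    // Legacy write mode: fire-and-forget write, then poll getLastError for the outcome.
    db.foo.insert({ _id: 1 });
    assert(!db.getLastError());

    // Write-command mode: the write itself returns a result that can be asserted on.
    assert.writeOK(db.foo.insert({ _id: 2 }));
    assert.writeError(db.foo.insert({ _id: 2 }));  // duplicate _id, expected to fail

    // Looped single-document inserts become one unordered bulk operation.
    var bulk = db.foo.initializeUnorderedBulkOp();
    for (var i = 100; i < 1100; i++) {
        bulk.insert({ _id: i });
    }
    assert.writeOK(bulk.execute());

    // Replication/journaling guarantees move from getLastError(w, wtimeout)
    // onto the write itself via a writeConcern option (w: 2 assumes a replica set).
    assert.writeOK(db.foo.insert({ _id: 3 }, { writeConcern: { w: 2 } }));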
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index d318d6467b3..8a00aaa4ae5 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -1770,7 +1770,6 @@ var authCommandsLib = {
command: {renameCollection: firstDbName + ".x", to: secondDbName + ".y"},
setup: function (db) {
db.getSisterDB(firstDbName).x.save( {} );
- db.getSisterDB(firstDbName).getLastError();
db.getSisterDB(adminDbName).runCommand({movePrimary: firstDbName, to: shard0name});
db.getSisterDB(adminDbName).runCommand({movePrimary: secondDbName, to: shard0name});
},
diff --git a/jstests/noPassthroughWithMongod/opcounters_write_cmd.js b/jstests/core/opcounters_write_cmd.js
index 47a1bc63515..88a5c65b4c3 100644
--- a/jstests/noPassthroughWithMongod/opcounters_write_cmd.js
+++ b/jstests/core/opcounters_write_cmd.js
@@ -1,4 +1,5 @@
// Test that opcounters get incremented properly.
+// Legacy write mode test also available at jstests/gle.
var mongo = new Mongo(db.getMongo().host);
mongo.forceWriteMode("commands");
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index deaab67a969..f236a9d4bc7 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -22,16 +22,16 @@ if ( doIt ) {
m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1", '--nojournal' );
d = m.getDB( "diskfulltest" );
c = d.getCollection( "diskfulltest" );
- c.save( { a: 6 } );
- assert(d.getLastError().length );
- printjson( d.getLastErrorObj() );
+ assert.writeError(c.insert( { a: 6 } ));
+
assert.soon(
function() { c.save( { a : 6 } );
return rawMongoProgramOutput().match( /file allocation failure/ );
},
"didn't see 'file allocation failure'" );
- c.save( { a: 6 } );
- assert.eq(d.getLastError(), "Can't take a write lock while out of disk space"); // every following fail
+ res = assert.writeError(c.insert({ a: 6 }));
+ var errmsg = res.getWriteError().errmsg;
+ assert.eq(errmsg, "Can't take a write lock while out of disk space"); // every following fail
sleep( 3000 );
diff --git a/jstests/disk/killall.js b/jstests/disk/killall.js
index a46a3588241..3be9f530780 100644
--- a/jstests/disk/killall.js
+++ b/jstests/disk/killall.js
@@ -21,8 +21,7 @@ var mongod = startMongod( "--port", port, "--dbpath", dbpath, "--nohttpinterface
var db = mongod.getDB( "test" );
var collection = db.getCollection( baseName );
-collection.save( {} );
-assert( ! db.getLastError() );
+assert.writeOK(collection.insert({}));
s1 = startParallelShell( "db." + baseName + ".count( { $where: function() { while( 1 ) { ; } } } )", port );
// HACK(schwerin): startParallelShell's return value should allow you to block until the command has
diff --git a/jstests/disk/preallocate_directoryperdb.js b/jstests/disk/preallocate_directoryperdb.js
index 8b3d551b5d4..38f55866f60 100644
--- a/jstests/disk/preallocate_directoryperdb.js
+++ b/jstests/disk/preallocate_directoryperdb.js
@@ -22,14 +22,15 @@ function checkDb2DirAbsent() {
var m = startMongod( "--smallfiles", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
db = m.getDB( baseName );
db2 = m.getDB( baseName2 );
-c = db[ baseName ];
-c2 = db2[ baseName2 ];
-big = new Array( 5000 ).toString();
+var bulk = db[ baseName ].initializeUnorderedBulkOp();
+var bulk2 = db2[ baseName2 ].initializeUnorderedBulkOp();
+var big = new Array( 5000 ).toString();
for( var i = 0; i < 3000; ++i ) {
- c.save( { b:big } );
- c2.save( { b:big } );
- db.getLastError();
+ bulk.insert({ b:big });
+ bulk2.insert({ b:big });
}
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
// Due to our write pattern, we expect db2's .3 file to be queued up in the file
// allocator behind db's .3 file at the time db2 is dropped. This will
@@ -43,8 +44,7 @@ db.dropDatabase();
// Try writing a new database, to ensure file allocator is still working.
db3 = m.getDB( baseName3 );
c3 = db[ baseName3 ];
-c3.save( {} );
-assert( !db3.getLastError() );
+assert.writeOK(c3.insert( {} ));
assert.eq( 1, c3.count() );
checkDb2DirAbsent();
diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js
index aa8963c6263..e8476072ff1 100644
--- a/jstests/disk/quota.js
+++ b/jstests/disk/quota.js
@@ -11,10 +11,11 @@ db = m.getDB( baseName );
big = new Array( 10000 ).toString();
// Insert documents until quota is exhausted.
-while( !db.getLastError() ) {
- db[ baseName ].save( {b:big} );
+var coll = db[ baseName ];
+var res = coll.insert({ b: big });
+while( !res.hasWriteError() ) {
+ res = coll.insert({ b: big });
}
-printjson( db.getLastError() );
dotTwoDataFile = baseName + ".2";
files = listFiles( dbpath );
@@ -27,8 +28,7 @@ dotTwoDataFile = "local" + ".2";
// Check that quota does not apply to local db, and a .2 file can be created.
l = m.getDB( "local" )[ baseName ];
for( i = 0; i < 10000; ++i ) {
- l.save( {b:big} );
- assert( !db.getLastError() );
+ assert.writeOK(l.insert({ b: big }));
dotTwoFound = false;
if ( i % 100 != 0 ) {
continue;
diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js
index a5f07abec8a..cdeda1a2a23 100644
--- a/jstests/disk/quota2.js
+++ b/jstests/disk/quota2.js
@@ -13,12 +13,12 @@ db = m.getDB( baseName );
big = new Array( 10000 ).toString();
// Insert documents until quota is exhausted.
-while( !db.getLastError() ) {
- db[ baseName ].save( {b:big} );
+var coll = db[ baseName ];
+var res = coll.insert({ b: big });
+while( !res.hasWriteError() ) {
+ res = coll.insert({ b: big });
}
-db.resetError();
-
// Trigger allocation of an additional file for a 'special' namespace.
for( n = 0; !db.getLastError(); ++n ) {
db.createCollection( '' + n );
@@ -27,10 +27,10 @@ for( n = 0; !db.getLastError(); ++n ) {
// Check that new docs are saved in the .0 file.
for( i = 0; i < n; ++i ) {
c = db[ ''+i ];
- c.save( {b:big} );
- if( !db.getLastError() ) {
- assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file );
+ res = c.insert({ b: big });
+ if( !res.hasWriteError() ) {
+ assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file );
}
}
-}
\ No newline at end of file
+}
diff --git a/jstests/dur/a_quick.js b/jstests/dur/a_quick.js
index ab36f91327e..bbec8af6939 100755
--- a/jstests/dur/a_quick.js
+++ b/jstests/dur/a_quick.js
@@ -62,8 +62,7 @@ tst.log("start mongod without dur");
var conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur");
tst.log("without dur work");
var d = conn.getDB("test");
-d.foo.insert({ _id:123 });
-d.getLastError();
+assert.writeOK(d.foo.insert({ _id: 123 }));
tst.log("stop without dur");
stopMongod(30000);
@@ -72,8 +71,7 @@ tst.log("start mongod with dur");
conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--durOptions", 8);
tst.log("with dur work");
d = conn.getDB("test");
-d.foo.insert({ _id: 123 });
-d.getLastError(); // wait
+assert.writeOK(d.foo.insert({ _id: 123 }));
// we could actually do getlasterror fsync:1 now, but maybe this is agood
// as it will assure that commits happen on a timely basis. a bunch of the other dur/*js
diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js
index 3d7119ab134..8c3864e8118 100644
--- a/jstests/dur/closeall.js
+++ b/jstests/dur/closeall.js
@@ -30,26 +30,24 @@ function f(variant, quickCommits, paranoid) {
print("closeall.js run test");
print("wait for initial sync to finish") // SERVER-4852
- db1.foo.insert({});
- err = db1.getLastErrorObj(2);
- printjson(err)
- assert.isnull(err.err);
- db1.foo.remove({});
- err = db1.getLastErrorObj(2);
- printjson(err)
- assert.isnull(err.err);
+ assert.writeOK(db1.foo.insert({}, { writeConcern: { w: 2 }}));
+ assert.writeOK(db1.foo.remove({}, { writeConcern: { w: 2 }}));
print("initial sync done")
- for( var i = 0; i < N; i++ ) {
- db1.foo.insert({x:1}); // this does wait for a return code so we will get some parallelism
- if( i % 7 == 0 )
- db1.foo.insert({x:99, y:2});
- if( i % 49 == 0 )
- db1.foo.update({ x: 99 }, { a: 1, b: 2, c: 3, d: 4 });
- if (i % 100 == 0)
- db1.foo.find();
- if( i == 800 )
- db1.foo.ensureIndex({ x: 1 });
+ var writeOps = startParallelShell('var coll = db.getSiblingDB("closealltest").foo; \
+ var bulk = coll.initializeUnorderedBulkOp(); \
+ for( var i = 0; i < ' + N + '; i++ ) { \
+ bulk.insert({ x: 1 }); \
+ if ( i % 7 == 0 ) \
+ bulk.insert({ x: 99, y: 2 }); \
+ if ( i % 49 == 0 ) \
+ bulk.find({ x: 99 }).update( \
+ { a: 1, b: 2, c: 3, d: 4 }); \
+ if( i == 800 ) \
+ coll.ensureIndex({ x: 1 }); \
+ }', 30001);
+
+ for( var i = 0; i < N; i++ ) {
var res = null;
try {
if( variant == 1 )
@@ -61,7 +59,6 @@ function f(variant, quickCommits, paranoid) {
res = db2.adminCommand("closeAllDatabases");
}
catch (e) {
- sleep(5000); // sleeping a little makes console output order prettier
print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i);
try {
print("getlasterror:");
@@ -74,8 +71,6 @@ function f(variant, quickCommits, paranoid) {
print("got another exception : " + e);
}
print("\n\n\n");
- // sleep a little to capture possible mongod output?
- sleep(2000);
throw e;
}
assert( res.ok, "closeAllDatabases res.ok=false");
@@ -87,6 +82,8 @@ function f(variant, quickCommits, paranoid) {
print("closeall.js shutting down servers");
stopMongod(30002);
stopMongod(30001);
+
+ writeOps();
}
// Skip this test on 32-bit Windows (unfixable failures in MapViewOfFileEx)
diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js
index a1efba5595d..a604439424d 100644
--- a/jstests/dur/diskfull.js
+++ b/jstests/dur/diskfull.js
@@ -50,15 +50,12 @@ function work() {
log("work");
try {
var d = conn.getDB("test");
-
- big = new Array( 5000 ).toString();
+ var big = new Array( 5000 ).toString();
+ var bulk = d.foo.initializeUnorderedBulkOp();
for( i = 0; i < 10000; ++i ) {
- d.foo.insert( { _id:i, b:big } );
+ bulk.insert({ _id: i, b: big });
}
-
- gle = d.getLastError();
- if ( gle )
- throw gle;
+ assert.writeOK(bulk.execute());
} catch ( e ) {
print( e );
raise( e );
diff --git a/jstests/dur/dropdb.js b/jstests/dur/dropdb.js
index 4fb94cc7d1e..54de6bdd7f2 100644
--- a/jstests/dur/dropdb.js
+++ b/jstests/dur/dropdb.js
@@ -62,10 +62,8 @@ function work() {
d.dropDatabase();
- d.foo.insert({ _id: 100 });
-
// assure writes applied in case we kill -9 on return from this function
- assert(d.runCommand({ getlasterror: 1, fsync: 1 }).ok, "getlasterror not ok");
+ assert.writeOK(d.foo.insert({ _id: 100 }, { writeConcern: { fsync: 1 }}));
}
function verify() {
diff --git a/jstests/dur/dur1.js b/jstests/dur/dur1.js
index cb4495aea52..0aecaaac21c 100755
--- a/jstests/dur/dur1.js
+++ b/jstests/dur/dur1.js
@@ -64,12 +64,6 @@ function work() {
// try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
-// d.a.update({ _id: 4 }, { $inc: { x: 1} });
-// d.a.reIndex();
-
- // assure writes applied in case we kill -9 on return from this function
- d.getLastError();
-
log("endwork");
return d;
}
diff --git a/jstests/dur/dur1_tool.js b/jstests/dur/dur1_tool.js
index fdfe05236f4..adee933fdb4 100755
--- a/jstests/dur/dur1_tool.js
+++ b/jstests/dur/dur1_tool.js
@@ -63,13 +63,6 @@ function work() {
// try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
-
-// d.a.update({ _id: 4 }, { $inc: { x: 1} });
-// d.a.reIndex();
-
- // assure writes applied in case we kill -9 on return from this function
- d.getLastError();
-
log("endwork");
return d;
}
diff --git a/jstests/dur/indexbg2.js b/jstests/dur/indexbg2.js
index a7484f0a561..d239d4eaa44 100644
--- a/jstests/dur/indexbg2.js
+++ b/jstests/dur/indexbg2.js
@@ -15,5 +15,5 @@ for( var i = 1000; i < 2000; ++i ) {
t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
t.remove( {_id:i} );
}
-t.insert( {_id:2000,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
-assert( !t.getDB().getLastError() );
+assert.writeOK(t.insert({ _id: 2000, a: 'abcd', b: 'bcde', x: 'four score and seven years ago' }));
+
diff --git a/jstests/dur/manyRestart.js b/jstests/dur/manyRestart.js
index f434278ca9a..5a68afdecbb 100755
--- a/jstests/dur/manyRestart.js
+++ b/jstests/dur/manyRestart.js
@@ -63,12 +63,6 @@ function work() {
// try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
-
-// d.a.update({ _id: 4 }, { $inc: { x: 1} });
-// d.a.reIndex();
-
- // assure writes applied in case we kill -9 on return from this function
- d.getLastError();
log("endwork");
return d;
}
diff --git a/jstests/dur/md5.js b/jstests/dur/md5.js
index 1773091186a..1b4ec43340e 100644
--- a/jstests/dur/md5.js
+++ b/jstests/dur/md5.js
@@ -29,13 +29,6 @@ function work() {
// try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
-
- // d.a.update({ _id: 4 }, { $inc: { x: 1} });
- // d.a.reIndex();
-
- // assure writes applied in case we kill -9 on return from this function
- d.getLastError();
-
log("endwork");
}
diff --git a/jstests/dur/oplog.js b/jstests/dur/oplog.js
index cd7d7c5e6ef..8ded3c980a9 100755
--- a/jstests/dur/oplog.js
+++ b/jstests/dur/oplog.js
@@ -81,9 +81,6 @@ function work() {
d.foo.insert({ _id: 6, q: "aaaaa", b: big, z: 3 });
d.foo.update({ _id: 5 }, { $set: { z: 99} });
- // assure writes applied in case we kill -9 on return from this function
- d.getLastError();
-
log("endwork");
verify();
diff --git a/jstests/gle/block2.js b/jstests/gle/block2.js
new file mode 100644
index 00000000000..142d51519b2
--- /dev/null
+++ b/jstests/gle/block2.js
@@ -0,0 +1,58 @@
+/**
+ * Basic gle testing for master/slave environment. Write command version also
+ * available at jstests/repl.
+ */
+
+var rt = new ReplTest( "block1" );
+
+var m = rt.start( true );
+var s = rt.start( false );
+
+if (m.writeMode() == 'commands') {
+ jsTest.log('Skipping test since commands mode is already tested in repl/');
+}
+else {
+
+ function setup(){
+
+ dbm = m.getDB( "foo" );
+ dbs = s.getDB( "foo" );
+
+ tm = dbm.bar;
+ ts = dbs.bar;
+ }
+ setup();
+
+ function check( msg ){
+ assert.eq( tm.count() , ts.count() , "check: " + msg );
+ }
+
+ function worked( w , wtimeout ){
+ var gle = dbm.getLastError( w , wtimeout );
+ if (gle != null) {
+ printjson(gle);
+ }
+ return gle == null;
+ }
+
+ check( "A" );
+
+ tm.save( { x : 1 } );
+ assert( worked( 2 ) , "B" );
+
+ tm.save( { x : 2 } );
+ assert( worked( 2 , 3000 ) , "C" );
+
+ rt.stop( false );
+ tm.save( { x : 3 } );
+ assert.eq( 3 , tm.count() , "D1" );
+ assert( ! worked( 2 , 3000 ) , "D2" );
+
+ s = rt.start( false );
+ setup();
+ assert( worked( 2 , 30000 ) , "E" );
+
+}
+
+rt.stop();
+
diff --git a/jstests/noPassthrough/gle_after_split_failure_during_migration.js b/jstests/gle/gle_after_split_failure_during_migration.js
index 9d0a6a9ca2a..9d0a6a9ca2a 100644
--- a/jstests/noPassthrough/gle_after_split_failure_during_migration.js
+++ b/jstests/gle/gle_after_split_failure_during_migration.js
diff --git a/jstests/noPassthroughWithMongod/opcounters_legacy.js b/jstests/gle/opcounters_legacy.js
index 7db520a109f..52e18c48643 100644
--- a/jstests/noPassthroughWithMongod/opcounters_legacy.js
+++ b/jstests/gle/opcounters_legacy.js
@@ -1,4 +1,5 @@
// Test that opcounters get incremented properly.
+// Write command version also available at jstests/core.
// Remember the global 'db' var
var lastDB = db;
diff --git a/jstests/gle/sync1.js b/jstests/gle/sync1.js
new file mode 100644
index 00000000000..83d26d1e71f
--- /dev/null
+++ b/jstests/gle/sync1.js
@@ -0,0 +1,57 @@
+// TODO: remove test after we deprecate SyncClusterConnection
+
+var test = new SyncCCTest( "sync1" );
+
+if (test.conn.writeMode() == 'commands') {
+ jsTest.log('Skipping test not compatible with write commands');
+}
+else {
+
+ db = test.conn.getDB( "test" )
+ t = db.sync1
+ t.save( { x : 1 } )
+ assert.eq( 1 , t.find().itcount() , "A1" );
+ assert.eq( 1 , t.find().count() , "A2" );
+ t.save( { x : 2 } )
+ assert.eq( 2 , t.find().itcount() , "A3" );
+ assert.eq( 2 , t.find().count() , "A4" );
+
+ test.checkHashes( "test" , "A3" );
+
+ test.tempKill();
+ assert.throws( function(){ t.save( { x : 3 } ); } , null , "B1" );
+ // It's ok even for some of the mongod to be unreachable for read-only cmd
+ assert.eq( 2, t.find({}).count() );
+ // It's NOT ok for some of the mongod to be unreachable for write cmd
+ assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
+ assert.eq( 2 , t.find().itcount() , "B2" );
+ test.tempStart();
+ test.checkHashes( "test" , "B3" );
+
+ // Trying killing the second mongod
+ test.tempKill( 1 );
+ assert.throws( function(){ t.save( { x : 3 } ); } );
+ // It's ok even for some of the mongod to be unreachable for read-only cmd
+ assert.eq( 2, t.find({}).count() );
+ // It's NOT ok for some of the mongod to be unreachable for write cmd
+ assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
+ assert.eq( 2 , t.find().itcount() );
+ test.tempStart( 1 );
+
+ assert.eq( 2 , t.find().itcount() , "C1" );
+ assert.soon( function(){
+ try {
+ t.remove( { x : 1 } )
+ return true;
+ }
+ catch ( e ){
+ print( e );
+ }
+ return false;
+ } )
+ t.find().forEach( printjson )
+ assert.eq( 1 , t.find().itcount() , "C2" );
+
+ test.stop();
+
+}
diff --git a/jstests/gle/sync4.js b/jstests/gle/sync4.js
new file mode 100644
index 00000000000..b6b1a777856
--- /dev/null
+++ b/jstests/gle/sync4.js
@@ -0,0 +1,26 @@
+// TODO: remove test after we deprecate SyncClusterConnection
+
+test = new SyncCCTest( "sync4" )
+
+if (test.conn.writeMode() == 'commands') {
+ jsTest.log('Skipping test not compatible with write commands');
+}
+else {
+
+ db = test.conn.getDB( "test" )
+ t = db.sync4
+
+ for ( i=0; i<1000; i++ ){
+ t.insert( { _id : i , x : "asdasdsdasdas" } )
+ }
+ db.getLastError();
+
+ test.checkHashes( "test" , "A0" );
+ assert.eq( 1000 , t.find().count() , "A1" )
+ assert.eq( 1000 , t.find().itcount() , "A2" )
+ assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A2" )
+
+ test.stop();
+
+}
+
diff --git a/jstests/gle/sync8.js b/jstests/gle/sync8.js
new file mode 100644
index 00000000000..81404785ac3
--- /dev/null
+++ b/jstests/gle/sync8.js
@@ -0,0 +1,21 @@
+// TODO: remove test after we deprecate SyncClusterConnection
+
+// Test for SERVER-11492 - make sure that upserting a new document reports n:1 in GLE
+
+var test = new SyncCCTest( "sync1" );
+
+if (test.conn.writeMode() == 'commands') {
+ jsTest.log('Skipping test not compatible with write commands');
+}
+else {
+ var db = test.conn.getDB( "test" );
+ var t = db.sync8;
+ t.remove({});
+
+ t.update({_id:1}, {$set:{a:1}}, true);
+ var le = db.getLastErrorObj();
+ assert.eq(1, le.n);
+
+ test.stop();
+
+}
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
index 60cb7733f5d..248f5e49a6c 100644
--- a/jstests/libs/geo_near_random.js
+++ b/jstests/libs/geo_near_random.js
@@ -12,16 +12,16 @@ GeoNearRandomTest = function(name) {
GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
- if(!indexBounds){
- scale = scale || 1; // scale is good for staying away from edges
- return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
- }
- else{
- var range = indexBounds.max - indexBounds.min;
- var eps = Math.pow(2, -40);
- // Go very close to the borders but not quite there.
- return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
- }
+ if(!indexBounds){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ else{
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ }
}
@@ -29,27 +29,29 @@ GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
assert.eq(this.nPts, 0, "insertPoints already called");
this.nPts = nPts;
+ var bulk = this.t.initializeUnorderedBulkOp();
for (var i=0; i<nPts; i++){
- this.t.insert({_id: i, loc: this.mkPt(scale, indexBounds)});
+ bulk.insert({ _id: i, loc: this.mkPt(scale, indexBounds) });
}
+ assert.writeOK(bulk.execute());
if(!indexBounds)
- this.t.ensureIndex({loc: '2d'});
+ this.t.ensureIndex({loc: '2d'});
else
- this.t.ensureIndex({loc: '2d'}, indexBounds)
+ this.t.ensureIndex({loc: '2d'}, indexBounds)
}
GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
for (var i=0; i < short.length; i++){
-
- var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0]
- var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1]
- var dS = short[i].obj ? short[i].dis : 1
-
- var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0]
- var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1]
- var dL = long[i].obj ? long[i].dis : 1
-
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0]
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1]
+ var dS = short[i].obj ? short[i].dis : 1
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0]
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1]
+ var dL = long[i].obj ? long[i].dis : 1
+
assert.eq([xS, yS, dS], [xL, yL, dL]);
}
}
diff --git a/jstests/misc/biginsert.js b/jstests/misc/biginsert.js
deleted file mode 100755
index ebbdc18ba3e..00000000000
--- a/jstests/misc/biginsert.js
+++ /dev/null
@@ -1,18 +0,0 @@
-o = "xxxxxxxxxxxxxxxxxxx";
-o = o + o;
-o + o;
-o = o + o;
-o = o + o;
-o = o + o;
-
-var B = 40000;
-var last = new Date();
-for (i = 0; i < 30000000; i++) {
- db.foo.insert({ o: o });
- if (i % B == 0) {
- var n = new Date();
- print(i);
- print("per sec: " + B*1000 / (n - last));
- last = n;
- }
-}
diff --git a/jstests/noPassthrough/disk_reuse1.js b/jstests/noPassthrough/disk_reuse1.js
index 249985edd1a..c208dcefb1f 100644
--- a/jstests/noPassthrough/disk_reuse1.js
+++ b/jstests/noPassthrough/disk_reuse1.js
@@ -16,31 +16,36 @@ while ( s.length < 1024 )
state = {}
-for ( i=0; i<N; i++ )
- t.insert( { _id : i , s : s } );
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < N; i++) {
+ bulk.insert({ _id: i, s: s });
+}
+assert.writeOK(bulk.execute());
orig = t.stats();
t.remove({});
-for ( i=0; i<N; i++ )
- t.insert( { _id : i , s : s } );
+bulk = t.initializeUnorderedBulkOp();
+for (i = 0; i < N; i++) {
+ bulk.insert({ _id: i, s: s });
+}
+assert.writeOK(bulk.execute());
assert.eq( orig.storageSize , t.stats().storageSize , "A" )
-for ( j=0; j<100; j++ ){
- for ( i=0; i<N; i++ ){
+for (j = 0; j < 100; j++){
+ for (i = 0; i < N; i++){
+ bulk = t.initializeUnorderedBulkOp();
var r = Math.random();
if ( r > .5 )
- t.remove( { _id : i } )
+ bulk.find({ _id: i }).remove();
else
- t.insert( { _id : i , s : s } )
+ bulk.find({ _id: i }).upsert().updateOne({ _id: i, s: s });
}
- //printjson( t.stats() );
-
+ assert.writeOK(bulk.execute());
assert.eq( orig.storageSize , t.stats().storageSize , "B" + j )
}
-
test.stop();
diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js
index ffeb26a2606..9d9203242d3 100644
--- a/jstests/noPassthrough/geo_full.js
+++ b/jstests/noPassthrough/geo_full.js
@@ -22,96 +22,89 @@ testServer = new SlowWeeklyMongod( "geo_full" )
db = testServer.getDB( "test" );
var randEnvironment = function(){
-
- // Normal earth environment
- if( Random.rand() < 0.5 ){
- return { max : 180,
- min : -180,
- bits : Math.floor( Random.rand() * 32 ) + 1,
- earth : true,
- bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) }
- }
-
- var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ]
- var scale = scales[ Math.floor( Random.rand() * scales.length ) ]
- var offset = Random.rand() * scale
-
+
+ // Normal earth environment
+ if( Random.rand() < 0.5 ){
+ return { max : 180,
+ min : -180,
+ bits : Math.floor( Random.rand() * 32 ) + 1,
+ earth : true,
+ bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) };
+ }
+
+ var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ]
+ var scale = scales[ Math.floor( Random.rand() * scales.length ) ]
+ var offset = Random.rand() * scale
+
var max = Random.rand() * scale + offset
- var min = - Random.rand() * scale + offset
- var bits = Math.floor( Random.rand() * 32 ) + 1
- var bits = Math.floor( Random.rand() * 32 ) + 1
- var range = max - min
+ var min = - Random.rand() * scale + offset
+ var bits = Math.floor( Random.rand() * 32 ) + 1
+ var bits = Math.floor( Random.rand() * 32 ) + 1
+ var range = max - min
var bucketSize = range / ( 4 * 1024 * 1024 * 1024 )
-
- return { max : max,
- min : min,
- bits : bits,
- earth : false,
- bucketSize : bucketSize }
-
-}
+
+ return { max : max,
+ min : min,
+ bits : bits,
+ earth : false,
+ bucketSize : bucketSize }
+};
var randPoint = function( env, query ) {
-
- if( query && Random.rand() > 0.5 )
- return query.exact
-
- if( env.earth )
- return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ]
-
- var range = env.max - env.min
- return [ Random.rand() * range + env.min, Random.rand() * range + env.min ];
+
+ if( query && Random.rand() > 0.5 )
+ return query.exact
+
+ if( env.earth )
+ return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ]
+
+ var range = env.max - env.min
+ return [ Random.rand() * range + env.min, Random.rand() * range + env.min ];
}
var randLocType = function( loc, wrapIn ){
- return randLocTypes( [ loc ], wrapIn )[0]
+ return randLocTypes( [ loc ], wrapIn )[0]
}
var randLocTypes = function( locs, wrapIn ) {
-
- var rLocs = []
-
- for( var i = 0; i < locs.length; i++ ){
+
+ var rLocs = []
+
+ for( var i = 0; i < locs.length; i++ ){
rLocs.push( locs[i] )
- // {x:1, y:1} \ne [1,1].
- //if( Random.rand() < 0.5 )
- //rLocs.push( { x : locs[i][0], y : locs[i][1] } )
- //else
- }
-
- if( wrapIn ){
- var wrappedLocs = []
- for( var i = 0; i < rLocs.length; i++ ){
- var wrapper = {}
- wrapper[wrapIn] = rLocs[i]
- wrappedLocs.push( wrapper )
- }
-
- return wrappedLocs
- }
-
- return rLocs
-
-}
+ }
+
+ if( wrapIn ){
+ var wrappedLocs = []
+ for( var i = 0; i < rLocs.length; i++ ){
+ var wrapper = {}
+ wrapper[wrapIn] = rLocs[i]
+ wrappedLocs.push( wrapper )
+ }
+
+ return wrappedLocs
+ }
+
+ return rLocs
+};
var randDataType = function() {
- var scales = [ 1, 10, 100, 1000, 10000 ]
- var docScale = scales[ Math.floor( Random.rand() * scales.length ) ]
- var locScale = scales[ Math.floor( Random.rand() * scales.length ) ]
-
- var numDocs = 40000
- var maxLocs = 40000
- // Make sure we don't blow past our test resources
- while( numDocs * maxLocs > 40000 ){
- numDocs = Math.floor( Random.rand() * docScale ) + 1
- maxLocs = Math.floor( Random.rand() * locScale ) + 1
- }
-
- return { numDocs : numDocs,
- maxLocs : maxLocs }
-
-}
+ var scales = [ 1, 10, 100, 1000, 10000 ]
+ var docScale = scales[ Math.floor( Random.rand() * scales.length ) ]
+ var locScale = scales[ Math.floor( Random.rand() * scales.length ) ]
+
+ var numDocs = 40000
+ var maxLocs = 40000
+ // Make sure we don't blow past our test resources
+ while( numDocs * maxLocs > 40000 ){
+ numDocs = Math.floor( Random.rand() * docScale ) + 1
+ maxLocs = Math.floor( Random.rand() * locScale ) + 1
+ }
+
+ return { numDocs : numDocs,
+ maxLocs : maxLocs }
+};
function deg2rad(arg) { return arg * Math.PI / 180.0; }
function rad2deg(arg) { return arg * 180.0 / Math.PI; }
@@ -140,194 +133,181 @@ function pointIsOK(startPoint, radius, env) {
}
var randQuery = function( env ) {
-
- var center = randPoint( env )
-
- var sphereRadius = -1
- var sphereCenter = null
- if( env.earth ){
- // Get a start point that doesn't require wrapping
- // TODO: Are we a bit too aggressive with wrapping issues?
- var i
- for( i = 0; i < 5; i++ ){
+ var center = randPoint( env )
+
+ var sphereRadius = -1
+ var sphereCenter = null
+ if( env.earth ){
+ // Get a start point that doesn't require wrapping
+ // TODO: Are we a bit too aggressive with wrapping issues?
+ var i
+ for( i = 0; i < 5; i++ ){
sphereRadius = Random.rand() * 45 * Math.PI / 180
sphereCenter = randPoint( env )
if (pointIsOK(sphereCenter, sphereRadius, env)) { break; }
- /*
- var t = db.testSphere; t.drop(); t.ensureIndex({ loc : "2d" }, env )
- try{ t.find({ loc : { $within : { $centerSphere : [ sphereCenter, sphereRadius ] } } } ).count(); var err; if( err = db.getLastError() ) throw err; }
- catch(e) { print( e ); continue }
- print( " Radius " + sphereRadius + " and center " + sphereCenter + " ok ! ")
- break;
- */
- }
- if( i == 5 ) sphereRadius = -1;
-
- }
-
- var box = [ randPoint( env ), randPoint( env ) ]
-
- var boxPoly = [[ box[0][0], box[0][1] ],
- [ box[0][0], box[1][1] ],
- [ box[1][0], box[1][1] ],
- [ box[1][0], box[0][1] ] ]
-
- if( box[0][0] > box[1][0] ){
- var swap = box[0][0]
- box[0][0] = box[1][0]
- box[1][0] = swap
- }
-
- if( box[0][1] > box[1][1] ){
- var swap = box[0][1]
- box[0][1] = box[1][1]
- box[1][1] = swap
- }
-
- return { center : center,
- radius : box[1][0] - box[0][0],
- exact : randPoint( env ),
- sphereCenter : sphereCenter,
- sphereRadius : sphereRadius,
- box : box,
- boxPoly : boxPoly }
-
-}
+ }
+ if( i == 5 ) sphereRadius = -1;
+
+ }
+
+ var box = [ randPoint( env ), randPoint( env ) ]
+
+ var boxPoly = [[ box[0][0], box[0][1] ],
+ [ box[0][0], box[1][1] ],
+ [ box[1][0], box[1][1] ],
+ [ box[1][0], box[0][1] ] ]
+
+ if( box[0][0] > box[1][0] ){
+ var swap = box[0][0]
+ box[0][0] = box[1][0]
+ box[1][0] = swap
+ }
+
+ if( box[0][1] > box[1][1] ){
+ var swap = box[0][1]
+ box[0][1] = box[1][1]
+ box[1][1] = swap
+ }
+ return { center : center,
+ radius : box[1][0] - box[0][0],
+ exact : randPoint( env ),
+ sphereCenter : sphereCenter,
+ sphereRadius : sphereRadius,
+ box : box,
+ boxPoly : boxPoly }
+};
var resultTypes = {
"exact" : function( loc ){
- return query.exact[0] == loc[0] && query.exact[1] == loc[1]
+ return query.exact[0] == loc[0] && query.exact[1] == loc[1]
},
"center" : function( loc ){
- return Geo.distance( query.center, loc ) <= query.radius
+ return Geo.distance( query.center, loc ) <= query.radius
},
"box" : function( loc ){
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
-
-},
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
+
+},
"sphere" : function( loc ){
- return ( query.sphereRadius >= 0 ? ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false )
-},
+ return ( query.sphereRadius >= 0 ?
+ ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false );
+},
"poly" : function( loc ){
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
}}
var queryResults = function( locs, query, results ){
-
- if( ! results["center"] ){
- for( var type in resultTypes ){
- results[type] = {
- docsIn : 0,
- docsOut : 0,
- locsIn : 0,
- locsOut : 0
- }
- }
- }
-
- var indResults = {}
- for( var type in resultTypes ){
- indResults[type] = {
- docIn : false,
- locsIn : 0,
- locsOut : 0
- }
- }
-
- for( var type in resultTypes ){
-
- var docIn = false
- for( var i = 0; i < locs.length; i++ ){
- if( resultTypes[type]( locs[i] ) ){
- results[type].locsIn++
- indResults[type].locsIn++
- indResults[type].docIn = true
- }
- else{
- results[type].locsOut++
- indResults[type].locsOut++
- }
- }
- if( indResults[type].docIn ) results[type].docsIn++
- else results[type].docsOut++
-
- }
-
- return indResults
-
+
+ if( ! results["center"] ){
+ for( var type in resultTypes ){
+ results[type] = {
+ docsIn : 0,
+ docsOut : 0,
+ locsIn : 0,
+ locsOut : 0
+ }
+ }
+ }
+
+ var indResults = {}
+ for( var type in resultTypes ){
+ indResults[type] = {
+ docIn : false,
+ locsIn : 0,
+ locsOut : 0
+ }
+ }
+
+ for( var type in resultTypes ){
+
+ var docIn = false
+ for( var i = 0; i < locs.length; i++ ){
+ if( resultTypes[type]( locs[i] ) ){
+ results[type].locsIn++
+ indResults[type].locsIn++
+ indResults[type].docIn = true
+ }
+ else{
+ results[type].locsOut++
+ indResults[type].locsOut++
+ }
+ }
+ if( indResults[type].docIn ) results[type].docsIn++
+ else results[type].docsOut++
+
+ }
+
+ return indResults
}
var randQueryAdditions = function( doc, indResults ){
-
- for( var type in resultTypes ){
- var choice = Random.rand()
- if( Random.rand() < 0.25 )
- doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } )
- else if( Random.rand() < 0.5 )
- doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } )
- else if( Random.rand() < 0.75 )
- doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] )
- else
- doc[type] = ( indResults[type].docIn ? [ { docIn : [ "yes" ] } ] : [ { docIn : [ "no" ] } ] )
- }
-
+
+ for( var type in resultTypes ){
+ var choice = Random.rand()
+ if( Random.rand() < 0.25 )
+ doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } )
+ else if( Random.rand() < 0.5 )
+ doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } )
+ else if( Random.rand() < 0.75 )
+ doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] )
+ else
+ doc[type] = ( indResults[type].docIn ? [{ docIn: [ "yes" ] }] : [{ docIn: [ "no" ] }]);
+ }
}
var randIndexAdditions = function( indexDoc ){
-
- for( var type in resultTypes ){
-
- if( Random.rand() < 0.5 ) continue;
-
- var choice = Random.rand()
- if( Random.rand() < 0.5 )
- indexDoc[type] = 1
- else
- indexDoc[type + ".docIn"] = 1
-
- }
-
-}
+
+ for( var type in resultTypes ){
+
+ if( Random.rand() < 0.5 ) continue;
+
+ var choice = Random.rand()
+ if( Random.rand() < 0.5 )
+ indexDoc[type] = 1
+ else
+ indexDoc[type + ".docIn"] = 1;
+ }
+};
var randYesQuery = function(){
-
- var choice = Math.floor( Random.rand() * 7 )
- if( choice == 0 )
- return { $ne : "no" }
- else if( choice == 1 )
- return "yes"
- else if( choice == 2 )
- return /^yes/
- else if( choice == 3 )
- return { $in : [ "good", "yes", "ok" ] }
- else if( choice == 4 )
- return { $exists : true }
- else if( choice == 5 )
- return { $nin : [ "bad", "no", "not ok" ] }
- else if( choice == 6 )
- return { $not : /^no/ }
+
+ var choice = Math.floor( Random.rand() * 7 )
+ if( choice == 0 )
+ return { $ne : "no" }
+ else if( choice == 1 )
+ return "yes"
+ else if( choice == 2 )
+ return /^yes/
+ else if( choice == 3 )
+ return { $in : [ "good", "yes", "ok" ] }
+ else if( choice == 4 )
+ return { $exists : true }
+ else if( choice == 5 )
+ return { $nin : [ "bad", "no", "not ok" ] }
+ else if( choice == 6 )
+ return { $not : /^no/ }
}
var locArray = function( loc ){
- if( loc.x ) return [ loc.x, loc.y ]
- if( ! loc.length ) return [ loc[0], loc[1] ]
- return loc
+ if( loc.x ) return [ loc.x, loc.y ]
+ if( ! loc.length ) return [ loc[0], loc[1] ]
+ return loc
}
var locsArray = function( locs ){
- if( locs.loc ){
- arr = []
- for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) )
- return arr
- }
- else{
- arr = []
- for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) )
- return arr
- }
+ if( locs.loc ){
+ arr = []
+ for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) )
+ return arr
+ }
+ else{
+ arr = []
+ for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) )
+ return arr
+ }
}
var minBoxSize = function( env, box ){
@@ -335,16 +315,16 @@ var minBoxSize = function( env, box ){
}
var minBucketScale = function( env, box ){
-
+
if( box.length && box[0].length )
box = [ box[0][0] - box[1][0], box[0][1] - box[1][1] ]
-
+
if( box.length )
box = Math.max( box[0], box[1] )
-
+
print( box )
print( env.bucketSize )
-
+
return Math.ceil( Math.log( box / env.bucketSize ) / Math.log( 2 ) )
}
@@ -352,119 +332,114 @@ var minBucketScale = function( env, box ){
// TODO: Add spherical $uniqueDocs tests
var numTests = 100
-// Our seed will change every time this is run, but
+// Our seed will change every time this is run, but
// each individual test will be reproducible given
// that seed and test number
var seed = new Date().getTime()
//seed = 175 + 288 + 12
for ( var test = 0; test < numTests; test++ ) {
-
- Random.srand( seed + test );
- //Random.srand( 42240 )
- //Random.srand( 7344 )
- var t = db.testAllGeo
- t.drop()
-
- print( "Generating test environment #" + test )
- var env = randEnvironment()
- //env.bits = 11
- var query = randQuery( env )
- var data = randDataType()
- //data.numDocs = 5; data.maxLocs = 1;
- var paddingSize = Math.floor( Random.rand() * 10 + 1 )
- var results = {}
- var totalPoints = 0
- print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " )
-
- // Index after a random number of docs added
- var indexIt = Math.floor( Random.rand() * data.numDocs )
-
- for ( var i = 0; i < data.numDocs; i++ ) {
-
- if( indexIt == i ){
- var indexDoc = { "locs.loc" : "2d" }
- randIndexAdditions( indexDoc )
-
- // printjson( indexDoc )
-
- t.ensureIndex( indexDoc, env )
- assert.isnull( db.getLastError() )
- }
-
- var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 )
- totalPoints += numLocs
-
- var multiPoint = []
- for ( var p = 0; p < numLocs; p++ ) {
- var point = randPoint( env, query )
- multiPoint.push( point )
- }
-
- var indResults = queryResults( multiPoint, query, results )
-
- var doc
- // Nest the keys differently
- if( Random.rand() < 0.5 )
- doc = { locs : { loc : randLocTypes( multiPoint ) } }
- else
- doc = { locs : randLocTypes( multiPoint, "loc" ) }
-
- randQueryAdditions( doc, indResults )
-
- //printjson( doc )
- doc._id = i
- t.insert( doc )
-
- }
-
- var padding = "x"
- for( var i = 0; i < paddingSize; i++ ) padding = padding + padding
-
- print( padding )
-
- printjson( { seed : seed,
- test: test,
- env : env,
- query : query,
- data : data,
- results : results,
- paddingSize : paddingSize } )
-
- // exact
- print( "Exact query..." )
- assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() )
-
- // $center
- print( "Center query..." )
- print( "Min box : " + minBoxSize( env, query.radius ) )
- assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() )
-
- print( "Center query update..." )
- // printjson( t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).toArray() )
- t.update( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : true } }, "center.docIn" : randYesQuery() }, { $set : { "centerPaddingA" : padding } }, false, true )
- assert.eq( results.center.docsIn, t.getDB().getLastErrorObj().n )
-
- if( query.sphereRadius >= 0 ){
-
- print( "Center sphere query...")
- // $centerSphere
- assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() )
-
- print( "Center sphere query update..." )
- // printjson( t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).toArray() )
- t.update( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ], $uniqueDocs : true } }, "sphere.docIn" : randYesQuery() }, { $set : { "spherePaddingA" : padding } }, false, true )
- assert.eq( results.sphere.docsIn, t.getDB().getLastErrorObj().n )
-
- }
-
- // $box
- print( "Box query..." )
- assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() )
-
- // $polygon
- print( "Polygon query..." )
- assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() )
+
+ Random.srand( seed + test );
+ //Random.srand( 42240 )
+ //Random.srand( 7344 )
+ var t = db.testAllGeo
+ t.drop()
+
+ print( "Generating test environment #" + test )
+ var env = randEnvironment()
+ //env.bits = 11
+ var query = randQuery( env )
+ var data = randDataType()
+ //data.numDocs = 5; data.maxLocs = 1;
+ var paddingSize = Math.floor( Random.rand() * 10 + 1 )
+ var results = {}
+ var totalPoints = 0
+ print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " )
+
+ var bulk = t.initializeUnorderedBulkOp();
+ for ( var i = 0; i < data.numDocs; i++ ) {
+ var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 )
+ totalPoints += numLocs
+
+ var multiPoint = []
+ for ( var p = 0; p < numLocs; p++ ) {
+ var point = randPoint( env, query )
+ multiPoint.push( point )
+ }
+
+ var indResults = queryResults( multiPoint, query, results )
+
+ var doc
+ // Nest the keys differently
+ if( Random.rand() < 0.5 )
+ doc = { locs : { loc : randLocTypes( multiPoint ) } }
+ else
+ doc = { locs : randLocTypes( multiPoint, "loc" ) }
+
+ randQueryAdditions( doc, indResults )
+
+ doc._id = i
+ bulk.insert( doc );
+ }
+ assert.writeOK(bulk.execute());
+
+ var indexDoc = { "locs.loc" : "2d" };
+ randIndexAdditions( indexDoc );
+ t.ensureIndex( indexDoc, env );
+ assert.isnull( db.getLastError() );
+
+ var padding = "x"
+ for( var i = 0; i < paddingSize; i++ ) padding = padding + padding
+
+ print( padding )
+
+ printjson( { seed : seed,
+ test: test,
+ env : env,
+ query : query,
+ data : data,
+ results : results,
+ paddingSize : paddingSize } )
+
+ // exact
+ print( "Exact query..." )
+ assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() )
+
+ // $center
+ print( "Center query..." )
+ print( "Min box : " + minBoxSize( env, query.radius ) )
+ assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() )
+
+ print( "Center query update..." )
+ var res = t.update({ "locs.loc": { $within: { $center: [ query.center, query.radius ],
+ $uniqueDocs: true }},
+ "center.docIn": randYesQuery() },
+ { $set: { centerPaddingA: padding }}, false, true);
+ assert.eq( results.center.docsIn, res.nModified );
+
+ if( query.sphereRadius >= 0 ){
+
+ print( "Center sphere query...")
+ // $centerSphere
+ assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() )
+
+ print( "Center sphere query update..." )
+ res = t.update({ "locs.loc": { $within: {
+ $centerSphere: [ query.sphereCenter, query.sphereRadius ],
+ $uniqueDocs: true } },
+ "sphere.docIn" : randYesQuery() },
+ { $set: { spherePaddingA: padding } }, false, true);
+ assert.eq( results.sphere.docsIn, res.nModified );
+ }
+
+ // $box
+ print( "Box query..." )
+ assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() )
+
+ // $polygon
+ print( "Polygon query..." )
+ assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() )
// $near, $nearSphere and geoNear results have a default document limit of 100.
var defaultDocLimit = 100;
@@ -506,12 +481,12 @@ for ( var test = 0; test < numTests; test++ ) {
var num = Math.min( 2* defaultDocLimit, 2 * results.center.docsIn);
- var output = db.runCommand( {
- geoNear : "testAllGeo",
- near : query.center,
- maxDistance : query.radius ,
- includeLocs : true,
- num : num } ).results
+ var output = db.runCommand( {
+ geoNear : "testAllGeo",
+ near : query.center,
+ maxDistance : query.radius ,
+ includeLocs : true,
+ num : num } ).results
assert.eq( Math.min( num, results.center.docsIn ),
output.length,
@@ -520,40 +495,36 @@ for ( var test = 0; test < numTests; test++ ) {
"; radius: " + query.radius +
"; docs: " + results.center.docsIn + "; locs: " + results.center.locsIn )
- var distance = 0;
- for ( var i = 0; i < output.length; i++ ) {
- var retDistance = output[i].dis
- var retLoc = locArray( output[i].loc )
-
- // print( "Dist from : " + results[i].loc + " to " + startPoint + " is "
- // + retDistance + " vs " + radius )
-
- var arrLocs = locsArray( output[i].obj.locs )
-
- assert.contains( retLoc, arrLocs )
-
- // printjson( arrLocs )
-
- var distInObj = false
- for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) {
- var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] )
- distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 )
- }
-
- assert( distInObj )
- assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 )
- assert.lte( retDistance, query.radius )
- assert.gte( retDistance, distance )
- distance = retDistance
- }
-
- }
-
- // $polygon
+ var distance = 0;
+ for ( var i = 0; i < output.length; i++ ) {
+ var retDistance = output[i].dis
+ var retLoc = locArray( output[i].loc )
+
+ var arrLocs = locsArray( output[i].obj.locs )
+
+ assert.contains( retLoc, arrLocs )
+
+ var distInObj = false
+ for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) {
+ var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] )
+ distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 )
+ }
+
+ assert( distInObj )
+ assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 )
+ assert.lte( retDistance, query.radius )
+ assert.gte( retDistance, distance )
+ distance = retDistance
+ }
+
+ }
+
+ // $polygon
print( "Polygon remove..." )
- t.remove( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } )
- assert.eq( results.poly.docsIn, t.getDB().getLastErrorObj().n )
-
+ res = t.remove({ "locs.loc": { $within: { $polygon: query.boxPoly }},
+ "poly.docIn": randYesQuery() });
+ assert.eq( results.poly.docsIn, res.nRemoved );
+
}
diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js
index 53d33da4f29..7c5e23d4b97 100644
--- a/jstests/noPassthrough/geo_mnypts_plus_fields.js
+++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js
@@ -12,7 +12,8 @@ for( var fields = 1; fields < maxFields; fields++ ){
coll.drop()
var totalPts = 500 * 1000
-
+
+ var bulk = coll.initializeUnorderedBulkOp();
// Add points in a 100x100 grid
for( var i = 0; i < totalPts; i++ ){
var ii = i % 10000
@@ -37,10 +38,11 @@ for( var fields = 1; fields < maxFields; fields++ ){
doc[ "field" + j ] = field
}
-
- coll.insert( doc )
+
+ bulk.insert( doc );
}
-
+ assert.writeOK(bulk.execute());
+
// Create the query for the additional fields
queryFields = {}
for( var j = 0; j < fields; j++ ){
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index d2c3ccac7e4..1a7ba4b3f4c 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -35,10 +35,11 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m
t = db[ baseName ];
t.drop();
+ var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
for( i = 0; i < size; ++i ) {
- db.jstests_indexbg1.save( {i:i} );
+ bulk.insert({ i: i });
}
- db.getLastError();
+ assert.writeOK(bulk.execute());
assert.eq( size, t.count() );
doParallel( fullName + ".ensureIndex( {i:1}, {background:true} )" );
@@ -62,25 +63,16 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m
assert( ex.nscanned < 1000 , "took too long to find 100: " + tojson( ex ) );
- t.remove( {i:40}, true ); // table scan
- assert( !db.getLastError() );
-
- t.update( {i:10}, {i:-10} ); // should scan 10
- assert( !db.getLastError() );
+ assert.writeOK(t.remove({ i: 40 }, true )); // table scan
+ assert.writeOK(t.update({ i: 10 }, { i :-10 })); // should scan 10
id = t.find().hint( {$natural:-1} ).next()._id;
- t.update( {_id:id}, {i:-2} );
- assert( !db.getLastError() );
-
- t.save( {i:-50} );
- assert( !db.getLastError() );
-
- t.save( {i:size+2} );
- assert( !db.getLastError() );
+ assert.writeOK(t.update({ _id: id }, { i: -2 } ));
+ assert.writeOK(t.save({ i: -50 }));
+ assert.writeOK(t.save({ i: size + 2 }));
assert.eq( size + 1, t.count() );
- assert( !db.getLastError() );
print( "finished with checks" );
} catch( e ) {
@@ -113,10 +105,10 @@ assert.eq( 1, t.count( {i:-2} ) );
assert.eq( 1, t.count( {i:-50} ) );
assert.eq( 1, t.count( {i:size+2} ) );
assert.eq( 0, t.count( {i:40} ) );
-assert( !db.getLastError() );
print("about to drop index");
t.dropIndex( {i:1} );
-printjson( db.getLastError() );
-assert( !db.getLastError() );
+var gle = db.getLastError();
+printjson( gle );
+assert( !gle );
testServer.stop();
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index 0b5edc79aa2..fcdac89956d 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -49,9 +49,7 @@ doTest = function(dropDups) {
// wait for indexing to start
assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test." + baseName }) }, "no index created", 30000, 50);
t.save({ i: 0, n: true });
- //printjson(db.getLastError());
t.save({ i: size - 1, n: true });
- //printjson(db.getLastError());
} catch (e) {
// only a failure if we're still indexing
// wait for parallel status to update to reflect indexing status
diff --git a/jstests/noPassthrough/query_yield1.js b/jstests/noPassthrough/query_yield1.js
index 624215f8c45..7c168c1e208 100644
--- a/jstests/noPassthrough/query_yield1.js
+++ b/jstests/noPassthrough/query_yield1.js
@@ -14,9 +14,11 @@ q = function(){ var x=this.n; for ( var i=0; i<250; i++ ){ x = x * 2; } return f
while ( true ){
function fill(){
+ var bulk = t.initializeUnorderedBulkOp();
for ( ; i<N; i++ ){
- t.insert( { _id : i , n : 1 } )
+ bulk.insert({ _id: i, n: 1 });
}
+ assert.writeOK(bulk.execute());
}
function timeQuery(){
@@ -58,7 +60,7 @@ num = 0;
start = new Date();
biggestMe = 0;
while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
- var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); db.getLastError(); } )
+ var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); });
var x = db.currentOp()
if ( num++ == 0 ){
@@ -84,4 +86,4 @@ assert.eq( 0 , x.inprog.length , "weird 2" );
testServer.stop();
-} \ No newline at end of file
+}
diff --git a/jstests/noPassthrough/query_yield2.js b/jstests/noPassthrough/query_yield2.js
index 71ce4535aa6..b2262d6b357 100644
--- a/jstests/noPassthrough/query_yield2.js
+++ b/jstests/noPassthrough/query_yield2.js
@@ -26,9 +26,11 @@ print( "Shell ==== Creating test.query_yield2 collection ..." );
print( "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete" );
while ( true ){
function fill() {
+ var bulk = t.initializeUnorderedBulkOp();
for ( ; i < N; ++i ) {
- t.insert( { _id : i , n : 1 } )
+ bulk.insert({ _id: i , n: 1 });
}
+ assert.writeOK(bulk.execute());
}
function timeQuery() {
return Date.timeFunc(
@@ -100,7 +102,7 @@ while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ) {
if ( num == 0 ) {
print( "Shell ==== Starting loop " + num + ", inserting 1 document" );
}
- insertTime = Date.timeFunc( function() { t.insert( { x : 1 } ); db.getLastError(); } );
+ insertTime = Date.timeFunc( function() { t.insert({ x: 1 } ); });
currentOp = db.currentOp();
len = currentOp.inprog.length;
print( "Shell ==== Time to insert document " + num + " was " + insertTime + " ms, db.currentOp().inprog.length is " + len );
@@ -133,4 +135,4 @@ if ( len != 0 ) {
print( "Shell ==== Test completed successfully, shutting down server" );
testServer.stop();
-} \ No newline at end of file
+}
diff --git a/jstests/noPassthrough/repair2.js b/jstests/noPassthrough/repair2.js
index 6f57ac0d45f..e80a3edf02b 100644
--- a/jstests/noPassthrough/repair2.js
+++ b/jstests/noPassthrough/repair2.js
@@ -9,8 +9,8 @@ t = testServer.getDB( baseName )[ baseName ];
t.drop();
function protect( f ) {
- try {
- f();
+ try {
+ f();
} catch( e ) {
printjson( e );
}
@@ -19,16 +19,17 @@ function protect( f ) {
s = startParallelShell( "db = db.getSisterDB( '" + baseName + "'); for( i = 0; i < 10; ++i ) { db.repairDatabase(); sleep( 5000 ); }" );
for( i = 0; i < 30; ++i ) {
-
- for( j = 0; j < 5000; ++j ) {
- protect( function() { t.insert( {_id:j} ); } );
+ var bulk = t.initializeUnorderedBulkOp();
+ for( j = 0; j < 5000; ++j ) {
+ bulk.insert({ _id: j } );
}
- for( j = 0; j < 5000; ++j ) {
- protect( function() { t.remove( {_id:j} ); } );
+ for( j = 0; j < 5000; ++j ) {
+ bulk.find({ _id: j }).remove();
}
-
- assert.eq( 0, t.count() );
+
+ assert.writeOK(bulk.execute());
+ assert.eq( 0, t.count() );
}
diff --git a/jstests/noPassthrough/sync1.js b/jstests/noPassthrough/sync1.js
deleted file mode 100644
index 490d2a53c5a..00000000000
--- a/jstests/noPassthrough/sync1.js
+++ /dev/null
@@ -1,49 +0,0 @@
-
-test = new SyncCCTest( "sync1" )
-
-db = test.conn.getDB( "test" )
-t = db.sync1
-t.save( { x : 1 } )
-assert.eq( 1 , t.find().itcount() , "A1" );
-assert.eq( 1 , t.find().count() , "A2" );
-t.save( { x : 2 } )
-assert.eq( 2 , t.find().itcount() , "A3" );
-assert.eq( 2 , t.find().count() , "A4" );
-
-test.checkHashes( "test" , "A3" );
-
-test.tempKill();
-assert.throws( function(){ t.save( { x : 3 } ); } , null , "B1" );
-// It's ok even for some of the mongod to be unreachable for read-only cmd
-assert.eq( 2, t.find({}).count() );
-// It's NOT ok for some of the mongod to be unreachable for write cmd
-assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
-assert.eq( 2 , t.find().itcount() , "B2" );
-test.tempStart();
-test.checkHashes( "test" , "B3" );
-
-// Trying killing the second mongod
-test.tempKill( 1 );
-assert.throws( function(){ t.save( { x : 3 } ); } );
-// It's ok even for some of the mongod to be unreachable for read-only cmd
-assert.eq( 2, t.find({}).count() );
-// It's NOT ok for some of the mongod to be unreachable for write cmd
-assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
-assert.eq( 2 , t.find().itcount() );
-test.tempStart( 1 );
-
-assert.eq( 2 , t.find().itcount() , "C1" );
-assert.soon( function(){
- try {
- t.remove( { x : 1 } )
- return true;
- }
- catch ( e ){
- print( e );
- }
- return false;
-} )
-t.find().forEach( printjson )
-assert.eq( 1 , t.find().itcount() , "C2" );
-
-test.stop();
diff --git a/jstests/noPassthrough/sync4.js b/jstests/noPassthrough/sync4.js
deleted file mode 100644
index 6733f07089d..00000000000
--- a/jstests/noPassthrough/sync4.js
+++ /dev/null
@@ -1,19 +0,0 @@
-
-test = new SyncCCTest( "sync4" )
-
-db = test.conn.getDB( "test" )
-t = db.sync4
-
-for ( i=0; i<1000; i++ ){
- t.insert( { _id : i , x : "asdasdsdasdas" } )
-}
-db.getLastError();
-
-test.checkHashes( "test" , "A0" );
-assert.eq( 1000 , t.find().count() , "A1" )
-assert.eq( 1000 , t.find().itcount() , "A2" )
-assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A2" )
-
-
-
-test.stop();
diff --git a/jstests/noPassthrough/sync8.js b/jstests/noPassthrough/sync8.js
deleted file mode 100644
index 241ad655569..00000000000
--- a/jstests/noPassthrough/sync8.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// Test for SERVER-11492 - make sure that upserting a new document reports n:1 in GLE
-
-var test = new SyncCCTest( "sync1" );
-
-var db = test.conn.getDB( "test" );
-var t = db.sync8;
-t.remove({});
-
-t.update({_id:1}, {$set:{a:1}}, true);
-var le = db.getLastErrorObj();
-assert.eq(1, le.n);
-
-test.stop();
diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js
index d1f1bcb518b..c164ba67694 100644
--- a/jstests/noPassthrough/update_server-5552.js
+++ b/jstests/noPassthrough/update_server-5552.js
@@ -1,5 +1,3 @@
-
-
load( "jstests/libs/slow_weekly_util.js" )
testServer = new SlowWeeklyMongod( "update_server-5552" )
db = testServer.getDB( "test" );
@@ -9,9 +7,11 @@ t.drop()
N = 10000;
-for ( i=0; i<N; i++ )
- t.insert( { _id : i , x : 1 } )
-db.getLastError();
+var bulk = t.initializeUnorderedBulkOp();
+for ( i=0; i<N; i++ ) {
+ bulk.insert({ _id: i, x: 1 });
+}
+assert.writeOK(bulk.execute());
join = startParallelShell( "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );" )
diff --git a/jstests/noPassthrough/update_yield1.js b/jstests/noPassthrough/update_yield1.js
index db684a6d6eb..98437414600 100644
--- a/jstests/noPassthrough/update_yield1.js
+++ b/jstests/noPassthrough/update_yield1.js
@@ -12,16 +12,17 @@ var i = 0;
while ( true ){
var fill = function() {
+ var bulk = t.initializeUnorderedBulkOp();
for ( ; i<N; i++ ){
- t.insert( { _id : i , n : 1 } );
+ bulk.insert({ _id: i, n: 1 });
}
+ assert.writeOK(bulk.execute());
};
var timeUpdate = function() {
return Date.timeFunc(
function(){
t.update( {} , { $inc : { n : 1 } } , false , true );
- var r = db.getLastErrorObj();
}
);
};
@@ -48,7 +49,7 @@ function haveInProgressUpdate() {
// --- test 1
-var join = startParallelShell( "db.update_yield1.update( {} , { $inc : { n : 1 } } , false , true ); db.getLastError()" );
+var join = startParallelShell( "db.update_yield1.update( {}, { $inc: { n: 1 }}, false, true );" );
assert.soon(haveInProgressUpdate, "never doing update");
var num = 0;
diff --git a/jstests/noPassthroughWithMongod/autosplit_heuristics.js b/jstests/noPassthroughWithMongod/autosplit_heuristics.js
index 33649617126..ee1d28b5eda 100644
--- a/jstests/noPassthroughWithMongod/autosplit_heuristics.js
+++ b/jstests/noPassthroughWithMongod/autosplit_heuristics.js
@@ -60,15 +60,11 @@ printjson({ chunkSizeBytes : chunkSizeBytes,
totalInserts : totalInserts });
// Insert enough docs to trigger splits into all chunks
+var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < totalInserts; i++) {
- coll.insert({ _id : i % numChunks + (i / totalInserts) });
- if ( i % ( numChunks * 1000 ) == 0 ) {
- print( "Inserted " + i + " docs, " +
- ( i * approxSize / numChunks ) + " bytes per chunk." );
- }
+ bulk.insert({ _id : i % numChunks + (i / totalInserts) });
}
-
-assert.eq(null, coll.getDB().getLastError());
+assert.writeOK(bulk.execute());
jsTest.log("Inserts completed...");
diff --git a/jstests/noPassthroughWithMongod/background.js b/jstests/noPassthroughWithMongod/background.js
index d1d0047988a..188027a029b 100644
--- a/jstests/noPassthroughWithMongod/background.js
+++ b/jstests/noPassthroughWithMongod/background.js
@@ -7,45 +7,41 @@ t.drop();
var a = new Mongo( db.getMongo().host ).getDB( db.getName() );
+var bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 100000; i++ ) {
- t.insert({y:'aaaaaaaaaaaa',i:i});
- if( i % 10000 == 0 ) {
- db.getLastError();
- print(i);
- }
+ bulk.insert({ y: 'aaaaaaaaaaaa', i: i });
+ if( i % 10000 == 0 ) {
+ assert.writeOK(bulk.execute());
+ bulk = t.initializeUnorderedBulkOp();
+ print(i);
+ }
}
-//db.getLastError();
-
// start bg indexing
a.system.indexes.insert({ns:"test.bg1", key:{i:1}, name:"i_1", background:true});
// add more data
-
+bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 100000; i++ ) {
- t.insert({i:i});
- if( i % 10000 == 0 ) {
- printjson( db.currentOp() );
- db.getLastError();
- print(i);
- }
+ bulk.insert({ i: i });
+ if( i % 10000 == 0 ) {
+ printjson( db.currentOp() );
+ assert.writeOK(bulk.execute());
+ bulk = t.initializeUnorderedBulkOp();
+ print(i);
+ }
}
-printjson( db.getLastErrorObj() );
+assert.writeOK(bulk.execute());
printjson( db.currentOp() );
-for( var i = 0; i < 40; i++ ) {
- if( db.currentOp().inprog.length == 0 )
- break;
- print("waiting");
- sleep(1000);
+for( var i = 0; i < 40; i++ ) {
+ if( db.currentOp().inprog.length == 0 )
+ break;
+ print("waiting");
+ sleep(1000);
}
-printjson( a.getLastErrorObj() );
-
var idx = t.getIndexes();
-// print("indexes:");
-// printjson(idx);
-
assert( idx[1].key.i == 1 );
diff --git a/jstests/noPassthroughWithMongod/balance_repl.js b/jstests/noPassthroughWithMongod/balance_repl.js
index 610af04767b..c5818ea19b0 100644
--- a/jstests/noPassthroughWithMongod/balance_repl.js
+++ b/jstests/noPassthroughWithMongod/balance_repl.js
@@ -5,10 +5,11 @@ s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true, _noslee
db = s.getDB( "test" );
-for ( i=0; i<2100; i++ ) {
- db.foo.insert( { _id : i , x : i } );
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 2100; i++) {
+ bulk.insert({ _id: i, x: i });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
serverName = s.getServerName( "test" )
other = s.config.shards.findOne( { _id : { $ne : serverName } } );
diff --git a/jstests/noPassthroughWithMongod/balance_tags1.js b/jstests/noPassthroughWithMongod/balance_tags1.js
index 945f0526b17..1122380d7bc 100644
--- a/jstests/noPassthroughWithMongod/balance_tags1.js
+++ b/jstests/noPassthroughWithMongod/balance_tags1.js
@@ -3,10 +3,11 @@ s = new ShardingTest( "balance_tags1" , 3 , 1 , 1 , { sync:true, chunksize : 1 ,
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false, _nosleep: true } } , true );
db = s.getDB( "test" );
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<21; i++ ) {
- db.foo.insert( { _id : i , x : i } );
+ bulk.insert({ _id: i, x: i });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
s.adminCommand( { enablesharding : "test" } )
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
diff --git a/jstests/noPassthroughWithMongod/balance_tags2.js b/jstests/noPassthroughWithMongod/balance_tags2.js
index 55ad3dc5a97..6d0ed6ea7ca 100644
--- a/jstests/noPassthroughWithMongod/balance_tags2.js
+++ b/jstests/noPassthroughWithMongod/balance_tags2.js
@@ -6,10 +6,11 @@ s = new ShardingTest( "balance_tags2" , 3 , 1 , 1 ,
s.config.settings.save({ _id: "balancer", _nosleep: true});
db = s.getDB( "test" );
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<21; i++ ) {
- db.foo.insert( { _id : i , x : i } );
+ bulk.insert({ _id: i, x: i });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
// enable sharding, shard, and stop balancer
sh.enableSharding("test");
diff --git a/jstests/noPassthroughWithMongod/btreedel.js b/jstests/noPassthroughWithMongod/btreedel.js
index 824eb3e63a6..89af6aa7d5d 100644
--- a/jstests/noPassthroughWithMongod/btreedel.js
+++ b/jstests/noPassthroughWithMongod/btreedel.js
@@ -3,9 +3,11 @@
t = db.foo;
t.remove({});
+var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 1000000; i++) {
- t.insert({ _id: i, x: 'a b' });
+ bulk.insert({ _id: i, x: 'a b' });
}
+assert.writeOK(bulk.execute());
print("1 insert done count: " + t.count());
diff --git a/jstests/noPassthroughWithMongod/bulk_shard_insert.js b/jstests/noPassthroughWithMongod/bulk_shard_insert.js
index d9cd25a635e..74810a7c668 100644
--- a/jstests/noPassthroughWithMongod/bulk_shard_insert.js
+++ b/jstests/noPassthroughWithMongod/bulk_shard_insert.js
@@ -46,12 +46,7 @@ while( docsInserted < numDocs ){
bulk.push({ hi : "there", at : docsInserted, i : i, x : x })
}
- coll.insert( bulk )
- var result = db.getLastError( 1 )
- if( result != null ){
- printjson( result )
- throw result
- }
+ assert.writeOK(coll.insert( bulk ));
if( Math.floor( docsInserted / 10000 ) != Math.floor( ( docsInserted + currBulkSize ) / 10000 ) ){
print( "Inserted " + (docsInserted + currBulkSize) + " documents." )
diff --git a/jstests/noPassthroughWithMongod/capped4.js b/jstests/noPassthroughWithMongod/capped4.js
index 27d138c16ce..be02e3c6ae1 100644
--- a/jstests/noPassthroughWithMongod/capped4.js
+++ b/jstests/noPassthroughWithMongod/capped4.js
@@ -22,8 +22,8 @@ assert( !d.hasNext(), "C" );
assert( t.find().sort( { i : 1 } ).hint( { i : 1 } ).toArray().length > 10, "D" );
assert( t.findOne( { i : i - 1 } ), "E" );
-t.remove( { i : i - 1 } );
-assert( db.getLastError().indexOf( "capped" ) >= 0, "F" );
+var res = assert.writeError(t.remove( { i : i - 1 } ));
+assert( res.getWriteError().errmsg.indexOf( "capped" ) >= 0, "F" );
assert( t.validate().valid, "G" );
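Writes that are expected to fail, like the capped-collection remove above, now use assert.writeError and read the message off the returned result instead of grepping getLastError. A small sketch (collection name illustrative):

    // A rejected write returns a WriteResult carrying the error details.
    var res = assert.writeError(db.capped_example.remove({ _id: 1 }));
    print(res.getWriteError().code);     // numeric error code
    print(res.getWriteError().errmsg);   // human-readable message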
diff --git a/jstests/noPassthroughWithMongod/fsync2.js b/jstests/noPassthroughWithMongod/fsync2.js
index bdf956f30f2..7080837a99b 100644
--- a/jstests/noPassthroughWithMongod/fsync2.js
+++ b/jstests/noPassthroughWithMongod/fsync2.js
@@ -41,9 +41,7 @@ function doTest() {
//assert.eq(1, m.getDB(db.getName()).fsync2.count());
assert( m.getDB("admin").$cmd.sys.unlock.findOne().ok );
-
- db.getLastError();
-
+
assert.eq( 2, db.fsync2.count() );
}
diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
index 0161eccb4ac..084b839cabc 100644
--- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js
+++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
@@ -17,15 +17,14 @@ centers = []
bounds = []
for( var s = 0; s < scale.length; s++ ){
- for ( var i = 0; i < radius.length; i++ ) {
- radii.push( radius[i] * scale[s] )
- }
-
- for ( var j = 0; j < center.length; j++ ) {
- centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] )
- bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] )
- }
+ for ( var i = 0; i < radius.length; i++ ) {
+ radii.push( radius[i] * scale[s] )
+ }
+ for ( var j = 0; j < center.length; j++ ) {
+ centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] )
+ bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] )
+ }
}
radius = radii
@@ -34,75 +33,74 @@ bound = bounds
for ( var b = 0; b < bits.length; b++ ) {
-
-
- printjson( radius )
- printjson( centers )
-
- for ( var i = 0; i < radius.length; i++ ) {
- for ( var j = 0; j < center.length; j++ ) {
-
- printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
-
- t.drop()
-
- // Make sure our numbers are precise enough for this test
- if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
- continue;
-
- t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
- t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
- t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
- t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
- t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
- t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
-
- t.ensureIndex( { loc : "2d" }, { max : bound[j][1], min : bound[j][0], bits : bits[b] } );
-
- if( db.getLastError() ) continue;
-
- print( "DOING WITHIN QUERY ")
- r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
-
- //printjson( r.toArray() );
-
- assert.eq( 5, r.count() );
-
- // FIXME: surely code like this belongs in utils.js.
- a = r.toArray();
- x = [];
- for ( k in a )
- x.push( a[k]["_id"] )
- x.sort()
- assert.eq( [ 1, 2, 3, 4, 5 ], x );
-
- print( " DOING NEAR QUERY ")
- //printjson( center[j] )
- r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } )
- assert.eq( 5, r.count() );
-
- print( " DOING DIST QUERY ")
-
- a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results
- assert.eq( 5, a.length );
-
- //printjson( a );
-
- var distance = 0;
- for( var k = 0; k < a.length; k++ ){
- //print( a[k].dis )
- //print( distance )
- assert.gte( a[k].dis, distance );
- //printjson( a[k].obj )
- //print( distance = a[k].dis );
- }
-
- r = t.find( { loc : { $within : { $box : [ [ center[j][0] - radius[i], center[j][1] - radius[i] ], [ center[j][0] + radius[i], center[j][1] + radius[i] ] ] } } }, { _id : 1 } )
- assert.eq( 9, r.count() );
-
- }
- }
-} \ No newline at end of file
+ printjson( radius )
+ printjson( centers )
+
+ for ( var i = 0; i < radius.length; i++ ) {
+ for ( var j = 0; j < center.length; j++ ) {
+ printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
+
+ t.drop()
+
+ // Make sure our numbers are precise enough for this test
+ if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
+ continue;
+
+ t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
+ t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
+ t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
+ t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
+ t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
+ t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
+
+ var res = t.ensureIndex({ loc: "2d" },
+ { max: bound[j][1],
+ min : bound[j][0],
+ bits : bits[b] });
+
+ // ensureIndex fails when this iteration inserted coordinates that are out of bounds.
+ // These are invalid cases, so we skip them.
+ if (!res.ok) continue;
+
+ print( "DOING WITHIN QUERY ")
+ r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
+
+ assert.eq( 5, r.count() );
+
+ // FIXME: surely code like this belongs in utils.js.
+ a = r.toArray();
+ x = [];
+ for ( k in a )
+ x.push( a[k]["_id"] )
+ x.sort()
+ assert.eq( [ 1, 2, 3, 4, 5 ], x );
+
+ print( " DOING NEAR QUERY ")
+ //printjson( center[j] )
+ r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } )
+ assert.eq( 5, r.count() );
+
+ print( " DOING DIST QUERY ")
+
+ a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results
+ assert.eq( 5, a.length );
+
+ var distance = 0;
+ for( var k = 0; k < a.length; k++ ){
+ assert.gte( a[k].dis, distance );
+
+ }
+
+ r = t.find({ loc: { $within: { $box: [ [ center[j][0] - radius[i],
+ center[j][1] - radius[i] ],
+ [ center[j][0] + radius[i],
+ center[j][1] + radius[i] ]]}}},
+ { _id: 1 } );
+ assert.eq( 9, r.count() );
+
+ }
+ }
+}
diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js
index ac4065158bf..bc7935fa7a6 100644
--- a/jstests/noPassthroughWithMongod/geo_mnypts.js
+++ b/jstests/noPassthroughWithMongod/geo_mnypts.js
@@ -6,10 +6,12 @@ coll.drop()
var totalPts = 500 * 1000
// Add points in a 100x100 grid
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < totalPts; i++ ){
var ii = i % 10000
- coll.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] })
+ bulk.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] });
}
+assert.writeOK(bulk.execute());
coll.ensureIndex({ loc : "2d" })
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 25bf0269ccc..5b19b2b2080 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -15,12 +15,14 @@ if ( bi.indexOf( "erh2" ) >= 0 ){
if ( shouldRun ) {
num = 0;
+ var bulk = t.initializeUnorderedBulkOp();
for ( x = -180; x < 180; x += .5 ){
for ( y = -180; y < 180; y += .5 ){
o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
+ bulk.insert( o );
}
}
+ assert.writeOK(bulk.execute());
var numTests = 31;
for( var n = 0; n < numTests; n++ ){
diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js
index 79d0d93fc9b..84e7342e051 100644
--- a/jstests/noPassthroughWithMongod/index_check10.js
+++ b/jstests/noPassthroughWithMongod/index_check10.js
@@ -104,25 +104,30 @@ function doIt( indexVersion ) {
}
}
+ var bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 10000; ++i ) {
- t.save( obj() );
+ bulk.insert( obj() );
}
+ assert.writeOK(bulk.execute());
t.ensureIndex( idx , { v : indexVersion } );
check();
+ bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 10000; ++i ) {
if ( Random.rand() > 0.9 ) {
- t.save( obj() );
+ bulk.insert( obj() );
} else {
- t.remove( obj() ); // improve
+ bulk.find( obj() ).remove(); // improve
}
if( Random.rand() > 0.999 ) {
print( i );
+ assert.writeOK(bulk.execute());
check();
+ bulk = t.initializeUnorderedBulkOp();
}
}
-
+ assert.writeOK(bulk.execute());
check();
}
diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js
index 8a50471940b..fd1b1d5eaa1 100644
--- a/jstests/noPassthroughWithMongod/index_check9.js
+++ b/jstests/noPassthroughWithMongod/index_check9.js
@@ -106,25 +106,32 @@ function check() {
assert.eq( c3.length, count );
}
+var bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 10000; ++i ) {
- t.save( obj() );
+ bulk.insert( obj() );
if( Random.rand() > 0.999 ) {
print( i );
+ assert.writeOK(bulk.execute());
check();
+ bulk = t.initializeUnorderedBulkOp();
}
}
+bulk = t.initializeUnorderedBulkOp();
for( var i = 0; i < 100000; ++i ) {
if ( Random.rand() > 0.9 ) {
- t.save( obj() );
+ bulk.insert( obj() );
} else {
- t.remove( obj() ); // improve
+ bulk.find( obj() ).remove(); // improve
}
if( Random.rand() > 0.999 ) {
print( i );
+ assert.writeOK(bulk.execute());
check();
+ bulk = t.initializeUnorderedBulkOp();
}
}
+assert.writeOK(bulk.execute());
check();
@@ -132,4 +139,4 @@ check();
for( var z = 0; z < 5; ++z ) {
doIt();
-} \ No newline at end of file
+}
diff --git a/jstests/noPassthroughWithMongod/index_hammer1.js b/jstests/noPassthroughWithMongod/index_hammer1.js
index 87fd3820f66..675a2f8db7c 100644
--- a/jstests/noPassthroughWithMongod/index_hammer1.js
+++ b/jstests/noPassthroughWithMongod/index_hammer1.js
@@ -2,9 +2,10 @@
t = db.index_hammer1;
t.drop();
+var bulk = t.initializeUnorderedBulkOp();
for ( i=0; i<10000; i++ )
- t.insert( { x : i , y : i } );
-db.getLastError();
+ bulk.insert({ x: i, y: i });
+assert.writeOK(bulk.execute());
ops = []
diff --git a/jstests/noPassthroughWithMongod/index_killop.js b/jstests/noPassthroughWithMongod/index_killop.js
index b022e31f3b8..f897f6a80de 100644
--- a/jstests/noPassthroughWithMongod/index_killop.js
+++ b/jstests/noPassthroughWithMongod/index_killop.js
@@ -5,10 +5,11 @@ t.drop();
// Insert a large number of documents, enough to ensure that an index build on these documents will
// be interrupted before complete.
+var bulk = t.initializeUnorderedBulkOp();
for( i = 0; i < 1e6; ++i ) {
- t.save( { a:i } );
+ bulk.insert({ a: i });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
function debug( x ) {
// printjson( x );
@@ -23,7 +24,7 @@ function getIndexBuildOpId() {
// Identify the index build as an insert into the 'test.system.indexes'
// namespace. It is assumed that no other clients are concurrently
// accessing the 'test' database.
- if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) {
+ if ( op.op == 'query' && 'createIndexes' in op.query ) {
debug( op.opid );
indexBuildOpId = op.opid;
}
@@ -33,9 +34,8 @@ function getIndexBuildOpId() {
/** Test that building an index with @param 'options' can be aborted using killop. */
function testAbortIndexBuild( options ) {
-
- // Create an index asynchronously by using a new connection.
- new Mongo( db.getMongo().host ).getCollection( t.toString() ).createIndex( { a:1 }, options );
+ var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_killop; \
+ coll.createIndex({ a: 1 }, ' + tojson(options) + ');');
// When the index build starts, find its op id.
assert.soon( function() { return ( opId = getIndexBuildOpId() ) != -1; } );
@@ -44,6 +44,8 @@ function testAbortIndexBuild( options ) {
// Wait for the index build to stop.
assert.soon( function() { return getIndexBuildOpId() == -1; } );
+ createIdx();
+
// Check that no new index has been created. This verifies that the index build was aborted
// rather than successfully completed.
assert.eq( [ { _id:1 } ], t.getIndexKeys() );
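The index_killop change starts the background build in a parallel shell so the test can join it after the kill, and it identifies the build by its createIndexes command in currentOp. Roughly, under those assumptions (collection name illustrative):

    // Start a background index build, locate its operation, kill it, then join the shell.
    var createIdx = startParallelShell(
        'db.example_killop.createIndex({ a: 1 }, { background: true });');
    var opId = -1;
    assert.soon(function() {
        db.currentOp().inprog.forEach(function(op) {
            if (op.op == 'query' && op.query && 'createIndexes' in op.query) {
                opId = op.opid;
            }
        });
        return opId != -1;
    });
    db.killOp(opId);
    createIdx();   // wait for the parallel shell to exit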
diff --git a/jstests/noPassthroughWithMongod/index_multi.js b/jstests/noPassthroughWithMongod/index_multi.js
index ac259455d36..e4c38632bcf 100644
--- a/jstests/noPassthroughWithMongod/index_multi.js
+++ b/jstests/noPassthroughWithMongod/index_multi.js
@@ -4,8 +4,9 @@ Random.setRandomSeed();
var coll = db.index_multi;
+var bulk = coll.initializeUnorderedBulkOp();
print("Populate the collection with random data");
-for (var i=0;i<1e4; i++) {
+for (var i = 0; i < 1e4; i++) {
var doc = {"_id" : i};
for (var j=0; j<100; j++) {
@@ -22,52 +23,54 @@ for (var i=0;i<1e4; i++) {
}
}
- if (i%1000 == 0) {
- print("inserted "+i);
- }
-
- coll.insert(doc);
+ bulk.insert(doc);
}
+assert.writeOK(bulk.execute());
// Array of all index specs
var specs = [];
var multikey = [];
+var indexJobs = [];
print("Create 3 triple indexes");
-for (var i=90; i<93; i++) {
+for (var i = 90; i < 93; i++) {
var spec = {};
spec["field"+i] = 1;
spec["field"+(i+1)] = 1;
spec["field"+(i+2)] = 1;
- startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});"
- +"db.results.insert(db.runCommand({getlasterror:1}));");
+ indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + "," +
+ "{ background: true });" +
+ "db.results.insert(db.runCommand({ getlasterror: 1 }));"));
specs.push(spec);
multikey.push(i % 10 == 0 || (i+1) % 10 == 0 || (i+2) % 10 == 0);
}
print("Create 30 compound indexes");
-for (var i=30; i<90; i+=2) {
+for (var i = 30; i < 90; i += 2) {
var spec = {};
spec["field"+i] = 1;
spec["field"+(i+1)] = 1;
- startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});"
- +"db.results.insert(db.runCommand({getlasterror:1}));");
+ indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " +
+ "{ background: true });" +
+ "db.results.insert(db.runCommand({ getlasterror: 1 }));"));
specs.push(spec);
multikey.push(i % 10 == 0 || (i+1) % 10 == 0);
}
print("Create 30 indexes");
-for (var i=0; i<30; i++) {
+for (var i = 0; i < 30; i++) {
var spec = {};
spec["field"+i] = 1;
- startParallelShell("db.index_multi.createIndex("+tojson(spec)+", {background:true});"
- +"db.results.insert(db.runCommand({getlasterror:1}));");
+ indexJobs.push(startParallelShell("db.index_multi.createIndex(" + tojson(spec) + ", " +
+ "{ background: true });" +
+ "db.results.insert(db.runCommand({ getlasterror: 1 }));"));
specs.push(spec);
multikey.push(i % 10 == 0);
}
print("Do some sets and unsets");
-for (i=0; i<1e4; i++) {
+bulk = coll.initializeUnorderedBulkOp();
+for (i = 0; i < 1e4; i++) {
var criteria = {_id: Random.randInt(1e5)};
var mod = {};
if (Random.rand() < .5) {
@@ -79,31 +82,23 @@ for (i=0; i<1e4; i++) {
mod['$unset']['field'+Random.randInt(100)] = true;
}
- coll.update(criteria, mod);
+ bulk.find(criteria).update(mod);
}
+assert.writeOK(bulk.execute());
+
+indexJobs.forEach(function(join) {
+ join();
+});
printjson(db.results.find().toArray());
printjson(coll.getIndexes());
print("Make sure we end up with 64 indexes");
-assert.soon(
- function() {
- for (var i in specs) {
- print("trying to hint on "+tojson(specs[i]));
- try {
- var explain = coll.find().hint(specs[i]).explain();
- printjson(explain);
- assert.eq(multikey[i], explain.isMultiKey);
- } catch (x) {
- print(x+", hinting on "+tojson(specs[i]));
- return false;
- }
- }
- return true;
- },
- "wait for all indexes to be built",
- 120000
-);
+for (var i in specs) {
+ print("trying to hint on "+tojson(specs[i]));
+ var explain = coll.find().hint(specs[i]).explain();
+ assert.eq(multikey[i], explain.isMultiKey, tojson(explain));
+}
print("SUCCESS!");
diff --git a/jstests/noPassthroughWithMongod/index_retry.js b/jstests/noPassthroughWithMongod/index_retry.js
index d0465476144..7c79e75af5f 100644
--- a/jstests/noPassthroughWithMongod/index_retry.js
+++ b/jstests/noPassthroughWithMongod/index_retry.js
@@ -12,13 +12,14 @@ t.drop();
// Insert a large number of documents, enough to ensure that an index build on these documents can
// be interrupted before complete.
+var bulk = t.initializeUnorderedBulkOp();
for (i = 0; i < 5e5; ++i) {
- t.save( { a:i } );
+ bulk.insert({ a: i });
if (i % 10000 == 0) {
print("i: " + i);
}
}
-test.getLastError();
+assert.writeOK(bulk.execute());
function debug(x) {
printjson(x);
@@ -36,14 +37,15 @@ function indexBuildInProgress() {
// Identify the index build as an insert into the 'test.system.indexes'
// namespace. It is assumed that no other clients are concurrently
// accessing the 'test' database.
- if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) {
+ if ( op.op == 'query' && 'createIndexes' in op.query ) {
debug(op.opid);
+ var idxSpec = op.query.indexes[0];
// SERVER-4295 Make sure the index details are there
// we can't assert these things, since there is a race in reporting
// but we won't count if they aren't
- if ( "a_1" == op.insert.name &&
- 1 == op.insert.key.a &&
- op.insert.background ) {
+ if ( "a_1" == idxSpec.name &&
+ 1 == idxSpec.key.a &&
+ idxSpec.background ) {
indexBuildOpId = op.opid;
}
}
@@ -53,10 +55,9 @@ function indexBuildInProgress() {
}
function abortDuringIndexBuild(options) {
-
- // Create an index asynchronously by using a new connection.
- new Mongo(test.getMongo().host).getCollection(t.toString()).createIndex(
- { a:1 }, { background:true } );
+ var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_retry; \
+ coll.createIndex({ a: 1 }, { background: true });',
+ ports[0]);
// Wait for the index build to start.
var times = 0;
@@ -68,6 +69,7 @@ function abortDuringIndexBuild(options) {
print("killing the mongod");
stopMongod(ports[0], /* signal */ 9);
+ createIdx();
}
abortDuringIndexBuild();
diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js
index 9e754b747ef..db4493df017 100644
--- a/jstests/noPassthroughWithMongod/indexbg_drop.js
+++ b/jstests/noPassthroughWithMongod/indexbg_drop.js
@@ -42,9 +42,11 @@ var dc = {dropIndexes: collection, index: "i_1"};
// set up collections
masterDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
+var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
for( i = 0; i < size; ++i ) {
- masterDB.getCollection(collection).save( {i: Random.rand()} );
+ bulk.insert({ i: Random.rand() });
}
+assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing for test of: " + tojson(dc));
// Add another index to be sure the drop command works.
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
index 09c75056ca2..f6f1d426161 100644
--- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js
+++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
@@ -66,9 +66,11 @@ for (var idx = 0; idx < dropAction.length; idx++) {
// set up collections
masterDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
+ var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
for(var i = 0; i < size; ++i ) {
- masterDB.getCollection(collection).save( {i:i} );
+ bulk.insert({ i: i });
}
+ assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc));
masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} );
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
index 01d3b23a07c..a3b2c26f59e 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
@@ -38,9 +38,11 @@ var secondDB = second.getDB('bgIndexSec');
var size = 500000;
jsTest.log("creating test data " + size + " documents");
+var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
for(var i = 0; i < size; ++i) {
- masterDB.jstests_bgsec.save( {i:i} );
+ bulk.insert({ i: i });
}
+assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing");
masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
index 38cced11bb9..1ea53e6aac2 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
@@ -64,9 +64,11 @@
var size = 500000;
jsTest.log("creating test data " + size + " documents");
+ var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
for( i = 0; i < size; ++i ) {
- masterDB.jstests_bgsec.save( {i:i} );
+ bulk.insert({ i : i });
}
+ assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing");
masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
@@ -82,8 +84,7 @@
// Make sure a journal flush for the oplog occurs, by doing a local journaled write to the
// secondary
- second.getDB('local').foo.insert({a:1});
- second.getDB('local').runCommand( { getLastError: 1, j: true } );
+ assert.writeOK(second.getDB('local').foo.insert({ a: 1 }, { writeConcern: { j: true }}));
// restart secondary and reconnect
jsTest.log("Restarting secondary");
diff --git a/jstests/noPassthroughWithMongod/large_chunk.js b/jstests/noPassthroughWithMongod/large_chunk.js
index 2e648084947..12f0c48fdcd 100644
--- a/jstests/noPassthroughWithMongod/large_chunk.js
+++ b/jstests/noPassthroughWithMongod/large_chunk.js
@@ -20,11 +20,12 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 400 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ bulk.insert({ _id: num++, s: bigString });
inserted += bigString.length;
}
-db.getLastError();
+assert.writeOK(bulk.execute());
// Turn on sharding on the 'test.foo' collection and generate a large chunk
s.adminCommand( { enablesharding : "test" } );
diff --git a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
index fd7ec8c68e7..1ff024fcb03 100644
--- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
+++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
@@ -15,14 +15,14 @@ var coll = db.getCollection("mrInput");
//
var expectedOutColl = [];
+var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10; i++) {
for (var j = 1; j < 50; j++) {
- coll.insert({idx: i, j: j});
+ bulk.insert({ idx: i, j: j });
}
expectedOutColl.push ({ _id: i, value: j - 1 });
}
-
-assertGLEOK(db.getLastErrorObj());
+assert.writeOK(bulk.execute());
function mapFn() { emit(this.idx, 1); };
function reduceFn(key, values) { return Array.sum(values); };
@@ -41,4 +41,4 @@ assert.eq(out.counts.emit, 490, "emit count is wrong");
// changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else
// must have changed with when intermediate reduces occur (see mongo::mr::State::checkSize).
//
-assert.eq(out.counts.reduce, 14, "reduce count is wrong"); \ No newline at end of file
+assert.eq(out.counts.reduce, 14, "reduce count is wrong");
diff --git a/jstests/noPassthroughWithMongod/mr_shard_version.js b/jstests/noPassthroughWithMongod/mr_shard_version.js
index 47fd99ea30e..c011e7700e9 100644
--- a/jstests/noPassthroughWithMongod/mr_shard_version.js
+++ b/jstests/noPassthroughWithMongod/mr_shard_version.js
@@ -11,11 +11,12 @@ var numDocs = 500000
var numKeys = 1000
var numTests = 3
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < numDocs; i++ ){
- coll.insert({ _id : i, key : "" + ( i % numKeys ), value : i % numKeys })
+ bulk.insert({ _id: i, key: "" + ( i % numKeys ), value: i % numKeys });
}
+assert.writeOK(bulk.execute());
-assert.eq( null, coll.getDB().getLastError() )
assert.eq( numDocs, coll.find().itcount() )
var halfId = coll.find().itcount() / 2
diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js
index 7aa55564640..6f6c196510d 100644
--- a/jstests/noPassthroughWithMongod/no_balance_collection.js
+++ b/jstests/noPassthroughWithMongod/no_balance_collection.js
@@ -68,9 +68,11 @@ sh.waitForBalancer(true)
// Make sure auto-migrates on insert don't move chunks
var lastMigration = sh._lastMigration( collB )
+var bulk = collB.initializeUnorderedBulkOp();
for( var i = 0; i < 1000000; i++ ){
- collB.insert({ _id : i, hello : "world" })
+ bulk.insert({ _id: i, hello: "world" });
}
+assert.writeOK(bulk.execute());
printjson( lastMigration )
printjson( sh._lastMigration( collB ) )
@@ -78,4 +80,4 @@ printjson( sh._lastMigration( collB ) )
if( lastMigration == null ) assert.eq( null, sh._lastMigration( collB ) )
else assert.eq( lastMigration.time, sh._lastMigration( collB ).time )
-st.stop() \ No newline at end of file
+st.stop()
diff --git a/jstests/noPassthroughWithMongod/parallel_collection_scan.js b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
index d745f088376..44e5d361e45 100644
--- a/jstests/noPassthroughWithMongod/parallel_collection_scan.js
+++ b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
@@ -6,10 +6,11 @@ s = "";
while ( s.length < 10000 )
s += ".";
+var bulk = t.initializeUnorderedBulkOp();
for ( i = 0; i < 8000; i++ ) {
- t.insert( { x : i, s : s } );
+ bulk.insert({ x: i, s: s });
}
-
+assert.writeOK(bulk.execute());
function iterateSliced() {
var res = t.runCommand( "parallelCollectionScan", { numCursors : 3 } );
diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js
index e7dfe9bfff1..7492e36c5b9 100644
--- a/jstests/noPassthroughWithMongod/remove9.js
+++ b/jstests/noPassthroughWithMongod/remove9.js
@@ -5,8 +5,7 @@ js = "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i}
pid = startMongoProgramNoConnect( "mongo" , "--eval" , js , db ? db.getMongo().host : null );
for( var i = 0; i < 10000; ++i ) {
- t.remove( {i:Random.randInt( 10000 )} );
- assert.automsg( "!db.getLastError()" );
+ assert.writeOK(t.remove( { i: Random.randInt( 10000 )} ));
}
-stopMongoProgramByPid( pid ); \ No newline at end of file
+stopMongoProgramByPid( pid );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance1.js b/jstests/noPassthroughWithMongod/sharding_balance1.js
index e0c36f6cea5..7f3892ce8f3 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance1.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance1.js
@@ -15,12 +15,13 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ bulk.insert({ _id: num++, s: bigString });
inserted += bigString.length;
}
+assert.writeOK(bulk.execute());
-db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance2.js b/jstests/noPassthroughWithMongod/sharding_balance2.js
index 519f88807a2..c3e2e825ba3 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance2.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance2.js
@@ -26,12 +26,12 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 40 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ bulk.insert({ _id: num++, s: bigString });
inserted += bigString.length;
}
-
-db.getLastError();
+assert.writeOK(bulk.execute());
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance3.js b/jstests/noPassthroughWithMongod/sharding_balance3.js
index 5e85a694716..59f4136d44c 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance3.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance3.js
@@ -16,12 +16,13 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 40 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ bulk.insert({ _id: num++, s: bigString });
inserted += bigString.length;
}
+assert.writeOK(bulk.execute());
-db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_balance4.js b/jstests/noPassthroughWithMongod/sharding_balance4.js
index f1c27afa0bb..c2a3d744964 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance4.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance4.js
@@ -35,12 +35,12 @@ counts = {}
//
-function doUpdate( includeString, optionalId ){
+function doUpdate( bulk, includeString, optionalId ){
var up = { $inc : { x : 1 } }
if ( includeString )
up["$set"] = { s : bigString };
var myid = optionalId == undefined ? Random.randInt( N ) : optionalId
- db.foo.update( { _id : myid } , up , true );
+ bulk.find({ _id : myid }).upsert().update( up );
counts[myid] = ( counts[myid] ? counts[myid] : 0 ) + 1;
return myid;
@@ -48,14 +48,15 @@ function doUpdate( includeString, optionalId ){
// Initially update all documents from 1 to N, otherwise later checks can fail because no document
// previously existed
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i = 0; i < N; i++ ){
- doUpdate( true, i )
+ doUpdate( bulk, true, i );
}
for ( i=0; i<N*9; i++ ){
- doUpdate( false )
+ doUpdate( bulk, false );
}
-db.getLastError();
+assert.writeOK(bulk.execute());
for ( var i=0; i<50; i++ ){
s.printChunks( "test.foo" )
@@ -109,25 +110,15 @@ function check( msg , dontAssert ){
function diff1(){
jsTest.log("Running diff1...")
-
- var myid = doUpdate( false )
- var le = db.getLastErrorCmd();
-
- if ( le.err )
- print( "ELIOT ELIOT : " + tojson( le ) + "\t" + myid );
-
- if ( ! le.updatedExisting || le.n != 1 ) {
- print( "going to assert for id: " + myid + " correct count is: " + counts[myid] + " db says count is: " + tojson(db.foo.findOne( { _id : myid } )) );
- }
-
- assert( le.updatedExisting , "GLE diff myid: " + myid + " 1: " + tojson(le) )
- assert.eq( 1 , le.n , "GLE diff myid: " + myid + " 2: " + tojson(le) )
+ bulk = db.foo.initializeUnorderedBulkOp();
+ var myid = doUpdate( bulk, false );
+ var res = assert.writeOK(bulk.execute());
- if ( Math.random() > .99 ){
- db.getLastError()
- check( "random late check" ); // SERVER-1430
- }
+ assert.eq( 1, res.nModified,
+ "diff myid: " + myid + " 2: " + res.toString() + "\n" +
+ " correct count is: " + counts[myid] +
+ " db says count is: " + tojson(db.foo.findOne({ _id: myid })) );
var x = s.chunkCounts( "foo" )
if ( Math.random() > .999 )
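The upserts in sharding_balance4.js go through the bulk builder as well, and the old updatedExisting/n checks from getLastErrorCmd become assertions on the BulkWriteResult counters. In outline:

    // Upsert through the bulk API and inspect the result counters.
    var bulk = db.foo.initializeUnorderedBulkOp();
    bulk.find({ _id: 42 }).upsert().update({ $inc: { x: 1 } });
    var res = assert.writeOK(bulk.execute());
    // nUpserted counts documents created; nModified counts existing documents changed.
    printjson({ nUpserted: res.nUpserted, nModified: res.nModified });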
diff --git a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js
index 41bf9bf03c5..e3728817744 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js
+++ b/jstests/noPassthroughWithMongod/sharding_balance_randomorder1.js
@@ -14,12 +14,14 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
- db.foo.insert( { _id : Math.random() , s : bigString } );
+ bulk.insert({ _id: Math.random(), s: bigString });
inserted += bigString.length;
}
+assert.writeOK(bulk.execute());
-db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
index 414b6d57925..32278c089f3 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
@@ -22,11 +22,11 @@ for( var i = 0; i < nsq; i++ ) data += data
dataObj = {}
for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < 40; i++ ) {
- if(i != 0 && i % 10 == 0) printjson( coll.stats() )
- coll.save({ data : dataObj })
+ bulk.insert({ data: dataObj });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
assert.eq( 40 , coll.count() , "prep1" );
@@ -46,9 +46,9 @@ assert.soon(
// On *extremely* slow or variable systems, we've seen migrations fail in the critical section and
// kill the server. Do an explicit check for this. SERVER-8781
// TODO: Remove once we can better specify what systems to run what tests on.
- try {
- assert.eq(null, shardA.getDB("admin").getLastError());
- assert.eq(null, shardB.getDB("admin").getLastError());
+ try {
+ assert.commandWorked(shardA.getDB("admin").runCommand({ ping: 1 }));
+ assert.commandWorked(shardB.getDB("admin").runCommand({ ping: 1 }));
}
catch(e) {
print("An error occurred contacting a shard during balancing," +
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
index 2948dbef3f9..c4484356dd4 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
@@ -23,11 +23,11 @@ numDocs = 20 * docsPerChunk
print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs )
-for ( i=0; i<numDocs; i++ ){
- t.insert( { _id : i , s : bigString } );
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++){
+ bulk.insert({ _id: i, s: bigString });
}
-
-db.getLastError();
+assert.writeOK(bulk.execute());
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
index 6b2e7faa56b..8132e33c5d0 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
@@ -40,15 +40,14 @@ coll.insert({ _id : -2, d : data15PlusMB });
coll.insert({ _id : -1, d : data15PlusMB });
// Docs of assorted sizes
-coll.insert({ _id : 0, d : "x" });
-coll.insert({ _id : 1, d : data15PlusMB });
-coll.insert({ _id : 2, d : "x" });
-coll.insert({ _id : 3, d : data15MB });
-coll.insert({ _id : 4, d : "x" });
-coll.insert({ _id : 5, d : data1MB });
-coll.insert({ _id : 6, d : "x" });
-
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(coll.insert({ _id : 0, d : "x" }));
+assert.writeOK(coll.insert({ _id : 1, d : data15PlusMB }));
+assert.writeOK(coll.insert({ _id : 2, d : "x" }));
+assert.writeOK(coll.insert({ _id : 3, d : data15MB }));
+assert.writeOK(coll.insert({ _id : 4, d : "x" }));
+assert.writeOK(coll.insert({ _id : 5, d : data1MB }));
+assert.writeOK(coll.insert({ _id : 6, d : "x" }));
+
assert.eq( 9, coll.find().itcount() );
jsTest.log( "Starting migration..." );
diff --git a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js b/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
index 9c4d73d5a2c..0d8af3a1ebe 100644
--- a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
+++ b/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
@@ -6,12 +6,14 @@ s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
db = s.getDB( "test" );
+var bulk = db.foo.initializeUnorderedBulkOp();
+var bulk2 = db.bar.initializeUnorderedBulkOp();
for ( i=0; i<100; i++ ) {
- db.foo.insert( { _id : i , x : i } )
- db.bar.insert( { _id : i , x : i } )
+ bulk.insert({ _id: i, x: i });
+ bulk2.insert({ _id: i, x: i });
}
-
-db.getLastError();
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
sh.splitAt( "test.foo" , { _id : 50 } )
diff --git a/jstests/noPassthroughWithMongod/sharding_rs1.js b/jstests/noPassthroughWithMongod/sharding_rs1.js
index d79605ad177..53a1f5e5dda 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs1.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs1.js
@@ -15,12 +15,13 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString , x : Math.random() } );
+ bulk.insert({ _id: num++, s: bigString, x: Math.random() });
inserted += bigString.length;
}
+assert.writeOK(bulk.execute());
-db.getLastError();
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
diff --git a/jstests/noPassthroughWithMongod/sharding_rs2.js b/jstests/noPassthroughWithMongod/sharding_rs2.js
index b577bf82ba9..1a0fe612d70 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs2.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs2.js
@@ -92,11 +92,12 @@ assert.lte( before.query + 10 , after.query , "B3" )
db.foo.ensureIndex( { x : 1 } )
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<100; i++ ){
if ( i == 17 ) continue;
- db.foo.insert( { x : i } )
+ bulk.insert({ x: i });
}
-db.getLastError( 3 , 10000 );
+assert.writeOK(bulk.execute({ w: 3 }));
// Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
// replication for this and future tests to pass
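Tests that previously waited for replication with getLastError( w, timeout ) now pass the write concern to execute() itself, as in sharding_rs2.js above. For example (values illustrative):

    // Attach the write concern to the batch instead of calling getLastError(w, timeout).
    var bulk = db.foo.initializeUnorderedBulkOp();
    for (var i = 0; i < 100; i++) {
        bulk.insert({ x: i });
    }
    assert.writeOK(bulk.execute({ w: 3, wtimeout: 10000 }));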
diff --git a/jstests/noPassthroughWithMongod/ttl1.js b/jstests/noPassthroughWithMongod/ttl1.js
index cba4d035b05..3ce494bc2ef 100644
--- a/jstests/noPassthroughWithMongod/ttl1.js
+++ b/jstests/noPassthroughWithMongod/ttl1.js
@@ -35,7 +35,6 @@ t.insert( { x : true } ) //non-date value
t.insert( { x : "yo" } ) //non-date value
t.insert( { x : 3 } ) //non-date value
t.insert( { x : /foo/ } ) //non-date value
-db.getLastError();
assert.eq( 30 , t.count() );
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index 3b251dfa8a9..5646ce22a39 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -29,9 +29,11 @@ masterdb.createCollection(mastercol.getName(), {usePowerOf2Sizes: false});
// create new collection. insert 24 docs, aged at one-hour intervalss
now = (new Date()).getTime();
-for ( i=0; i<24; i++ )
- mastercol.insert( { x : new Date( now - ( 3600 * 1000 * i ) ) } );
-masterdb.getLastError();
+var bulk = mastercol.initializeUnorderedBulkOp();
+for ( i=0; i<24; i++ ) {
+ bulk.insert({ x: new Date( now - ( 3600 * 1000 * i )) });
+}
+assert.writeOK(bulk.execute());
rt.awaitReplication();
assert.eq( 24 , mastercol.count() , "docs not inserted on primary" );
assert.eq( 24 , slave1col.count() , "docs not inserted on secondary" );
@@ -48,8 +50,7 @@ assert.eq( 0 , slave1col.stats().userFlags , "userFlags not 0 on secondary");
// create TTL index, wait for TTL monitor to kick in, then check that
// userFlags get set to 1, and correct number of docs age out
-mastercol.ensureIndex( { x : 1 } , { expireAfterSeconds : 20000 } );
-masterdb.getLastError();
+assert.commandWorked(mastercol.ensureIndex({ x: 1 }, { expireAfterSeconds: 20000 }));
rt.awaitReplication();
sleep(70*1000); // TTL monitor runs every 60 seconds, so wait 70
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
index 99c8681a144..15e72b66a81 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
@@ -17,8 +17,7 @@ var primeSystemReplset = function() {
print("create a TTL collection");
var testDB = conn.getDB("test");
- testDB.foo.ensureIndex({x:1}, {expireAfterSeconds : 2});
- testDB.getLastError();
+ assert.commandWorked(testDB.foo.ensureIndex({ x: 1 }, { expireAfterSeconds: 2 }));
};
var restartWithConfig = function() {
@@ -38,8 +37,7 @@ var restartWithConfig = function() {
var restartWithoutConfig = function() {
var localDB = conn.getDB("local");
- localDB.system.replset.remove({});
- localDB.getLastError();
+ assert.writeOK(localDB.system.replset.remove({}));
stopMongod(runner.port(), 15);
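Since ensureIndex is now backed by the createIndexes command, its return value can be checked directly, which is what the TTL tests above do. For instance (collection name illustrative):

    // Index creation returns a command result; no follow-up getLastError is needed.
    assert.commandWorked(db.example_ttl.ensureIndex({ x: 1 }, { expireAfterSeconds: 2 }));
    assert.writeOK(db.example_ttl.remove({}));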
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index d5aa45e269a..2c524d8d788 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -20,11 +20,12 @@ s.adminCommand( { shardcollection : ns , key: { _id : 1 } } );
// insert 24 docs, with timestamps at one hour intervals
var now = (new Date()).getTime();
-for ( i=0; i<24; i++ ){
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < 24; i++) {
var past = new Date( now - ( 3600 * 1000 * i ) );
- t.insert( {_id : i , x : past } );
+ bulk.insert({ _id: i, x: past });
}
-s.getDB( dbname ).getLastError();
+assert.writeOK(bulk.execute());
assert.eq( t.count() , 24 , "initial docs not inserted");
// create the TTL index which delete anything older than ~5.5 hours
diff --git a/jstests/quota/quota1.js b/jstests/quota/quota1.js
deleted file mode 100644
index f062686b099..00000000000
--- a/jstests/quota/quota1.js
+++ /dev/null
@@ -1,51 +0,0 @@
-t = db.quota1;
-
-print( "starting quota1.a" );
-
-// Test that db.eval() times out if quotas are enabled
-assert.throws(
- function(z){
- db.eval(
- function(){
- db.quota1a.save( { a : 1 } );
- var a = 5;
- while ( true ){
- a += 2;
- }
- }
- )
- }
-);
-
-print( "done quota1.a" );
-
-//print( "starting quota1.b" );
-//assert.throws(
-// function(z){
-// db.eval(
-// function(){
-// db.quota1b.save( { a : 1 } );
-// var a = 5;
-// assert( sleep( 150000 ) );
-// }
-// )
-// }
-//);
-//print( "done quota1.b" );
-//
-//print( "starting quota1.c" );
-//assert.throws(
-// function(z){
-// db.eval(
-// function(){
-// db.quota1c.save( { a : 1 } );
-// var a = 1;
-// while ( true ){
-// a += 1;
-// assert( sleep( 1000 ) );
-// }
-// }
-// )
-// }
-//);
-//print( "done quota1.c" );
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index ccde8874fbd..a2ec3ceb52f 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -25,7 +25,7 @@ function check( note ){
sleep( 200 );
}
lastOpLogEntry = m.getDB("local").oplog.$main.find({op:{$ne:"n"}}).sort({$natural:-1}).limit(-1).next();
- note = note + tojson(am.a.find().toArray()) + " != " + tojson(as.a.find().toArray())
+ note = note + tojson(am.a.find().toArray()) + " != " + tojson(as.a.find().toArray())
+ "last oplog:" + tojson(lastOpLogEntry);
assert.eq( x.md5 , y.md5 , note );
}
@@ -44,9 +44,8 @@ check( "C" );
// ----- check features -------
// map/reduce
-am.mr.insert( { tags : [ "a" ] } )
-am.mr.insert( { tags : [ "a" , "b" ] } )
-am.getLastError();
+assert.writeOK(am.mr.insert({ tags: [ "a" ]}));
+assert.writeOK(am.mr.insert({ tags: [ "a", "b" ]}));
check( "mr setup" );
m = function(){
@@ -87,22 +86,19 @@ block();
checkNumCollections( "MR4" );
-
-t = am.rpos;
-t.insert( { _id : 1 , a : [ { n : "a" , c : 1 } , { n : "b" , c : 1 } , { n : "c" , c : 1 } ] , b : [ 1 , 2 , 3 ] } )
-block();
+var t = am.rpos;
+var writeOption = { writeConcern: { w: 2, wtimeout: 3000 }};
+t.insert({ _id: 1, a: [{ n: "a", c: 1 }, { n: "b", c: 1 }, { n: "c", c: 1 }], b: [ 1, 2, 3 ]},
+ writeOption);
check( "after pos 1 " );
-t.update( { "a.n" : "b" } , { $inc : { "a.$.c" : 1 } } )
-block();
+t.update({ "a.n": "b" }, { $inc: { "a.$.c": 1 }}, writeOption);
check( "after pos 2 " );
-t.update( { "b" : 2 } , { $inc : { "b.$" : 1 } } )
-block();
+t.update({ b: 2 }, { $inc: { "b.$": 1 }}, writeOption);
check( "after pos 3 " );
-t.update( { "b" : 3} , { $set : { "b.$" : 17 } } )
-block();
+t.update({ b: 3 }, { $set: { "b.$": 17 }}, writeOption);
check( "after pos 4 " );
@@ -112,23 +108,17 @@ printjson( as.rpos.findOne() )
//am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().limit(10).sort( { $natural : -1 } ).forEach( printjson )
t = am.b;
-t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 6743} } , true, false)
-block()
+var updateOption = { upsert: true, multi: false, writeConcern: { w: 2, wtimeout: 3000 }};
+t.update({ _id: "fun" }, { $inc: { "a.b.c.x": 6743 }}, updateOption);
check( "b 1" );
-t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 5} } , true, false)
-block()
+t.update({ _id: "fun" }, { $inc: { "a.b.c.x": 5 }}, updateOption);
check( "b 2" );
-t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 100, "a.b.c.y" : 911} } , true, false)
-block()
+t.update({ _id: "fun" }, { $inc: { "a.b.c.x": 100, "a.b.c.y": 911 }}, updateOption);
assert.eq( { _id : "fun" , a : { b : { c : { x : 6848 , y : 911 } } } } , as.b.findOne() , "b 3" );
-//printjson( t.findOne() )
-//printjson( as.b.findOne() )
-//am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().sort( { $natural : -1 } ).limit(3).forEach( printjson )
check( "b 4" );
-
// lots of indexes
am.lotOfIndexes.insert( { x : 1 } )
@@ -136,10 +126,8 @@ for ( i=0; i<200; i++ ){
var idx = {}
idx["x"+i] = 1;
am.lotOfIndexes.ensureIndex( idx );
- am.getLastError()
}
-
assert.soon( function(){ return am.lotOfIndexes.getIndexes().length == as.lotOfIndexes.getIndexes().length; } , "lots of indexes a" )
assert.eq( am.lotOfIndexes.getIndexes().length , as.lotOfIndexes.getIndexes().length , "lots of indexes b" )
@@ -154,9 +142,8 @@ assert.soon( function(){ z = as.mu1.findOne(); printjson( z ); return friendlyEq
// profiling - this should be last
am.setProfilingLevel( 2 )
-am.foo.insert( { x : 1 } )
+am.foo.insert({ x: 1 }, writeOption);
am.foo.findOne()
-block();
assert.eq( 2 , am.system.profile.count() , "P1" )
assert.eq( 0 , as.system.profile.count() , "P2" )
diff --git a/jstests/repl/block1.js b/jstests/repl/block1.js
index e358ba39705..ef36c3efb34 100644
--- a/jstests/repl/block1.js
+++ b/jstests/repl/block1.js
@@ -11,8 +11,7 @@ tm = dbm.bar;
ts = dbs.bar;
for ( var i=0; i<1000; i++ ){
- tm.insert( { _id : i } );
- dbm.runCommand( { getlasterror : 1 , w : 2 } )
+ tm.insert({ _id: i }, { writeConcern: { w: 2 }});
assert.eq( i + 1 , ts.count() , "A" + i );
assert.eq( i + 1 , tm.count() , "B" + i );
}
diff --git a/jstests/repl/block2.js b/jstests/repl/block2.js
index 64e52b8a94f..fc35b2774c4 100644
--- a/jstests/repl/block2.js
+++ b/jstests/repl/block2.js
@@ -18,33 +18,14 @@ function check( msg ){
assert.eq( tm.count() , ts.count() , "check: " + msg );
}
-function worked( w , wtimeout ){
- var gle = dbm.getLastError( w , wtimeout );
- if (gle != null) {
- printjson(gle);
- }
- return gle == null;
-}
-
check( "A" );
-tm.save( { x : 1 } );
-assert( worked( 2 ) , "B" );
-
-tm.save( { x : 2 } );
-assert( worked( 2 , 3000 ) , "C" )
+assert.writeOK(tm.insert({ x: 1 }, { writeConcern: { w: 2 }}));
+assert.writeOK(tm.insert({ x: 2 }, { writeConcern: { w: 2, wtimeout: 3000 }}));
rt.stop( false );
-tm.save( { x : 3 } )
+assert.writeError(tm.insert({ x: 3 }, { writeConcern: { w: 2, wtimeout: 3000 }}));
assert.eq( 3 , tm.count() , "D1" );
-assert( ! worked( 2 , 3000 ) , "D2" )
-
-s = rt.start( false )
-setup();
-assert( worked( 2 , 30000 ) , "E" )
rt.stop();
-
-
-
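Replication checks built on getLastError(w, wtimeout) become per-operation write concerns, asserted with assert.writeOK when the write should replicate and assert.writeError when it is expected to time out. A minimal sketch of both sides of the pattern (collection name and timeout are illustrative):

    assert.writeOK(coll.insert({ x: 1 }, { writeConcern: { w: 2, wtimeout: 3000 }}));
    // with the secondary stopped, the same write concern cannot be satisfied
    assert.writeError(coll.insert({ x: 2 }, { writeConcern: { w: 2, wtimeout: 3000 }}));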
diff --git a/jstests/repl/drop_dups.js b/jstests/repl/drop_dups.js
index 1b151cfb71f..bd3d2820108 100644
--- a/jstests/repl/drop_dups.js
+++ b/jstests/repl/drop_dups.js
@@ -4,9 +4,7 @@ var rt = new ReplTest( "drop_dups" );
m = rt.start( true );
s = rt.start( false );
-function block(){
- am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } )
-}
+var writeOption = { writeConcern: { w: 2, wtimeout: 3000 }};
am = m.getDB( "foo" );
as = s.getDB( "foo" );
@@ -16,20 +14,18 @@ function run( createInBackground ) {
collName = "foo" + ( createInBackground ? "B" : "F" );
am[collName].drop();
- am.blah.insert( { x : 1 } )
+ am.blah.insert({ x: 1 }, writeOption);
assert.soon( function(){
- block();
return as.blah.findOne();
}
);
-
-
- for ( i=0; i<10; i++ ) {
- am[collName].insert( { _id : i , x : Math.floor( i / 2 ) } )
+
+ var bulk = am[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < 10; i++) {
+ bulk.insert({ _id: i, x: Math.floor( i / 2 ) });
}
-
- block();
-
+ assert.writeOK(bulk.execute({ w: 2, wtimeout: 3000 }));
+
am.runCommand( { "godinsert" : collName , obj : { _id : 100 , x : 20 } } );
am.runCommand( { "godinsert" : collName , obj : { _id : 101 , x : 20 } } );
@@ -43,8 +39,8 @@ function run( createInBackground ) {
}
am[collName].ensureIndex( { x : 1 } , { unique : true , dropDups : true , background : createInBackground } );
- am.blah.insert( { x : 1 } )
- block();
+ am.blah.insert({ x: 1 }, writeOption);
+
assert.eq( 2 , am[collName].getIndexKeys().length , "A1 : " + createInBackground )
if (!createInBackground) {
assert.eq( 2 , as[collName].getIndexKeys().length , "A2 : " + createInBackground )
diff --git a/jstests/repl/master1.js b/jstests/repl/master1.js
index 93bfaf7862c..49b3416d202 100644
--- a/jstests/repl/master1.js
+++ b/jstests/repl/master1.js
@@ -43,11 +43,9 @@ m.getDB( "local" ).runCommand( {godinsert:"oplog.$main", obj:op} );
rt.stop( true );
m = rt.start( true, null, true );
assert.eq( op.ts.i, lastop().ts.i );
-am().save( {} );
-// The above write should cause the server to terminate
assert.throws(function() {
- am().findOne();
+    am().save( {} ); // triggers fassert because of clock skew
});
assert.neq(0, rt.stop( true )); // fasserted
diff --git a/jstests/repl/mastermaster1.js b/jstests/repl/mastermaster1.js
deleted file mode 100644
index 97fdc149b56..00000000000
--- a/jstests/repl/mastermaster1.js
+++ /dev/null
@@ -1,55 +0,0 @@
-// basic testing of master/master
-
-
-ports = allocatePorts( 2 )
-
-left = startMongodTest( ports[0] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[1] } )
-
-x = left.getDB( "admin" ).runCommand( "ismaster" )
-assert( x.ismaster , "left: " + tojson( x ) )
-
-right = startMongodTest( ports[1] , "mastermaster1right" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } )
-
-x = right.getDB( "admin" ).runCommand( "ismaster" )
-assert( x.ismaster , "right: " + tojson( x ) )
-
-print( "check 1" )
-
-
-ldb = left.getDB( "test" )
-rdb = right.getDB( "test" )
-
-print( "check 2" )
-
-ldb.foo.insert( { _id : 1 , x : "eliot" } )
-result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } );
-printjson(result);
-rdb.foo.insert( { _id : 2 , x : "sara" } )
-result = rdb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } )
-printjson(result);
-
-print( "check 3" )
-
-print( "left" )
-ldb.foo.find().forEach( printjsononeline )
-print( "right" )
-rdb.foo.find().forEach( printjsononeline )
-
-print( "oplog" )
-
-rdb.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().forEach( printjsononeline )
-
-/*
-assert.eq( 2 , ldb.foo.count() , "B1" )
-assert.eq( 2 , rdb.foo.count() , "B2" )
-*/
-
-print( "going to stop everything" )
-
-for ( var i=0; i<ports.length; i++ ){
- stopMongod( ports[i] );
-}
-
-print( "yay" )
-
-
diff --git a/jstests/repl/mod_move.js b/jstests/repl/mod_move.js
index d39e747b833..66cc00b39e5 100644
--- a/jstests/repl/mod_move.js
+++ b/jstests/repl/mod_move.js
@@ -6,10 +6,6 @@ var rt = new ReplTest( "mod_move" );
m = rt.start( true , { oplogSize : 50 } );
-function block(){
- am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } )
-}
-
am = m.getDB( "foo" );
function check( note ){
@@ -31,39 +27,30 @@ function check( note ){
BIG = 100000;
N = BIG * 2;
-s : "asdasdasdasdasdasdasdadasdadasdadasdasdas"
-
-for ( i=0; i<BIG; i++ ) {
- am.a.insert( { _id : i , s : 1 , x : 1 } )
+var bulk = am.a.initializeUnorderedBulkOp();
+for (var i = 0; i < BIG; i++) {
+ bulk.insert({ _id: i, s: 1, x: 1 });
}
-for ( ; i<N; i++ ) {
- am.a.insert( { _id : i , s : 1 } )
+for (; i < N; i++) {
+ bulk.insert({ _id: i, s: 1 });
}
-for ( i=0; i<BIG; i++ ) {
- am.a.remove( { _id : i } )
+for (i = 0; i < BIG; i++) {
+ bulk.find({ _id: i }).remove();
}
-am.getLastError();
+assert.writeOK(bulk.execute());
assert.eq( BIG , am.a.count() )
assert.eq( 1 , am.a.stats().paddingFactor , "A2" )
-
// start slave
s = rt.start( false );
as = s.getDB( "foo" );
-for ( i=N-1; i>=BIG; i-- ) {
- am.a.update( { _id : i } , { $set : { x : 1 } } )
- if ( i == N ) {
- am.getLastError()
- assert.lt( as.a.count() , BIG , "B1" )
- print( "NOW : " + as.a.count() )
- }
+bulk = am.a.initializeUnorderedBulkOp();
+for (i = N - 1; i >= BIG; i--) {
+ bulk.find({ _id: i }).update({ $set: { x: 1 }});
}
+assert.writeOK(bulk.execute());
check( "B" )
rt.stop();
-
-
-
-
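Updates and removes join the same unordered bulk builder through its find() clause instead of being issued as separate collection calls. A minimal sketch of mixing operation types in one batch (field names illustrative):

    var bulk = db.a.initializeUnorderedBulkOp();
    for (var i = 0; i < 100; i++) {
        bulk.find({ _id: i }).update({ $set: { x: 1 } });
    }
    bulk.find({ legacy: true }).remove();
    assert.writeOK(bulk.execute());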
diff --git a/jstests/repl/repl12.js b/jstests/repl/repl12.js
index 8db4b75e7a9..4bfaf17d5d7 100644
--- a/jstests/repl/repl12.js
+++ b/jstests/repl/repl12.js
@@ -17,7 +17,6 @@ for( i = 0; i < 3; ++i ) {
m.getDB( a ).c.save( {} );
a += "a";
}
-m.getDB(a).getLastError();
//print("\n\n\n DB NAMES MASTER:");
//printjson(m.getDBNames());
diff --git a/jstests/repl/repl13.js b/jstests/repl/repl13.js
index e8a80966dab..78daae24c32 100644
--- a/jstests/repl/repl13.js
+++ b/jstests/repl/repl13.js
@@ -12,10 +12,11 @@ m = rt.start( true );
mc = m.getDB( 'd' )[ 'c' ];
// Insert some documents with a:{} fields.
-for( i = 0; i < 100000; ++i ) {
- mc.save( {_id:i,a:{}} );
+var bulk = mc.initializeUnorderedBulkOp();
+for(var i = 0; i < 100000; ++i) {
+ bulk.insert({ _id: i, a: {}});
}
-m.getDB( 'd' ).getLastError();
+assert.writeOK(bulk.execute());
s = rt.start( false );
sc = s.getDB( 'd' )[ 'c' ];
@@ -26,11 +27,13 @@ assert.soon( function() { debug( sc.count() ); return sc.count() > 0; } );
// Update documents that will be cloned last with the intent that an updated version will be cloned.
// This may cause an assertion when an update that was successfully applied to the original version
// of a document is replayed against an updated version of the same document.
+bulk = mc.initializeUnorderedBulkOp();
for( i = 99999; i >= 90000; --i ) {
// If the document is cloned as {a:1}, the {$set:{'a.b':1}} modifier will uassert.
- mc.update( {_id:i}, {$set:{'a.b':1}} );
- mc.update( {_id:i}, {$set:{a:1}} );
+ bulk.find({ _id: i }).update({ $set: { 'a.b': 1 }});
+ bulk.find({ _id: i }).update({ $set: { a: 1 }});
}
+assert.writeOK(bulk.execute());
// The initial sync completes and subsequent writes succeed, in spite of any assertions that occur
// when the update operations above are replicated.
diff --git a/jstests/repl/repl17.js b/jstests/repl/repl17.js
index 8011d974328..c7a7be35ffc 100644
--- a/jstests/repl/repl17.js
+++ b/jstests/repl/repl17.js
@@ -11,7 +11,6 @@ md = master.getDB( 'd' );
for( i = 0; i < 1000; ++i ) {
md[ ''+i ].save( {} );
}
-md.getLastError();
slave = rt.start( false );
sd = slave.getDB( 'd' );
diff --git a/jstests/repl/repl19.js b/jstests/repl/repl19.js
index 71d4335014a..a655d522bae 100644
--- a/jstests/repl/repl19.js
+++ b/jstests/repl/repl19.js
@@ -13,8 +13,7 @@ for( i = 0; i < 100000; ++i ) {
}
targetId = 1000*1000;
-mc.insert( { _id:targetId, val:[ 1, 2, 3 ] } );
-master.getDB( 'd' ).getLastError();
+assert.writeOK(mc.insert({ _id: targetId, val: [ 1, 2, 3 ] }));
slave = rt.start( false );
sc = slave.getDB( 'd' )[ 'c' ];
diff --git a/jstests/repl/repl20.js b/jstests/repl/repl20.js
index 02e50f58f1f..c30ef8d6f3c 100644
--- a/jstests/repl/repl20.js
+++ b/jstests/repl/repl20.js
@@ -13,8 +13,7 @@ for( i = 0; i < 100000; ++i ) {
}
targetId = 1000*1000;
-mc.insert( { _id:targetId, val:[ 1 ] } );
-master.getDB( 'd' ).getLastError();
+assert.writeOK(mc.insert({ _id: targetId, val: [ 1 ] }));
slave = rt.start( false );
sc = slave.getDB( 'd' )[ 'c' ];
diff --git a/jstests/repl/repl21.js b/jstests/repl/repl21.js
index a94a4b5b779..87c0c7fdd02 100644
--- a/jstests/repl/repl21.js
+++ b/jstests/repl/repl21.js
@@ -8,13 +8,14 @@ rt = new ReplTest( "repl21tests" );
master = rt.start( true );
mc = master.getDB( 'd' )[ 'c' ];
-for( i = 0; i < 100000; ++i ) {
- mc.insert( { _id:i, z:i } );
+var bulk = mc.initializeUnorderedBulkOp();
+for(var i = 0; i < 100000; ++i) {
+ bulk.insert({ _id: i, z: i });
}
targetId = 1000*1000;
-mc.insert( { _id:targetId, val:[ 1 ] } );
-master.getDB( 'd' ).getLastError();
+bulk.insert({ _id: targetId, val: [ 1 ] });
+assert.writeOK(bulk.execute());
slave = rt.start( false );
sc = slave.getDB( 'd' )[ 'c' ];
@@ -36,4 +37,4 @@ assert.soon( function() { return sc.count( { _id:'sentinel' } ) > 0; } );
assert.eq( [ 1, 3 ], mc.findOne( { _id:targetId } ).val );
assert.eq( [ 1, 3 ], sc.findOne( { _id:targetId } ).val );
-} \ No newline at end of file
+}
diff --git a/jstests/repl/repl5.js b/jstests/repl/repl5.js
index eda48496656..aeba7eb1095 100644
--- a/jstests/repl/repl5.js
+++ b/jstests/repl/repl5.js
@@ -14,8 +14,10 @@ doTest = function(signal, extraOpts) {
m = rt.start( true );
ma = m.getDB( "a" ).a;
+ var bulk = ma.initializeUnorderedBulkOp();
for( i = 0; i < 10000; ++i )
- ma.save( { i:i } );
+ bulk.insert({ i: i });
+ assert.writeOK(bulk.execute());
s = rt.start(false, extraOpts);
soonCountAtLeast( "a", "a", 1 );
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index b65085c5702..f10b6b777bc 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -134,19 +134,18 @@ wait(function() {
print("add some more data 1");
master.auth("bar", "baz");
+bulk = master.foo.initializeUnorderedBulkOp();
for (var i=0; i<1000; i++) {
- master.foo.insert({x:i, foo : "bar"});
+ bulk.insert({ x: i, foo: "bar" });
}
-var result = master.runCommand({getlasterror:1, w:2, wtimeout:60000});
-printjson(result);
-
+assert.writeOK(bulk.execute({ w: 2 }));
print("resync");
rs.restart(0, {"keyFile" : path+"key1"});
print("add some more data 2");
-var bulk = master.foo.initializeUnorderedBulkOp();
+bulk = master.foo.initializeUnorderedBulkOp();
for (var i=0; i<1000; i++) {
bulk.insert({ x: i, foo: "bar" });
}
diff --git a/jstests/replsets/downstream.js b/jstests/replsets/downstream.js
index 795e6671d46..f50716f0340 100755
--- a/jstests/replsets/downstream.js
+++ b/jstests/replsets/downstream.js
@@ -23,11 +23,13 @@ var conn = repset.getMaster()
var db = conn.getDB('test')
// Add data to it
-for (var i = 0; i < N; i++)
- db['foo'].insert({x: i, text: Text})
-
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < N; i++) {
+ bulk.insert({ x: i, text: Text });
+}
// wait to be copied to at least one secondary (BUG hangs here)
-db.getLastError(2)
+assert.writeOK(bulk.execute({ w: 2 }));
+
print('getlasterror_w2.js SUCCESS')
}
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index da8979bb34c..20364381dd0 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -77,7 +77,6 @@ doTest = function (signal) {
s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1");
- // Test getlasterror with large insert
print("replset2.js **** Try inserting many records ****")
try {
var bigData = new Array(2000).toString();
diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js
index 55e68768eee..074aa7b9dda 100644
--- a/jstests/replsets/tags2.js
+++ b/jstests/replsets/tags2.js
@@ -1,4 +1,4 @@
-// Change a getLastErrorMode from 2 to 3 servers
+// Change a write concern mode from 2 to 3 servers
var host = getHostName();
var replTest = new ReplSetTest( {name: "rstag", nodes: 4, startPort: 31000} );
diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js
index e74c6c4bf4d..8ffa1664526 100644
--- a/jstests/sharding/movechunk_include.js
+++ b/jstests/sharding/movechunk_include.js
@@ -13,16 +13,16 @@ function setupMoveChunkTest(st) {
str += "asdasdsdasdasdasdas";
}
- var data = num = 0;
+ var data = 0;
+ var num = 0;
//Insert till you get to 10MB of data
+ var bulk = testcoll.initializeUnorderedBulkOp();
while ( data < ( 1024 * 1024 * 10 ) ) {
- testcoll.insert( { _id : num++ , s : str } )
- data += str.length
+ bulk.insert({ _id: num++, s: str });
+ data += str.length;
}
-
- //Flush and wait
- testdb.getLastError()
+ assert.writeOK(bulk.execute());
var stats = st.chunkCounts( "foo" )
var to = ""
diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js
index 52ce36a83e0..3fc528293c8 100644
--- a/jstests/sharding/multi_write_target.js
+++ b/jstests/sharding/multi_write_target.js
@@ -25,20 +25,15 @@ st.printShardingStatus();
jsTest.log("Testing multi-update...");
// Put data on all shards
-st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 });
-assert.gleOK(st.shard0.getCollection(coll.toString()).getDB().getLastErrorObj());
-st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 });
-assert.gleOK(st.shard1.getCollection(coll.toString()).getDB().getLastErrorObj());
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 }));
+assert.writeOK(st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 }));
// Data not in chunks
-st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 });
-assert.gleOK(st.shard2.getCollection(coll.toString()).getDB().getLastErrorObj());
+assert.writeOK(st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 }));
// Non-multi-update doesn't work without shard key
-coll.update({ x : 1 }, { $set : { updated : true } }, { multi : false });
-assert.gleError(coll.getDB().getLastErrorObj());
+assert.writeError(coll.update({ x : 1 }, { $set : { updated : true } }, { multi : false }));
-coll.update({ x : 1 }, { $set : { updated : true } }, { multi : true });
-assert.gleOK(coll.getDB().getLastErrorObj());
+assert.writeOK(coll.update({ x : 1 }, { $set : { updated : true } }, { multi : true }));
// Ensure update goes to *all* shards
assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updated : true }));
@@ -46,8 +41,7 @@ assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({ updated : tr
assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updated : true }));
// _id update works, and goes to all shards
-coll.update({ _id : 0 }, { $set : { updatedById : true } }, { multi : false });
-assert.gleOK(coll.getDB().getLastErrorObj());
+assert.writeOK(coll.update({ _id : 0 }, { $set : { updatedById : true } }, { multi : false }));
// Ensure _id update goes to *all* shards
assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updatedById : true }));
@@ -56,11 +50,9 @@ assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updatedById
jsTest.log("Testing multi-delete...");
// non-multi-delete doesn't work without shard key
-coll.remove({ x : 1 }, { justOne : true });
-assert.gleError(coll.getDB().getLastErrorObj());
+assert.writeError(coll.remove({ x : 1 }, { justOne : true }));
-coll.remove({ x : 1 }, { justOne : false });
-assert.gleOK(coll.getDB().getLastErrorObj());
+assert.writeOK(coll.remove({ x : 1 }, { justOne : false }));
// Ensure delete goes to *all* shards
assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x : 1 }));
@@ -68,16 +60,12 @@ assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({ x : 1 }));
assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({ x : 1 }));
// Put more on all shards
-st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 });
-assert.gleOK(st.shard0.getCollection(coll.toString()).getDB().getLastErrorObj());
-st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 });
-assert.gleOK(st.shard1.getCollection(coll.toString()).getDB().getLastErrorObj());
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 0, skey : -1, x : 1 }));
+assert.writeOK(st.shard1.getCollection(coll.toString()).insert({ _id : 1, skey : 1, x : 1 }));
// Data not in chunks
-st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 });
-assert.gleOK(st.shard2.getCollection(coll.toString()).getDB().getLastErrorObj());
+assert.writeOK(st.shard2.getCollection(coll.toString()).insert({ _id : 0, x : 1 }));
-coll.remove({ _id : 0 }, { justOne : true });
-assert.gleOK(coll.getDB().getLastErrorObj());
+assert.writeOK(coll.remove({ _id : 0 }, { justOne : true }));
// Ensure _id delete goes to *all* shards
assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x : 1 }));
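In the sharding tests, assert.gleOK/assert.gleError checks against getLastErrorObj() collapse into assertions on the WriteResult returned by the helper itself; the mongos constraint exercised above is that a non-multi update without the shard key is rejected. A minimal sketch (collection name illustrative):

    assert.writeError(coll.update({ x: 1 }, { $set: { updated: true } }, { multi: false }));
    assert.writeOK(coll.update({ x: 1 }, { $set: { updated: true } }, { multi: true }));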
diff --git a/jstests/slow1/sharding_multiple_collections.js b/jstests/slow1/sharding_multiple_collections.js
index 61d9911afca..d817038ecdb 100644
--- a/jstests/slow1/sharding_multiple_collections.js
+++ b/jstests/slow1/sharding_multiple_collections.js
@@ -12,11 +12,14 @@ S = ""
while ( S.length < 500 )
S += "123123312312";
+var bulk = db.foo.initializeUnorderedBulkOp();
+var bulk2 = db.bar.initializeUnorderedBulkOp();
for ( i=0; i<N; i++ ){
- db.foo.insert( { _id : i , s : S } )
- db.bar.insert( { _id : i , s : S , s2 : S } )
- db.getLastError()
+ bulk.insert({ _id: i, s: S });
+ bulk2.insert({ _id: i, s: S, s2: S });
}
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
db.printShardingStatus()
diff --git a/jstests/slow2/32bit.js b/jstests/slow2/32bit.js
index d80cc7821c3..a149ea3e8dd 100755
--- a/jstests/slow2/32bit.js
+++ b/jstests/slow2/32bit.js
@@ -7,97 +7,97 @@ if (forceSeedToBe)
function f() {
seed = forceSeedToBe || Math.random();
-
+
pass = 1;
var mydb = db.getSisterDB( "test_32bit" );
mydb.dropDatabase();
while( 1 ) {
- if( pass >= 2 )
- break;
+ if( pass >= 2 )
+ break;
print("32bit.js PASS #" + pass);
pass++;
-
+
t = mydb.colltest_32bit;
print("seed=" + seed);
-
+
t.insert({x:1});
t.ensureIndex({a:1});
t.ensureIndex({b:1}, true);
t.ensureIndex({x:1});
if( Math.random() < 0.3 )
- t.ensureIndex({c:1});
+ t.ensureIndex({c:1});
t.ensureIndex({d:1});
t.ensureIndex({e:1});
t.ensureIndex({f:1});
-
+
big = 'a b';
big = big + big;
k = big;
big = big + big;
big = big + big;
big = big + big;
-
+
a = 0;
c = 'kkk';
var start = new Date();
- while( 1 ) {
- b = Math.random(seed);
- d = c + -a;
+ while( 1 ) {
+ b = Math.random(seed);
+ d = c + -a;
f = Math.random(seed) + a;
a++;
- cc = big;
- if( Math.random(seed) < .1 )
- cc = null;
- t.insert({a:a,b:b,c:cc,d:d,f:f});
- if( Math.random(seed) < 0.01 ) {
-
- if( mydb.getLastError() ) {
- /* presumably we have mmap error on 32 bit. try a few more manipulations attempting to break things */
- t.insert({a:33,b:44,c:55,d:66,f:66});
- t.insert({a:33,b:44000,c:55,d:66});
- t.insert({a:33,b:440000,c:55});
- t.insert({a:33,b:4400000});
- t.update({a:20},{'$set':{c:'abc'}});
- t.update({a:21},{'$set':{c:'aadsfbc'}});
- t.update({a:22},{'$set':{c:'c'}});
- t.update({a:23},{'$set':{b:cc}});
- t.remove({a:22});
- break;
- }
-
- t.remove({a:a});
- t.remove({b:Math.random(seed)});
- t.insert({e:1});
- t.insert({f:'aaaaaaaaaa'});
-
+ cc = big;
+ if( Math.random(seed) < .1 )
+ cc = null;
+ var res = t.insert({ a: a, b: b, c: cc, d: d, f: f });
+ if( Math.random(seed) < 0.01 ) {
+ if (res.hasWriteError()) {
+ // Presumably we have mmap error on 32 bit. try a few more manipulations
+ // attempting to break things.
+ t.insert({a:33,b:44,c:55,d:66,f:66});
+ t.insert({a:33,b:44000,c:55,d:66});
+ t.insert({a:33,b:440000,c:55});
+ t.insert({a:33,b:4400000});
+ t.update({a:20},{'$set':{c:'abc'}});
+ t.update({a:21},{'$set':{c:'aadsfbc'}});
+ t.update({a:22},{'$set':{c:'c'}});
+ t.update({a:23},{'$set':{b:cc}});
+ t.remove({a:22});
+ break;
+ }
+
+ t.remove({a:a});
+ t.remove({b:Math.random(seed)});
+ t.insert({e:1});
+ t.insert({f:'aaaaaaaaaa'});
+
if( Math.random() < 0.00001 ) { print("remove cc"); t.remove({c:cc}); }
if( Math.random() < 0.0001 ) { print("update cc"); t.update({c:cc},{'$set':{c:1}},false,true); }
if( Math.random() < 0.00001 ) { print("remove e"); t.remove({e:1}); }
- }
- if (a == 20000 ) {
- var delta_ms = (new Date())-start;
- // 2MM / 20000 = 100. 1000ms/sec.
- var eta_secs = delta_ms * (100 / 1000);
- print("32bit.js eta_secs:" + eta_secs);
- if( eta_secs > 1000 ) {
- print("32bit.js machine is slow, stopping early. a:" + a);
- mydb.dropDatabase();
- return;
- }
- }
- if( a % 100000 == 0 ) {
- print(a);
- // on 64 bit we won't error out, so artificially stop. on 32 bit we will hit mmap limit ~1.6MM but may
- // vary by a factor of 2x by platform
- if( a >= 2200000 ) {
+ }
+ if (a == 20000 ) {
+ var delta_ms = (new Date())-start;
+ // 2MM / 20000 = 100. 1000ms/sec.
+ var eta_secs = delta_ms * (100 / 1000);
+ print("32bit.js eta_secs:" + eta_secs);
+ if( eta_secs > 1000 ) {
+ print("32bit.js machine is slow, stopping early. a:" + a);
+ mydb.dropDatabase();
+ return;
+ }
+ }
+ if( a % 100000 == 0 ) {
+ print(a);
+ // on 64 bit we won't error out, so artificially stop. on 32 bit we will hit
+ // mmap limit ~1.6MM but may vary by a factor of 2x by platform
+ if( a >= 2200000 ) {
mydb.dropDatabase();
- return;
- }
+ return;
+ }
}
- }
+ }
print("count: " + t.count());
var res = t.validate();
@@ -105,11 +105,11 @@ function f() {
print("32bit.js FAIL validating");
print(res.result);
printjson(res);
- //mydb.dropDatabase();
- throw "fail validating 32bit.js";
+ //mydb.dropDatabase();
+ throw "fail validating 32bit.js";
}
- mydb.dropDatabase();
+ mydb.dropDatabase();
}
print("32bit.js SUCCESS");
diff --git a/jstests/slow2/conc_update.js b/jstests/slow2/conc_update.js
index 0d778df047e..4ee5bd22ca7 100644
--- a/jstests/slow2/conc_update.js
+++ b/jstests/slow2/conc_update.js
@@ -6,46 +6,42 @@ db.dropDatabase();
NRECORDS=3*1024*1024
print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)")
-for (i=0; i<(NRECORDS); i++) {
- db.conc.insert({x:i})
- if ((i%(1024*1024))==0)
- print("loaded " + i/(1024*1024) + " mibi-records")
+var bulk = db.conc.initializeUnorderedBulkOp();
+for (var i = 0; i < NRECORDS; i++) {
+ bulk.insert({ x: i });
}
+assert.writeOK(bulk.execute());
print("making an index (this will take a while)")
db.conc.ensureIndex({x:1})
var c1=db.conc.count({x:{$lt:NRECORDS}})
-updater=startParallelShell("db=db.getSisterDB('concurrency');\
- db.concflag.insert( {inprog:true} );\
- sleep(20);\
- db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
- e=db.getLastError();\
- print('update error: '+ e);\
- db.concflag.update({},{inprog:false});\
- assert.eq(e, null, 'update failed');");
+updater = startParallelShell("db = db.getSisterDB('concurrency');\
+ db.concflag.insert({ inprog: true });\
+ sleep(20);\
+ assert.writeOK(db.conc.update({}, \
+ { $inc: { x: " + NRECORDS + "}}, false, true)); \
+ assert.writeOK(db.concflag.update({}, { inprog: false }));");
-assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } ,
+assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } ,
"wait for fork" , 30000 , 1 );
querycount=0;
decrements=0;
misses=0
-assert.soon(
- function(){
- c2=db.conc.count({x:{$lt:NRECORDS}})
- print(c2)
- querycount++;
- if (c2<c1)
- decrements++;
- else
- misses++;
- c1 = c2;
- return ! db.concflag.findOne().inprog;
- } ,
- "update never finished" , 2 * 60 * 60 * 1000 , 10 );
+assert.soon(function(){
+ c2=db.conc.count({x:{$lt:NRECORDS}})
+ print(c2)
+ querycount++;
+ if (c2<c1)
+ decrements++;
+ else
+ misses++;
+ c1 = c2;
+ return ! db.concflag.findOne().inprog;
+}, "update never finished" , 2 * 60 * 60 * 1000 , 10 );
print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
diff --git a/jstests/slow2/dur_big_atomic_update.js b/jstests/slow2/dur_big_atomic_update.js
index 18a7c4a68f2..b8d3ba60be1 100644
--- a/jstests/slow2/dur_big_atomic_update.js
+++ b/jstests/slow2/dur_big_atomic_update.js
@@ -8,26 +8,29 @@ conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--durOption
d = conn.getDB("test");
d.foo.drop();
-for (var i=0; i<1024; i++){
- d.foo.insert({_id:i});
+var bulk = d.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 1024; i++){
+ bulk.insert({ _id: i });
}
+assert.writeOK(bulk.execute());
big_string = 'xxxxxxxxxxxxxxxx';
while (big_string.length < 1024*1024) {
big_string += big_string;
}
-d.foo.update({$atomic:1}, {$set: {big_string: big_string}}, false, /*multi*/true);
-err = d.getLastErrorObj();
-
-assert(err.err == null);
-assert(err.n == 1024);
+var res = assert.writeOK(d.foo.update({ $atomic: 1 },
+ { $set: { big_string: big_string }},
+ false, true /* multi */ ));
+assert.eq(1024, res.nModified);
d.dropDatabase();
-for (var i=0; i<1024; i++){
- d.foo.insert({_id:i});
+bulk = d.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 1024; i++){
+ bulk.insert({ _id: i });
}
+assert.writeOK(bulk.execute());
// Do it again but in a db.eval
d.eval(
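As in the dur_big_atomic_update.js hunk, assert.writeOK returns the WriteResult it validated, so follow-up checks such as nModified can chain off the same expression. A minimal sketch (counts illustrative):

    var res = assert.writeOK(db.foo.update({}, { $set: { flag: true } }, false, /*multi*/ true));
    assert.eq(1024, res.nModified);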
diff --git a/jstests/slow2/mr_during_migrate.js b/jstests/slow2/mr_during_migrate.js
index 53ffd10fbec..6ab9754c4f6 100644
--- a/jstests/slow2/mr_during_migrate.js
+++ b/jstests/slow2/mr_during_migrate.js
@@ -17,11 +17,13 @@ var dataSize = 1024 // bytes, must be power of 2
var data = "x"
while( data.length < dataSize ) data += data
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < numDocs; i++ ){
- coll.insert({ _id : i, data : data })
+ bulk.insert({ _id: i, data: data });
}
+assert.writeOK(bulk.execute());
+
// Make sure everything got inserted
-assert.eq( null, coll.getDB().getLastError() )
assert.eq( numDocs, coll.find().itcount() )
diff --git a/jstests/slow2/replsets_killop.js b/jstests/slow2/replsets_killop.js
index 3d3ee51f709..e035ae3cd40 100644
--- a/jstests/slow2/replsets_killop.js
+++ b/jstests/slow2/replsets_killop.js
@@ -16,11 +16,11 @@ assert.soon( function() { return secondary.getDB( 'test' ).test.count() == 1; }
// Start a parallel shell to insert new documents on the primary.
inserter = startParallelShell(
- 'for( i = 1; i < ' + numDocs + '; ++i ) { \
- db.test.save( { a:i } ); \
- sleep( 1 ); \
- } \
- db.getLastError();'
+ 'var bulk = db.test.initializeUnorderedBulkOp(); \
+ for( i = 1; i < ' + numDocs + '; ++i ) { \
+ bulk.insert({ a: i }); \
+ } \
+ bulk.execute();'
);
// Periodically kill replication get mores.
diff --git a/jstests/slow2/replsets_prefetch_stress.js b/jstests/slow2/replsets_prefetch_stress.js
index dcd523dd3a4..4273cb594fd 100644
--- a/jstests/slow2/replsets_prefetch_stress.js
+++ b/jstests/slow2/replsets_prefetch_stress.js
@@ -8,8 +8,7 @@ replTest.initiate();
var master = replTest.getMaster();
c = master.getDB( 'd' )[ 'c' ];
-c.insert( { _id:0 } );
-master.getDB( 'd' ).getLastError();
+assert.writeOK(c.insert({ _id: 0 }));
replTest.awaitReplication();
// Create a:1 index.
@@ -22,10 +21,11 @@ for( i = 0; i < 10000; ++i ) {
}
// Insert documents with multikey values.
+var bulk = c.initializeUnorderedBulkOp();
for( i = 0; i < 1000; ++i ) {
- c.insert( { a:multikeyValues } );
+ bulk.insert({ a: multikeyValues });
}
-master.getDB( 'd' ).getLastError();
+assert.writeOK(bulk.execute());
replTest.awaitReplication(300000);
// Check document counts on all nodes. On error a node might go down or fail to sync all data, see
diff --git a/jstests/slow2/rollback4.js b/jstests/slow2/rollback4.js
index 4929e3777ff..7d511568ed9 100644
--- a/jstests/slow2/rollback4.js
+++ b/jstests/slow2/rollback4.js
@@ -31,12 +31,11 @@ replTest.awaitReplication();
// Insert into master
var big = { b:new Array( 1000 ).toString() };
+var bulk = master.getDB('db').c.initializeUnorderedBulkOp();
for( var i = 0; i < 1000000; ++i ) {
- if ( i % 10000 == 0 ) {
- print( i );
- }
- master.getDB( 'db' ).c.insert( big );
+ bulk.insert( big );
}
+assert.writeOK(bulk.execute());
// Stop master
replTest.stop( 0 );
diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js
index faf5b777284..c4a0f97968e 100644
--- a/jstests/ssl/libs/ssl_helpers.js
+++ b/jstests/ssl/libs/ssl_helpers.js
@@ -65,13 +65,14 @@ function mixedShardTest(options1, options2, shouldSucceed) {
r = st.adminCommand({ shardCollection : "test.col" , key : { _id : 1 } });
assert.eq(r, true, "error sharding collection for this configuration");
- // Test mongos talking to shards
+ // Test mongos talking to shards
var bigstr = Array(1024*1024).join("#");
+ var bulk = db1.col.initializeUnorderedBulkOp();
for(var i = 0; i < 128; i++){
- db1.col.insert({_id:i, string:bigstr});
+ bulk.insert({ _id: i, string: bigstr });
}
- db1.getLastError();
+ assert.writeOK(bulk.execute());
assert.eq(128, db1.col.count(), "error retrieving documents from cluster");
// Test shards talking to each other
diff --git a/jstests/ssl/sharding_with_x509.js b/jstests/ssl/sharding_with_x509.js
index 75150a60548..245c10e9855 100644
--- a/jstests/ssl/sharding_with_x509.js
+++ b/jstests/ssl/sharding_with_x509.js
@@ -29,33 +29,33 @@ coll.ensureIndex({ insert : 1 })
print( "starting insertion phase" )
// Insert a bunch of data
-var toInsert = 2000
+var toInsert = 2000;
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < toInsert; i++ ){
- coll.insert({ my : "test", data : "to", insert : i })
+ bulk.insert({ my: "test", data: "to", insert: i });
}
-
-assert.eq( coll.getDB().getLastError(), null )
+assert.writeOK(bulk.execute());
print( "starting updating phase" )
// Update a bunch of data
-var toUpdate = toInsert
+var toUpdate = toInsert;
+bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < toUpdate; i++ ){
- var id = coll.findOne({ insert : i })._id
- coll.update({ insert : i, _id : id }, { $inc : { counter : 1 } })
+ var id = coll.findOne({ insert : i })._id;
+    bulk.find({ insert : i, _id : id }).updateOne({ $inc : { counter : 1 } });
}
-
-assert.eq( coll.getDB().getLastError(), null )
+assert.writeOK(bulk.execute());
print( "starting deletion" )
// Remove a bunch of data
-var toDelete = toInsert / 2
+var toDelete = toInsert / 2;
+bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < toDelete; i++ ){
- coll.remove({ insert : i })
+    bulk.find({ insert : i }).remove();
}
-
-assert.eq( coll.getDB().getLastError(), null )
+assert.writeOK(bulk.execute());
// Make sure the right amount of data is there
assert.eq( coll.find().count(), toInsert / 2 )
diff --git a/jstests/tool/dumpfilename1.js b/jstests/tool/dumpfilename1.js
index 84dae683a16..3cb2a26c6e4 100644
--- a/jstests/tool/dumpfilename1.js
+++ b/jstests/tool/dumpfilename1.js
@@ -7,9 +7,8 @@ t = new ToolTest( "dumpfilename1" );
t.startDB( "foo" );
c = t.db;
-c.getCollection("df/").insert({a:3});
-c.getCollection("df").insert({a:2});
-t.db.getLastError(); // Ensure data is written before dumping it through a spawned process.
+assert.writeOK(c.getCollection("df/").insert({ a: 3 }));
+assert.writeOK(c.getCollection("df").insert({ a: 2 }));
t.runTool( "dump" , "--out" , t.ext );
diff --git a/jstests/tool/dumprestoreWithNoOptions.js b/jstests/tool/dumprestoreWithNoOptions.js
index 5a866260bbc..60bcc4b70f2 100644
--- a/jstests/tool/dumprestoreWithNoOptions.js
+++ b/jstests/tool/dumprestoreWithNoOptions.js
@@ -30,8 +30,7 @@ for ( var opt in options ) {
assert.eq(options[opt], cappedOptions[opt],
'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
}
-db.capped.insert({ x: 1 });
-db.getLastError()
+assert.writeOK(db.capped.insert({ x: 1 }));
// Full dump/restore
@@ -58,8 +57,7 @@ var cappedOptions = db.capped.exists().options;
for ( var opt in options ) {
assert.eq(options[opt], cappedOptions[opt], 'invalid option')
}
-db.capped.insert({ x: 1 });
-db.getLastError()
+assert.writeOK(db.capped.insert({ x: 1 }));
dumppath = t.ext + "noOptionsSingleDump/";
mkdir(dumppath);
@@ -87,8 +85,8 @@ var cappedOptions = db.capped.exists().options;
for ( var opt in options ) {
assert.eq(options[opt], cappedOptions[opt], 'invalid option')
}
-db.capped.insert({ x: 1 });
-db.getLastError()
+
+assert.writeOK(db.capped.insert({ x: 1 }));
dumppath = t.ext + "noOptionsSingleColDump/";
mkdir(dumppath);
diff --git a/jstests/tool/dumprestore_auth3.js b/jstests/tool/dumprestore_auth3.js
index 11867a8fce4..591e7fb0900 100644
--- a/jstests/tool/dumprestore_auth3.js
+++ b/jstests/tool/dumprestore_auth3.js
@@ -71,8 +71,7 @@ function runTest(shutdownServer) {
// Re-create user data
db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
db.createRole({role: 'role', roles: [], privileges:[]});
- db.system.users.insert({user:'dbuser', pwd: 'pwd', roles: ['readWrite']});
- assert.gleSuccess(db);
+ assert.writeOK(db.system.users.insert({user:'dbuser', pwd: 'pwd', roles: ['readWrite']}));
assert.eq(1, db.bar.findOne().a);
assert.eq(1, db.getUsers().length, "didn't create user");
diff --git a/jstests/tool/exportimport_bigarray.js b/jstests/tool/exportimport_bigarray.js
index 43a209b8453..dbdd2a9a6d7 100644
--- a/jstests/tool/exportimport_bigarray.js
+++ b/jstests/tool/exportimport_bigarray.js
@@ -21,16 +21,11 @@ print('Number of documents to exceed maximum BSON size: ' + numDocs)
print('About to insert ' + numDocs + ' documents into ' +
exportimport_db.getName() + '.' + src.getName());
var i;
+var bulk = src.initializeUnorderedBulkOp();
for (i = 0; i < numDocs; ++i) {
- src.insert({ x : bigString });
-}
-var lastError = exportimport_db.getLastError();
-if (lastError == null) {
- print('Finished inserting ' + numDocs + ' documents');
-}
-else {
- doassert('Insertion failed: ' + lastError);
+ bulk.insert({ x: bigString });
}
+assert.writeOK(bulk.execute());
data = 'data/exportimport_array_test.json';