author     Randolph Tan <randolph@10gen.com>    2014-03-04 17:41:56 -0500
committer  Randolph Tan <randolph@10gen.com>    2014-04-21 16:53:25 -0400
commit     7acafe85d9bdd63122c19ba1cca86a7f55174941 (patch)
tree       234effd8e5a5b6c63d8b12c74de2d9acb78a7509 /jstests
parent     e87b42c4f13e48078f5c4aefba3caf18dcfba072 (diff)
download   mongo-7acafe85d9bdd63122c19ba1cca86a7f55174941.tar.gz
SERVER-13425 migrate sharding jstest suite to use write commands api
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/gle/gle_sharded_wc.js (renamed from jstests/sharding/gle_sharded_wc.js) | 11
-rw-r--r--  jstests/gle/gle_sharded_write.js (renamed from jstests/sharding/gle_sharded_write.js) | 10
-rw-r--r--  jstests/gle/updated_existing.js (renamed from jstests/sharding/updated_existing.js) | 0
-rw-r--r--  jstests/noPassthrough/refresh_syncclusterconn.js (renamed from jstests/sharding/refresh_syncclusterconn.js) | 0
-rw-r--r--  jstests/noPassthrough/sync1.js (renamed from jstests/sharding/sync1.js) | 0
-rw-r--r--  jstests/noPassthrough/sync4.js (renamed from jstests/sharding/sync4.js) | 0
-rw-r--r--  jstests/noPassthrough/sync8.js (renamed from jstests/sharding/sync8.js) | 0
-rw-r--r--  jstests/sharding/SERVER-7379.js | 34
-rw-r--r--  jstests/sharding/addshard1.js | 9
-rw-r--r--  jstests/sharding/addshard5.js | 3
-rw-r--r--  jstests/sharding/array_shard_key.js | 65
-rw-r--r--  jstests/sharding/auth.js | 24
-rw-r--r--  jstests/sharding/authCommands.js | 2
-rw-r--r--  jstests/sharding/auth_config_down.js | 3
-rw-r--r--  jstests/sharding/auth_repl.js | 3
-rw-r--r--  jstests/sharding/auth_slaveok_routing.js | 6
-rw-r--r--  jstests/sharding/authmr.js | 11
-rw-r--r--  jstests/sharding/authwhere.js | 11
-rw-r--r--  jstests/sharding/auto1.js | 20
-rw-r--r--  jstests/sharding/auto2.js | 5
-rw-r--r--  jstests/sharding/batch_write_command_sharded.js | 9
-rw-r--r--  jstests/sharding/bulk_insert.js | 156
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd.js | 19
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_hashed.js | 5
-rw-r--r--  jstests/sharding/coll_epoch_test1.js | 33
-rw-r--r--  jstests/sharding/coll_epoch_test2.js | 18
-rw-r--r--  jstests/sharding/conf_server_write_concern.js (renamed from jstests/sharding/gle_with_conf_servers.js) | 28
-rw-r--r--  jstests/sharding/count_slaveok.js | 8
-rw-r--r--  jstests/sharding/cursor1.js | 9
-rw-r--r--  jstests/sharding/cursor_cleanup.js | 9
-rw-r--r--  jstests/sharding/features1.js | 15
-rw-r--r--  jstests/sharding/features2.js | 1
-rw-r--r--  jstests/sharding/features3.js | 5
-rw-r--r--  jstests/sharding/findandmodify1.js | 5
-rw-r--r--  jstests/sharding/findandmodify2.js | 4
-rw-r--r--  jstests/sharding/forget_mr_temp_ns.js | 5
-rw-r--r--  jstests/sharding/fts_score_sort_sharded.js | 12
-rw-r--r--  jstests/sharding/geo_shardedgeonear.js | 6
-rw-r--r--  jstests/sharding/group_slaveok.js | 23
-rw-r--r--  jstests/sharding/hash_regex_targetting.js | 5
-rw-r--r--  jstests/sharding/hash_shard_unique_compound.js | 6
-rw-r--r--  jstests/sharding/index1.js | 307
-rw-r--r--  jstests/sharding/inserts_consistent.js | 47
-rw-r--r--  jstests/sharding/jumbo1.js | 10
-rw-r--r--  jstests/sharding/key_many.js | 10
-rw-r--r--  jstests/sharding/listDatabases.js | 10
-rw-r--r--  jstests/sharding/localhostAuthBypass.js | 27
-rw-r--r--  jstests/sharding/mapReduce.js | 18
-rw-r--r--  jstests/sharding/max_time_ms_sharded.js | 5
-rw-r--r--  jstests/sharding/merge_chunks_test.js | 7
-rw-r--r--  jstests/sharding/merge_chunks_test_with_data.js | 9
-rw-r--r--  jstests/sharding/migrateBig.js | 11
-rw-r--r--  jstests/sharding/migrateMemory.js | 1
-rw-r--r--  jstests/sharding/migrate_overwrite_id.js | 9
-rw-r--r--  jstests/sharding/mongos_no_detect_sharding.js | 7
-rw-r--r--  jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js | 138
-rw-r--r--  jstests/sharding/mongos_rs_shard_failure_tolerance.js | 142
-rw-r--r--  jstests/sharding/mongos_shard_failure_tolerance.js | 89
-rw-r--r--  jstests/sharding/mongos_validate_backoff.js | 3
-rw-r--r--  jstests/sharding/mongos_validate_writes.js | 31
-rw-r--r--  jstests/sharding/movechunk_with_def_paranoia.js | 2
-rw-r--r--  jstests/sharding/movechunk_with_moveParanoia.js | 2
-rw-r--r--  jstests/sharding/movechunk_with_noMoveParanoia.js | 2
-rw-r--r--  jstests/sharding/moveprimary_ignore_sharded.js | 9
-rw-r--r--  jstests/sharding/mrShardedOutput.js | 14
-rw-r--r--  jstests/sharding/noUpdateButN1inAnotherCollection.js | 10
-rw-r--r--  jstests/sharding/parallel.js | 6
-rw-r--r--  jstests/sharding/prefix_shard_key.js | 28
-rw-r--r--  jstests/sharding/presplit.js | 7
-rwxr-xr-x  jstests/sharding/read_pref.js | 7
-rw-r--r--  jstests/sharding/read_pref_multi_mongos_stale_config.js | 2
-rw-r--r--  jstests/sharding/remove1.js | 9
-rw-r--r--  jstests/sharding/remove2.js | 7
-rw-r--r--  jstests/sharding/replmonitor_bad_seed.js | 1
-rw-r--r--  jstests/sharding/return_partial_shards_down.js | 4
-rw-r--r--  jstests/sharding/shard1.js | 4
-rw-r--r--  jstests/sharding/shard2.js | 24
-rw-r--r--  jstests/sharding/shard3.js | 4
-rw-r--r--  jstests/sharding/shard7.js | 6
-rw-r--r--  jstests/sharding/shard_existing.js | 5
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js | 23
-rw-r--r--  jstests/sharding/shard_key_immutable.js | 465
-rw-r--r--  jstests/sharding/shard_kill_and_pooling.js | 7
-rw-r--r--  jstests/sharding/sharded_profile.js | 3
-rw-r--r--  jstests/sharding/sharding_with_keyfile_auth.js | 26
-rw-r--r--  jstests/sharding/sort1.js | 1
-rw-r--r--  jstests/sharding/split_with_force.js | 10
-rw-r--r--  jstests/sharding/split_with_force_small.js | 10
-rw-r--r--  jstests/sharding/ssv_nochunk.js | 3
-rw-r--r--  jstests/sharding/stale_version_write.js (renamed from jstests/sharding/writeback_shard_version.js) | 13
-rw-r--r--  jstests/sharding/stats.js | 5
-rw-r--r--  jstests/sharding/sync2.js | 1
-rw-r--r--  jstests/sharding/test_stacked_migration_cleanup.js | 10
-rw-r--r--  jstests/sharding/trace_missing_docs_test.js | 7
-rw-r--r--  jstests/sharding/update1.js | 7
-rw-r--r--  jstests/sharding/update_immutable_fields.js | 42
-rw-r--r--  jstests/sharding/user_flags_sharded.js | 5
-rw-r--r--  jstests/sharding/wbl_not_cleared.js | 20
-rw-r--r--  jstests/sharding/writeback_bulk_insert.js | 91
-rw-r--r--  jstests/sharding/writeback_server7958.js | 94
-rw-r--r--  jstests/sharding/zbigMapReduce.js | 25
101 files changed, 838 insertions, 1662 deletions
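The mechanical pattern applied across these files: drop the write-then-getLastError dance in favor of asserting on the WriteResult that the shell's write-commands API returns. A minimal before/after sketch in mongo shell JavaScript, assuming a collection handle coll (illustrative only, not a file from this commit):

// Old style: fire-and-forget write, then ask the server what happened.
coll.insert({ _id: 1 });
var gle = coll.getDB().getLastErrorObj();
assert.eq(null, gle.err);

// New style: insert/update/remove return a WriteResult, and the
// assert.writeOK / assert.writeError helpers check it in one step.
assert.writeOK(coll.insert({ _id: 2 }));
assert.writeError(coll.insert({ _id: 2 }));  // duplicate _id yields a write error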
diff --git a/jstests/sharding/gle_sharded_wc.js b/jstests/gle/gle_sharded_wc.js
index 7c5055ceaa7..54050acb141 100644
--- a/jstests/sharding/gle_sharded_wc.js
+++ b/jstests/gle/gle_sharded_wc.js
@@ -111,15 +111,8 @@ assert.eq(coll.count({ _id : 1 }), 1);
// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
// successful writes from.
coll.remove({ _id : 1 });
-// The insert throws if write commands are enabled, since we get a response
-if ( coll.getMongo().useWriteCommands() ) {
- assert.throws( function() {
- coll.insert([{ _id : 1 }, { _id : -1 }]);
- });
-}
-else {
- coll.insert([{ _id : 1 }, { _id : -1 }]);
-}
+coll.insert([{ _id : 1 }, { _id : -1 }]);
+
printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
assert(gle.ok);
assert(gle.err);
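The tests that remain GLE-specific (moved into jstests/gle/) still issue getLastError explicitly as a command, since exercising that command is their whole point; only the write preamble changed. A sketch of that invocation style, with coll as above (illustrative):

// Run GLE as a plain command so the test controls exactly when,
// and on which connection, the error state is read back.
var gle = coll.getDB().runCommand({ getLastError: 1 });
assert(gle.ok);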
diff --git a/jstests/sharding/gle_sharded_write.js b/jstests/gle/gle_sharded_write.js
index e4b135178a3..0f602a5e4d7 100644
--- a/jstests/sharding/gle_sharded_write.js
+++ b/jstests/gle/gle_sharded_write.js
@@ -164,15 +164,7 @@ assert(gle.errmsg);
// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
// successful writes from.
coll.remove({ _id : 1 });
-// The insert throws if write commands are enabled, since we get a response
-if ( coll.getMongo().useWriteCommands() ) {
- assert.throws( function() {
- coll.insert([{ _id : 1 }, { _id : -1 }]);
- });
-}
-else {
- coll.insert([{ _id : 1 }, { _id : -1 }]);
-}
+coll.insert([{ _id : 1 }, { _id : -1 }]);
printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
assert(gle.ok);
assert(gle.err);
diff --git a/jstests/sharding/updated_existing.js b/jstests/gle/updated_existing.js
index bd03c535099..bd03c535099 100644
--- a/jstests/sharding/updated_existing.js
+++ b/jstests/gle/updated_existing.js
diff --git a/jstests/sharding/refresh_syncclusterconn.js b/jstests/noPassthrough/refresh_syncclusterconn.js
index b12cf504d75..b12cf504d75 100644
--- a/jstests/sharding/refresh_syncclusterconn.js
+++ b/jstests/noPassthrough/refresh_syncclusterconn.js
diff --git a/jstests/sharding/sync1.js b/jstests/noPassthrough/sync1.js
index 490d2a53c5a..490d2a53c5a 100644
--- a/jstests/sharding/sync1.js
+++ b/jstests/noPassthrough/sync1.js
diff --git a/jstests/sharding/sync4.js b/jstests/noPassthrough/sync4.js
index 6733f07089d..6733f07089d 100644
--- a/jstests/sharding/sync4.js
+++ b/jstests/noPassthrough/sync4.js
diff --git a/jstests/sharding/sync8.js b/jstests/noPassthrough/sync8.js
index 241ad655569..241ad655569 100644
--- a/jstests/sharding/sync8.js
+++ b/jstests/noPassthrough/sync8.js
diff --git a/jstests/sharding/SERVER-7379.js b/jstests/sharding/SERVER-7379.js
index 8ad1a12fb80..f4730495624 100644
--- a/jstests/sharding/SERVER-7379.js
+++ b/jstests/sharding/SERVER-7379.js
@@ -9,42 +9,38 @@ var testDoc = { "_id": 123, "categoryId": 9881, "store": "NEW" }
offerChange.remove({}, false);
offerChange.insert(testDoc)
-offerChange.update({ "_id": 123 }, { $set: { "store": "NEWEST" } }, true, false);
-var gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(offerChange.update({ _id: 123 }, { $set: { store: "NEWEST" } }, true, false));
var doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc)
-offerChange.update({ "_id": 123 }, { "_id": 123, "categoryId": 9881, "store": "NEWEST" }, true, false);
-var gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
-var doc = offerChange.findOne();
+assert.writeError(offerChange.update({ _id: 123 },
+ { _id: 123, categoryId: 9881, store: "NEWEST" },
+ true, false));
+doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc)
-offerChange.save({ "_id": 123, "categoryId": 9881, "store": "NEWEST" })
-var gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
-var doc = offerChange.findOne();
+assert.writeError(offerChange.save({ "_id": 123, "categoryId": 9881, "store": "NEWEST" }));
+doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-offerChange.update({ "_id": 123, "store": "NEW" }, { "_id": 123, "categoryId": 9881, "store": "NEWEST" }, true, false);
-var gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
-var doc = offerChange.findOne();
+assert.writeError(offerChange.update({ _id: 123, store: "NEW" },
+ { _id: 123, categoryId: 9881, store: "NEWEST" },
+ true, false));
+doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-offerChange.update({ "_id": 123, "categoryId": 9881 }, { "_id": 123, "categoryId": 9881, "store": "NEWEST" }, true, false);
-var gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
-var doc = offerChange.findOne();
+assert.writeError(offerChange.update({ _id: 123, categoryId: 9881 },
+ { _id: 123, categoryId: 9881, store: "NEWEST" },
+ true, false));
+doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
st.stop();
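Every case in this test follows one shape: attempt an upsert or replacement that mongos must reject for shard-key reasons, assert that it surfaces as a write error rather than via a separate GLE call, then verify the stored document is untouched. Condensed sketch using the test's own names (values illustrative):

var res = assert.writeError(
    offerChange.update({ _id: 123 }, { $set: { store: "NEWEST" } },
                       true /* upsert */, false /* multi */));
// The failed write must not have modified the existing document.
assert(friendlyEqual(offerChange.findOne(), testDoc), res.toString());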
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index 0ca6a832a49..42342351d8a 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -9,10 +9,9 @@ conn1 = startMongodTest( 29000 );
db1 = conn1.getDB( "testDB" );
numObjs = 0;
for (i=0; i<3; i++){
- db1.foo.save( { a : i } );
+ assert.writeOK(db1.foo.save( { a : i } ));
numObjs++;
}
-db1.getLastError()
newShard = "myShard";
assert( s.admin.runCommand( { addshard: "localhost:29000" , name: newShard } ).ok, "did not accept non-duplicated shard" );
@@ -20,11 +19,9 @@ assert( s.admin.runCommand( { addshard: "localhost:29000" , name: newShard } ).o
// a mongod with an existing database name should not be allowed to become a shard
conn2 = startMongodTest( 29001 );
db2 = conn2.getDB( "otherDB" );
-db2.foo.save( {a:1} );
-db2.getLastError()
+assert.writeOK(db2.foo.save({ a: 1 }));
db3 = conn2.getDB( "testDB" );
-db3.foo.save( {a:1} );
-db3.getLastError()
+assert.writeOK(db3.foo.save({ a: 1 } ));
s.config.databases.find().forEach( printjson )
rejectedShard = "rejectedShard";
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index a62dbe43c98..6c13985a436 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -32,8 +32,7 @@ printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._i
printjson( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }) )
// Insert one document
-coll.insert({ hello : "world" })
-assert.eq( null, coll.getDB().getLastError() )
+assert.writeOK(coll.insert({ hello : "world" }));
// Migrate the collection to and from shard2 so shard1 loads the shard2 host
printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id, _waitForDelete : true }) )
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index 309f6191414..0c8d7e3a3dc 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -14,98 +14,65 @@ st.printShardingStatus()
print( "1: insert some invalid data" )
-var value = null
-
-var checkError = function( shouldError ){
- var error = coll.getDB().getLastError()
-
- if( error != null ) printjson( error )
-
- if( error == null && ! shouldError ) return
- if( error != null && shouldError ) return
-
- if( error == null ) print( "No error detected!" )
- else print( "Unexpected error!" )
-
- assert( false )
-}
+var value = null;
// Insert an object with invalid array key
-coll.insert({ i : [ 1, 2 ] })
-checkError( true )
+assert.writeError(coll.insert({ i : [ 1, 2 ] }));
// Insert an object with all the right fields, but an invalid array val for _id
-coll.insert({ _id : [ 1, 2 ] , i : 3})
-checkError( true )
+assert.writeError(coll.insert({ _id : [ 1, 2 ] , i : 3}));
// Insert an object with valid array key
-coll.insert({ i : 1 })
-checkError( false )
+assert.writeOK(coll.insert({ i : 1 }));
// Update the value with valid other field
value = coll.findOne({ i : 1 })
-coll.update( value, { $set : { j : 2 } } )
-checkError( false )
+assert.writeOK(coll.update( value, { $set : { j : 2 } } ));
// Update the value with invalid other fields
value = coll.findOne({ i : 1 })
-coll.update( value, Object.merge( value, { i : [ 3 ] } ) )
-checkError( true )
+assert.writeError(coll.update( value, Object.merge( value, { i : [ 3 ] } ) ));
// Multi-update the value with invalid other fields
value = coll.findOne({ i : 1 })
-coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true)
-checkError( true )
+assert.writeError(coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true));
// Multi-update the value with other fields (won't work, but no error)
value = coll.findOne({ i : 1 })
-coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true)
-checkError( false )
+assert.writeOK(coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true));
// Query the value with other fields (won't work, but no error)
value = coll.findOne({ i : 1 })
coll.find( Object.merge( value, { i : [ 1, 1 ] } ) ).toArray()
-checkError( false )
// Can't remove using multikey, but shouldn't error
value = coll.findOne({ i : 1 })
coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4 ] } ) )
-checkError( false )
// Can't remove using multikey, but shouldn't error
value = coll.findOne({ i : 1 })
-coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) )
-error = coll.getDB().getLastError()
-assert.eq( error, null )
+assert.writeOK(coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) ));
assert.eq( coll.find().itcount(), 1 )
value = coll.findOne({ i : 1 })
-coll.remove( Object.extend( value, { i : 1 } ) )
-error = coll.getDB().getLastError()
-assert.eq( error, null )
+assert.writeOK(coll.remove( Object.extend( value, { i : 1 } ) ));
assert.eq( coll.find().itcount(), 0 )
coll.ensureIndex({ _id : 1, i : 1, j: 1 });
// Can insert document that will make index into a multi-key as long as it's not part of shard key.
coll.remove({});
-coll.insert({ i: 1, j: [1, 2] });
-error = coll.getDB().getLastError();
-assert.eq( error, null );
+assert.writeOK(coll.insert({ i: 1, j: [1, 2] }));
assert.eq( coll.find().itcount(), 1 );
// Same is true for updates.
coll.remove({});
coll.insert({ _id: 1, i: 1 });
-coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] });
-error = coll.getDB().getLastError();
-assert.eq( error, null );
+assert.writeOK(coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }));
assert.eq( coll.find().itcount(), 1 );
// Same for upserts.
coll.remove({});
-coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }, true);
-error = coll.getDB().getLastError();
-assert.eq( error, null );
+assert.writeOK(coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }, true));
assert.eq( coll.find().itcount(), 1 );
printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" )
@@ -114,8 +81,7 @@ printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-s
var coll = mongos.getCollection( "" + coll + "2" )
for( var i = 0; i < 10; i++ ){
// TODO : does not check weird cases like [ i, i ]
- coll.insert({ i : [ i, i + 1 ] })
- checkError( false )
+ assert.writeOK(coll.insert({ i : [ i, i + 1 ] }));
}
coll.ensureIndex({ _id : 1, i : 1 })
@@ -133,8 +99,7 @@ st.printShardingStatus()
var coll = mongos.getCollection( "" + coll + "3" )
for( var i = 0; i < 10; i++ ){
// TODO : does not check weird cases like [ i, i ]
- coll.insert({ i : i })
- checkError( false )
+ assert.writeOK(coll.insert({ i : i }));
}
coll.ensureIndex({ _id : 1, i : 1 })
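With WriteResult available, the removed checkError(shouldError) helper collapses into a one-line dispatch. A sketch of the equivalence (hypothetical helper, not part of the commit):

function checkWrite(res, shouldError) {
    // assert.writeError / assert.writeOK cover both branches of the
    // old GLE-polling helper without a second server round trip.
    return shouldError ? assert.writeError(res) : assert.writeOK(res);
}

checkWrite(coll.insert({ i: [1, 2] }), true);  // array shard key: rejected
checkWrite(coll.insert({ i: 1 }), false);      // scalar shard key: accepted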
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index be2f7803b46..1a9837cb1ff 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -58,8 +58,8 @@ else {
}
login(adminUser);
-s.getDB( "config" ).settings.update( { _id : "chunksize" }, {$set : {value : 1 }}, true );
-printjson(s.getDB("config").runCommand({getlasterror:1}));
+assert.writeOK(s.getDB( "config" ).settings.update({ _id: "chunksize" },
+ { $set: { value : 1 }}, true ));
printjson(s.getDB("config").settings.find().toArray());
print("restart mongos");
@@ -139,9 +139,7 @@ login(testUser);
assert.eq(s.getDB("test").foo.findOne(), null);
print("insert try 2");
-s.getDB("test").foo.insert({x:1});
-result = s.getDB("test").getLastErrorObj();
-assert.eq(result.err, null);
+assert.writeOK(s.getDB("test").foo.insert({ x: 1 }));
assert.eq( 1 , s.getDB( "test" ).foo.find().itcount() , tojson(result) );
logout(testUser);
@@ -164,15 +162,11 @@ ReplSetTest.awaitRSClientHosts(s.s, d2.nodes, {ok: true });
s.getDB("test").foo.remove({})
var num = 100000;
+var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
for (i=0; i<num; i++) {
- s.getDB("test").foo.insert({_id:i, x:i, abc : "defg", date : new Date(), str : "all the talk on the market"});
-}
-
-// Make sure all data gets sent through
-printjson( s.getDB("test").getLastError() )
-for (var i = 0; i < s._connections.length; i++) { // SERVER-4356
- s._connections[i].getDB("test").getLastError();
+ bulk.insert({ _id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market" });
}
+assert.writeOK(bulk.execute());
var d1Chunks = s.getDB("config").chunks.count({shard : "d1"});
var d2Chunks = s.getDB("config").chunks.count({shard : "d2"});
@@ -202,7 +196,7 @@ if (numDocs != num) {
lastDocNumber = docs[i].x;
numDocsSeen++;
}
- assert.eq(numDocs, numDocsSeen, "More docs discovered on second find() even though getLastError was already called")
+ assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()")
assert.eq(num - numDocs, missingDocNumbers.length);
load('jstests/libs/trace_missing_docs.js');
@@ -286,9 +280,7 @@ print( " testing find that should work" );
readOnlyDB.foo.findOne();
print( " testing write that should fail" );
-readOnlyDB.foo.insert( { eliot : 1 } );
-result = readOnlyDB.getLastError();
-assert( ! result.ok , tojson( result ) )
+assert.writeError(readOnlyDB.foo.insert({ eliot: 1 }));
print( " testing read command (should succeed)" );
assert.commandWorked(readOnlyDB.runCommand({count : "foo"}));
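Loops of single inserts followed by a manual getLastError flush (including the per-connection flush for SERVER-4356) become one unordered bulk operation whose execute() reports on the whole batch. Sketch, assuming a db handle from a mongos (names and count illustrative):

var bulk = db.foo.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
    bulk.insert({ _id: i, x: i });
}
// One acknowledged round trip per batch; the BulkWriteResult
// covers every queued document.
var res = bulk.execute();
assert.writeOK(res);
assert.eq(1000, res.nInserted);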
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 93d178222f5..a3e5a712416 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -1,3 +1,5 @@
+// TODO: move back to sharding suite after SERVER-13402 is fixed
+
/**
* This tests using DB commands with authentication enabled when sharded.
*/
diff --git a/jstests/sharding/auth_config_down.js b/jstests/sharding/auth_config_down.js
index ba1eaa955aa..6f97051a864 100644
--- a/jstests/sharding/auth_config_down.js
+++ b/jstests/sharding/auth_config_down.js
@@ -14,8 +14,7 @@ var configs = st._configServers
printjson( configs )
st.printShardingStatus()
-mongos.getCollection( "foo.bar" ).insert({ hello : "world" })
-assert.eq( null, mongos.getDB( "foo" ).getLastError() )
+assert.writeOK(mongos.getCollection( "foo.bar" ).insert({ hello : "world" }));
var stopOrder = [ 1, 0 ]
diff --git a/jstests/sharding/auth_repl.js b/jstests/sharding/auth_repl.js
index 3c0a5d14be2..c07719843b1 100644
--- a/jstests/sharding/auth_repl.js
+++ b/jstests/sharding/auth_repl.js
@@ -11,8 +11,7 @@ var conn = new Mongo(replTest.getURL());
var testDB = conn.getDB('test');
var testColl = testDB.user;
-testColl.insert({ x: 1 });
-testDB.runCommand({ getLastError: 1, w: nodeCount });
+assert.writeOK(testColl.insert({ x: 1 }, { writeConcern: { w: nodeCount }}));
// Setup the cached connection for primary and secondary in DBClientReplicaSet
// before setting up authentication
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 2ba8bec2b79..f89a6da086a 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -54,11 +54,11 @@ coll.setSlaveOk( true );
ReplSetTest.awaitRSClientHosts( mongos, replTest.getSecondaries(),
{ ok : true, secondary : true });
+var bulk = coll.initializeUnorderedBulkOp();
for ( var x = 0; x < 20; x++ ) {
- coll.insert({ v: x, k: 10 });
+ bulk.insert({ v: x, k: 10 });
}
-
-coll.runCommand({ getLastError: 1, w: nodeCount });
+assert.writeOK(bulk.execute({ w: nodeCount }));
/* Although mongos never caches query results, try to do a different query
* every time just to be sure.
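bulk.execute() also accepts a write-concern document, replacing the old pattern of chasing writes with { getLastError: 1, w: N }. Sketch (nodeCount as in the test above; the wtimeout value is illustrative):

var bulk = coll.initializeUnorderedBulkOp();
bulk.insert({ v: 1, k: 10 });
// Block until the batch has replicated to nodeCount members.
assert.writeOK(bulk.execute({ w: nodeCount, wtimeout: 30000 }));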
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 52e69e88848..31a1bcf18f9 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -20,19 +20,12 @@ var test1User = {
roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
};
-function assertGLEOK(status) {
- assert(status.ok && status.err === null,
- "Expected OK status object; found " + tojson(status));
-}
-
function assertRemove(collection, pattern) {
- collection.remove(pattern);
- assertGLEOK(collection.getDB().getLastErrorObj());
+ assert.writeOK(collection.remove(pattern));
}
function assertInsert(collection, obj) {
- collection.insert(obj);
- assertGLEOK(collection.getDB().getLastErrorObj());
+ assert.writeOK(collection.insert(obj));
}
var cluster = new ShardingTest("authwhere", 1, 0, 1,
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 3cba1aee4e4..9516499580b 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -20,19 +20,12 @@ var test1Reader = {
roles: [{role: 'read', db: 'test1', hasRole:true, canDelegate: false}]
};
-function assertGLEOK(status) {
- assert(status.ok && status.err === null,
- "Expected OK status object; found " + tojson(status));
-}
-
function assertRemove(collection, pattern) {
- collection.remove(pattern);
- assertGLEOK(collection.getDB().getLastErrorObj());
+ assert.writeOK(collection.remove(pattern));
}
function assertInsert(collection, obj) {
- collection.insert(obj);
- assertGLEOK(collection.getDB().getLastErrorObj());
+ assert.writeOK(collection.insert(obj));
}
var cluster = new ShardingTest("authwhere", 1, 0, 1,
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 57b0a0053e8..5211108ece8 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -14,10 +14,11 @@ coll = db.foo;
var i=0;
+var bulk = coll.initializeUnorderedBulkOp();
for ( ; i<100; i++ ){
- coll.save( { num : i , s : bigString } );
+ bulk.insert( { num : i , s : bigString } );
}
-db.getLastError();
+assert.writeOK( bulk.execute() );
primary = s.getServer( "test" ).getDB( "test" );
@@ -29,28 +30,31 @@ assert.eq( 100 , primary.foo.count() );
print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand( { datasize : "test.foo" } ) ) );
+bulk = coll.initializeUnorderedBulkOp();
for ( ; i<200; i++ ){
- coll.save( { num : i , s : bigString } );
+ bulk.insert( { num : i , s : bigString } );
}
-db.getLastError();
+assert.writeOK( bulk.execute() );
s.printChunks()
s.printChangeLog()
counts.push( s.config.chunks.count() );
+bulk = coll.initializeUnorderedBulkOp();
for ( ; i<400; i++ ){
- coll.save( { num : i , s : bigString } );
+ bulk.insert( { num : i , s : bigString } );
}
-db.getLastError();
+assert.writeOK( bulk.execute() );
s.printChunks();
s.printChangeLog()
counts.push( s.config.chunks.count() );
+bulk = coll.initializeUnorderedBulkOp();
for ( ; i<700; i++ ){
- coll.save( { num : i , s : bigString } );
+ bulk.insert( { num : i , s : bigString } );
}
-db.getLastError();
+assert.writeOK( bulk.execute() );
s.printChunks();
s.printChangeLog()
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index 41cb38a4309..f95944a83f3 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -18,10 +18,12 @@ for ( j=0; j<30; j++ ){
print( "j:" + j + " : " +
Date.timeFunc(
function(){
+ var bulk = coll.initializeUnorderedBulkOp();
for ( var k=0; k<100; k++ ){
- coll.save( { num : i , s : bigString } );
+ bulk.insert( { num : i , s : bigString } );
i++;
}
+ assert.writeOK(bulk.execute());
}
) );
@@ -29,7 +31,6 @@ for ( j=0; j<30; j++ ){
assert.eq( i , j * 100 , "setup" );
// Until SERVER-9715 is fixed, the sync command must be run on a diff connection
new Mongo( s.s.host ).adminCommand( "connpoolsync" );
-db.getLastError();
print( "done inserting data" );
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index 269ac5b24b9..02200945ef8 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -96,8 +96,7 @@ var oldChunks = config.chunks.find().toArray();
var staleMongos = MongoRunner.runMongos({ configdb : configConnStr });
brokenColl = staleMongos.getCollection(brokenColl.toString());
-brokenColl.insert({ hello : "world" });
-assert.eq(null, brokenColl.getDB().getLastError());
+assert.writeOK(brokenColl.insert({ hello : "world" }));
// Modify the chunks to make shards at a higher version
@@ -107,11 +106,9 @@ assert.commandWorked(admin.runCommand({ moveChunk : brokenColl.toString(),
// Rewrite the old chunks back to the config server
-config.chunks.remove({});
-assert.eq(null, config.getLastError());
+assert.writeOK(config.chunks.remove({}));
for ( var i = 0; i < oldChunks.length; i++ )
- config.chunks.insert(oldChunks[i]);
-assert.eq(null, config.getLastError());
+ assert.writeOK(config.chunks.insert(oldChunks[i]));
// Stale mongos can no longer bring itself up-to-date!
// END SETUP
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index e4eb9291906..7b420d707a0 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -48,21 +48,10 @@ printjson(admin.runCommand({ moveChunk: collSh + "",
var resetColls = function()
{
- collSh.remove({})
- assert.eq(null, collSh.getDB().getLastError());
-
- collUn.remove({})
- assert.eq(null, collUn.getDB().getLastError());
-
- collDi.remove({})
- assert.eq(null, collDi.getDB().getLastError());
-}
-
-var printPass = function(str)
-{
- print(str);
- return str;
-}
+ assert.writeOK(collSh.remove({}));
+ assert.writeOK(collUn.remove({}));
+ assert.writeOK(collDi.remove({}));
+};
var isDupKeyError = function(err)
{
@@ -82,16 +71,13 @@ resetColls();
var inserts = [{ukey : 0},
{ukey : 1}]
-collSh.insert(inserts);
-assert.eq(null, printPass(collSh.getDB().getLastError()));
+assert.writeOK(collSh.insert(inserts));
assert.eq(2, collSh.find().itcount());
-collUn.insert(inserts);
-assert.eq(null, printPass(collUn.getDB().getLastError()));
+assert.writeOK(collUn.insert(inserts));
assert.eq(2, collUn.find().itcount());
-collDi.insert(inserts);
-assert.eq(null, printPass(collDi.getDB().getLastError()));
+assert.writeOK(collDi.insert(inserts));
assert.eq(2, collDi.find().itcount());
jsTest.log("Bulk insert (no COE) with mongos error...")
@@ -101,8 +87,7 @@ var inserts = [{ukey : 0},
{hello : "world"},
{ukey : 1}]
-collSh.insert(inserts);
-assert.neq(null, printPass(collSh.getDB().getLastError()));
+assert.writeError(collSh.insert(inserts));
assert.eq(1, collSh.find().itcount());
jsTest.log("Bulk insert (no COE) with mongod error...")
@@ -112,16 +97,13 @@ var inserts = [{ukey : 0},
{ukey : 0},
{ukey : 1}]
-collSh.insert(inserts);
-assert.neq(null, printPass(collSh.getDB().getLastError()));
+assert.writeError(collSh.insert(inserts));
assert.eq(1, collSh.find().itcount());
-collUn.insert(inserts);
-assert.neq(null, printPass(collUn.getDB().getLastError()));
+assert.writeError(collUn.insert(inserts));
assert.eq(1, collUn.find().itcount());
-collDi.insert(inserts);
-assert.neq(null, printPass(collDi.getDB().getLastError()));
+assert.writeError(collDi.insert(inserts));
assert.eq(1, collDi.find().itcount());
jsTest.log("Bulk insert (no COE) with mongod and mongos error...")
@@ -132,22 +114,16 @@ var inserts = [{ukey : 0},
{ukey : 1},
{hello : "world"}]
-collSh.insert(inserts);
-var err = printPass(collSh.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+var res = assert.writeError(collSh.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
assert.eq(1, collSh.find().itcount());
-collUn.insert(inserts);
-var err = printPass(collUn.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collUn.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
assert.eq(1, collUn.find().itcount());
-collDi.insert(inserts);
-var err = printPass(collDi.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collDi.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
assert.eq(1, collDi.find().itcount());
jsTest.log("Bulk insert (no COE) on second shard...")
@@ -156,16 +132,13 @@ resetColls();
var inserts = [{ukey : 0},
{ukey : -1}]
-collSh.insert(inserts);
-assert.eq(null, printPass(collSh.getDB().getLastError()));
+assert.writeOK(collSh.insert(inserts));
assert.eq(2, collSh.find().itcount());
-collUn.insert(inserts);
-assert.eq(null, printPass(collUn.getDB().getLastError()));
+assert.writeOK(collUn.insert(inserts));
assert.eq(2, collUn.find().itcount());
-collDi.insert(inserts);
-assert.eq(null, printPass(collDi.getDB().getLastError()));
+assert.writeOK(collDi.insert(inserts));
assert.eq(2, collDi.find().itcount());
jsTest.log("Bulk insert to second shard (no COE) with mongos error...")
@@ -176,8 +149,7 @@ var inserts = [{ukey : 0},
{ukey : -1},
{hello : "world"}]
-collSh.insert(inserts);
-assert.neq(null, printPass(collSh.getDB().getLastError()));
+assert.writeError(collSh.insert(inserts));
assert.eq(3, collSh.find().itcount());
jsTest.log("Bulk insert to second shard (no COE) with mongod error...")
@@ -189,20 +161,16 @@ var inserts = [{ukey : 0},
{ukey : -2},
{ukey : -2}]
-collSh.insert(inserts);
-assert.neq(null, printPass(collSh.getDB().getLastError()));
+assert.writeError(collSh.insert(inserts));
assert.eq(4, collSh.find().itcount());
-collUn.insert(inserts);
-assert.neq(null, printPass(collUn.getDB().getLastError()));
+assert.writeError(collUn.insert(inserts));
assert.eq(4, collUn.find().itcount());
-collDi.insert(inserts);
-assert.neq(null, printPass(collDi.getDB().getLastError()));
+assert.writeError(collDi.insert(inserts));
assert.eq(4, collDi.find().itcount());
-jsTest
- .log("Bulk insert to third shard (no COE) with mongod and mongos error...")
+jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...")
resetColls();
var inserts = [{ukey : 0},
@@ -213,22 +181,16 @@ var inserts = [{ukey : 0},
{ukey : 4},
{hello : "world"}]
-collSh.insert(inserts);
-var err = printPass(collSh.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collSh.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
assert.eq(5, collSh.find().itcount());
-collUn.insert(inserts);
-var err = printPass(collUn.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collUn.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
assert.eq(5, collUn.find().itcount());
-collDi.insert(inserts);
-var err = printPass(collDi.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collDi.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
assert.eq(5, collDi.find().itcount());
//
@@ -242,8 +204,7 @@ var inserts = [{ukey : 0},
{hello : "world"},
{ukey : 1}]
-collSh.insert(inserts, 1); // COE
-assert.neq(null, printPass(collSh.getDB().getLastError()));
+assert.writeError(collSh.insert(inserts, 1)); // COE
assert.eq(2, collSh.find().itcount());
jsTest.log("Bulk insert (yes COE) with mongod error...")
@@ -253,16 +214,13 @@ var inserts = [{ukey : 0},
{ukey : 0},
{ukey : 1}]
-collSh.insert(inserts, 1);
-assert.neq(null, printPass(collSh.getDB().getLastError()));
+assert.writeError(collSh.insert(inserts, 1));
assert.eq(2, collSh.find().itcount());
-collUn.insert(inserts, 1);
-assert.neq(null, printPass(collUn.getDB().getLastError()));
+assert.writeError(collUn.insert(inserts, 1));
assert.eq(2, collUn.find().itcount());
-collDi.insert(inserts, 1);
-assert.neq(null, printPass(collDi.getDB().getLastError()));
+assert.writeError(collDi.insert(inserts, 1));
assert.eq(2, collDi.find().itcount());
jsTest
@@ -278,23 +236,17 @@ var inserts = [{ukey : 0},
{hello : "world"}]
// Last error here is mongos error
-collSh.insert(inserts, 1);
-var err = printPass(collSh.getDB().getLastError());
-assert.neq(null, err);
-assert(!isDupKeyError(err));
+res = assert.writeError(collSh.insert(inserts, 1));
+assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
assert.eq(5, collSh.find().itcount());
// Extra insert goes through, since mongos error "doesn't count"
-collUn.insert(inserts, 1);
-var err = printPass(collUn.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collUn.insert(inserts, 1));
+assert.eq(6, res.nInserted, res.toString());
assert.eq(6, collUn.find().itcount());
-collDi.insert(inserts, 1);
-var err = printPass(collDi.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collDi.insert(inserts, 1));
+assert.eq(6, res.nInserted, res.toString());
assert.eq(6, collDi.find().itcount());
jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error "
@@ -310,23 +262,17 @@ var inserts = [{ukey : 0},
{ukey : 4}]
// Last error here is mongos error
-collSh.insert(inserts, 1);
-var err = printPass(collSh.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collSh.insert(inserts, 1));
+assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
assert.eq(5, collSh.find().itcount());
// Extra insert goes through, since mongos error "doesn't count"
-collUn.insert(inserts, 1);
-var err = printPass(collUn.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collUn.insert(inserts, 1));
+assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
assert.eq(6, collUn.find().itcount());
-collDi.insert(inserts, 1);
-var err = printPass(collDi.getDB().getLastError());
-assert.neq(null, err);
-assert(isDupKeyError(err));
+res = assert.writeError(collDi.insert(inserts, 1));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
assert.eq(6, collDi.find().itcount());
//
@@ -351,9 +297,7 @@ printjson(admin.runCommand({moveChunk : collSh + "",
to : shards[0]._id,
_waitForDelete: true}));
-staleCollSh.insert(inserts);
-var err = printPass(staleCollSh.getDB().getLastError());
-assert.eq(null, err);
+assert.writeOK(staleCollSh.insert(inserts));
//
// Test when the objects to be bulk inserted are 10MB, and so can't be inserted
@@ -392,8 +336,6 @@ printjson(admin.runCommand({moveChunk : collSh + "",
to : shards[0]._id,
_waitForDelete: true}));
-staleCollSh.insert(inserts);
-var err = printPass(staleCollSh.getDB().getLastError());
-assert.eq(null, err);
+assert.writeOK(staleCollSh.insert(inserts));
st.stop()
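When a batch fails, the returned result is inspected directly instead of parsing the GLE error string: getWriteErrorCount()/getWriteErrorAt() expose per-document errors, and counters such as nInserted show how much of the batch applied. Sketch of the checks used above, assuming the unique index on ukey that this test builds (documents illustrative):

var res = assert.writeError(collSh.insert([{ ukey: 0 }, { ukey: 0 }]));
var err = res.getWriteErrorAt(0);
print(err.index + ": " + err.errmsg);  // which document failed, and why
print(res.nInserted);                  // documents that still went in

// The legacy continue-on-error flag (the trailing 1) maps to an unordered
// batch, so later documents are still attempted after a failure.
res = assert.writeError(collSh.insert([{ ukey: 1 }, { ukey: 1 }, { ukey: 2 }], 1));
assert.eq(2, res.nInserted, res.toString());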
diff --git a/jstests/sharding/cleanup_orphaned_cmd.js b/jstests/sharding/cleanup_orphaned_cmd.js
index 872d124cfda..84ca52e0f18 100644
--- a/jstests/sharding/cleanup_orphaned_cmd.js
+++ b/jstests/sharding/cleanup_orphaned_cmd.js
@@ -25,16 +25,16 @@ st.printShardingStatus();
jsTest.log( "Inserting some regular docs..." );
-for ( var i = -50; i < 50; i++ ) coll.insert({ _id : i });
-assert.eq( null, coll.getDB().getLastError() );
+var bulk = coll.initializeUnorderedBulkOp();
+for ( var i = -50; i < 50; i++ ) bulk.insert({ _id : i });
+assert.writeOK( bulk.execute() );
// Half of the data is on each shard
jsTest.log( "Inserting some orphaned docs..." );
var shard0Coll = st.shard0.getCollection( coll + "" );
-shard0Coll.insert({ _id : 10 });
-assert.eq( null, shard0Coll.getDB().getLastError() );
+assert.writeOK( shard0Coll.insert({ _id : 10 }));
assert.neq( 50, shard0Coll.count() );
assert.eq( 100, coll.find().itcount() );
@@ -68,12 +68,13 @@ assert( admin.runCommand({ moveChunk : coll + "",
jsTest.log( "Inserting some more orphaned docs..." );
+st.printShardingStatus();
+
var shard0Coll = st.shard0.getCollection( coll + "" );
-shard0Coll.insert({ _id : -36 });
-shard0Coll.insert({ _id : -10 });
-shard0Coll.insert({ _id : 0 });
-shard0Coll.insert({ _id : 10 });
-assert.eq( null, shard0Coll.getDB().getLastError() );
+assert.writeOK(shard0Coll.insert({ _id : -35 }));
+assert.writeOK(shard0Coll.insert({ _id : -11 }));
+assert.writeOK(shard0Coll.insert({ _id : 0 }));
+assert.writeOK(shard0Coll.insert({ _id : 10 }));
assert.neq( 25, shard0Coll.count() );
assert.eq( 100, coll.find().itcount() );
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
index db19e55a79f..219f23d3282 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -36,8 +36,9 @@ jsTest.log( "Inserting some docs on each shard, so 1/2 will be orphaned..." );
for ( var s = 0; s < 2; s++ ) {
var shardColl = ( s == 0 ? st.shard0 : st.shard1 ).getCollection( coll + "" );
- for ( var i = 0; i < 100; i++ ) shardColl.insert({ _id : i });
- assert.eq( null, shardColl.getDB().getLastError() );
+ var bulk = shardColl.initializeUnorderedBulkOp();
+ for ( var i = 0; i < 100; i++ ) bulk.insert({ _id : i });
+ assert.writeOK(bulk.execute());
}
assert.eq( 200, st.shard0.getCollection( coll + "" ).find().itcount() +
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index 824e2b45167..c814805d531 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -30,9 +30,11 @@ jsTest.log( "Enabling sharding for the first time..." )
admin.runCommand({ enableSharding : coll.getDB() + "" })
admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
-for( var i = 0; i < 100; i++ )
- insertMongos.getCollection( coll + "" ).insert({ _id : i, test : "a" })
-assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
+var bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
+for( var i = 0; i < 100; i++ ) {
+ bulk.insert({ _id : i, test : "a" });
+}
+assert.writeOK( bulk.execute() );
assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "a" }).itcount() )
coll.drop()
@@ -48,9 +50,11 @@ admin.runCommand({ enableSharding : coll.getDB() + "" })
coll.ensureIndex({ notId : 1 })
admin.runCommand({ shardCollection : coll + "", key : { notId : 1 } })
-for( var i = 0; i < 100; i++ )
- insertMongos.getCollection( coll + "" ).insert({ notId : i, test : "b" })
-assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
+bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
+for( var i = 0; i < 100; i++ ) {
+ bulk.insert({ notId : i, test : "b" });
+}
+assert.writeOK( bulk.execute() );
assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "b" }).itcount() )
assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a" ] } }).itcount() )
@@ -73,12 +77,11 @@ admin.runCommand({ movePrimary : coll.getDB() + "",
to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) })
jsTest.log( "moved primary..." )
-
-for( var i = 0; i < 100; i++ )
- insertMongos.getCollection( coll + "" ).insert({ test : "c" })
-assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
-jsTest.log( "waited for gle..." )
+bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
+for( var i = 0; i < 100; i++ )
+ bulk.insert({ test : "c" });
+assert.writeOK( bulk.execute() );
assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "c" }).itcount() )
assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b" ] } }).itcount() )
@@ -97,9 +100,11 @@ admin.runCommand({ movePrimary : coll.getDB() + "",
to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) })
admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
+bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
for( var i = 0; i < 100; i++ )
- insertMongos.getCollection( coll + "" ).insert({ test : "d" })
-assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
+ bulk.insert({ test : "d" });
+assert.writeOK( bulk.execute() );
+
assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "d" }).itcount() )
assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b", "c" ] } }).itcount() )
@@ -107,4 +112,4 @@ coll.drop()
jsTest.log( "Done!" )
-st.stop() \ No newline at end of file
+st.stop()
diff --git a/jstests/sharding/coll_epoch_test2.js b/jstests/sharding/coll_epoch_test2.js
index 43528296cc9..20f1fe40774 100644
--- a/jstests/sharding/coll_epoch_test2.js
+++ b/jstests/sharding/coll_epoch_test2.js
@@ -34,8 +34,7 @@ jsTest.log( "Enabling sharding for the first time..." )
admin.runCommand({ enableSharding : coll.getDB() + "" })
admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
-coll.insert({ hello : "world" })
-assert.eq( null, coll.getDB().getLastError() )
+assert.writeOK(coll.insert({ hello : "world" }));
jsTest.log( "Sharding collection across multiple shards..." )
@@ -81,8 +80,9 @@ assert(droppedCollDoc.lastmodEpoch.equals(new ObjectId("000000000000000000000000
admin.runCommand({ enableSharding : coll.getDB() + "" })
admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
-for( var i = 0; i < 100; i++ ) coll.insert({ _id : i })
-assert.eq( null, coll.getDB().getLastError() )
+var bulk = coll.initializeUnorderedBulkOp();
+for( var i = 0; i < 100; i++ ) bulk.insert({ _id : i });
+assert.writeOK(bulk.execute());
printjson( admin.runCommand({ split : coll + "", middle : { _id : 200 } }) )
printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 200 },
@@ -100,20 +100,18 @@ assert.neq( null, readMongos.getCollection( coll + "" ).findOne({ _id : 1 }) )
jsTest.log( "Checking update...")
// Ensure that updating an element finds the right location
-updateMongos.getCollection( coll + "" ).update({ _id : 1 }, { $set : { updated : true } })
-assert.eq( null, updateMongos.getDB( coll.getDB() + "" ).getLastError() )
+assert.writeOK(updateMongos.getCollection( coll + "" ).update({ _id : 1 },
+ { $set : { updated : true } }));
assert.neq( null, coll.findOne({ updated : true }) )
jsTest.log( "Checking insert..." )
// Ensure that inserting an element finds the right shard
-insertMongos.getCollection( coll + "" ).insert({ _id : 101 })
-assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
+assert.writeOK(insertMongos.getCollection( coll + "" ).insert({ _id : 101 }));
assert.neq( null, coll.findOne({ _id : 101 }) )
jsTest.log( "Checking remove..." )
// Ensure that removing an element finds the right shard, verified by the mongos doing the sharding
-removeMongos.getCollection( coll + "" ).remove({ _id : 2 })
-assert.eq( null, removeMongos.getDB( coll.getDB() + "" ).getLastError() )
+assert.writeOK(removeMongos.getCollection( coll + "" ).remove({ _id : 2 }));
assert.eq( null, coll.findOne({ _id : 2 }) )
coll.drop()
diff --git a/jstests/sharding/gle_with_conf_servers.js b/jstests/sharding/conf_server_write_concern.js
index f9b54678078..30002ae9f46 100644
--- a/jstests/sharding/gle_with_conf_servers.js
+++ b/jstests/sharding/conf_server_write_concern.js
@@ -1,29 +1,25 @@
/**
- * Test getLastError with w parameter when writing directly to the config servers will
+ * Test write concern with w parameter when writing directly to the config servers will
* not cause an error.
*/
function writeToConfigTest(){
var st = new ShardingTest({ shards: 2 });
var confDB = st.s.getDB( 'config' );
- confDB.settings.update({ _id: 'balancer' }, { $set: { stopped: true }});
- var gleObj = confDB.runCommand({ getLastError: 1, w: 'majority' });
-
- assert( gleObj.ok );
- assert.eq(null, gleObj.err);
+ assert.writeOK(confDB.settings.update({ _id: 'balancer' },
+ { $set: { stopped: true }},
+ { writeConcern: { w: 'majority' }}));
// w:1 should still work
- confDB.settings.update({ _id: 'balancer' }, { $set: { stopped: true }});
- var gleObj = confDB.runCommand({ getLastError: 1, w: 1 });
-
- assert(gleObj.ok);
- assert.eq(null, gleObj.err);
+ assert.writeOK(confDB.settings.update({ _id: 'balancer' },
+ { $set: { stopped: true }},
+ { writeConcern: { w: 1 }}));
st.stop();
}
/**
- * Test getLastError with w parameter will not cause an error when writes to mongos
+ * Test write concern with w parameter will not cause an error when writes to mongos
* would trigger writes to config servers (in this test, split chunks is used).
*/
function configTest( configCount ){
@@ -47,15 +43,11 @@ function configTest( configCount ){
var x = 0;
while( currChunks <= initChunks ){
- coll.insert({ x: x++ });
- gleObj = testDB.runCommand({ getLastError: 1, w: 'majority' });
+ assert.writeOK(coll.insert({ x: x++ }, { writeConcern: { w: 'majority' }}));
currChunks = chunkCount();
}
- assert( gleObj.ok );
- assert.eq( null, gleObj.err );
-
- st.stop();
+ st.stop();
}
writeToConfigTest();
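Write concern that used to ride on a separate getLastError command now travels as an option on the write itself, for config-server writes as for any other. Sketch (the options-document form; wtimeout value illustrative):

// { upsert, multi, writeConcern } are all optional fields here.
assert.writeOK(confDB.settings.update(
    { _id: 'balancer' },
    { $set: { stopped: true } },
    { upsert: true, writeConcern: { w: 'majority', wtimeout: 15000 } }));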
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index fadee5d81aa..08e80996a85 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -19,17 +19,16 @@ conn.setLogLevel( 3 )
var coll = conn.getCollection( "test.countSlaveOk" )
coll.drop()
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < 300; i++ ){
- coll.insert( { i : i % 10 } )
+ bulk.insert({ i: i % 10 });
}
+assert.writeOK(bulk.execute());
var connA = conn
var connB = new Mongo( st.s.host )
var connC = new Mongo( st.s.host )
-// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
-coll.getDB().getLastError()
-
st.printShardingStatus()
// Wait for client to update itself and replication to finish
@@ -64,7 +63,6 @@ try {
coll.find({ i : 0 }).count()
print( "Should not reach here!" )
- printjson( coll.getDB().getLastError() )
assert( false )
}
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index d0e9751b226..6ca7e5ff1cb 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -16,10 +16,11 @@ primary = s.getServer( "test" ).getDB( "test" );
secondary = s.getOther( primary ).getDB( "test" );
numObjs = 10;
-for (i=0; i < numObjs; i++){
- db.foo.insert({_id: i});
-}
-db.getLastError();
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (i=0; i < numObjs; i++){
+ bulk.insert({ _id: i });
+}
+assert.writeOK(bulk.execute());
assert.eq( 1, s.config.chunks.count() , "test requires collection to have one chunk initially" );
// we'll split the collection in two and move the second chunk while three cursors are open
diff --git a/jstests/sharding/cursor_cleanup.js b/jstests/sharding/cursor_cleanup.js
index a28b5416e38..758b60c4e5c 100644
--- a/jstests/sharding/cursor_cleanup.js
+++ b/jstests/sharding/cursor_cleanup.js
@@ -24,11 +24,14 @@ st.printShardingStatus(true);
jsTest.log("Insert enough data to overwhelm a query batch.");
+var bulk = coll.initializeUnorderedBulkOp();
+var bulk2 = collUnsharded.initializeUnorderedBulkOp();
for (var i = -150; i < 150; i++) {
- coll.insert({ _id : i });
- collUnsharded.insert({ _id : i });
+ bulk.insert({ _id : i });
+ bulk2.insert({ _id : i });
}
-assert.eq(null, coll.getDB().getLastError());
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
jsTest.log("Open a cursor to a sharded and unsharded collection.");
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 1e1d791679d..6d8b04aaf16 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -105,9 +105,8 @@ s.adminCommand( { split : "test.foo4" , middle : { num : 10 } } );
s.admin.runCommand({ movechunk: "test.foo4", find: { num: 20 },
to: s.getOther( s.getServer( "test" ) ).name });
-db.foo4.save( { num : 5 } );
-db.foo4.save( { num : 15 } );
-db.getLastError();
+assert.writeOK(db.foo4.save( { num : 5 } ));
+assert.writeOK(db.foo4.save( { num : 15 } ));
s.sync();
assert.eq( 1 , a.foo4.count() , "ua1" );
assert.eq( 1 , b.foo4.count() , "ub1" );
@@ -121,9 +120,7 @@ assert( b.foo4.getIndexes()[1].unique , "ub3" );
assert.eq( 2 , db.foo4.count() , "uc1" )
db.foo4.save( { num : 7 } )
assert.eq( 3 , db.foo4.count() , "uc2" )
-db.foo4.save( { num : 7 } )
-gle = db.getLastErrorObj();
-assert( gle.err , "uc3" )
+assert.writeError(db.foo4.save( { num : 7 } ));
assert.eq( 3 , db.foo4.count() , "uc4" )
// --- don't let you convertToCapped ----
@@ -181,15 +178,13 @@ assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count
// ---- can't shard non-empty collection without index -----
-db.foo8.save( { a : 1 } );
-db.getLastError();
+assert.writeOK(db.foo8.save( { a : 1 } ));
assert( ! s.admin.runCommand( { shardcollection : "test.foo8" , key : { a : 1 } } ).ok , "non-empty collection" );
// ---- can't shard non-empty collection with null values in shard key ----
-db.foo9.save( { b : 1 } );
-db.getLastError();
+assert.writeOK(db.foo9.save( { b : 1 } ));
db.foo9.ensureIndex( { a : 1 } );
assert( ! s.admin.runCommand( { shardcollection : "test.foo9" , key : { a : 1 } } ).ok , "entry with null value" );
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 2a32218b717..4b9843a724c 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -60,7 +60,6 @@ assert.eq( 0 , db.foo.count() , "D7" );
db.foo2.save( { _id : new ObjectId() } );
db.foo2.save( { _id : new ObjectId() } );
db.foo2.save( { _id : new ObjectId() } );
-db.getLastError();
assert.eq( 1 , s.onNumShards( "foo2" ) , "F1" );
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index 83171d86f40..3b6b114a781 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -30,10 +30,11 @@ s.adminCommand({moveChunk: "test.foo", find: {_id: 3},
s.setBalancer(true)
// insert 10k small documents into the sharded collection
+var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < numDocs; i++)
- db.foo.insert({_id: i});
+ bulk.insert({ _id: i });
+assert.writeOK(bulk.execute());
-db.getLastError();
var x = db.foo.stats();
// verify the collection has been sharded and documents are evenly distributed
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index c7670957ebd..058016e22c1 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -16,10 +16,11 @@ s.adminCommand( { shardcollection : "test.stuff" , key : {_id:1} } );
s.adminCommand( { split: "test.stuff" , middle : { _id : numObjs/2 } } );
s.adminCommand( { movechunk : "test.stuff" , find : { _id : numObjs/2 } , to : secondary.getMongo().name } ) ;
+var bulk = db.stuff.initializeUnorderedBulkOp();
for (var i=0; i < numObjs; i++){
- db.stuff.insert({_id: i});
+ bulk.insert({_id: i});
}
-db.getLastError()
+assert.writeOK(bulk.execute());
// put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
for (var i=2; i < numObjs; i+=2){
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index 542818c7167..ad7b1688ca2 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -45,7 +45,6 @@ function via_fam() {
{ big: big }
}});
}
- db.getLastError();
}
// upsert via findAndModify
@@ -55,7 +54,6 @@ function via_fam_upsert() {
{ big: big }
}, upsert: true});
}
- db.getLastError();
}
// update data using basic update
@@ -69,7 +67,6 @@ function via_update() {
{ big: big }
});
}
- db.getLastError();
}
// upsert data using basic update
@@ -79,7 +76,6 @@ function via_update_upsert() {
{ big: big }
}, true);
}
- db.getLastError();
}
print("---------- Update via findAndModify...");
diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js
index 54eeb88d9b5..e79793580a1 100644
--- a/jstests/sharding/forget_mr_temp_ns.js
+++ b/jstests/sharding/forget_mr_temp_ns.js
@@ -11,10 +11,11 @@ var admin = mongos.getDB( "admin" );
var coll = mongos.getCollection( "foo.bar" );
var outputColl = mongos.getCollection( (coll.getDB() + "") + ".mrOutput" );
+var bulk = coll.initializeUnorderedBulkOp();
for ( var i = 0; i < 10; i++ ) {
- coll.insert({ _id : i, even : (i % 2 == 0) });
+ bulk.insert({ _id : i, even : (i % 2 == 0) });
}
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(bulk.execute());
var map = function() { emit( this.even, 1 ); };
var reduce = function( key, values ) { return Array.sum(values); };
diff --git a/jstests/sharding/fts_score_sort_sharded.js b/jstests/sharding/fts_score_sort_sharded.js
index 3136401be1c..3ff699ac9b9 100644
--- a/jstests/sharding/fts_score_sort_sharded.js
+++ b/jstests/sharding/fts_score_sort_sharded.js
@@ -25,13 +25,11 @@ assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(),
//
// Insert documents into collection and create text index.
//
-coll.insert({_id: 1, a: "pizza"});
-coll.insert({_id: -1, a: "pizza pizza"});
-coll.insert({_id: 2, a: "pizza pizza pizza"});
-coll.insert({_id: -2, a: "pizza pizza pizza pizza"});
-assert.gleSuccess(coll.getDB());
-coll.ensureIndex({a: "text"});
-assert.gleSuccess(coll.getDB());
+assert.writeOK(coll.insert({ _id: 1, a: "pizza" }));
+assert.writeOK(coll.insert({ _id: -1, a: "pizza pizza" }));
+assert.writeOK(coll.insert({ _id: 2, a: "pizza pizza pizza" }));
+assert.writeOK(coll.insert({ _id: -2, a: "pizza pizza pizza pizza"}));
+assert.commandWorked(coll.ensureIndex({ a: "text" }));
//
// Execute query with sort on document score, verify results are in correct order.
diff --git a/jstests/sharding/geo_shardedgeonear.js b/jstests/sharding/geo_shardedgeonear.js
index 269531f4caa..85aef9eb894 100644
--- a/jstests/sharding/geo_shardedgeonear.js
+++ b/jstests/sharding/geo_shardedgeonear.js
@@ -26,13 +26,11 @@ function test(db, sharded, indexType) {
for (var i=0; i < numPts; i++) {
var lat = 90 - Random.rand() * 180;
var lng = 180 - Random.rand() * 360;
- db[coll].insert({rand:Math.random(), loc: [lng, lat]})
- assert.eq(null, db.getLastError());
+ assert.writeOK(db[coll].insert({rand:Math.random(), loc: [lng, lat]}));
}
assert.eq(db[coll].count(), numPts);
- db[coll].ensureIndex({loc: indexType})
- assert(!db.getLastError());
+ assert.commandWorked(db[coll].ensureIndex({ loc: indexType }));
var queryPoint = [0,0]
geoCmd = {geoNear: coll, near: queryPoint, spherical: true, includeLocs: true};
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index 3b7cec4910f..d21490316ff 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -17,12 +17,11 @@ conn.setLogLevel( 3 )
var coll = conn.getCollection( "test.groupSlaveOk" )
coll.drop()
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < 300; i++ ){
- coll.insert( { i : i % 10 } )
+ bulk.insert( { i : i % 10 } );
}
-
-// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
-coll.getDB().getLastError()
+assert.writeOK( bulk.execute() );
st.printShardingStatus()
@@ -51,17 +50,15 @@ assert.eq( 10, coll.group({ key : { i : true } ,
try {
conn.setSlaveOk( false )
- coll.group({ key : { i : true } ,
- reduce : function( obj, ctx ){ ctx.count += 1 } ,
- initial : { count : 0 } })
-
- print( "Should not reach here!" )
- printjson( coll.getDB().getLastError() )
- assert( false )
-
+ var res = coll.group({ key : { i : true } ,
+ reduce : function( obj, ctx ){ ctx.count += 1 } ,
+ initial : { count : 0 } });
+
+ print( "Should not reach here! Group result: " + tojson(res) );
+ assert( false );
}
catch( e ){
- print( "Non-slaveOk'd connection failed." )
+ print( "Non-slaveOk'd connection failed." + tojson(e) )
}
// Finish
diff --git a/jstests/sharding/hash_regex_targetting.js b/jstests/sharding/hash_regex_targetting.js
index 3e139496d94..7908f3883df 100644
--- a/jstests/sharding/hash_regex_targetting.js
+++ b/jstests/sharding/hash_regex_targetting.js
@@ -11,11 +11,10 @@ var doc2 = { path: "thisisabigString", val: true }
col.insert([doc1, doc2])
printjson(col.find({ path : /isa/ }).explain());
-col.update({ path : /isa/ }, { "$set" : { val : false } }, {multi:true});
-var leObj = col.getDB().getLastErrorObj();
+var res = col.update({ path : /isa/ }, { $set: { val: false }}, { multi: true });
var result = col.findOne();
assert.eq(false, result.val);
-assert.eq(2, leObj['n']);
+assert.eq(2, res.nModified);
st.stop();
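
The hash_regex_targetting.js hunk above shows the replacement for getLastErrorObj() on updates: the update call itself now returns a WriteResult carrying the counters that GLE used to expose (e.g. "n"). A minimal sketch with an illustrative collection and filter:

    var res = db.example.update({ flag: true }, { $set: { flag: false } }, { multi: true });
    assert.writeOK(res);
    print("matched: "  + res.nMatched);   // documents matched by the filter
    print("modified: " + res.nModified);  // documents actually changed
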
diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js
index ec8e6063aa0..832cb93600f 100644
--- a/jstests/sharding/hash_shard_unique_compound.js
+++ b/jstests/sharding/hash_shard_unique_compound.js
@@ -22,8 +22,7 @@ assert.commandWorked(db.adminCommand( { shardcollection : ns , key : { a : "hash
db.printShardingStatus();
// Create unique index
-coll.ensureIndex({a:1, b:1}, {unique:true})
-assert.gleSuccess(db, "unique index failed");
+assert.commandWorked(coll.ensureIndex({ a: 1, b: 1 }, { unique: true }));
jsTest.log("------ indexes -------")
jsTest.log(tojson(coll.getIndexes()));
@@ -33,8 +32,7 @@ jsTest.log("------ dropping sharded collection to start part 2 -------")
coll.drop();
//Create unique index
-coll.ensureIndex({a:1, b:1}, {unique:true})
-assert.gleSuccess(db, "unique index failed 2");
+assert.commandWorked(coll.ensureIndex({ a: 1, b: 1 }, { unique: true }));
// shard a fresh collection using a hashed shard key
assert.commandWorked(db.adminCommand( { shardcollection : ns , key : { a : "hashed" } } ),
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index b66d0c405c7..2767c18dab3 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -4,180 +4,181 @@ s = new ShardingTest( "shard_index", 2, 0, 1 )
// Regenerate fully because of SERVER-2782
for ( var i = 0; i < 19; i++ ) {
-
- var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i )
- coll.drop()
-
- for ( var j = 0; j < 300; j++ ) {
- coll.insert( { num : j, x : 1 } )
- }
- assert.eq( null, coll.getDB().getLastError() );
-
- if(i == 0) s.adminCommand( { enablesharding : "" + coll._db } );
-
- print("\n\n\n\n\nTest # " + i)
-
- if ( i == 0 ) {
-
- // Unique index exists, but not the right one.
- coll.ensureIndex( { num : 1 }, { unique : true } )
- coll.ensureIndex( { x : 1 } )
-
- passed = false
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
- passed = true
- } catch (e) {
- print( e )
- }
- assert( !passed, "Should not shard collection when another unique index exists!")
-
- }
- if ( i == 1 ) {
-
- // Unique index exists as prefix, also index exists
- coll.ensureIndex( { x : 1 } )
- coll.ensureIndex( { x : 1, num : 1 }, { unique : true } )
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
- }
- catch(e){
- print(e)
- assert( false, "Should be able to shard non-unique index without unique option.")
- }
-
- }
- if ( i == 2 ) {
- // Non-unique index exists as prefix, also index exists. No unique index.
- coll.ensureIndex( { x : 1 } )
- coll.ensureIndex( { x : 1, num : 1 } )
+
+ var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i )
+ coll.drop()
+
+ var bulk = coll.initializeUnorderedBulkOp();
+ for ( var j = 0; j < 300; j++ ) {
+ bulk.insert({ num: j, x: 1 });
+ }
+ assert.writeOK(bulk.execute());
+
+ if(i == 0) s.adminCommand( { enablesharding : "" + coll._db } );
+
+ print("\n\n\n\n\nTest # " + i)
+
+ if ( i == 0 ) {
+
+ // Unique index exists, but not the right one.
+ coll.ensureIndex( { num : 1 }, { unique : true } )
+ coll.ensureIndex( { x : 1 } )
+
+ passed = false
+ try {
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
+ passed = true
+ } catch (e) {
+ print( e )
+ }
+ assert( !passed, "Should not shard collection when another unique index exists!")
+
+ }
+ if ( i == 1 ) {
+
+ // Unique index exists as prefix, also index exists
+ coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { x : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ }
+ catch(e){
+ print(e)
+ assert( false, "Should be able to shard non-unique index without unique option.")
+ }
+
+ }
+ if ( i == 2 ) {
+ // Non-unique index exists as prefix, also index exists. No unique index.
+ coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { x : 1, num : 1 } )
passed = false;
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
passed = true;
- }
- catch( e ){
- print(e)
- assert( !passed, "Should be able to shard collection with no unique index if unique not specified.")
- }
- }
- if ( i == 3 ) {
-
- // Unique index exists as prefix, also unique index exists
- coll.ensureIndex( { num : 1 }, { unique : true })
- coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } )
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
- }
- catch( e ){
- print(e)
- assert( false, "Should be able to shard collection with unique prefix index.")
- }
-
- }
- if ( i == 4 ) {
-
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true })
- }
- catch( e ){
- print(e)
- assert( false, "Should be able to shard collection with unique id index.")
- }
-
- }
- if ( i == 5 ) {
-
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true })
- }
- catch( e ){
- print(e)
- assert( false, "Should be able to shard collection with unique combination id index.")
- }
-
- }
- if ( i == 6 ) {
-
- coll.remove({})
-
- // Unique index does not exist, also unique prefix index exists
- coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } )
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
- }
- catch( e ){
- print(e)
- assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.")
- }
-
+    }
+    catch( e ){
+        print(e)
+    }
+    assert( passed, "Should be able to shard collection with no unique index if unique not specified.")
+    }
+ if ( i == 3 ) {
+
+ // Unique index exists as prefix, also unique index exists
+ coll.ensureIndex( { num : 1 }, { unique : true })
+ coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique prefix index.")
+ }
+
+ }
+ if ( i == 4 ) {
+
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique id index.")
+ }
+
+ }
+ if ( i == 5 ) {
+
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique combination id index.")
+ }
+
+ }
+ if ( i == 6 ) {
+
+ coll.remove({})
+
+ // Unique index does not exist, also unique prefix index exists
+ coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.")
+ }
+
printjson( coll.getIndexes() )
// Make sure the index created is unique!
assert.eq( 1, coll.getDB().getCollection( "system.indexes" ).find( { ns : "" + coll, key : { num : 1 }, unique : true } ).itcount() )
-
- }
+
+ }
if ( i == 7 ) {
- coll.remove({})
+ coll.remove({})
- // No index exists
+ // No index exists
- try{
- assert.eq( coll.find().itcount(), 0 )
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } })
- }
- catch( e ){
- print(e)
+ try{
+ assert.eq( coll.find().itcount(), 0 )
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } })
+ }
+ catch( e ){
+ print(e)
assert( false, "Should be able to shard collection with no index on shard key.")
- }
- }
+ }
+ }
if ( i == 8 ) {
- coll.remove({})
+ coll.remove({})
- // No index exists
+ // No index exists
passed = false
- try{
- assert.eq( coll.find().itcount(), 0 )
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ try{
+ assert.eq( coll.find().itcount(), 0 )
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
passed = true
- }
- catch( e ){
- print(e)
- }
+ }
+ catch( e ){
+ print(e)
+ }
assert( passed, "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.")
printjson( coll.getIndexes() )
// Make sure the index created is unique!
assert.eq( 1, coll.getDB().getCollection( "system.indexes" ).find( { ns : "" + coll, key : { num : 1 }, unique : true } ).itcount() )
- }
+ }
if ( i == 9 ) {
- // Unique index exists on a different field as well
- coll.ensureIndex( { num : 1 }, { unique : true } )
- coll.ensureIndex( { x : 1 } )
+ // Unique index exists on a different field as well
+ coll.ensureIndex( { num : 1 }, { unique : true } )
+ coll.ensureIndex( { x : 1 } )
passed = false
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
+ try {
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
passed = true
- } catch (e) {
- print( e )
- }
+ } catch (e) {
+ print( e )
+ }
assert( !passed, "Should not shard collection when another unique index exists!" )
- }
+ }
if ( i == 10 ){
//try sharding non-empty collection without any index
@@ -194,14 +195,14 @@ for ( var i = 0; i < 19; i++ ) {
//now add containing index and try sharding by prefix
coll.ensureIndex( {num : 1, x : 1} );
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( passed , "Should be able to shard collection with prefix of existing index");
+ try{
+ s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
+ passed = true;
+ }
+ catch( e ){
+ print(e);
+ }
+ assert( passed , "Should be able to shard collection with prefix of existing index");
printjson( coll.getIndexes() );
diff --git a/jstests/sharding/inserts_consistent.js b/jstests/sharding/inserts_consistent.js
index 59fb5d39a98..288cb7a9b44 100644
--- a/jstests/sharding/inserts_consistent.js
+++ b/jstests/sharding/inserts_consistent.js
@@ -1,5 +1,4 @@
-// Shows how the WBL / getLastError logic depends on the actual chunks which are moved
-// We should probably either always wait for writebacks, or don't at all
+// Test write re-routing on version mismatch.
var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 2, other : { separateConfig : true } })
@@ -39,45 +38,22 @@ print( "Other shard : " + otherShard )
printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : otherShard }) )
-jsTest.log( "Inserting docs to be written back..." )
+jsTest.log( "Inserting docs that needs to be retried..." )
var nextId = -1
-// Create writebacks, could add more here
for( var i = 0; i < 2; i++ ){
printjson( "Inserting " + nextId )
- collB.insert({ _id : nextId--, hello : "world" })
+ assert.writeOK(collB.insert({ _id : nextId--, hello : "world" }));
}
-// Refresh server
-printjson( adminB.runCommand({ flushRouterConfig : 1 }) )
-
jsTest.log( "Inserting doc which successfully goes through..." )
// Do second write
-collB.insert({ _id : nextId--, goodbye : "world" })
-
-printjson( collB.getDB().getLastErrorObj() )
-
-// Get error for last write
-//assert.eq( null, collB.getDB().getLastError() )
+assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" }))
-jsTest.log( "GLE waited for the writebacks." )
-
-// Assert that we waited for the writebacks...
+// Assert that the writes went through
assert.eq( coll.find().itcount(), 3 )
-/*
-
-jsTest.log( "Waiting for the writeback..." )
-
-assert.soon(function(){
- var count = coll.find().itcount()
- print( "Count is : " + count )
- return count == 3
-})
-
-*/
-
jsTest.log( "Now try moving the actual chunk we're writing to..." )
// Now move the actual chunk we're writing to
@@ -85,7 +61,7 @@ printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : o
jsTest.log( "Inserting second docs to get written back..." )
-// Will fail entirely if too many of these, gle will wait too long
+// Will fail entirely if there are too many of these, since waiting for the writes to be applied can take too long.
for( var i = 0; i < 2; i++ ){
collB.insert({ _id : nextId--, hello : "world" })
}
@@ -96,18 +72,13 @@ printjson( adminB.runCommand({ flushRouterConfig : 1 }) )
jsTest.log( "Inserting second doc which successfully goes through..." )
// Do second write
-collB.insert({ _id : nextId--, goodbye : "world" })
-
-jsTest.log( "GLE is now waiting for the writeback!" )
-
-// Get error for last write
-assert.eq( null, collB.getDB().getLastError() )
+assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" }));
jsTest.log( "All docs written this time!" )
-// Assert that we now waited for the writeback
+// Assert that writes went through.
assert.eq( coll.find().itcount(), 6 )
jsTest.log( "DONE" )
-st.stop()
\ No newline at end of file
+st.stop()
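
inserts_consistent.js now leans on the write commands path for re-routing: after a moveChunk, a mongos holding a stale routing table refreshes and retries internally, so the caller just checks the returned result. Sketched under those assumptions (namespace and variables illustrative):

    assert.commandWorked(admin.runCommand({ moveChunk: "test.foo", find: { _id: 0 }, to: otherShard }));
    // Insert through a mongos that has not yet seen the migration; the write
    // command is expected to succeed after an internal version refresh.
    assert.writeOK(staleMongos.getCollection("test.foo").insert({ _id: -1 }));
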
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index a344ffd481d..ccf31e83f2a 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -14,17 +14,17 @@ while ( big.length < 10000 )
big += "."
x = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( ; x < 500; x++ )
- db.foo.insert( { x : x , big : big } )
+ bulk.insert( { x : x , big : big } );
for ( i=0; i<500; i++ )
- db.foo.insert( { x : x , big : big } )
+ bulk.insert( { x : x , big : big } );
for ( ; x < 2000; x++ )
- db.foo.insert( { x : x , big : big } )
+ bulk.insert( { x : x , big : big } );
-
-db.getLastError();
+assert.writeOK( bulk.execute() );
sh.status(true)
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index f40d0aa3e8b..4becd6746ce 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -137,12 +137,12 @@ for ( var i=0; i<types.length; i++ ){
assert.eq( 1 , c.find( { xx : { $exists : true } } ).count() , curT.name + " xx 2 " );
assert.eq( curT.values[3] , getKey( c.findOne( { xx : 17 } ) ) , curT.name + " xx 3 " );
- c.update( makeObjectDotted( curT.values[3] ) , { $set : { xx : 17 } } , {upsert: true});
- assert.eq( null , db.getLastError() , curT.name + " upserts should work if they include the shard key in the query" );
+ assert.writeOK(c.update( makeObjectDotted( curT.values[3] ),
+ { $set: { xx: 17 }},
+ { upsert: true }));
+
+ assert.commandWorked(c.ensureIndex( { _id: 1 } , { unique: true } ));
- c.ensureIndex( { _id : 1 } , { unique : true } );
- assert.eq( null , db.getLastError() , curT.name + " creating _id index should be ok" );
-
// multi update
var mysum = 0;
c.find().forEach( function(z){ mysum += z.xx || 0; } );
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index e32ab5ab82b..45a9784788f 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -14,11 +14,9 @@ var getDBSection = function (dbsArray, dbToFind) {
return null;
}
-mongos.getDB("blah").foo.insert({_id:1})
-mongos.getDB("foo").foo.insert({_id:1})
-mongos.getDB("raw").foo.insert({_id:1})
-//wait for writes to finish
-mongos.getDB("raw").getLastError()
+assert.writeOK(mongos.getDB("blah").foo.insert({ _id: 1 }));
+assert.writeOK(mongos.getDB("foo").foo.insert({ _id: 1 }));
+assert.writeOK(mongos.getDB("raw").foo.insert({ _id: 1 }));
//verify that the config db is not on a shard
var res = mongos.adminCommand("listDatabases");
@@ -43,4 +41,4 @@ assert(!getDBSection(dbArray, "config").shards, "config db is on a shard! 2")
assert(getDBSection(dbArray, "admin"), "admin db not found! 2")
assert(!getDBSection(dbArray, "admin").shards, "admin db is on a shard! 2")
-test.stop()
\ No newline at end of file
+test.stop()
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 28f3a963239..33b4d167361 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -41,18 +41,10 @@ var assertCannotRunCommands = function(mongo, st) {
// CRUD
var test = mongo.getDB("test");
assert.throws( function() { test.system.users.findOne(); });
-
- test.foo.save({_id:0});
- assert(test.getLastError());
-
+ assert.writeError(test.foo.save({ _id: 0 }));
assert.throws( function() { test.foo.findOne({_id:0}); });
-
- test.foo.update({_id:0}, {$set:{x:20}});
- assert(test.getLastError());
-
- test.foo.remove({_id:0});
- assert(test.getLastError());
-
+ assert.writeError(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
+ assert.writeError(test.foo.remove({ _id: 0 }));
// Multi-shard
assert.throws(function() {
@@ -86,15 +78,10 @@ var assertCanRunCommands = function(mongo, st) {
// this will throw if it fails
test.system.users.findOne();
- test.foo.save({_id: 0});
- assert(test.getLastError() == null);
-
- test.foo.update({_id: 0}, {$set:{x:20}});
- assert(test.getLastError() == null);
-
- test.foo.remove({_id: 0});
- assert(test.getLastError() == null);
-
+ assert.writeOK(test.foo.save({ _id: 0 }));
+ assert.writeOK(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
+ assert.writeOK(test.foo.remove({ _id: 0 }));
+
// Multi-shard
test.foo.mapReduce(
function() { emit(1, 1); },
diff --git a/jstests/sharding/mapReduce.js b/jstests/sharding/mapReduce.js
index 55168036fa7..60b0d6c0a68 100644
--- a/jstests/sharding/mapReduce.js
+++ b/jstests/sharding/mapReduce.js
@@ -14,9 +14,21 @@ s.adminCommand( { enablesharding : "mrShard" } )
s.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } )
db = s.getDB( "mrShard" );
-for (j=0; j<100; j++) for (i=0; i<512; i++){ db.srcNonSharded.save({j:j, i:i})}
-for (j=0; j<100; j++) for (i=0; i<512; i++){ db.srcSharded.save({j:j, i:i})}
-db.getLastError();
+var bulk = db.srcNonSharded.initializeUnorderedBulkOp();
+for (j = 0; j < 100; j++) {
+ for (i = 0; i < 512; i++) {
+ bulk.insert({ j: j, i: i });
+ }
+}
+assert.writeOK(bulk.execute());
+
+bulk = db.srcSharded.initializeUnorderedBulkOp();
+for (j = 0; j < 100; j++) {
+ for (i = 0; i < 512; i++) {
+ bulk.insert({ j: j, i: i });
+ }
+}
+assert.writeOK(bulk.execute());
function map() { emit(this.i, 1); }
function reduce(key, values) { return Array.sum(values) }
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index cc6098f3eea..76f8277cad2 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -50,10 +50,11 @@ assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(),
//
// Insert 100 documents into sharded collection, such that each shard owns 50.
//
+var bulk = coll.initializeUnorderedBulkOp();
for (i=-50; i<50; i++) {
- coll.insert({_id: i});
+ bulk.insert({ _id: i });
}
-assert.eq(null, coll.getDB().getLastError());
+assert.writeOK(bulk.execute());
assert.eq(50, shards[0].getCollection(coll.getFullName()).count());
assert.eq(50, shards[1].getCollection(coll.getFullName()).count());
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
index 22d0e8fc0fa..a74822d1952 100644
--- a/jstests/sharding/merge_chunks_test.js
+++ b/jstests/sharding/merge_chunks_test.js
@@ -35,10 +35,9 @@ assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 90 }, to : shar
st.printShardingStatus();
// Insert some data into each of the consolidated ranges
-coll.insert({ _id : 0 });
-coll.insert({ _id : 40 });
-coll.insert({ _id : 110 });
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(coll.insert({ _id : 0 }));
+assert.writeOK(coll.insert({ _id : 40 }));
+assert.writeOK(coll.insert({ _id : 110 }));
var staleCollection = staleMongos.getCollection( coll + "" );
diff --git a/jstests/sharding/merge_chunks_test_with_data.js b/jstests/sharding/merge_chunks_test_with_data.js
index 0f057787454..3520ea760da 100644
--- a/jstests/sharding/merge_chunks_test_with_data.js
+++ b/jstests/sharding/merge_chunks_test_with_data.js
@@ -31,11 +31,10 @@ assert( admin.runCommand({ split : coll + "", middle : { _id : 60 } }).ok );
st.printShardingStatus();
// Insert data to allow 0->20 and 40->60 to be merged, but too much for 20->40
-coll.insert({ _id : 0 });
-coll.insert({ _id : 20 });
-coll.insert({ _id : 30 });
-coll.insert({ _id : 40 });
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(coll.insert({ _id : 0 }));
+assert.writeOK(coll.insert({ _id : 20 }));
+assert.writeOK(coll.insert({ _id : 30 }));
+assert.writeOK(coll.insert({ _id : 40 }));
jsTest.log( "Merging chunks with another empty chunk..." );
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 2c4d156d760..bf28cad6ea7 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -11,9 +11,11 @@ big = ""
while ( big.length < 10000 )
big += "eliot"
-for ( x=0; x<100; x++ )
- coll.insert( { x : x , big : big } )
-db.getLastError();
+var bulk = coll.initializeUnorderedBulkOp();
+for ( x=0; x<100; x++ ) {
+ bulk.insert( { x : x , big : big } );
+}
+assert.writeOK(bulk.execute());
db.printShardingStatus()
@@ -30,8 +32,7 @@ print( "direct : " + direct )
directDB = direct.getDB( "test" )
for ( done=0; done<2*1024*1024; done+=big.length ){
- directDB.foo.insert( { x : 50 + Math.random() , big : big } )
- directDB.getLastError();
+ assert.writeOK(directDB.foo.insert( { x : 50 + Math.random() , big : big } ));
}
db.printShardingStatus()
diff --git a/jstests/sharding/migrateMemory.js b/jstests/sharding/migrateMemory.js
index 5791ddabff5..be8b76ad272 100644
--- a/jstests/sharding/migrateMemory.js
+++ b/jstests/sharding/migrateMemory.js
@@ -1,3 +1,4 @@
+// TODO: move back to sharding suite after SERVER-13402 is fixed
s = new ShardingTest( "migrateMemory" , 2 , 1 , 1 , { chunksize : 1 });
diff --git a/jstests/sharding/migrate_overwrite_id.js b/jstests/sharding/migrate_overwrite_id.js
index 0a8b2252179..21439247772 100644
--- a/jstests/sharding/migrate_overwrite_id.js
+++ b/jstests/sharding/migrate_overwrite_id.js
@@ -24,11 +24,8 @@ var id = 12345;
jsTest.log( "Inserting a document with id : 12345 into both shards with diff shard key..." );
-coll.insert({ _id : id, skey : -1 });
-assert.eq( null, coll.getDB().getLastError() );
-
-coll.insert({ _id : id, skey : 1 });
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(coll.insert({ _id : id, skey : -1 }));
+assert.writeOK(coll.insert({ _id : id, skey : 1 }));
printjson( shards[0].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
printjson( shards[1].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
@@ -43,4 +40,4 @@ printjson( shards[0].conn.getCollection( coll + "" ).find({ _id : id }).toArray(
printjson( shards[1].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
assert.eq( 2, coll.find({ _id : id }).itcount() );
-st.stop();
\ No newline at end of file
+st.stop();
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index d5e9b53bf34..6a9d1dc63b8 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -29,14 +29,15 @@ print( "Seeing if data gets inserted unsharded..." )
print( "No splits occur here!" )
// Insert a bunch of data which should trigger a split
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < 100; i++ ){
- coll.insert({ i : i + 1 })
+ bulk.insert({ i : i + 1 });
}
-coll.getDB().getLastError()
+assert.writeOK(bulk.execute());
config.printShardingStatus( true )
assert.eq( coll.getShardVersion().ok, 1 )
assert.eq( 101, coll.find().itcount() )
-st.stop()
\ No newline at end of file
+st.stop()
diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
index f961f00d5b8..5b8f930f8bd 100644
--- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
@@ -29,10 +29,8 @@ var collSharded = mongos.getCollection( "fooSharded.barSharded" );
var collUnsharded = mongos.getCollection( "fooUnsharded.barUnsharded" );
// Create the unsharded database with shard0 primary
-collUnsharded.insert({ some : "doc" });
-assert.eq( null, collUnsharded.getDB().getLastError() );
-collUnsharded.remove({});
-assert.eq( null, collUnsharded.getDB().getLastError() );
+assert.writeOK(collUnsharded.insert({ some : "doc" }));
+assert.writeOK(collUnsharded.remove({}));
printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(),
to : shards[0]._id }) );
@@ -78,21 +76,6 @@ function authDBUsers( conn ) {
return conn;
}
-// Needed b/c the GLE command itself can fail if the shard is down ("write result unknown") - we
-// don't care if this happens in this test, we only care that we did not get "write succeeded".
-// Depending on the connection pool state, we could get either.
-function gleErrorOrThrow(database, msg) {
- var gle;
- try {
- gle = database.getLastErrorObj();
- }
- catch (ex) {
- return;
- }
- if (!gle.err) doassert("getLastError is null: " + tojson(gle) + " :" + msg);
- return;
-};
-
//
// Setup is complete
//
@@ -104,12 +87,9 @@ authDBUsers(mongosConnActive);
var mongosConnIdle = null;
var mongosConnNew = null;
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 });
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 });
-assert.eq(null, mongosConnActive.getCollection( collSharded.toString() ).getDB().getLastError());
-
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 });
-assert.eq(null, mongosConnActive.getCollection( collUnsharded.toString() ).getDB().getLastError());
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }));
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }));
+assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }));
jsTest.log("Stopping primary of third shard...");
@@ -123,21 +103,15 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn
assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 });
-assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 });
-assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 });
-assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }));
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }));
+assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }));
jsTest.log("Testing idle connection with third primary down...");
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 });
-assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 });
-assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 });
-assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }));
+assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }));
+assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }));
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
@@ -153,14 +127,11 @@ mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 });
-assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }));
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 });
-assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }));
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 });
-assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }));
gc(); // Clean up new connections
@@ -181,21 +152,15 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn
assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 });
-assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 });
-assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }));
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }));
+assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }));
jsTest.log("Testing idle connection with second primary down...");
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 });
-assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 });
-assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }));
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }));
+assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }));
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
@@ -214,14 +179,11 @@ mongosConnNew.setSlaveOk();
assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 });
-assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 }));
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 }));
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 });
-assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 }));
gc(); // Clean up new connections
@@ -239,21 +201,15 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn
assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 });
-gleErrorOrThrow(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 }));
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 }));
+assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 }));
jsTest.log("Testing idle connection with first primary down...");
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 }));
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 }));
+assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 }));
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
@@ -272,14 +228,11 @@ mongosConnNew.setSlaveOk();
assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 }));
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 }));
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 });
-gleErrorOrThrow(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 }));
gc(); // Clean up new connections
@@ -296,21 +249,15 @@ jsTest.log("Testing active connection with second shard down...");
assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 });
-gleErrorOrThrow(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 }));
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 }));
+assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 }));
jsTest.log("Testing idle connection with second shard down...");
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 }));
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 }));
+assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 }));
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
@@ -325,14 +272,11 @@ mongosConnNew.setSlaveOk();
assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 }));
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 }));
mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 });
-gleErrorOrThrow(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 }));
gc(); // Clean up new connections
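
The deleted gleErrorOrThrow() helper existed because the getLastError command itself could fail when a shard was down, forcing callers to accept either an error result or a thrown exception. With write commands the failure is carried in the WriteResult the write returns, so one assertion covers it. A sketch (connection and namespace illustrative):

    var res = conn.getCollection("fooSharded.barSharded").insert({ _id: 5 });
    assert.writeError(res);   // passes as long as the write reported an error
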
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index 23ae95e857f..730860c2bf7 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -28,10 +28,8 @@ var collSharded = mongos.getCollection( "fooSharded.barSharded" );
var collUnsharded = mongos.getCollection( "fooUnsharded.barUnsharded" );
// Create the unsharded database
-collUnsharded.insert({ some : "doc" });
-assert.eq( null, collUnsharded.getDB().getLastError() );
-collUnsharded.remove({});
-assert.eq( null, collUnsharded.getDB().getLastError() );
+assert.writeOK(collUnsharded.insert({ some : "doc" }));
+assert.writeOK(collUnsharded.remove({}));
printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(),
to : shards[0]._id }) );
@@ -47,21 +45,6 @@ assert.commandWorked( admin.runCommand({ moveChunk : collSharded.toString(),
st.printShardingStatus();
-// Needed b/c the GLE command itself can fail if the shard is down ("write result unknown") - we
-// don't care if this happens in this test, we only care that we did not get "write succeeded".
-// Depending on the connection pool state, we could get either.
-function gleErrorOrThrow(database, msg) {
- var gle;
- try {
- gle = database.getLastErrorObj();
- }
- catch (ex) {
- return;
- }
- if (!gle.err) doassert("getLastError is null: " + tojson(gle) + " :" + msg);
- return;
-};
-
//
// Setup is complete
//
@@ -72,12 +55,9 @@ var mongosConnActive = new Mongo( mongos.host );
var mongosConnIdle = null;
var mongosConnNew = null;
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 });
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 });
-assert.eq(null, mongosConnActive.getCollection( collSharded.toString() ).getDB().getLastError());
-
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 });
-assert.eq(null, mongosConnActive.getCollection( collUnsharded.toString() ).getDB().getLastError());
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }));
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }));
+assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }));
jsTest.log("Stopping primary of third shard...");
@@ -91,21 +71,15 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn
assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 });
-assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 });
-assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 });
-assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }));
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }));
+assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }));
jsTest.log("Testing idle connection with third primary down...");
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 });
-assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 });
-assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 });
-assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }));
+assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }));
+assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }));
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
@@ -121,14 +95,11 @@ mongosConnNew = new Mongo( mongos.host );
assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 });
-assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 });
-assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 });
-assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }));
gc(); // Clean up new connections
@@ -188,22 +159,16 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
// Writes
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 });
-assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 });
-assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }));
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }));
+assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }));
jsTest.log("Testing idle connection with second primary down...");
// Writes
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 });
-assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 });
-assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }));
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }));
+assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }));
// Reads with read prefs
mongosConnIdle.setSlaveOk();
@@ -332,14 +297,11 @@ assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne
// Writes
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 });
-assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 });
-assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 }));
gc(); // Clean up new connections
@@ -356,21 +318,15 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn
assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 });
-gleErrorOrThrow(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 }));
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 }));
+assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 }));
jsTest.log("Testing idle connection with first primary down...");
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 }));
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 }));
+assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 }));
mongosConnIdle.setSlaveOk();
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
@@ -390,14 +346,11 @@ mongosConnNew.setSlaveOk();
assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 });
-gleErrorOrThrow(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 }));
gc(); // Clean up new connections
@@ -413,21 +366,15 @@ mongosConnActive.setSlaveOk();
assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 });
-gleErrorOrThrow(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 }));
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 }));
+assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 }));
jsTest.log("Testing idle connection with second shard down...");
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 }));
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 }));
+assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 }));
mongosConnIdle.setSlaveOk();
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
@@ -443,21 +390,14 @@ mongosConnNew.setSlaveOk();
assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 });
-gleErrorOrThrow(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 }));
gc(); // Clean up new connections
jsTest.log("DONE!");
st.stop();
-
-
-
-
diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js
index 3cf1c1dc788..bfec2a4b863 100644
--- a/jstests/sharding/mongos_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_shard_failure_tolerance.js
@@ -34,29 +34,12 @@ assert.commandWorked( admin.runCommand({ moveChunk : collSharded.toString(),
to : shards[1]._id }) );
// Create the unsharded database
-collUnsharded.insert({ some : "doc" });
-assert.eq( null, collUnsharded.getDB().getLastError() );
-collUnsharded.remove({});
-assert.eq( null, collUnsharded.getDB().getLastError() );
+assert.writeOK(collUnsharded.insert({ some : "doc" }));
+assert.writeOK(collUnsharded.remove({}));
printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(), to : shards[0]._id }) );
st.printShardingStatus();
-// Needed b/c the GLE command itself can fail if the shard is down ("write result unknown") - we
-// don't care if this happens in this test, we only care that we did not get "write succeeded".
-// Depending on the connection pool state, we could get either.
-function gleErrorOrThrow(database, msg) {
- var gle;
- try {
- gle = database.getLastErrorObj();
- }
- catch (ex) {
- return;
- }
- if (!gle.err) doassert("getLastError is null: " + tojson(gle) + " :" + msg);
- return;
-};
-
//
// Setup is complete
//
@@ -67,12 +50,9 @@ var mongosConnActive = new Mongo( mongos.host );
var mongosConnIdle = null;
var mongosConnNew = null;
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 });
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 });
-assert.eq(null, mongosConnActive.getCollection( collSharded.toString() ).getDB().getLastError());
-
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 });
-assert.eq(null, mongosConnActive.getCollection( collUnsharded.toString() ).getDB().getLastError());
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }));
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }));
+assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }));
jsTest.log("Stopping third shard...");
@@ -86,21 +66,15 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn
assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 });
-assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 });
-assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 });
-assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }));
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }));
+assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }));
jsTest.log("Testing idle connection...");
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 });
-assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 });
-assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 });
-assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }));
+assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }));
+assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }));
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
@@ -116,14 +90,11 @@ mongosConnNew = new Mongo( mongos.host );
assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 });
-assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 });
-assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 });
-assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }));
gc(); // Clean up new connections
@@ -138,21 +109,16 @@ jsTest.log("Testing active connection...");
assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 });
-assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 });
-gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB());
-mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 });
-assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }));
+
+assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }));
+assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }));
jsTest.log("Testing idle connection...");
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 });
-assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 });
-gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB());
-mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 });
-assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }));
+assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }));
+assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }));
assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
@@ -165,21 +131,14 @@ mongosConnNew = new Mongo( mongos.host );
assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 });
-assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 });
-gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB());
+assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 }));
mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 });
-assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB());
+assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 }));
gc(); // Clean up new connections
jsTest.log("DONE!");
st.stop();
-
-
-
-
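
The gleErrorOrThrow helper deleted above existed because GLE split one logical failure into two observable outcomes: the write could fail, or the GLE command itself could fail because the shard was down. A sketch of why one assertion now suffices (the helper body is copied from the deleted lines, lightly simplified; the final insert presumes a collection targeting a downed shard):

// What the deleted helper had to tolerate under GLE:
function gleErrorOrThrow(database, msg) {
    var gle;
    try {
        gle = database.getLastErrorObj();
    } catch (ex) {
        return;                               // "result unknown": acceptable
    }
    if (!gle.err) doassert("getLastError is null: " + tojson(gle) + " :" + msg);
}

// With write commands, both outcomes surface in the WriteResult of the
// write itself, so a single assertion expresses the same intent:
assert.writeError(coll.insert({ _id: 11 }));  // coll targets the downed shard
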
diff --git a/jstests/sharding/mongos_validate_backoff.js b/jstests/sharding/mongos_validate_backoff.js
index 6462dc48ff8..877ab808dcc 100644
--- a/jstests/sharding/mongos_validate_backoff.js
+++ b/jstests/sharding/mongos_validate_backoff.js
@@ -18,8 +18,7 @@ var timeBadInsert = function(){
var start = new Date().getTime()
// Bad insert, no shard key
- coll.insert({ hello : "world" })
- assert.neq( null, coll.getDB().getLastError() )
+ assert.writeError(coll.insert({ hello : "world" }));
var end = new Date().getTime()
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index 6a6fb36eeef..4bfbb2048f3 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -39,12 +39,10 @@ coll.ensureIndex({ b : 1 })
printjson( admin.runCommand({ shardCollection : coll + "", key : { b : 1 } }) )
// Make sure that we can successfully insert, even though we have stale state
-staleCollA.insert({ b : "b" })
-assert.eq( null, staleCollA.getDB().getLastError() )
+assert.writeOK(staleCollA.insert({ b : "b" }));
// Make sure we unsuccessfully insert with old info
-staleCollB.insert({ a : "a" })
-assert.neq( null, staleCollB.getDB().getLastError() )
+assert.writeError(staleCollB.insert({ a : "a" }));
// Change the collection sharding state
coll.drop()
@@ -52,12 +50,10 @@ coll.ensureIndex({ c : 1 })
printjson( admin.runCommand({ shardCollection : coll + "", key : { c : 1 } }) )
// Make sure we can successfully upsert, even though we have stale state
-staleCollA.update({ c : "c" }, { c : "c" }, true )
-assert.eq( null, staleCollA.getDB().getLastError() )
+assert.writeOK(staleCollA.update({ c : "c" }, { c : "c" }, true ));
// Make sure we unsuccessfully upsert with old info
-staleCollB.update({ b : "b" }, { b : "b" }, true )
-assert.neq( null, staleCollB.getDB().getLastError() )
+assert.writeError(staleCollB.update({ b : "b" }, { b : "b" }, true ));
// Change the collection sharding state
coll.drop()
@@ -65,16 +61,13 @@ coll.ensureIndex({ d : 1 })
printjson( admin.runCommand({ shardCollection : coll + "", key : { d : 1 } }) )
// Make sure we can successfully update, even though we have stale state
-coll.insert({ d : "d" })
-coll.getDB().getLastError();
+assert.writeOK(coll.insert({ d : "d" }));
-staleCollA.update({ d : "d" }, { $set : { x : "x" } }, false, false )
-assert.eq( null, staleCollA.getDB().getLastError() )
+assert.writeOK(staleCollA.update({ d : "d" }, { $set : { x : "x" } }, false, false ));
assert.eq( staleCollA.findOne().x, "x" )
// Make sure we unsuccessfully update with old info
-staleCollB.update({ c : "c" }, { $set : { x : "y" } }, false, false )
-assert.neq( null, staleCollB.getDB().getLastError() )
+assert.writeError(staleCollB.update({ c : "c" }, { $set : { x : "y" } }, false, false ));
assert.eq( staleCollB.findOne().x, "x" )
// Change the collection sharding state
@@ -87,16 +80,12 @@ printjson( admin.runCommand({ split : coll + "", middle : { e : 0 } }) )
printjson( admin.runCommand({ moveChunk : coll + "", find : { e : 0 }, to : "shard0001" }) )
// Make sure we can successfully remove, even though we have stale state
-coll.insert({ e : "e" })
-// Need to make sure the insert makes it to the shard
-assert.eq( null, coll.getDB().getLastError() )
+assert.writeOK(coll.insert({ e : "e" }));
-staleCollA.remove({ e : "e" }, true)
-assert.eq( null, staleCollA.getDB().getLastError() )
+assert.writeOK(staleCollA.remove({ e : "e" }, true));
assert.eq( null, staleCollA.findOne() )
// Make sure we unsuccessfully remove with old info
-staleCollB.remove({ d : "d" }, true )
-assert.neq( null, staleCollB.getDB().getLastError() )
+assert.writeError(staleCollB.remove({ d : "d" }, true ));
st.stop()
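
mongos_validate_writes.js shows the same conversion across all four CRUD helpers, each of which returns a WriteResult in the write-commands shell. A hedged sketch of the resulting idiom (handles and documents illustrative):

var stale = db.getSiblingDB("test").coll;     // illustrative handle
assert.writeOK(stale.insert({ b: "b" }));
assert.writeOK(stale.update({ c: "c" }, { c: "c" }, true));   // upsert
assert.writeOK(stale.remove({ e: "e" }, true));               // justOne
// In these tests, a write sent with sufficiently stale routing info fails,
// and the failure is visible on the result rather than on a later GLE call:
assert.writeError(stale.update({ b: "b" }, { b: "b" }, true));
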
diff --git a/jstests/sharding/movechunk_with_def_paranoia.js b/jstests/sharding/movechunk_with_def_paranoia.js
index 9adcfbe7137..97feb0b4ac3 100644
--- a/jstests/sharding/movechunk_with_def_paranoia.js
+++ b/jstests/sharding/movechunk_with_def_paranoia.js
@@ -1,3 +1,5 @@
+// TODO: move back to sharding suite after SERVER-13402 is fixed
+
/**
* This test checks that the moveChunk directory is not created
*/
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index 5a485e0122b..4091792d27f 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -1,3 +1,5 @@
+// TODO: move back to sharding suite after SERVER-13402 is fixed
+
/**
 * This test sets the moveParanoia flag and then checks that the directory is created with the moved data
*/
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index 262c014a1d6..1844528b225 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -1,3 +1,5 @@
+// TODO: move back to sharding suite after SERVER-13402 is fixed
+
/**
 * This test sets the moveParanoia flag and then checks that the directory is created with the moved data
*/
diff --git a/jstests/sharding/moveprimary_ignore_sharded.js b/jstests/sharding/moveprimary_ignore_sharded.js
index ff4141fb1c0..6f8895851c8 100644
--- a/jstests/sharding/moveprimary_ignore_sharded.js
+++ b/jstests/sharding/moveprimary_ignore_sharded.js
@@ -32,12 +32,9 @@ for( var i = 0; i < 3; i++ ){
collsFooB.push( mongosB.getCollection( "foo.coll" + i ) )
collsBarA.push( mongosA.getCollection( "bar.coll" + i ) )
collsBarB.push( mongosB.getCollection( "bar.coll" + i ) )
-
- collsFooA[i].insert({ hello : "world" })
- assert.eq( null, collsFooA[i].getDB().getLastError() )
- collsBarA[i].insert({ hello : "world" })
- assert.eq( null, collsBarA[i].getDB().getLastError() )
-
+
+ assert.writeOK(collsFooA[i].insert({ hello : "world" }));
+ assert.writeOK(collsBarA[i].insert({ hello : "world" }));
}
// Enable sharding
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index 2e04c091773..d80ef710d52 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -33,12 +33,11 @@ for (var splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
testDB.adminCommand({ split: 'test.foo', middle: { a: splitPoint }});
}
+var bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
- testDB.foo.save({ a: numDocs + i, y: str, i: numDocs + i });
+ bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
}
-
-var GLE = testDB.getLastError();
-assert.eq(null, GLE, "Setup FAILURE: testDB.getLastError() returned" + GLE);
+assert.writeOK(bulk.execute());
numDocs += numBatch;
@@ -94,12 +93,11 @@ for (splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
testDB.adminCommand({ split: 'test.foo', middle: { a: numDocs + splitPoint }});
}
+bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
- testDB.foo.save({ a: numDocs + i, y: str, i: numDocs + i });
+ bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
}
-
-GLE = testDB.getLastError();
-assert.eq(null, GLE, "Setup FAILURE: testDB.getLastError() returned" + GLE);
+assert.writeOK(bulk.execute());
jsTest.log("No errors on insert batch.");
numDocs += numBatch;
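
Loops that issued one legacy insert per document and a single trailing GLE are converted to the unordered bulk API throughout this commit. A minimal sketch (collection and batch size illustrative):

// Unordered bulk insert: documents are batched into write commands and
// execute() returns one BulkWriteResult for the whole batch.
var bulk = db.foo.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {              // batch size illustrative
    bulk.insert({ _id: i, y: "payload" });
}
var res = bulk.execute();
assert.writeOK(res);                          // throws on any write error
assert.eq(1000, res.nInserted);
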
diff --git a/jstests/sharding/noUpdateButN1inAnotherCollection.js b/jstests/sharding/noUpdateButN1inAnotherCollection.js
index c8b7ef50f7e..b1ea81f0710 100644
--- a/jstests/sharding/noUpdateButN1inAnotherCollection.js
+++ b/jstests/sharding/noUpdateButN1inAnotherCollection.js
@@ -45,16 +45,14 @@ debug("Inserted docs, now split chunks");
adminSA.runCommand( { split: ns, find : { _id : 3} });
adminSA.runCommand( { movechunk: ns, find : { _id : 10}, to: "shard0001" });
-var command = 'db.coll.update({_id:9},{$set:{"a":"9"}},true);printjson(db.getLastErrorObj())';
+var command = 'printjson(db.coll.update({ _id: 9 }, { $set: { a: "9" }}, true));';
// without this first update through a separate mongo shell, the second update doesn't "fail"
debug("Try query first time");
-var GLE2=runMongoProgram( "mongo", "--quiet", "--port", "" + s._mongos[1].port, "--eval", command );
-
-mongosB.getDB("test").coll2.update({_id:0}, {$set:{"c":"333"}});
-var GLE3=mongosB.getDB("test").getLastErrorObj();
-assert.eq( 0, GLE3.n );
+runMongoProgram( "mongo", "--quiet", "--port", "" + s._mongos[1].port, "--eval", command );
+var res = mongosB.getDB("test").coll2.update({ _id: 0 }, { $set: { c: "333" }});
+assert.eq( 0, res.nModified );
s.stop();
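
The assertion above moves from GLE3.n to res.nModified because the write-commands WriteResult splits GLE's single counter. A sketch of the relevant fields (query and collection taken from the hunk above, otherwise illustrative):

var res = db.coll2.update({ _id: 0 }, { $set: { c: "333" } });
printjson({ nMatched: res.nMatched,           // docs selected by the query
            nModified: res.nModified,         // docs actually changed
            nUpserted: res.nUpserted });      // docs inserted via upsert
// GLE's "n" corresponded roughly to nMatched + nUpserted; a no-op update
// therefore asserts on a zero counter instead of gle.n:
assert.eq(0, res.nModified);
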
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index fd86f9627a3..ffaa967dba6 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -16,10 +16,10 @@ for ( i=0; i<N; i+=(N/12) ) {
}
s.setBalancer( true )
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<N; i++ )
- db.foo.insert( { _id : i } )
-db.getLastError();
-
+ bulk.insert({ _id: i });
+assert.writeOK(bulk.execute());
doCommand = function( dbname , cmd ) {
x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index 4eed2e72159..542463af203 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -24,27 +24,25 @@ var coll = db.foo;
var longStr = 'a';
while ( longStr.length < 1024 * 128 ) { longStr += longStr; }
+var bulk = coll.initializeUnorderedBulkOp();
for( i=0 ; i<100; i++){
- coll.save( {num : i, str : longStr} );
- coll.save( {num : i+100 , x : i, str : longStr})
+ bulk.insert({ num: i, str: longStr });
+ bulk.insert({ num: i+100, x: i, str: longStr });
}
-db.getLastError();
+assert.writeOK(bulk.execute());
//no usable index yet, should throw
assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ) } )
//create usable index
-coll.ensureIndex({num : 1, x : 1});
-db.getLastError();
+assert.commandWorked(coll.ensureIndex({ num: 1, x: 1 }));
//usable index, but doc with empty 'num' value, so still should throw
-coll.save({x : -5});
-assert( ! db.getLastError() , "save bad value didn't succeed");
+assert.writeOK(coll.insert({ x: -5 }));
assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ) } )
//remove the bad doc. now should finally succeed
-coll.remove( {x : -5});
-assert( ! db.getLastError() , "remove bad value didn't succeed");
+assert.writeOK(coll.remove({ x: -5 }));
var result1 = admin.runCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } );
printjson( result1 );
assert.eq( 1, result1.ok , "sharding didn't succeed");
@@ -143,27 +141,27 @@ for( i=0; i < 3; i++ ){
// declare a longer index
if ( i == 0 ) {
- coll2.ensureIndex( { skey : 1, extra : 1 } );
+ assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : 1 } ));
}
else if ( i == 1 ) {
- coll2.ensureIndex( { skey : 1, extra : -1 } );
+ assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : -1 } ));
}
else if ( i == 2 ) {
- coll2.ensureIndex( { skey : 1, extra : 1 , superfluous : -1 } );
+ assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : 1 , superfluous : -1 } ));
}
- db.getLastError();
// then shard collection on prefix
var shardRes = admin.runCommand( { shardCollection : coll2 + "", key : { skey : 1 } } );
assert.eq( shardRes.ok , 1 , "collection not sharded" );
// insert docs with same value for skey
+ bulk = coll2.initializeUnorderedBulkOp();
for( var i = 0; i < 5; i++ ){
for( var j = 0; j < 5; j++ ){
- coll2.insert( { skey : 0, extra : i , superfluous : j } );
+ bulk.insert( { skey : 0, extra : i , superfluous : j } );
}
}
- assert.eq( null, coll2.getDB().getLastError() , "inserts didn't work" );
+ assert.writeOK( bulk.execute() );
// split on that key, and check it makes 2 chunks
var splitRes = admin.runCommand( { split : coll2 + "", middle : { skey : 0 } } );
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index 68154923ae4..6820a5fb332 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -12,11 +12,12 @@ while ( bigString.length < 10000 ){
db = s.getDB( "test" );
inserted = 0;
num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
while ( inserted < ( 20 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ bulk.insert({ _id: num++, s: bigString });
inserted += bigString.length;
}
-db.getLastError();
+assert.writeOK(bulk.execute());
// Make sure that there's only one chunk holding all the data.
s.printChunks();
@@ -34,4 +35,4 @@ assert.lt( 20 , s.config.chunks.count() , "many chunks assertion" );
assert.eq( num , primary.foo.count() );
s.printChangeLog();
-s.stop(); \ No newline at end of file
+s.stop();
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index 9b97fb8daf3..e2a46c7aba5 100755
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -80,11 +80,12 @@ var doTest = function(useDollarQuerySyntax) {
var coll = conn.getDB( 'test' ).user;
assert.soon(function() {
- coll.insert({ x: 1 });
- var err = coll.getDB().getLastError(NODES);
- if (err == null) {
+ var res = coll.insert({ x: 1 }, { writeConcern: { w: NODES }});
+ if (!res.hasWriteError()) {
return true;
}
+
+ var err = res.getWriteError().errmsg;
// Transient transport errors may be expected b/c of the replSetReconfig
if (err.indexOf("transport error") == -1) {
throw err;
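
In read_pref.js the write concern moves from a getLastError argument to a per-operation option, and both kinds of failure are readable off the WriteResult without a second command. A sketch, assuming a replica-set connection; the w and wtimeout values are illustrative:

var res = db.user.insert({ x: 1 }, { writeConcern: { w: 2, wtimeout: 30000 } });
if (res.hasWriteError()) {
    print("write error: " + res.getWriteError().errmsg);
}
if (res.hasWriteConcernError()) {             // e.g. replication timed out
    printjson(res.getWriteConcernError());
}
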
diff --git a/jstests/sharding/read_pref_multi_mongos_stale_config.js b/jstests/sharding/read_pref_multi_mongos_stale_config.js
index 1556adef9e8..d3c6cd3f53f 100644
--- a/jstests/sharding/read_pref_multi_mongos_stale_config.js
+++ b/jstests/sharding/read_pref_multi_mongos_stale_config.js
@@ -20,8 +20,6 @@ for (var x = 0; x < 200; x++) {
testDB2.user.insert({ x: x });
}
-testDB2.runCommand({ getLastError: 1 });
-
var cursor = testDB1.user.find({ x: 30 }).readPref('primary');
assert(cursor.hasNext());
assert.eq(30, cursor.next().x);
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
index db9fe5bcdfc..58da5ba919f 100644
--- a/jstests/sharding/remove1.js
+++ b/jstests/sharding/remove1.js
@@ -2,9 +2,12 @@ s = new ShardingTest( "remove_shard1", 2 );
assert.eq( 2, s.config.shards.count() , "initial server count wrong" );
-s.config.databases.insert({_id: 'local', partitioned: false, primary: 'shard0000'});
-s.config.databases.insert({_id: 'needToMove', partitioned: false, primary: 'shard0000'});
-s.config.getLastError();
+assert.writeOK(s.config.databases.insert({ _id: 'local',
+ partitioned: false,
+ primary: 'shard0000'}));
+assert.writeOK(s.config.databases.insert({ _id: 'needToMove',
+ partitioned: false,
+ primary: 'shard0000'}));
// the first removeshard call puts the shard in draining mode, the second reports that a db still needs to move, the third actually removes the shard
assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to start draining shard" );
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index 8fb81aeb222..f6fc93c4faf 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -110,11 +110,12 @@ var str = 'a';
while( str.length < 1024 * 16 ) {
str += str;
}
+
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < 300; i++ ){
- coll.insert( { i : i % 10, str : str } );
+ bulk.insert({ i: i % 10, str: str });
}
-
-coll.getDB().getLastError();
+assert.writeOK(bulk.execute());
assert.eq( 300, coll.find().itcount() );
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index b5cf17368c8..43602ae26ed 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -37,7 +37,6 @@ var coll = mongos.getDB('test').user;
var verifyInsert = function() {
var beforeCount = coll.find().count();
coll.insert({ x: 1 });
- coll.getDB().getLastError();
var afterCount = coll.find().count();
assert.eq(beforeCount + 1, afterCount);
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index 5990db82fe2..548805c4f76 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -53,9 +53,7 @@ var inserts = [{_id : -1},
{_id : 1000}];
collOneShard.insert(inserts);
-collAllShards.insert(inserts);
-
-assert.eq(null, collOneShard.getDB().getLastError());
+assert.writeOK(collAllShards.insert(inserts));
var returnPartialFlag = 1 << 7;
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index 468d611271b..ba42f53349a 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -43,7 +43,9 @@ assert.eq( 3 , db.foo.find().length() , "after sharding, no split count failed"
var invalidDB = s.getDB( "foobar" );
// hack to bypass invalid database name checking at the DB constructor
invalidDB._name = "foo bar";
-invalidDB.blah.insert( { x : 1 } );
+assert.throws(function() {
+ invalidDB.blah.insert({ x: 1 });
+});
assert.isnull( s.config.databases.findOne( { _id : "foo bar" } ) );
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index ff03bf7b24b..006a9340682 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -40,8 +40,6 @@ db.foo.save( { num : 1 , name : "eliot" } );
db.foo.save( { num : 2 , name : "sara" } );
db.foo.save( { num : -1 , name : "joe" } );
-db.getLastError();
-
assert.eq( 3 , s.getServer( "test" ).getDB( "test" ).foo.find().length() , "not right directly to db A" );
assert.eq( 3 , db.foo.find().length() , "not right on shard" );
@@ -72,19 +70,16 @@ placeCheck( 3 );
// test inserts go to right server/shard
-db.foo.save( { num : 3 , name : "bob" } );
-db.getLastError();
+assert.writeOK(db.foo.save( { num : 3 , name : "bob" } ));
assert.eq( 1 , primary.foo.find().length() , "after move insert go wrong place?" );
assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
-db.foo.save( { num : -2 , name : "funny man" } );
-db.getLastError();
+assert.writeOK(db.foo.save( { num : -2 , name : "funny man" } ));
assert.eq( 2 , primary.foo.find().length() , "after move insert go wrong place?" );
assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
-db.foo.save( { num : 0 , name : "funny guy" } );
-db.getLastError();
+assert.writeOK(db.foo.save( { num : 0 , name : "funny guy" } ));
assert.eq( 2 , primary.foo.find().length() , "boundary A" );
assert.eq( 4 , secondary.foo.find().length() , "boundary B" );
@@ -198,22 +193,17 @@ assert.isnull( db.foo.findOne( { num : 3 } ) , "remove test E" );
placeCheck( 8 );
-// TODO: getLastError
-db.getLastError();
-db.getPrevError();
-
// more update stuff
printAll();
total = db.foo.find().count();
-db.foo.update( {} , { $inc : { x : 1 } } , false , true );
-x = db.getLastErrorObj();
+var res = assert.writeOK(db.foo.update( {}, { $inc: { x: 1 } }, false, true ));
printAll();
-assert.eq( total , x.n , "getLastError n A: " + tojson( x ) );
+assert.eq( total , res.nModified, res.toString() );
-db.foo.update( { num : -1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( 1 , db.getLastErrorObj().n , "getLastErrorObj n B" );
+res = db.foo.update( { num : -1 } , { $inc : { x : 1 } } , false , true );
+assert.eq( 1, res.nModified, res.toString() );
// ---- move all to the secondary
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 785bcae1a40..0d51d2497f1 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -61,8 +61,7 @@ function doCounts( name , total , onlyItCounts ){
}
var total = doCounts( "before wrong save" )
-secondary.save( { _id : 111 , num : -3 } );
-printjson( secondary.getDB().getLastError() )
+assert.writeOK(secondary.insert( { _id : 111 , num : -3 } ));
doCounts( "after wrong save" , total , true )
e = a.find().explain();
assert.eq( 3 , e.n , "ex1" )
@@ -158,7 +157,6 @@ dbb = s2.getDB( "test2" );
dba.foo.save( { num : 1 } );
dba.foo.save( { num : 2 } );
dba.foo.save( { num : 3 } );
-dba.getLastError();
assert.eq( 1 , s.onNumShards( "foo" , "test2" ) , "B on 1 shards" );
assert.eq( 3 , dba.foo.count() , "Ba" );
diff --git a/jstests/sharding/shard7.js b/jstests/sharding/shard7.js
index 18adbda3f0a..7d37fdf60f6 100644
--- a/jstests/sharding/shard7.js
+++ b/jstests/sharding/shard7.js
@@ -37,11 +37,9 @@ assert.eq( 0, aggregate.toArray().length );
c.save( {a:null,b:null} );
c.save( {a:1,b:1} );
-c.remove( unsatisfiable );
-assert( !db.getLastError() );
+assert.writeOK( c.remove( unsatisfiable ));
assert.eq( 2, c.count() );
-c.update( unsatisfiable, {$set:{c:1}}, false, true );
-assert( !db.getLastError() );
+assert.writeOK( c.update( unsatisfiable, {$set:{c:1}}, false, true ));
assert.eq( 2, c.count() );
assert.eq( 0, c.count( {c:1} ) );
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index b16c1796cd9..bb221fe3f11 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -15,10 +15,11 @@ print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSiz
// turn off powerOf2Sizes as this tests regular allocation
db.createCollection('data', {usePowerOf2Sizes: false});
+var bulk = db.data.initializeUnorderedBulkOp();
for (i=0; i<numDocs; i++) {
- db.data.insert({_id: i, s: bigString});
+ bulk.insert({_id: i, s: bigString});
}
-db.getLastError();
+assert.writeOK(bulk.execute());
assert.lt(totalSize, db.data.stats().size);
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 200cc009ee9..48630a0ca58 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -1,5 +1,4 @@
// replica set as solo shard
-// getLastError(2) fails on about every 170 inserts on my Macbook laptop -Tony
// TODO: Add assertion code that catches hang
load('jstests/libs/grid.js')
@@ -18,11 +17,12 @@ function go() {
// Add data to it
var conn1a = repset1.getMaster()
- var db1a = conn1a.getDB('test')
+ var db1a = conn1a.getDB('test');
+ var bulk = db1a.foo.initializeUnorderedBulkOp();
for (var i = 0; i < N; i++) {
- db1a['foo'].insert({x: i, text: Text})
- db1a.getLastError(2) // wait to be copied to at least one secondary
+ bulk.insert({ x: i, text: Text });
}
+ assert.writeOK(bulk.execute({ w: 2 }));
// Create 3 sharding config servers
var configsetSpec = new ConfigSet(3)
@@ -49,18 +49,15 @@ function go() {
// Test case where GLE should return an error
db.foo.insert({_id:'a', x:1});
- db.foo.insert({_id:'a', x:1});
- var x = db.getLastErrorObj(2, 30000)
- assert.neq(x.err, null, "C1 " + tojson(x));
+ assert.writeError(db.foo.insert({ _id: 'a', x: 1 },
+ { writeConcern: { w: 2, wtimeout: 30000 }}));
// Add more data
+ bulk = db.foo.initializeUnorderedBulkOp();
for (var i = N; i < 2*N; i++) {
- db['foo'].insert({x: i, text: Text})
- var x = db.getLastErrorObj(2, 30000) // wait to be copied to at least one secondary
- if (i % 30 == 0) print(i)
- if (i % 100 == 0 || x.err != null) printjson(x);
- assert.eq(x.err, null, "C2 " + tojson(x));
+ bulk.insert({ x: i, text: Text});
}
+ assert.writeOK(bulk.execute({ w: 2, wtimeout: 30000 }));
// take down the slave and make sure it fails over
repset1.stop(1);
@@ -83,8 +80,6 @@ function go() {
routerSpec.end()
configsetSpec.end()
repset1.stopSet()
-
- print('shard_insert_getlasterror_w2.js SUCCESS')
}
//Uncomment below to execute
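
shard_insert_getlasterror_w2.js collapses a per-insert getLastError(2) wait into one write concern on execute(). A sketch with illustrative sizes:

// One write concern covers the whole batch, replacing getLastError(2)
// after every insert.
var bulk = db.foo.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {               // count illustrative
    bulk.insert({ x: i });
}
// w:2 waits for one secondary to acknowledge; wtimeout bounds the wait (ms).
assert.writeOK(bulk.execute({ w: 2, wtimeout: 30000 }));
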
diff --git a/jstests/sharding/shard_key_immutable.js b/jstests/sharding/shard_key_immutable.js
index 90cb38b5e4b..0cd79d4252d 100644
--- a/jstests/sharding/shard_key_immutable.js
+++ b/jstests/sharding/shard_key_immutable.js
@@ -53,9 +53,7 @@ var dotColl = db.getCollection('col1');
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({}, { a: 1 }, false);
-var gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { a: 1 }, false));
var doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -63,18 +61,14 @@ doc = compoundColl.findOne();
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({}, { a: 1, b: 1 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { a: 1, b: 1 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({}, { a: 100, b: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({}, { a: 100, b: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -82,27 +76,21 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({}, { a: 100, b: 100, _id: 1 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { a: 100, b: 100, _id: 1 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({}, { $set: { a: 1, b: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({}, { $set: { a: 100, b: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({}, { $set: { a: 100, b: 100 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -110,18 +98,14 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Cannot modify _id
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({}, { $set: { c: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({}, { $set: { c: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
@@ -131,55 +115,41 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + to
//
compoundColl.remove({}, false);
-compoundColl.update({}, { a: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { a: 1 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({}, { a: 1, b: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({}, { a: 1, b: 1 }, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 1, b: 1 }), 'doc not upserted properly: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.update({}, { a: 1, b: 1, _id: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({}, { a: 1, b: 1, _id: 1 }, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 1, b: 1 }), 'doc not upserted properly: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({}, { $set: { a: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { $set: { a: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({}, { $set: { a: 1, b: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({}, { $set: { c: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({}, { $set: { c: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
@@ -189,27 +159,21 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { a: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { a: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { a: 2 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { a: 2 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { a: 100, b: 1 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { a: 100, b: 1 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -217,9 +181,7 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { a: 100, b: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100 }, { a: 100, b: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -227,27 +189,21 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { a: 100, b: 100, _id: 1 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { a: 100, b: 100, _id: 1 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { $set: { a: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100 }, { $set: { a: 100 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { $set: { b: 200 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { $set: { b: 200 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -255,18 +211,14 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { $set: { b: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100 }, { $set: { b: 100 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { $set: { a: 100, b: 200 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 200 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -274,9 +226,7 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -284,27 +234,21 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { $set: { c: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100 }, { $set: { c: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -314,74 +258,54 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
//
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { a: 100 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { a: 100 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { a: 2 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle), true);
+assert.writeError(compoundColl.update({ a: 100 }, { a: 2 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { a: 1, b: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { a: 1, b: 1 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { a: 1, b: 1, _id: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { a: 1, b: 1, _id: 1 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { $set: { a: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { $set: { b: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { $set: { b: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1, _id: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1, _id: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { $set: { c: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { $set: { c: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
@@ -391,27 +315,21 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { b: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { b: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { b: 2 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { b: 2 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { a: 1 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { a: 1 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -419,18 +337,14 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { a: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { a: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { a: 1, b: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -438,9 +352,7 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { a: 100, b: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ b: 100 }, { a: 100, b: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -448,45 +360,35 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { $set: { b: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { $set: { b: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { $set: { a: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { $set: { a: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ b: 100 }, { $set: { a: 100 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { $set: { a: 1, b: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 100 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -494,9 +396,7 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -504,18 +404,14 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ b: 100 }, { $set: { c: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ b: 100 }, { $set: { c: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
@@ -525,74 +421,55 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + to
//
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { b: 100 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { b: 100 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { b: 2 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { b: 2 }, true));
+
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { a: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { a: 1 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { a: 1, b: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { $set: { b: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { $set: { b: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { $set: { a: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1, _id: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1, _id: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ b: 100 }, { $set: { c: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ b: 100 }, { $set: { c: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
@@ -602,18 +479,14 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100, b: 100 }, { a: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100, c: 100 }), 'doc did not change: ' + tojson(doc));
@@ -621,36 +494,28 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 100 }), 'doc did not change: ' +
// Cannot modify _id!
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100, b: 100 }, { b: 100 }, false);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100, b: 100 }, { b: 100 }, false));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
@@ -658,27 +523,21 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + to
// Cannot modify _id!
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ a: 100, b: 100 });
-compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
@@ -688,68 +547,50 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + to
//
compoundColl.remove({}, false);
-compoundColl.update({ a: 100, b: 100 }, { a: 100 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 1 }, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, true));
doc = compoundColl.findOne();
assert(friendlyEqual(doc, { _id: 100, a: 100, b: 100 }), 'wrong doc: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100, b: 100 }, { b: 100 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100, b: 100 }, { b: 100 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
// Can upsert with new _id
compoundColl.remove({}, false);
-compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, true, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc));
@@ -760,9 +601,7 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc)
compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-compoundColl.update({ _id: 1 }, { a: 1 });
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ _id: 1 }, { a: 1 }));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -770,63 +609,49 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
// Special case for _id. This is for making save method work.
compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-compoundColl.update({ _id: 1 }, { a: 100, b: 100 });
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ _id: 1 }, { a: 100, b: 100 }));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-compoundColl.update({ _id: 1 }, { a: 1, b: 1 });
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ _id: 1 }, { a: 1, b: 1 }));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-compoundColl.update({ _id: 1 }, { $set: { a: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ _id: 1 }, { $set: { a: 100 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-compoundColl.update({ _id: 1 }, { $set: { b: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ _id: 1 }, { $set: { b: 100 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, false, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -836,38 +661,28 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
//
compoundColl.remove({}, false);
-compoundColl.update({ _id: 1 }, { a: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ _id: 1 }, { a: 1 }, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ _id: 1 }, { a: 1, b: 1 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(compoundColl.update({ _id: 1 }, { a: 1, b: 1 }, true));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 1, b: 1 }), 'bad doc: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, true, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, true, true));
assert.eq(0, compoundColl.count(), 'doc should not be inserted');
//
@@ -876,9 +691,7 @@ assert.eq(0, compoundColl.count(), 'doc should not be inserted');
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }});
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'doc did not change: ' + tojson(doc));
@@ -909,90 +722,70 @@ assert.throws(function() {
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { x: 100 });
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { x: 100 }));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { x: { b: 100 }});
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, false, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'doc did not change: ' + tojson(doc));
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, false, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, false, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 2 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 2 }}, false, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }, b: 2 }), 'doc did not change: ' + tojson(doc));
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, false, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, false, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, false, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 200 }}, false, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 200 }}, false, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 200 }}), 'doc did not change: ' + tojson(doc));
@@ -1002,9 +795,7 @@ assert(friendlyEqual(doc, { x: { a: 100, b: 200 }}), 'doc did not change: ' + to
//
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}, true));
doc = dotColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
@@ -1030,81 +821,59 @@ assert.throws(function() {
});
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { x: 100 }, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { x: 100 }, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'bad doc: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 3 }}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 3 }}, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }, b: 3 }), 'bad doc: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 2 }}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 2 }}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err != null, 'gleObj: ' + tojson(gle));
+assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 2 }}, true);
-gle = db.runCommand({ getLastError: 1 });
-assert(gle.err == null, 'gleObj: ' + tojson(gle));
+assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 2 }}, true));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'bad doc: ' + tojson(doc));
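The mechanical pattern applied across the hunks above, as a minimal sketch (the collection and the specific updates are taken from the first hunk of this test):

    // Legacy form: fire the write, then poll getLastError by hand.
    compoundColl.update({ b: 100 }, { $set: { a: 1, b: 100 }}, false, true);
    var gle = db.runCommand({ getLastError: 1 });
    assert(gle.err != null, 'gleObj: ' + tojson(gle));

    // Write-commands form: update() now returns a WriteResult, which the
    // assert helpers inspect directly; no follow-up command is needed.
    assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 100 }}, false, true));
    assert.writeOK(compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100 }}, false, true));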
diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js
index 9bd68c740fb..843d48209a1 100644
--- a/jstests/sharding/shard_kill_and_pooling.js
+++ b/jstests/sharding/shard_kill_and_pooling.js
@@ -29,10 +29,9 @@ if ( is32Bits && _isWindows() ) {
else {
// Non-Win32 platform
-
- coll.insert({ hello : "world" })
- assert.eq( null, coll.getDB().getLastError() );
-
+
+ assert.writeOK(coll.insert({ hello: "world" }));
+
jsTest.log("Creating new connections...");
// Create a bunch of connections to the primary node through mongos.
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index b1b2c3d3107..b24aa229dc6 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -26,8 +26,7 @@ var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
var inserts = [{ _id : 0 }, { _id : 1 }, { _id : 2 }];
var staleColl = st.s1.getCollection(coll.toString());
-staleColl.insert(inserts);
-assert.gleOK(staleColl.getDB().getLastErrorObj());
+assert.writeOK(staleColl.insert(inserts));
printjson(profileColl.find().toArray());
diff --git a/jstests/sharding/sharding_with_keyfile_auth.js b/jstests/sharding/sharding_with_keyfile_auth.js
index 27572f82d46..2fe594544d8 100644
--- a/jstests/sharding/sharding_with_keyfile_auth.js
+++ b/jstests/sharding/sharding_with_keyfile_auth.js
@@ -45,33 +45,33 @@ coll.ensureIndex({ insert : 1 })
print( "INSERT!" )
// Insert a bunch of data
-var toInsert = 2000
+var toInsert = 2000;
+var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < toInsert; i++ ){
- coll.insert({ my : "test", data : "to", insert : i })
+ bulk.insert({ my : "test", data : "to", insert : i });
}
-
-assert.eq( coll.getDB().getLastError(), null )
+assert.writeOK(bulk.execute());
print( "UPDATE!" )
// Update a bunch of data
-var toUpdate = toInsert
+var toUpdate = toInsert;
+bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < toUpdate; i++ ){
- var id = coll.findOne({ insert : i })._id
- coll.update({ insert : i, _id : id }, { $inc : { counter : 1 } })
+ var id = coll.findOne({ insert : i })._id;
+ bulk.find({ insert : i, _id : id }).updateOne({ $inc : { counter : 1 } });
}
-
-assert.eq( coll.getDB().getLastError(), null )
+assert.writeOK(bulk.execute());
print( "DELETE" )
// Remove a bunch of data
-var toDelete = toInsert / 2
+var toDelete = toInsert / 2;
+bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < toDelete; i++ ){
- coll.remove({ insert : i })
+ bulk.find({ insert : i }).remove();
}
-
-assert.eq( coll.getDB().getLastError(), null )
+assert.writeOK(bulk.execute());
// Make sure the right amount of data is there
assert.eq( coll.find().count(), toInsert / 2 )
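Loops of fire-and-forget writes that were covered by a single trailing getLastError become one unordered bulk operation whose result is checked once. A minimal sketch of the insert case (document shape taken from the hunk above):

    var bulk = coll.initializeUnorderedBulkOp();
    for (var i = 0; i < toInsert; i++) {
        bulk.insert({ my: "test", data: "to", insert: i });  // queued client-side only
    }
    assert.writeOK(bulk.execute());  // one batched round trip, one result to assert on

The same builder handles mixed operations: bulk.find(query).updateOne(update) and bulk.find(query).remove() queue targeted updates and deletes, as the UPDATE and DELETE hunks above show.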
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index 130e71d9020..aea55741251 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -16,7 +16,6 @@ for ( i=0; i<N; i++ ){
forward.push( i )
backward.push( ( N - 1 ) - i )
}
-db.getLastError();
s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 33 } } )
s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 66 } } )
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index 9033abc2402..edeb395c767 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -22,17 +22,19 @@ assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
jsTest.log( "Insert a bunch of data into a chunk of the collection..." );
+var bulk = coll.initializeUnorderedBulkOp();
for ( var i = 0; i < (250 * 1000) + 10; i++ ) {
- coll.insert({ _id : i });
+ bulk.insert({ _id : i });
}
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(bulk.execute());
jsTest.log( "Insert a bunch of data into the rest of the collection..." );
+bulk = coll.initializeUnorderedBulkOp();
for ( var i = 1; i <= (250 * 1000); i++ ) {
- coll.insert({ _id : -i });
+ bulk.insert({ _id: -i });
}
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(bulk.execute());
jsTest.log( "Get split points of the chunk using force : true..." );
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 02abfe6230e..54caaa46e29 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -26,17 +26,19 @@ jsTest.log( "Insert a bunch of data into the low chunk of a collection," +
var data128k = "x";
for ( var i = 0; i < 7; i++ ) data128k += data128k;
+var bulk = coll.initializeUnorderedBulkOp();
for ( var i = 0; i < 1024; i++ ) {
- coll.insert({ _id : -(i + 1) });
+ bulk.insert({ _id : -(i + 1) });
}
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(bulk.execute());
jsTest.log( "Insert 32 docs into the high chunk of a collection" );
+bulk = coll.initializeUnorderedBulkOp();
for ( var i = 0; i < 32; i++ ) {
- coll.insert({ _id : i });
+ bulk.insert({ _id : i });
}
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(bulk.execute());
jsTest.log( "Split off MaxKey chunk..." );
diff --git a/jstests/sharding/ssv_nochunk.js b/jstests/sharding/ssv_nochunk.js
index 20f3ea27f45..7d3f8bfdbb2 100644
--- a/jstests/sharding/ssv_nochunk.js
+++ b/jstests/sharding/ssv_nochunk.js
@@ -13,8 +13,7 @@ configDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
var testDB = st.s.getDB('test');
-testDB.user.insert({ x: 1 });
-testDB.runCommand({ getLastError: 1 });
+assert.writeOK(testDB.user.insert({ x: 1 }));
var doc = testDB.user.findOne();
diff --git a/jstests/sharding/writeback_shard_version.js b/jstests/sharding/stale_version_write.js
index f896ed47bb9..1d665b6c65e 100644
--- a/jstests/sharding/writeback_shard_version.js
+++ b/jstests/sharding/stale_version_write.js
@@ -12,12 +12,10 @@ var mongosB = st.s1
jsTest.log( "Adding new collections...")
var collA = mongosA.getCollection( jsTestName() + ".coll" )
-collA.insert({ hello : "world" })
-assert.eq( null, collA.getDB().getLastError() )
+assert.writeOK(collA.insert({ hello : "world" }));
var collB = mongosB.getCollection( "" + collA )
-collB.insert({ hello : "world" })
-assert.eq( null, collB.getDB().getLastError() )
+assert.writeOK(collB.insert({ hello : "world" }));
jsTest.log( "Enabling sharding..." )
@@ -27,14 +25,13 @@ printjson( mongosA.getDB( "admin" ).runCommand({ shardCollection : "" + collA, k
// MongoD doesn't know about the config shard version *until* MongoS tells it
collA.findOne()
-jsTest.log( "Trigger wbl..." )
+jsTest.log( "Trigger shard version mismatch..." );
-collB.insert({ goodbye : "world" })
-assert.eq( null, collB.getDB().getLastError() )
+assert.writeOK(collB.insert({ goodbye : "world" }));
print( "Inserted..." )
assert.eq( 3, collA.find().itcount() )
assert.eq( 3, collB.find().itcount() )
-st.stop()
\ No newline at end of file
+st.stop()
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index 7d551128cab..8660c782a41 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -26,9 +26,10 @@ s.adminCommand( { split : "test.foo" , middle : { _id : N/2 } } )
s.adminCommand({ moveChunk: "test.foo", find: { _id: 3 },
to: s.getNonPrimaries("test")[0], _waitForDelete: true });
+var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<N; i++ )
- db.foo.insert( { _id : i } )
-db.getLastError();
+ bulk.insert( { _id : i } );
+assert.writeOK(bulk.execute());
x = db.foo.stats();
assert.eq( N , x.count , "coll total count expected" )
diff --git a/jstests/sharding/sync2.js b/jstests/sharding/sync2.js
index 04a6f420768..cba7faafd89 100644
--- a/jstests/sharding/sync2.js
+++ b/jstests/sharding/sync2.js
@@ -54,7 +54,6 @@ assert.eq( 0 , s.config.big.find().itcount() , "C1" );
for ( i=0; i<50; i++ ){
s.config.big.insert( { _id : i } );
}
-s.config.getLastError();
assert.eq( 50 , s.config.big.find().itcount() , "C2" );
assert.eq( 50 , s.config.big.find().count() , "C3" );
assert.eq( 50 , s.config.big.find().batchSize(5).itcount() , "C4" );
diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index 4effb1f9cb5..40b989ac95c 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -26,10 +26,11 @@ for (var i = 0; i < numChunks; i++) {
jsTest.log("Inserting a lot of small documents...")
// Insert a lot of small documents to make multiple cursor batches
+var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10 * 1000; i++) {
- coll.insert({ _id : i })
+ bulk.insert({ _id : i });
}
-assert.eq(null, coll.getDB().getLastError());
+assert.writeOK(bulk.execute());
jsTest.log("Opening a mongod cursor...");
@@ -47,10 +48,11 @@ for (var i = 0; i < numChunks; i++) {
jsTest.log("Dropping and re-creating collection...")
coll.drop()
+bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < numChunks; i++) {
- coll.insert({ _id : i })
+ bulk.insert({ _id : i });
}
-assert.eq(null, coll.getDB().getLastError());
+assert.writeOK(bulk.execute());
sleep(10 * 1000);
diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js
index 04d818869db..03877ad4125 100644
--- a/jstests/sharding/trace_missing_docs_test.js
+++ b/jstests/sharding/trace_missing_docs_test.js
@@ -24,10 +24,9 @@ printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._i
coll.ensureIndex({ sk : 1 });
assert( admin.runCommand({ shardCollection : coll + "", key : { sk : 1 } }).ok );
-coll.insert({ _id : 12345, sk : 67890, hello : "world" });
-coll.update({ _id : 12345 }, { $set : { baz : 'biz' } });
-coll.update({ sk : 67890 }, { $set : { baz : 'boz' } });
-assert.eq( null, coll.getDB().getLastError() );
+assert.writeOK(coll.insert({ _id : 12345, sk : 67890, hello : "world" }));
+assert.writeOK(coll.update({ _id : 12345 }, { $set : { baz : 'biz' } }));
+assert.writeOK(coll.update({ sk : 67890 }, { $set : { baz : 'boz' } }));
assert( admin.runCommand({ moveChunk : coll + "",
find : { sk : 0 },
diff --git a/jstests/sharding/update1.js b/jstests/sharding/update1.js
index 96a1df2d861..d555331bc7a 100644
--- a/jstests/sharding/update1.js
+++ b/jstests/sharding/update1.js
@@ -41,13 +41,10 @@ for(i=0; i < 2; i++){
assert.eq(x._id, x.other, "_id == other");
});
-
- coll.update({_id:1, key:1}, {$set: {key:2}});
- err = db.getLastErrorObj();
+ assert.writeError(coll.update({ _id: 1, key: 1 }, { $set: { key: 2 }}));
assert.eq(coll.findOne({_id:1}).key, 1, 'key unchanged');
- coll.update({_id:1, key:1}, {$set: {foo:2}});
- assert.isnull(db.getLastError(), 'getLastError reset');
+ assert.writeOK(coll.update({ _id: 1, key: 1 }, { $set: { foo: 2 }}));
coll.update( { key : 17 } , { $inc : { x : 5 } } , true );
assert.eq( 5 , coll.findOne( { key : 17 } ).x , "up1" )
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index 35c0c77b721..25fb489e39e 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -40,58 +40,38 @@ var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll
// No shard key
shard0Coll.remove({})
-shard0Coll.save({_id:3})
-assert.gleError(shard0Coll.getDB(), function(gle) {
- return "save without shard key passed - " + tojson(gle) + " doc: " + tojson(shard0Coll.findOne())
-});
+assert.writeError(shard0Coll.save({ _id: 3 }));
// Full shard key in save
-shard0Coll.save({_id: 1, a: 1})
-assert.gleSuccess(shard0Coll.getDB(), "save with shard key failed");
+assert.writeOK(shard0Coll.save({ _id: 1, a: 1 }));
// Full shard key on replacement (basically the same as above)
shard0Coll.remove({})
-shard0Coll.update({_id: 1}, {a:1}, true)
-assert.gleSuccess(shard0Coll.getDB(), "update + upsert (replacement) with shard key failed");
+assert.writeOK(shard0Coll.update({ _id: 1 }, { a: 1 }, true));
// Full shard key after $set
shard0Coll.remove({})
-shard0Coll.update({_id: 1}, {$set: {a: 1}}, true)
-assert.gleSuccess(shard0Coll.getDB(), "update + upsert ($set) with shard key failed");
+assert.writeOK(shard0Coll.update({ _id: 1 }, { $set: { a: 1 }}, true));
// Update existing doc (replacement), same shard key value
-shard0Coll.update({_id: 1}, {a:1})
-assert.gleSuccess(shard0Coll.getDB(), "update (replacement) with shard key failed");
+assert.writeOK(shard0Coll.update({ _id: 1 }, { a: 1 }));
//Update existing doc ($set), same shard key value
-shard0Coll.update({_id: 1}, {$set: {a: 1}})
-assert.gleSuccess(shard0Coll.getDB(), "update ($set) with shard key failed");
+assert.writeOK(shard0Coll.update({ _id: 1 }, { $set: { a: 1 }}));
// Error due to mutating the shard key (replacement)
-shard0Coll.update({_id: 1}, {b:1})
-assert.gleError(shard0Coll.getDB(), "update (replacement) removes shard key");
+assert.writeError(shard0Coll.update({ _id: 1 }, { b: 1 }));
// Error due to mutating the shard key ($set)
-shard0Coll.update({_id: 1}, {$unset: {a: 1}})
-assert.gleError(shard0Coll.getDB(), "update ($unset) removes shard key");
+assert.writeError(shard0Coll.update({ _id: 1 }, { $unset: { a: 1 }}));
// Error due to removing all the embedded fields.
shard0Coll.remove({})
-shard0Coll.save({_id: 2, a:{c:1, b:1}})
-assert.gleSuccess(shard0Coll.getDB(), "save with shard key failed -- 1");
+assert.writeOK(shard0Coll.save({ _id: 2, a: { c: 1, b: 1 }}));
-shard0Coll.update({}, {$unset: {"a.c": 1}})
-assert.gleError(shard0Coll.getDB(), function(gle) {
- return "unsetting part of shard key passed - " + tojson(gle) +
- " doc: " + tojson(shard0Coll.findOne())
-});
-
-shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}})
-assert.gleError(shard0Coll.getDB(), function(gle) {
- return "unsetting nested fields of shard key passed - " + tojson(gle) +
- " doc: " + tojson(shard0Coll.findOne())
-});
+assert.writeError(shard0Coll.update({}, { $unset: { "a.c": 1 }}));
+assert.writeError(shard0Coll.update({}, { $unset: { "a.b": 1, "a.c": 1 }}));
jsTest.log("DONE!"); // distinguishes shutdown failures
st.stop();
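The assert.gleSuccess/assert.gleError helpers removed here give way to assert.writeOK/assert.writeError, which examine the WriteResult returned by the write itself instead of issuing a follow-up GLE command. Roughly, as a simplified sketch (not the shell's actual source):

    function writeOK(res, msg) {
        // res: the WriteResult (or BulkWriteResult) returned by insert/update/remove/save
        assert(!res.hasWriteError() && !res.hasWriteConcernError(),
               "write failed: " + tojson(res) + (msg ? " : " + msg : ""));
        return res;
    }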
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index 6378b53e85d..292cfe9f3f9 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -26,8 +26,9 @@ assert.eq( res.ok , 1 , "collMod failed" );
// and insert some stuff, for the hell of it
var numdocs = 20;
-for( i=0; i < numdocs; i++){ db1.getCollection( coll ).insert( {_id : i} ); }
-db1.getLastError()
+for( i=0; i < numdocs; i++){
+ assert.writeOK(db1.getCollection( coll ).insert({ _id : i }));
+}
// Next verify that userFlags has changed to 0
collstats = db1.getCollection( coll ).stats()
diff --git a/jstests/sharding/wbl_not_cleared.js b/jstests/sharding/wbl_not_cleared.js
index a3b353225ca..12dbfca842f 100644
--- a/jstests/sharding/wbl_not_cleared.js
+++ b/jstests/sharding/wbl_not_cleared.js
@@ -26,8 +26,7 @@ st.printShardingStatus();
jsTest.log("Making mongos stale...");
-coll.insert({ _id : 0 });
-coll.getDB().getLastErrorObj();
+assert.writeOK(coll.insert({ _id : 0 }));
// Make sure the stale mongos knows about the collection at the original version
assert.neq(null, staleMongos.getCollection(coll + "").findOne());
@@ -37,27 +36,16 @@ printjson(admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : sha
jsTest.log("Running a stale insert...");
-staleMongos.getCollection(coll + "").insert({ _id : 0, dup : "key" });
+// duplicate _id
+assert.writeError(staleMongos.getCollection(coll + "").insert({ _id : 0, dup : "key" }));
-jsTest.log("Getting initial GLE result...");
-
-printjson(staleMongos.getDB(coll.getDB() + "").getLastErrorObj());
-printjson(staleMongos.getDB(coll.getDB() + "").getLastErrorObj());
st.printShardingStatus();
jsTest.log("Performing insert op on the same shard...");
-staleMongos.getCollection(coll + "").insert({ _id : 1, key : "isOk" })
-
-jsTest.log("Getting GLE result...");
-
-printjson(staleMongos.getDB(coll.getDB() + "").getLastErrorObj());
-assert.eq(null, staleMongos.getDB(coll.getDB() + "").getLastError());
+assert.writeOK(staleMongos.getCollection(coll + "").insert({ _id : 1, key : "isOk" }));
jsTest.log("DONE!");
st.stop();
-
-
-
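Under write commands, a mongos holding a stale chunk map receives a stale-version response from the shard and refreshes and retries internally, so the writeback-listener ("wbl") round trips this test used to inspect no longer exist; the caller sees only the final WriteResult. The surviving pattern, as shown in the hunks above:

    // A genuine error (duplicate _id) still surfaces after the internal retry...
    assert.writeError(staleMongos.getCollection(coll + "").insert({ _id: 0, dup: "key" }));
    // ...while a routine write through the now-refreshed mongos succeeds.
    assert.writeOK(staleMongos.getCollection(coll + "").insert({ _id: 1, key: "isOk" }));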
diff --git a/jstests/sharding/writeback_bulk_insert.js b/jstests/sharding/writeback_bulk_insert.js
deleted file mode 100644
index 9f22875d046..00000000000
--- a/jstests/sharding/writeback_bulk_insert.js
+++ /dev/null
@@ -1,91 +0,0 @@
-//
-// Tests whether a writeback error during bulk insert hangs GLE
-//
-
-jsTest.log("Starting sharded cluster...")
-
-var st = new ShardingTest({shards : 1,
- mongos : 3,
- verbose : 2,
- other : {separateConfig : true,
- mongosOptions : {noAutoSplit : ""}}})
-
-st.stopBalancer()
-
-var mongosA = st.s0
-var mongosB = st.s1
-var mongosC = st.s2
-
-jsTest.log("Adding new collection...")
-
-var collA = mongosA.getCollection(jsTestName() + ".coll")
-collA.insert({hello : "world"})
-assert.eq(null, collA.getDB().getLastError())
-
-var collB = mongosB.getCollection("" + collA)
-collB.insert({hello : "world"})
-assert.eq(null, collB.getDB().getLastError())
-
-jsTest.log("Enabling sharding...")
-
-printjson(mongosA.getDB("admin").runCommand({enableSharding : collA.getDB()
- + ""}))
-printjson(mongosA.getDB("admin").runCommand({shardCollection : collA + "",
- key : {_id : 1}}))
-
-// MongoD doesn't know about the config shard version *until* MongoS tells it
-collA.findOne()
-
-jsTest.log("Preparing bulk insert...")
-
-var data1MB = "x"
-while (data1MB.length < 1024 * 1024)
- data1MB += data1MB;
-
-
-var data7MB = ""
-// Data now at 7MB
-for ( var i = 0; i < 7; i++)
- data7MB += data1MB;
-
-print("7MB object size is : " + Object.bsonsize({_id : 0,
- d : data7MB}))
-
-var dataCloseTo8MB = data7MB;
-// WARNING - MAGIC NUMBERS HERE
-// The idea is to just hit the 16MB limit so that the message gets passed in the
-// shell, but adding additional writeback information could fail.
-for ( var i = 0; i < 1024 * 1024 - 70; i++) {
- dataCloseTo8MB += "x"
-}
-
-
-var data8MB = "";
-for ( var i = 0; i < 8; i++) {
- data8MB += data1MB;
-}
-
-print("Object size is: " + Object.bsonsize([{_id : 0,
- d : dataCloseTo8MB},
- {_id : 1,
- d : data8MB}]))
-
-jsTest.log("Trigger wbl for mongosB...")
-
-collB.insert([{_id : 0,
- d : dataCloseTo8MB},
- {_id : 1,
- d : data8MB}])
-
-// Should succeed since our insert size is 16MB (plus very small overhead)
-jsTest.log("Waiting for GLE...")
-
-assert.eq(null, collB.getDB().getLastError())
-
-print("GLE Successful...")
-
-// Check that the counts via both mongoses are the same
-assert.eq(4, collA.find().itcount())
-assert.eq(4, collB.find().itcount())
-
-st.stop()
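This deleted test guarded against a GLE hang during a writeback-triggering bulk insert; with write commands the shell splits an array insert into command-sized batches and reports failures in the returned result, so there is no GLE round trip left to hang. A minimal sketch of the equivalent check (variable names from the deleted test):

    assert.writeOK(collB.insert([{ _id: 0, d: dataCloseTo8MB },
                                 { _id: 1, d: data8MB }]));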
diff --git a/jstests/sharding/writeback_server7958.js b/jstests/sharding/writeback_server7958.js
deleted file mode 100644
index 20064ef53d1..00000000000
--- a/jstests/sharding/writeback_server7958.js
+++ /dev/null
@@ -1,94 +0,0 @@
-jsTest.log("Starting sharded cluster for wrong duplicate error setup");
-
-s = new ShardingTest( name="writeback_server7958", shards = 2, verbose=0, mongos = 4 );
-
-var mongosA=s.s0;
-var mongosB=s.s1;
-var mongosC=s.s2;
-var mongosD=s.s3;
-
-ns1 = "test.trans";
-ns2 = "test.node";
-
-adminSA = mongosA.getDB( "admin" );
-adminSB = mongosB.getDB( "admin" );
-adminSD = mongosD.getDB( "admin" );
-adminSA.runCommand({ enableSharding : "test"});
-adminSA.runCommand({ shardCollection : ns1, key : { owner : 1 }, unique: true });
-//adminSA.runCommand({ shardCollection : ns1, key : { owner : 1 } });
-
-try {
- s.stopBalancer();
-} catch (e) {
- print("coundn't stop balancer via command");
-}
-
-adminSA.settings.update({ _id: 'balancer' }, { $set: { stopped: true }});
-
-var db = mongosA.getDB( "test" );
-var dbB = mongosB.getDB( "test" );
-var dbC = mongosC.getDB( "test" );
-var dbD = mongosD.getDB( "test" );
-var trans = db.trans;
-var node = db.node;
-var transB = dbB.trans;
-var nodeB = dbB.node;
-var transC = dbC.trans;
-var nodeC = dbC.node;
-var transD = dbD.trans;
-var nodeD = dbD.node;
-
-var primary = s.getServerName("test");
-var shard1 = s._shardNames[0];
-var shard2 = s._shardNames[1];
-if (primary == shard1) {
- other = shard2;
-} else {
- other = shard1;
-}
-
-
-trans.insert({"owner":NumberLong("1234567890"),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
-db.runCommand({getLastError:1, j:1});
-
-node.insert({"owner":NumberLong("1234567890"),"parent":NumberLong("0"),_id:NumberLong("1234567890"), "counts":0});
-db.runCommand({getLastError:1, j:1});
-for (var i=0; i<1000; i++) {
- trans.insert({"owner":NumberLong(i),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
- node.insert({"owner":NumberLong(i),"parent":NumberLong(i+1000),_id:NumberLong(i+1234567890), "counts":0});
-}
-
-transB.insert({"owner":NumberLong("1234567890"),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
-var r1=dbB.runCommand( { getLastError: 1, w: 1 } );
-assert( r1.n == 0 && r1.err.length > 0 && r1.hasOwnProperty("code"), tojson( r1 ) );
-
-jsTest.log("Inserted dup (failed), now split chunks and move data");
-
-adminSD.runCommand( { split: ns1, middle : { owner : 100} });
-adminSD.runCommand( { movechunk: ns1, find : { owner : 105}, to: other});
-
-jsTest.log("Kicking off dup inserts and updates");
-
-errors=[];
-i=0;
-trans.insert({"owner":NumberLong("1234567890"),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
-var r1=db.runCommand( { getLastError: 1, w: 1 } );
-assert( r1.n == 0 && r1.err.length > 0 && r1.hasOwnProperty("code"), tojson( r1 ) );
-transB.insert({"owner":NumberLong("1234567890"),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
-var rB1=dbB.runCommand( { getLastError: 1, w: 1 } );
-assert( rB1.n == 0 && rB1.err.length > 0 && rB1.hasOwnProperty("code"), tojson( r1 ) );
-
-nodeB.update({"owner":NumberLong("1234567890"),"parent":NumberLong("0"),_id:NumberLong("1234567890")},{"$inc":{"counts":1}});
-var resultB = dbB.runCommand( { getLastError: 1, w: 1 } )
-node.update({"owner":NumberLong("1234567890"),"parent":NumberLong("0"),_id:NumberLong("1234567890")},{"$inc":{"counts":1}});
-var result = db.runCommand( { getLastError: 1, w: 1 } )
-
-assert.eq( 2, node.findOne().counts );
-
-printjson( result )
-printjson( resultB )
-
-assert( result.n==1 && result.updatedExisting==true && result.err == null, "update succeeded on collection node on mongos A but GLE was\nn=" + result.n + ",\nupdatedExisting=" + result.updatedExisting + ",\nerr=" + result.err);
-assert( resultB.n==1 && resultB.updatedExisting==true && resultB.err == null, "update succeeded on collection node on mongos B but GLE was\nn=" + resultB.n + ",\nupdatedExisting=" + resultB.updatedExisting + ",\nerr=" + resultB.err);
-
-s.stop();
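This deleted test asserted on raw GLE fields (n, err, code) after duplicate-key inserts through several mongoses. The write-commands equivalent reads the same information off the WriteResult; the error-code check below is an illustrative translation, not a line from the test:

    var res = transB.insert({ owner: NumberLong("1234567890"),
                              tid: "2c4ba280-450a-11e2-bcfd-0800200c9a66" });
    assert.writeError(res);
    assert.eq(11000, res.getWriteError().code);  // E11000 duplicate key on the unique shard key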
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index b40fe4a9c94..07219374a66 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -31,12 +31,15 @@ else {
for (var i = 0; i < 4*1024; i++) str += "a";
}
-for (j=0; j<100; j++) for (i=0; i<512; i++){ db.foo.save({ i : idInc++, val: valInc++, y:str})}
-
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (j=0; j<100; j++) {
+ for (i=0; i<512; i++){
+ bulk.insert({ i: idInc++, val: valInc++, y:str });
+ }
+}
+assert.writeOK(bulk.execute());
jsTest.log( "Documents inserted, waiting for error..." )
-db.getLastError();
-
jsTest.log( "Doing double-checks of insert..." )
// Collect some useful stats to figure out what happened
@@ -118,17 +121,15 @@ jsTest.log( )
valInc = 0;
for (j=0; j<100; j++){
print( "Inserted document: " + (j * 100) );
- for (i=0; i<512; i++){ db.foo.save({ i : idInc++, val: valInc++, y:str}) }
+ bulk = db.foo.initializeUnorderedBulkOp();
+ for (i=0; i<512; i++){
+ bulk.insert({ i : idInc++, val: valInc++, y: str });
+ }
// wait for replication to catch up
- db.runCommand({getLastError:1, w:2, wtimeout:10000});
+ assert.writeOK(bulk.execute({ w: 2 }));
}
-jsTest.log( "Waiting for errors..." )
-
-assert.eq( null, db.getLastError() )
-
-jsTest.log( "No errors..." )
-
+jsTest.log( "No errors..." );
map2 = function() { emit(this.val, 1); }
reduce2 = function(key, values) { return Array.sum(values); }
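Note how the write concern moves in the hunk above: the old code tacked { w: 2, wtimeout: 10000 } onto a manual getLastError command, while the new code passes it to execute(). A wtimeout can ride along the same way, e.g. (illustrative):

    assert.writeOK(bulk.execute({ w: 2, wtimeout: 10000 }));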