Diffstat (limited to 'jstests')
100 files changed, 1659 insertions, 836 deletions
diff --git a/jstests/sharding/SERVER-7379.js b/jstests/sharding/SERVER-7379.js
index f4730495624..8ad1a12fb80 100644
--- a/jstests/sharding/SERVER-7379.js
+++ b/jstests/sharding/SERVER-7379.js
@@ -9,38 +9,42 @@ var testDoc = { "_id": 123, "categoryId": 9881, "store": "NEW" }
 offerChange.remove({}, false);
 offerChange.insert(testDoc)
-assert.writeError(offerChange.update({ _id: 123 }, { $set: { store: "NEWEST" } }, true, false));
+offerChange.update({ "_id": 123 }, { $set: { "store": "NEWEST" } }, true, false);
+var gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 var doc = offerChange.findOne();
 assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
 
 offerChange.remove({}, false);
 offerChange.insert(testDoc)
-assert.writeError(offerChange.update({ _id: 123 },
-                                     { _id: 123, categoryId: 9881, store: "NEWEST" },
-                                     true, false));
-doc = offerChange.findOne();
+offerChange.update({ "_id": 123 }, { "_id": 123, "categoryId": 9881, "store": "NEWEST" }, true, false);
+var gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
+var doc = offerChange.findOne();
 assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
 
 offerChange.remove({}, false);
 offerChange.insert(testDoc)
-assert.writeError(offerChange.save({ "_id": 123, "categoryId": 9881, "store": "NEWEST" }));
-doc = offerChange.findOne();
+offerChange.save({ "_id": 123, "categoryId": 9881, "store": "NEWEST" })
+var gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
+var doc = offerChange.findOne();
 assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
 
 offerChange.remove({}, false);
 offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123, store: "NEW" },
-                                     { _id: 123, categoryId: 9881, store: "NEWEST" },
-                                     true, false));
-doc = offerChange.findOne();
+offerChange.update({ "_id": 123, "store": "NEW" }, { "_id": 123, "categoryId": 9881, "store": "NEWEST" }, true, false);
+var gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
+var doc = offerChange.findOne();
 assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
 
 offerChange.remove({}, false);
 offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123, categoryId: 9881 },
-                                     { _id: 123, categoryId: 9881, store: "NEWEST" },
-                                     true, false));
-doc = offerChange.findOne();
+offerChange.update({ "_id": 123, "categoryId": 9881 }, { "_id": 123, "categoryId": 9881, "store": "NEWEST" }, true, false);
+var gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
+var doc = offerChange.findOne();
 assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
 
 st.stop();
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index 42342351d8a..0ca6a832a49 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -9,9 +9,10 @@ conn1 = startMongodTest( 29000 );
 db1 = conn1.getDB( "testDB" );
 numObjs = 0;
 for (i=0; i<3; i++){
-    assert.writeOK(db1.foo.save( { a : i } ));
+    db1.foo.save( { a : i } );
     numObjs++;
 }
+db1.getLastError()
 
 newShard = "myShard";
 assert( s.admin.runCommand( { addshard: "localhost:29000" , name: newShard } ).ok, "did not accepted non-duplicated shard" );
@@ -19,9 +20,11 @@ assert( s.admin.runCommand( { addshard: "localhost:29000" , name: newShard } ).o
 
 // a mongod with an existing database name should not be allowed to become a shard
 conn2 = startMongodTest( 29001 );
 db2 = conn2.getDB( "otherDB" );
-assert.writeOK(db2.foo.save({ a: 1 }));
+db2.foo.save( {a:1} );
+db2.getLastError()
 db3 = conn2.getDB( "testDB" );
-assert.writeOK(db3.foo.save({ a: 1 } ));
+db3.foo.save( {a:1} );
+db3.getLastError()
 
 s.config.databases.find().forEach( printjson )
 rejectedShard = "rejectedShard";
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index 6c13985a436..a62dbe43c98 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -32,7 +32,8 @@ printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._i
 printjson( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }) )
 
 // Insert one document
-assert.writeOK(coll.insert({ hello : "world" }));
+coll.insert({ hello : "world" })
+assert.eq( null, coll.getDB().getLastError() )
 
 // Migrate the collection to and from shard2 so shard1 loads the shard2 host
 printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id, _waitForDelete : true }) )
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index 0c8d7e3a3dc..309f6191414 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -14,65 +14,98 @@ st.printShardingStatus()
 
 print( "1: insert some invalid data" )
 
-var value = null;
+var value = null
+
+var checkError = function( shouldError ){
+    var error = coll.getDB().getLastError()
+
+    if( error != null ) printjson( error )
+
+    if( error == null && ! shouldError ) return
+    if( error != null && shouldError ) return
+
+    if( error == null ) print( "No error detected!" )
+    else print( "Unexpected error!" )
+
+    assert( false )
+}
 
 // Insert an object with invalid array key
-assert.writeError(coll.insert({ i : [ 1, 2 ] }));
+coll.insert({ i : [ 1, 2 ] })
+checkError( true )
 
 // Insert an object with all the right fields, but an invalid array val for _id
-assert.writeError(coll.insert({ _id : [ 1, 2 ] , i : 3}));
+coll.insert({ _id : [ 1, 2 ] , i : 3})
+checkError( true )
 
 // Insert an object with valid array key
-assert.writeOK(coll.insert({ i : 1 }));
+coll.insert({ i : 1 })
+checkError( false )
 
 // Update the value with valid other field
 value = coll.findOne({ i : 1 })
-assert.writeOK(coll.update( value, { $set : { j : 2 } } ));
+coll.update( value, { $set : { j : 2 } } )
+checkError( false )
 
 // Update the value with invalid other fields
 value = coll.findOne({ i : 1 })
-assert.writeError(coll.update( value, Object.merge( value, { i : [ 3 ] } ) ));
+coll.update( value, Object.merge( value, { i : [ 3 ] } ) )
+checkError( true )
 
 // Multi-update the value with invalid other fields
 value = coll.findOne({ i : 1 })
-assert.writeError(coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true));
+coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true)
+checkError( true )
 
 // Multi-update the value with other fields (won't work, but no error)
 value = coll.findOne({ i : 1 })
-assert.writeOK(coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true));
+coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true)
+checkError( false )
 
 // Query the value with other fields (won't work, but no error)
 value = coll.findOne({ i : 1 })
 coll.find( Object.merge( value, { i : [ 1, 1 ] } ) ).toArray()
+checkError( false )
 
 // Can't remove using multikey, but shouldn't error
 value = coll.findOne({ i : 1 })
 coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4 ] } ) )
+checkError( false )
 
 // Can't remove using multikey, but shouldn't error
 value = coll.findOne({ i : 1 })
-assert.writeOK(coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) ));
+coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) )
+error = coll.getDB().getLastError()
+assert.eq( error, null )
 assert.eq( coll.find().itcount(), 1 )
 
 value = coll.findOne({ i : 1 })
-assert.writeOK(coll.remove( Object.extend( value, { i : 1 } ) ));
+coll.remove( Object.extend( value, { i : 1 } ) )
+error = coll.getDB().getLastError()
+assert.eq( error, null )
 assert.eq( coll.find().itcount(), 0 )
 
 coll.ensureIndex({ _id : 1, i : 1, j: 1 });
 // Can insert document that will make index into a multi-key as long as it's not part of shard key.
 coll.remove({});
-assert.writeOK(coll.insert({ i: 1, j: [1, 2] }));
+coll.insert({ i: 1, j: [1, 2] });
+error = coll.getDB().getLastError();
+assert.eq( error, null );
 assert.eq( coll.find().itcount(), 1 );
 
 // Same is true for updates.
 coll.remove({});
 coll.insert({ _id: 1, i: 1 });
-assert.writeOK(coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }));
+coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] });
+error = coll.getDB().getLastError();
+assert.eq( error, null );
 assert.eq( coll.find().itcount(), 1 );
 
 // Same for upserts.
 coll.remove({});
-assert.writeOK(coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }, true));
+coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }, true);
+error = coll.getDB().getLastError();
+assert.eq( error, null );
 assert.eq( coll.find().itcount(), 1 );
 
 printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" )
@@ -81,7 +114,8 @@ printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-s
 var coll = mongos.getCollection( "" + coll + "2" )
 for( var i = 0; i < 10; i++ ){
     // TODO : does not check weird cases like [ i, i ]
-    assert.writeOK(coll.insert({ i : [ i, i + 1 ] }));
+    coll.insert({ i : [ i, i + 1 ] })
+    checkError( false )
 }
 
 coll.ensureIndex({ _id : 1, i : 1 })
@@ -99,7 +133,8 @@ st.printShardingStatus()
 var coll = mongos.getCollection( "" + coll + "3" )
 for( var i = 0; i < 10; i++ ){
     // TODO : does not check weird cases like [ i, i ]
-    assert.writeOK(coll.insert({ i : i }));
+    coll.insert({ i : i })
+    checkError( false )
 }
 
 coll.ensureIndex({ _id : 1, i : 1 })
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 1a9837cb1ff..be2f7803b46 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -58,8 +58,8 @@ else {
 }
 
 login(adminUser);
-assert.writeOK(s.getDB( "config" ).settings.update({ _id: "chunksize" },
-                                                   { $set: { value : 1 }}, true ));
+s.getDB( "config" ).settings.update( { _id : "chunksize" }, {$set : {value : 1 }}, true );
+printjson(s.getDB("config").runCommand({getlasterror:1}));
 printjson(s.getDB("config").settings.find().toArray());
 
 print("restart mongos");
@@ -139,7 +139,9 @@ login(testUser);
 assert.eq(s.getDB("test").foo.findOne(), null);
 
 print("insert try 2");
-assert.writeOK(s.getDB("test").foo.insert({ x: 1 }));
+s.getDB("test").foo.insert({x:1});
+result = s.getDB("test").getLastErrorObj();
+assert.eq(result.err, null);
 assert.eq( 1 , s.getDB( "test" ).foo.find().itcount() , tojson(result) );
 
 logout(testUser);
@@ -162,11 +164,15 @@ ReplSetTest.awaitRSClientHosts(s.s, d2.nodes, {ok: true });
 s.getDB("test").foo.remove({})
 
 var num = 100000;
-var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
 for (i=0; i<num; i++) {
-    bulk.insert({ _id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market" });
+    s.getDB("test").foo.insert({_id:i, x:i, abc : "defg", date : new Date(), str : "all the talk on the market"});
+}
+
+// Make sure all data gets sent through
+printjson( s.getDB("test").getLastError() )
+for (var i = 0; i < s._connections.length; i++) { // SERVER-4356
+    s._connections[i].getDB("test").getLastError();
 }
-assert.writeOK(bulk.execute());
 
 var d1Chunks = s.getDB("config").chunks.count({shard : "d1"});
 var d2Chunks = s.getDB("config").chunks.count({shard : "d2"});
@@ -196,7 +202,7 @@ if (numDocs != num) {
         lastDocNumber = docs[i].x;
         numDocsSeen++;
     }
-    assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()")
+    assert.eq(numDocs, numDocsSeen, "More docs discovered on second find() even though getLastError was already called")
    assert.eq(num - numDocs, missingDocNumbers.length);
 
     load('jstests/libs/trace_missing_docs.js');
@@ -280,7 +286,9 @@ print( "  testing find that should work" );
 readOnlyDB.foo.findOne();
 
 print( "  testing write that should fail" );
-assert.writeError(readOnlyDB.foo.insert({ eliot: 1 }));
+readOnlyDB.foo.insert( { eliot : 1 } );
+result = readOnlyDB.getLastError();
+assert( ! result.ok , tojson( result ) )
 
 print( "  testing read command (should succeed)" );
 assert.commandWorked(readOnlyDB.runCommand({count : "foo"}));
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index a3e5a712416..93d178222f5 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -1,5 +1,3 @@
-// TODO: move back to sharding suite after SERVER-13402 is fixed
-
 /**
  * This tests using DB commands with authentication enabled when sharded.
  */
diff --git a/jstests/sharding/auth_config_down.js b/jstests/sharding/auth_config_down.js
index 6f97051a864..ba1eaa955aa 100644
--- a/jstests/sharding/auth_config_down.js
+++ b/jstests/sharding/auth_config_down.js
@@ -14,7 +14,8 @@ var configs = st._configServers
 printjson( configs )
 st.printShardingStatus()
 
-assert.writeOK(mongos.getCollection( "foo.bar" ).insert({ hello : "world" }));
+mongos.getCollection( "foo.bar" ).insert({ hello : "world" })
+assert.eq( null, mongos.getDB( "foo" ).getLastError() )
 
 var stopOrder = [ 1, 0 ]
diff --git a/jstests/sharding/auth_repl.js b/jstests/sharding/auth_repl.js
index c07719843b1..3c0a5d14be2 100644
--- a/jstests/sharding/auth_repl.js
+++ b/jstests/sharding/auth_repl.js
@@ -11,7 +11,8 @@ var conn = new Mongo(replTest.getURL());
 var testDB = conn.getDB('test');
 var testColl = testDB.user;
 
-assert.writeOK(testColl.insert({ x: 1 }, { writeConcern: { w: nodeCount }}));
+testColl.insert({ x: 1 });
+testDB.runCommand({ getLastError: 1, w: nodeCount });
 
 // Setup the cached connection for primary and secondary in DBClientReplicaSet
 // before setting up authentication
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index f89a6da086a..2ba8bec2b79 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -54,11 +54,11 @@ coll.setSlaveOk( true );
 ReplSetTest.awaitRSClientHosts( mongos, replTest.getSecondaries(), { ok : true, secondary : true });
 
-var bulk = coll.initializeUnorderedBulkOp();
 for ( var x = 0; x < 20; x++ ) {
-    bulk.insert({ v: x, k: 10 });
+    coll.insert({ v: x, k: 10 });
 }
-assert.writeOK(bulk.execute({ w: nodeCount }));
+
+coll.runCommand({ getLastError: 1, w: nodeCount });
 
 /* Although mongos never caches query results, try to do a different query
  * everytime just to be sure.
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 31a1bcf18f9..52e69e88848 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -20,12 +20,19 @@ var test1User = {
     roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
 };
 
+function assertGLEOK(status) {
+    assert(status.ok && status.err === null,
+           "Expected OK status object; found " + tojson(status));
+}
+
 function assertRemove(collection, pattern) {
-    assert.writeOK(collection.remove(pattern));
+    collection.remove(pattern);
+    assertGLEOK(collection.getDB().getLastErrorObj());
 }
 
 function assertInsert(collection, obj) {
-    assert.writeOK(collection.insert(obj));
+    collection.insert(obj);
+    assertGLEOK(collection.getDB().getLastErrorObj());
 }
 
 var cluster = new ShardingTest("authwhere", 1, 0, 1,
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 9516499580b..3cba1aee4e4 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -20,12 +20,19 @@ var test1Reader = {
     roles: [{role: 'read', db: 'test1', hasRole:true, canDelegate: false}]
 };
 
+function assertGLEOK(status) {
+    assert(status.ok && status.err === null,
+           "Expected OK status object; found " + tojson(status));
+}
+
 function assertRemove(collection, pattern) {
-    assert.writeOK(collection.remove(pattern));
+    collection.remove(pattern);
+    assertGLEOK(collection.getDB().getLastErrorObj());
 }
 
 function assertInsert(collection, obj) {
-    assert.writeOK(collection.insert(obj));
+    collection.insert(obj);
+    assertGLEOK(collection.getDB().getLastErrorObj());
 }
 
 var cluster = new ShardingTest("authwhere", 1, 0, 1,
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 6e5800efff4..f41d535bbb6 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -14,11 +14,10 @@ coll = db.foo;
 
 var i=0;
 
-var bulk = coll.initializeUnorderedBulkOp();
 for ( ; i<100; i++ ){
-    bulk.insert( { num : i , s : bigString } );
+    coll.save( { num : i , s : bigString } );
 }
-assert.writeOK( bulk.execute() );
+db.getLastError();
 
 primary = s.getServer( "test" ).getDB( "test" );
 
@@ -30,31 +29,28 @@ assert.eq(100, db.foo.find().itcount());
 
 print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand( { datasize : "test.foo" } ) ) );
 
-bulk = coll.initializeUnorderedBulkOp();
 for ( ; i<200; i++ ){
-    bulk.insert( { num : i , s : bigString } );
+    coll.save( { num : i , s : bigString } );
 }
-assert.writeOK( bulk.execute() );
+db.getLastError();
 
 s.printChunks()
 s.printChangeLog()
 counts.push( s.config.chunks.count() );
 
-bulk = coll.initializeUnorderedBulkOp();
 for ( ; i<400; i++ ){
-    bulk.insert( { num : i , s : bigString } );
+    coll.save( { num : i , s : bigString } );
 }
-assert.writeOK( bulk.execute() );
+db.getLastError();
 
 s.printChunks();
 s.printChangeLog()
 counts.push( s.config.chunks.count() );
 
-bulk = coll.initializeUnorderedBulkOp();
 for ( ; i<700; i++ ){
-    bulk.insert( { num : i , s : bigString } );
+    coll.save( { num : i , s : bigString } );
 }
-assert.writeOK( bulk.execute() );
+db.getLastError();
 
 s.printChunks();
 s.printChangeLog()
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index f95944a83f3..41cb38a4309 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -18,12 +18,10 @@ for ( j=0; j<30; j++ ){
     print( "j:" + j + " : " +
            Date.timeFunc(
                function(){
-                   var bulk = coll.initializeUnorderedBulkOp();
                    for ( var k=0; k<100; k++ ){
-                       bulk.insert( { num : i , s : bigString } );
+                       coll.save( { num : i , s : bigString } );
                        i++;
                    }
-                   assert.writeOK(bulk.execute());
                } ) );
 }
 
@@ -31,6 +29,7 @@ for ( j=0; j<30; j++ ){
 assert.eq( i , j * 100 , "setup" );
 // Until SERVER-9715 is fixed, the sync command must be run on a diff connection
 new Mongo( s.s.host ).adminCommand( "connpoolsync" );
+db.getLastError();
 
 print( "done inserting data" );
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index f06cdb92cf7..800267d3ab8 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -119,7 +119,8 @@ var oldChunks = config.chunks.find().toArray();
 var staleMongos = MongoRunner.runMongos({ configdb : configConnStr });
 brokenColl = staleMongos.getCollection(brokenColl.toString());
-assert.writeOK(brokenColl.insert({ hello : "world" }));
+brokenColl.insert({ hello : "world" });
+assert.eq(null, brokenColl.getDB().getLastError());
 
 // Modify the chunks to make shards at a higher version
 
@@ -129,9 +130,11 @@ assert.commandWorked(admin.runCommand({ moveChunk : brokenColl.toString(),
 
 // Rewrite the old chunks back to the config server
-assert.writeOK(config.chunks.remove({}));
+config.chunks.remove({});
+assert.eq(null, config.getLastError());
 for ( var i = 0; i < oldChunks.length; i++ )
-    assert.writeOK(config.chunks.insert(oldChunks[i]));
+    config.chunks.insert(oldChunks[i]);
+assert.eq(null, config.getLastError());
 
 // Stale mongos can no longer bring itself up-to-date!
 // END SETUP
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index 7b420d707a0..e4eb9291906 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -48,10 +48,21 @@ printjson(admin.runCommand({ moveChunk: collSh + "",
 
 var resetColls = function()
 {
-    assert.writeOK(collSh.remove({}));
-    assert.writeOK(collUn.remove({}));
-    assert.writeOK(collDi.remove({}));
-};
+    collSh.remove({})
+    assert.eq(null, collSh.getDB().getLastError());
+
+    collUn.remove({})
+    assert.eq(null, collUn.getDB().getLastError());
+
+    collDi.remove({})
+    assert.eq(null, collDi.getDB().getLastError());
+}
+
+var printPass = function(str)
+{
+    print(str);
+    return str;
+}
 
 var isDupKeyError = function(err)
 {
@@ -71,13 +82,16 @@ resetColls();
 var inserts = [{ukey : 0},
               {ukey : 1}]
 
-assert.writeOK(collSh.insert(inserts));
+collSh.insert(inserts);
+assert.eq(null, printPass(collSh.getDB().getLastError()));
 assert.eq(2, collSh.find().itcount());
 
-assert.writeOK(collUn.insert(inserts));
+collUn.insert(inserts);
+assert.eq(null, printPass(collUn.getDB().getLastError()));
 assert.eq(2, collUn.find().itcount());
 
-assert.writeOK(collDi.insert(inserts));
+collDi.insert(inserts);
+assert.eq(null, printPass(collDi.getDB().getLastError()));
 assert.eq(2, collDi.find().itcount());
 
 jsTest.log("Bulk insert (no COE) with mongos error...")
@@ -87,7 +101,8 @@ var inserts = [{ukey : 0},
               {hello : "world"},
               {ukey : 1}]
 
-assert.writeError(collSh.insert(inserts));
+collSh.insert(inserts);
+assert.neq(null, printPass(collSh.getDB().getLastError()));
 assert.eq(1, collSh.find().itcount());
 
 jsTest.log("Bulk insert (no COE) with mongod error...")
@@ -97,13 +112,16 @@ var inserts = [{ukey : 0},
               {ukey : 0},
               {ukey : 1}]
 
-assert.writeError(collSh.insert(inserts));
+collSh.insert(inserts);
+assert.neq(null, printPass(collSh.getDB().getLastError()));
 assert.eq(1, collSh.find().itcount());
 
-assert.writeError(collUn.insert(inserts));
+collUn.insert(inserts);
+assert.neq(null, printPass(collUn.getDB().getLastError()));
 assert.eq(1, collUn.find().itcount());
 
-assert.writeError(collDi.insert(inserts));
+collDi.insert(inserts);
+assert.neq(null, printPass(collDi.getDB().getLastError()));
 assert.eq(1, collDi.find().itcount());
 
 jsTest.log("Bulk insert (no COE) with mongod and mongos error...")
@@ -114,16 +132,22 @@ var inserts = [{ukey : 0},
               {ukey : 1},
               {hello : "world"}]
 
-var res = assert.writeError(collSh.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+collSh.insert(inserts);
+var err = printPass(collSh.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(1, collSh.find().itcount());
 
-res = assert.writeError(collUn.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+collUn.insert(inserts);
+var err = printPass(collUn.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(1, collUn.find().itcount());
 
-res = assert.writeError(collDi.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+collDi.insert(inserts);
+var err = printPass(collDi.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(1, collDi.find().itcount());
 
 jsTest.log("Bulk insert (no COE) on second shard...")
@@ -132,13 +156,16 @@ resetColls();
 var inserts = [{ukey : 0},
               {ukey : -1}]
 
-assert.writeOK(collSh.insert(inserts));
+collSh.insert(inserts);
+assert.eq(null, printPass(collSh.getDB().getLastError()));
 assert.eq(2, collSh.find().itcount());
 
-assert.writeOK(collUn.insert(inserts));
+collUn.insert(inserts);
+assert.eq(null, printPass(collUn.getDB().getLastError()));
 assert.eq(2, collUn.find().itcount());
 
-assert.writeOK(collDi.insert(inserts));
+collDi.insert(inserts);
+assert.eq(null, printPass(collDi.getDB().getLastError()));
 assert.eq(2, collDi.find().itcount());
 
 jsTest.log("Bulk insert to second shard (no COE) with mongos error...")
@@ -149,7 +176,8 @@ var inserts = [{ukey : 0},
               {ukey : -1},
               {hello : "world"}]
 
-assert.writeError(collSh.insert(inserts));
+collSh.insert(inserts);
+assert.neq(null, printPass(collSh.getDB().getLastError()));
 assert.eq(3, collSh.find().itcount());
 
 jsTest.log("Bulk insert to second shard (no COE) with mongod error...")
@@ -161,16 +189,20 @@ var inserts = [{ukey : 0},
               {ukey : -2},
               {ukey : -2}]
 
-assert.writeError(collSh.insert(inserts));
+collSh.insert(inserts);
+assert.neq(null, printPass(collSh.getDB().getLastError()));
 assert.eq(4, collSh.find().itcount());
 
-assert.writeError(collUn.insert(inserts));
+collUn.insert(inserts);
+assert.neq(null, printPass(collUn.getDB().getLastError()));
 assert.eq(4, collUn.find().itcount());
 
-assert.writeError(collDi.insert(inserts));
+collDi.insert(inserts);
+assert.neq(null, printPass(collDi.getDB().getLastError()));
 assert.eq(4, collDi.find().itcount());
 
-jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...")
+jsTest
+    .log("Bulk insert to third shard (no COE) with mongod and mongos error...")
 
 resetColls();
 var inserts = [{ukey : 0},
@@ -181,16 +213,22 @@ var inserts = [{ukey : 0},
               {ukey : 4},
               {hello : "world"}]
 
-res = assert.writeError(collSh.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+collSh.insert(inserts);
+var err = printPass(collSh.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(5, collSh.find().itcount());
 
-res = assert.writeError(collUn.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+collUn.insert(inserts);
+var err = printPass(collUn.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(5, collUn.find().itcount());
 
-res = assert.writeError(collDi.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+collDi.insert(inserts);
+var err = printPass(collDi.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(5, collDi.find().itcount());
 
 //
@@ -204,7 +242,8 @@ var inserts = [{ukey : 0},
               {hello : "world"},
               {ukey : 1}]
 
-assert.writeError(collSh.insert(inserts, 1)); // COE
+collSh.insert(inserts, 1); // COE
+assert.neq(null, printPass(collSh.getDB().getLastError()));
 assert.eq(2, collSh.find().itcount());
 
 jsTest.log("Bulk insert (yes COE) with mongod error...")
@@ -214,13 +253,16 @@ var inserts = [{ukey : 0},
               {ukey : 0},
               {ukey : 1}]
 
-assert.writeError(collSh.insert(inserts, 1));
+collSh.insert(inserts, 1);
+assert.neq(null, printPass(collSh.getDB().getLastError()));
 assert.eq(2, collSh.find().itcount());
 
-assert.writeError(collUn.insert(inserts, 1));
+collUn.insert(inserts, 1);
+assert.neq(null, printPass(collUn.getDB().getLastError()));
 assert.eq(2, collUn.find().itcount());
 
-assert.writeError(collDi.insert(inserts, 1));
+collDi.insert(inserts, 1);
+assert.neq(null, printPass(collDi.getDB().getLastError()));
 assert.eq(2, collDi.find().itcount());
 
 jsTest
@@ -236,17 +278,23 @@ var inserts = [{ukey : 0},
               {hello : "world"}]
 
 // Last error here is mongos error
-res = assert.writeError(collSh.insert(inserts, 1));
-assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+collSh.insert(inserts, 1);
+var err = printPass(collSh.getDB().getLastError());
+assert.neq(null, err);
+assert(!isDupKeyError(err));
 assert.eq(5, collSh.find().itcount());
 
 // Extra insert goes through, since mongos error "doesn't count"
-res = assert.writeError(collUn.insert(inserts, 1));
-assert.eq(6, res.nInserted, res.toString());
+collUn.insert(inserts, 1);
+var err = printPass(collUn.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(6, collUn.find().itcount());
 
-res = assert.writeError(collDi.insert(inserts, 1));
-assert.eq(6, res.nInserted, res.toString());
+collDi.insert(inserts, 1);
+var err = printPass(collDi.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(6, collDi.find().itcount());
 
 jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error "
@@ -262,17 +310,23 @@ var inserts = [{ukey : 0},
               {ukey : 4}]
 
 // Last error here is mongos error
-res = assert.writeError(collSh.insert(inserts, 1));
-assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+collSh.insert(inserts, 1);
+var err = printPass(collSh.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(5, collSh.find().itcount());
 
 // Extra insert goes through, since mongos error "doesn't count"
-res = assert.writeError(collUn.insert(inserts, 1));
-assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+collUn.insert(inserts, 1);
+var err = printPass(collUn.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(6, collUn.find().itcount());
 
-res = assert.writeError(collDi.insert(inserts, 1));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+collDi.insert(inserts, 1);
+var err = printPass(collDi.getDB().getLastError());
+assert.neq(null, err);
+assert(isDupKeyError(err));
 assert.eq(6, collDi.find().itcount());
 
 //
@@ -297,7 +351,9 @@ printjson(admin.runCommand({moveChunk : collSh + "",
                             to : shards[0]._id,
                             _waitForDelete: true}));
 
-assert.writeOK(staleCollSh.insert(inserts));
+staleCollSh.insert(inserts);
+var err = printPass(staleCollSh.getDB().getLastError());
+assert.eq(null, err);
 
 //
 // Test when the objects to be bulk inserted are 10MB, and so can't be inserted
@@ -336,6 +392,8 @@ printjson(admin.runCommand({moveChunk : collSh + "",
                             to : shards[0]._id,
                             _waitForDelete: true}));
 
-assert.writeOK(staleCollSh.insert(inserts));
+staleCollSh.insert(inserts);
+var err = printPass(staleCollSh.getDB().getLastError());
+assert.eq(null, err);
 
 st.stop()
diff --git a/jstests/sharding/cleanup_orphaned_cmd.js b/jstests/sharding/cleanup_orphaned_cmd.js
index 84ca52e0f18..872d124cfda 100644
--- a/jstests/sharding/cleanup_orphaned_cmd.js
+++ b/jstests/sharding/cleanup_orphaned_cmd.js
@@ -25,16 +25,16 @@ st.printShardingStatus();
 
 jsTest.log( "Inserting some regular docs..." );
 
-var bulk = coll.initializeUnorderedBulkOp();
-for ( var i = -50; i < 50; i++ ) bulk.insert({ _id : i });
-assert.writeOK( bulk.execute() );
+for ( var i = -50; i < 50; i++ ) coll.insert({ _id : i });
+assert.eq( null, coll.getDB().getLastError() );
 
 // Half of the data is on each shard
 
 jsTest.log( "Inserting some orphaned docs..." );
 
 var shard0Coll = st.shard0.getCollection( coll + "" );
-assert.writeOK( shard0Coll.insert({ _id : 10 }));
+shard0Coll.insert({ _id : 10 });
+assert.eq( null, shard0Coll.getDB().getLastError() );
 
 assert.neq( 50, shard0Coll.count() );
 assert.eq( 100, coll.find().itcount() );
@@ -68,13 +68,12 @@ assert( admin.runCommand({ moveChunk : coll + "",
 
 jsTest.log( "Inserting some more orphaned docs..." );
 
-st.printShardingStatus();
-
 var shard0Coll = st.shard0.getCollection( coll + "" );
-assert.writeOK(shard0Coll.insert({ _id : -35 }));
-assert.writeOK(shard0Coll.insert({ _id : -11 }));
-assert.writeOK(shard0Coll.insert({ _id : 0 }));
-assert.writeOK(shard0Coll.insert({ _id : 10 }));
+shard0Coll.insert({ _id : -36 });
+shard0Coll.insert({ _id : -10 });
+shard0Coll.insert({ _id : 0 });
+shard0Coll.insert({ _id : 10 });
+assert.eq( null, shard0Coll.getDB().getLastError() );
 
 assert.neq( 25, shard0Coll.count() );
 assert.eq( 100, coll.find().itcount() );
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
index 219f23d3282..db19e55a79f 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -36,9 +36,8 @@ jsTest.log( "Inserting some docs on each shard, so 1/2 will be orphaned..." );
 
 for ( var s = 0; s < 2; s++ ) {
     var shardColl = ( s == 0 ? st.shard0 : st.shard1 ).getCollection( coll + "" );
-    var bulk = shardColl.initializeUnorderedBulkOp();
-    for ( var i = 0; i < 100; i++ ) bulk.insert({ _id : i });
-    assert.writeOK(bulk.execute());
+    for ( var i = 0; i < 100; i++ ) shardColl.insert({ _id : i });
+    assert.eq( null, shardColl.getDB().getLastError() );
 }
 
 assert.eq( 200, st.shard0.getCollection( coll + "" ).find().itcount() +
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index c814805d531..824e2b45167 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -30,11 +30,9 @@ jsTest.log( "Enabling sharding for the first time..." )
 admin.runCommand({ enableSharding : coll.getDB() + "" })
 admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
 
-var bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ) {
-    bulk.insert({ _id : i, test : "a" });
-}
-assert.writeOK( bulk.execute() );
+for( var i = 0; i < 100; i++ )
+    insertMongos.getCollection( coll + "" ).insert({ _id : i, test : "a" })
+assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
 
 assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "a" }).itcount() )
 
 coll.drop()
@@ -50,11 +48,9 @@ admin.runCommand({ enableSharding : coll.getDB() + "" })
 coll.ensureIndex({ notId : 1 })
 admin.runCommand({ shardCollection : coll + "", key : { notId : 1 } })
 
-bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ) {
-    bulk.insert({ notId : i, test : "b" });
-}
-assert.writeOK( bulk.execute() );
+for( var i = 0; i < 100; i++ )
+    insertMongos.getCollection( coll + "" ).insert({ notId : i, test : "b" })
+assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
 
 assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "b" }).itcount() )
 assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a" ] } }).itcount() )
@@ -77,11 +73,12 @@ admin.runCommand({ movePrimary : coll.getDB() + "",
                    to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) })
 
 jsTest.log( "moved primary..." )
-
-bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
+
 for( var i = 0; i < 100; i++ )
-    bulk.insert({ test : "c" });
-assert.writeOK( bulk.execute() );
+    insertMongos.getCollection( coll + "" ).insert({ test : "c" })
+assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
+
+jsTest.log( "waited for gle..." )
 
 assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "c" }).itcount() )
 assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b" ] } }).itcount() )
@@ -100,11 +97,9 @@ admin.runCommand({ movePrimary : coll.getDB() + "",
                    to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) })
 admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
 
-bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
 for( var i = 0; i < 100; i++ )
-    bulk.insert({ test : "d" });
-assert.writeOK( bulk.execute() );
-
+    insertMongos.getCollection( coll + "" ).insert({ test : "d" })
+assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
 assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "d" }).itcount() )
 assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b", "c" ] } }).itcount() )
 
@@ -112,4 +107,4 @@ coll.drop()
 
 jsTest.log( "Done!" )
 
-st.stop()
+st.stop()
\ No newline at end of file
diff --git a/jstests/sharding/coll_epoch_test2.js b/jstests/sharding/coll_epoch_test2.js
index 20f1fe40774..43528296cc9 100644
--- a/jstests/sharding/coll_epoch_test2.js
+++ b/jstests/sharding/coll_epoch_test2.js
@@ -34,7 +34,8 @@ jsTest.log( "Enabling sharding for the first time..." )
 admin.runCommand({ enableSharding : coll.getDB() + "" })
 admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
 
-assert.writeOK(coll.insert({ hello : "world" }));
+coll.insert({ hello : "world" })
+assert.eq( null, coll.getDB().getLastError() )
 
 jsTest.log( "Sharding collection across multiple shards..." )
 
@@ -80,9 +81,8 @@ assert(droppedCollDoc.lastmodEpoch.equals(new ObjectId("000000000000000000000000
 admin.runCommand({ enableSharding : coll.getDB() + "" })
 admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
 
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ) bulk.insert({ _id : i });
-assert.writeOK(bulk.execute());
+for( var i = 0; i < 100; i++ ) coll.insert({ _id : i })
+assert.eq( null, coll.getDB().getLastError() )
 
 printjson( admin.runCommand({ split : coll + "", middle : { _id : 200 } }) )
 printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 200 },
@@ -100,18 +100,20 @@ assert.neq( null, readMongos.getCollection( coll + "" ).findOne({ _id : 1 }) )
 jsTest.log( "Checking update...")
 
 // Ensure that updating an element finds the right location
-assert.writeOK(updateMongos.getCollection( coll + "" ).update({ _id : 1 },
-                                                              { $set : { updated : true } }));
+updateMongos.getCollection( coll + "" ).update({ _id : 1 }, { $set : { updated : true } })
+assert.eq( null, updateMongos.getDB( coll.getDB() + "" ).getLastError() )
 assert.neq( null, coll.findOne({ updated : true }) )
 
 jsTest.log( "Checking insert..." )
 
 // Ensure that inserting an element finds the right shard
-assert.writeOK(insertMongos.getCollection( coll + "" ).insert({ _id : 101 }));
+insertMongos.getCollection( coll + "" ).insert({ _id : 101 })
+assert.eq( null, insertMongos.getDB( coll.getDB() + "" ).getLastError() )
 assert.neq( null, coll.findOne({ _id : 101 }) )
 
 jsTest.log( "Checking remove..." )
 
 // Ensure that removing an element finds the right shard, verified by the mongos doing the sharding
-assert.writeOK(removeMongos.getCollection( coll + "" ).remove({ _id : 2 }));
+removeMongos.getCollection( coll + "" ).remove({ _id : 2 })
+assert.eq( null, removeMongos.getDB( coll.getDB() + "" ).getLastError() )
 assert.eq( null, coll.findOne({ _id : 2 }) )
 
 coll.drop()
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index 08e80996a85..fadee5d81aa 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -19,16 +19,17 @@ conn.setLogLevel( 3 )
 var coll = conn.getCollection( "test.countSlaveOk" )
 coll.drop()
 
-var bulk = coll.initializeUnorderedBulkOp();
 for( var i = 0; i < 300; i++ ){
-    bulk.insert({ i: i % 10 });
+    coll.insert( { i : i % 10 } )
 }
-assert.writeOK(bulk.execute());
 
 var connA = conn
 var connB = new Mongo( st.s.host )
 var connC = new Mongo( st.s.host )
 
+// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
+coll.getDB().getLastError()
+
 st.printShardingStatus()
 
 // Wait for client to update itself and replication to finish
@@ -63,6 +64,7 @@ try {
     coll.find({ i : 0 }).count()
 
     print( "Should not reach here!" )
+    printjson( coll.getDB().getLastError() )
     assert( false )
 }
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 6ca7e5ff1cb..d0e9751b226 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -16,11 +16,10 @@ primary = s.getServer( "test" ).getDB( "test" );
 secondary = s.getOther( primary ).getDB( "test" );
 
 numObjs = 10;
-var bulk = db.foo.initializeUnorderedBulkOp();
-for (i=0; i < numObjs; i++){
-    bulk.insert({ _id: i });
-}
-assert.writeOK(bulk.execute());
+for (i=0; i < numObjs; i++){
+    db.foo.insert({_id: i});
+}
+db.getLastError();
 assert.eq( 1, s.config.chunks.count() , "test requires collection to have one chunk initially" );
 
 // we'll split the collection in two and move the second chunk while three cursors are open
diff --git a/jstests/sharding/cursor_cleanup.js b/jstests/sharding/cursor_cleanup.js
index 758b60c4e5c..a28b5416e38 100644
--- a/jstests/sharding/cursor_cleanup.js
+++ b/jstests/sharding/cursor_cleanup.js
@@ -24,14 +24,11 @@ st.printShardingStatus(true);
 
 jsTest.log("Insert enough data to overwhelm a query batch.");
 
-var bulk = coll.initializeUnorderedBulkOp();
-var bulk2 = collUnsharded.initializeUnorderedBulkOp();
 for (var i = -150; i < 150; i++) {
-    bulk.insert({ _id : i });
-    bulk2.insert({ _id : i });
+    coll.insert({ _id : i });
+    collUnsharded.insert({ _id : i });
 }
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
+assert.eq(null, coll.getDB().getLastError());
 
 jsTest.log("Open a cursor to a sharded and unsharded collection.");
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 6d8b04aaf16..1e1d791679d 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -105,8 +105,9 @@ s.adminCommand( { split : "test.foo4" , middle : { num : 10 } } );
 s.admin.runCommand({ movechunk: "test.foo4", find: { num: 20 }, to: s.getOther( s.getServer( "test" ) ).name });
 
-assert.writeOK(db.foo4.save( { num : 5 } ));
-assert.writeOK(db.foo4.save( { num : 15 } ));
+db.foo4.save( { num : 5 } );
+db.foo4.save( { num : 15 } );
+db.getLastError();
 s.sync();
 assert.eq( 1 , a.foo4.count() , "ua1" );
 assert.eq( 1 , b.foo4.count() , "ub1" );
@@ -120,7 +121,9 @@ assert( b.foo4.getIndexes()[1].unique , "ub3" );
 assert.eq( 2 , db.foo4.count() , "uc1" )
 db.foo4.save( { num : 7 } )
 assert.eq( 3 , db.foo4.count() , "uc2" )
-assert.writeError(db.foo4.save( { num : 7 } ));
+db.foo4.save( { num : 7 } )
+gle = db.getLastErrorObj();
+assert( gle.err , "uc3" )
 assert.eq( 3 , db.foo4.count() , "uc4" )
 
 // --- don't let you convertToCapped ----
@@ -178,13 +181,15 @@ assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count
 
 // ---- can't shard non-empty collection without index -----
 
-assert.writeOK(db.foo8.save( { a : 1 } ));
+db.foo8.save( { a : 1 } );
+db.getLastError();
 assert( ! s.admin.runCommand( { shardcollection : "test.foo8" , key : { a : 1 } } ).ok , "non-empty collection" );
 
 // ---- can't shard non-empty collection with null values in shard key ----
 
-assert.writeOK(db.foo9.save( { b : 1 } ));
+db.foo9.save( { b : 1 } );
+db.getLastError();
 db.foo9.ensureIndex( { a : 1 } );
 assert( ! s.admin.runCommand( { shardcollection : "test.foo9" , key : { a : 1 } } ).ok , "entry with null value" );
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 4b9843a724c..2a32218b717 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -60,6 +60,7 @@ assert.eq( 0 , db.foo.count() , "D7" );
 db.foo2.save( { _id : new ObjectId() } );
 db.foo2.save( { _id : new ObjectId() } );
 db.foo2.save( { _id : new ObjectId() } );
+db.getLastError();
 
 assert.eq( 1 , s.onNumShards( "foo2" ) , "F1" );
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index 3b6b114a781..83171d86f40 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -30,11 +30,10 @@ s.adminCommand({moveChunk: "test.foo", find: {_id: 3},
 s.setBalancer(true)
 
 // insert 10k small documents into the sharded collection
-var bulk = db.foo.initializeUnorderedBulkOp();
 for (i = 0; i < numDocs; i++)
-    bulk.insert({ _id: i });
-assert.writeOK(bulk.execute());
+    db.foo.insert({_id: i});
+db.getLastError();
 
 var x = db.foo.stats();
 
 // verify the colleciton has been sharded and documents are evenly distributed
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index 058016e22c1..c7670957ebd 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -16,11 +16,10 @@ s.adminCommand( { shardcollection : "test.stuff" , key : {_id:1} } );
 s.adminCommand( { split: "test.stuff" , middle : { _id : numObjs/2 } } );
 s.adminCommand( { movechunk : "test.stuff" , find : { _id : numObjs/2 } , to : secondary.getMongo().name } ) ;
 
-var bulk = db.stuff.initializeUnorderedBulkOp();
 for (var i=0; i < numObjs; i++){
-    bulk.insert({_id: i});
+    db.stuff.insert({_id: i});
 }
-assert.writeOK(bulk.execute());
+db.getLastError()
 
 // put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
 for (var i=2; i < numObjs; i+=2){
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index ad7b1688ca2..542818c7167 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -45,6 +45,7 @@ function via_fam() {
             { big: big }
         }});
     }
+    db.getLastError();
 }
 
 // upsert via findAndModify
@@ -54,6 +55,7 @@ function via_fam_upsert() {
             { big: big }
        }, upsert: true});
     }
+    db.getLastError();
 }
 
 // update data using basic update
@@ -67,6 +69,7 @@ function via_update() {
             { big: big }
        });
     }
+    db.getLastError();
 }
 
 // upsert data using basic update
@@ -76,6 +79,7 @@ function via_update_upsert() {
             { big: big }
        }, true);
     }
+    db.getLastError();
 }
 
 print("---------- Update via findAndModify...");
diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js
index e79793580a1..54eeb88d9b5 100644
--- a/jstests/sharding/forget_mr_temp_ns.js
+++ b/jstests/sharding/forget_mr_temp_ns.js
@@ -11,11 +11,10 @@ var admin = mongos.getDB( "admin" );
 var coll = mongos.getCollection( "foo.bar" );
 var outputColl = mongos.getCollection( (coll.getDB() + "") + ".mrOutput" );
 
-var bulk = coll.initializeUnorderedBulkOp();
 for ( var i = 0; i < 10; i++ ) {
-    bulk.insert({ _id : i, even : (i % 2 == 0) });
+    coll.insert({ _id : i, even : (i % 2 == 0) });
 }
-assert.writeOK(bulk.execute());
+assert.eq( null, coll.getDB().getLastError() );
 
 var map = function() { emit( this.even, 1 ); };
 var reduce = function( key, values ) { return Array.sum(values); };
diff --git a/jstests/sharding/fts_score_sort_sharded.js b/jstests/sharding/fts_score_sort_sharded.js
index 3ff699ac9b9..3136401be1c 100644
--- a/jstests/sharding/fts_score_sort_sharded.js
+++ b/jstests/sharding/fts_score_sort_sharded.js
@@ -25,11 +25,13 @@ assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(),
 //
 // Insert documents into collection and create text index.
 //
-assert.writeOK(coll.insert({ _id: 1, a: "pizza" }));
-assert.writeOK(coll.insert({ _id: -1, a: "pizza pizza" }));
-assert.writeOK(coll.insert({ _id: 2, a: "pizza pizza pizza" }));
-assert.writeOK(coll.insert({ _id: -2, a: "pizza pizza pizza pizza"}));
-assert.commandWorked(coll.ensureIndex({ a: "text" }));
+coll.insert({_id: 1, a: "pizza"});
+coll.insert({_id: -1, a: "pizza pizza"});
+coll.insert({_id: 2, a: "pizza pizza pizza"});
+coll.insert({_id: -2, a: "pizza pizza pizza pizza"});
+assert.gleSuccess(coll.getDB());
+coll.ensureIndex({a: "text"});
+assert.gleSuccess(coll.getDB());
 
 //
 // Execute query with sort on document score, verify results are in correct order.
diff --git a/jstests/sharding/geo_shardedgeonear.js b/jstests/sharding/geo_shardedgeonear.js
index 85aef9eb894..269531f4caa 100644
--- a/jstests/sharding/geo_shardedgeonear.js
+++ b/jstests/sharding/geo_shardedgeonear.js
@@ -26,11 +26,13 @@ function test(db, sharded, indexType) {
     for (var i=0; i < numPts; i++) {
         var lat = 90 - Random.rand() * 180;
         var lng = 180 - Random.rand() * 360;
-        assert.writeOK(db[coll].insert({rand:Math.random(), loc: [lng, lat]}));
+        db[coll].insert({rand:Math.random(), loc: [lng, lat]})
+        assert.eq(null, db.getLastError());
     }
     assert.eq(db[coll].count(), numPts);
 
-    assert.commandWorked(db[coll].ensureIndex({ loc: indexType }));
+    db[coll].ensureIndex({loc: indexType})
+    assert(!db.getLastError());
 
     var queryPoint = [0,0]
     geoCmd = {geoNear: coll, near: queryPoint, spherical: true, includeLocs: true};
diff --git a/jstests/gle/gle_sharded_wc.js b/jstests/sharding/gle_sharded_wc.js
index 54050acb141..7c5055ceaa7 100644
--- a/jstests/gle/gle_sharded_wc.js
+++ b/jstests/sharding/gle_sharded_wc.js
@@ -111,8 +111,15 @@ assert.eq(coll.count({ _id : 1 }), 1);
 // NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
 // successful writes from.
 coll.remove({ _id : 1 });
-coll.insert([{ _id : 1 }, { _id : -1 }]);
-
+// The insert throws if write commands are enabled, since we get a response
+if ( coll.getMongo().useWriteCommands() ) {
+    assert.throws( function() {
+        coll.insert([{ _id : 1 }, { _id : -1 }]);
+    });
+}
+else {
+    coll.insert([{ _id : 1 }, { _id : -1 }]);
+}
 printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
 assert(gle.ok);
 assert(gle.err);
diff --git a/jstests/gle/gle_sharded_write.js b/jstests/sharding/gle_sharded_write.js
index 0f602a5e4d7..e4b135178a3 100644
--- a/jstests/gle/gle_sharded_write.js
+++ b/jstests/sharding/gle_sharded_write.js
@@ -164,7 +164,15 @@ assert(gle.errmsg);
 // NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
 // successful writes from.
 coll.remove({ _id : 1 });
-coll.insert([{ _id : 1 }, { _id : -1 }]);
+// The insert throws if write commands are enabled, since we get a response
+if ( coll.getMongo().useWriteCommands() ) {
+    assert.throws( function() {
+        coll.insert([{ _id : 1 }, { _id : -1 }]);
+    });
+}
+else {
+    coll.insert([{ _id : 1 }, { _id : -1 }]);
+}
 printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
 assert(gle.ok);
 assert(gle.err);
diff --git a/jstests/sharding/conf_server_write_concern.js b/jstests/sharding/gle_with_conf_servers.js
index 30002ae9f46..f9b54678078 100644
--- a/jstests/sharding/conf_server_write_concern.js
+++ b/jstests/sharding/gle_with_conf_servers.js
@@ -1,25 +1,29 @@
 /**
- * Test write concern with w parameter when writing directly to the config servers will
+ * Test getLastError with w parameter when writing directly to the config servers will
  * not cause an error.
 */
 function writeToConfigTest(){
     var st = new ShardingTest({ shards: 2 });
     var confDB = st.s.getDB( 'config' );
 
-    assert.writeOK(confDB.settings.update({ _id: 'balancer' },
-                                          { $set: { stopped: true }},
-                                          { writeConcern: { w: 'majority' }}));
+    confDB.settings.update({ _id: 'balancer' }, { $set: { stopped: true }});
+    var gleObj = confDB.runCommand({ getLastError: 1, w: 'majority' });
+
+    assert( gleObj.ok );
+    assert.eq(null, gleObj.err);
 
     // w:1 should still work
-    assert.writeOK(confDB.settings.update({ _id: 'balancer' },
-                                          { $set: { stopped: true }},
-                                          { writeConcern: { w: 1 }}));
+    confDB.settings.update({ _id: 'balancer' }, { $set: { stopped: true }});
+    var gleObj = confDB.runCommand({ getLastError: 1, w: 1 });
+
+    assert(gleObj.ok);
+    assert.eq(null, gleObj.err);
 
     st.stop();
 }
 
 /**
- * Test write concern with w parameter will not cause an error when writes to mongos
+ * Test getLastError with w parameter will not cause an error when writes to mongos
 * would trigger writes to config servers (in this test, split chunks is used).
 */
 function configTest( configCount ){
@@ -43,11 +47,15 @@ function configTest( configCount ){
 
     var x = 0;
     while( currChunks <= initChunks ){
-        assert.writeOK(coll.insert({ x: x++ }, { writeConcern: { w: 'majority' }}));
+        coll.insert({ x: x++ });
+        gleObj = testDB.runCommand({ getLastError: 1, w: 'majority' });
        currChunks = chunkCount();
    }
 
-    st.stop();
+    assert( gleObj.ok );
+    assert.eq( null, gleObj.err );
+
+    st.stop();
}

writeToConfigTest();
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index d21490316ff..3b7cec4910f 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -17,11 +17,12 @@ conn.setLogLevel( 3 )
 var coll = conn.getCollection( "test.groupSlaveOk" )
 coll.drop()
 
-var bulk = coll.initializeUnorderedBulkOp();
 for( var i = 0; i < 300; i++ ){
-    bulk.insert( { i : i % 10 } );
+    coll.insert( { i : i % 10 } )
 }
-assert.writeOK( bulk.execute() );
+
+// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
+coll.getDB().getLastError()
 
 st.printShardingStatus()
 
@@ -50,15 +51,17 @@ assert.eq( 10, coll.group({ key : { i : true } ,
 try {
     conn.setSlaveOk( false )
-    var res = coll.group({ key : { i : true } ,
-                           reduce : function( obj, ctx ){ ctx.count += 1 } ,
-                           initial : { count : 0 } });
-
-    print( "Should not reach here! Group result: " + tojson(res) );
-    assert( false );
+    coll.group({ key : { i : true } ,
+                 reduce : function( obj, ctx ){ ctx.count += 1 } ,
+                 initial : { count : 0 } })
+
+    print( "Should not reach here!" )
) + printjson( coll.getDB().getLastError() ) + assert( false ) + } catch( e ){ - print( "Non-slaveOk'd connection failed." + tojson(e) ) + print( "Non-slaveOk'd connection failed." ) } // Finish diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js index 832cb93600f..ec8e6063aa0 100644 --- a/jstests/sharding/hash_shard_unique_compound.js +++ b/jstests/sharding/hash_shard_unique_compound.js @@ -22,7 +22,8 @@ assert.commandWorked(db.adminCommand( { shardcollection : ns , key : { a : "hash db.printShardingStatus(); // Create unique index -assert.commandWorked(coll.ensureIndex({ a: 1, b: 1 }, { unique: true })); +coll.ensureIndex({a:1, b:1}, {unique:true}) +assert.gleSuccess(db, "unique index failed"); jsTest.log("------ indexes -------") jsTest.log(tojson(coll.getIndexes())); @@ -32,7 +33,8 @@ jsTest.log("------ dropping sharded collection to start part 2 -------") coll.drop(); //Create unique index -assert.commandWorked(coll.ensureIndex({ a: 1, b: 1 }, { unique: true })); +coll.ensureIndex({a:1, b:1}, {unique:true}) +assert.gleSuccess(db, "unique index failed 2"); // shard a fresh collection using a hashed shard key assert.commandWorked(db.adminCommand( { shardcollection : ns , key : { a : "hashed" } } ), diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js index 2767c18dab3..b66d0c405c7 100644 --- a/jstests/sharding/index1.js +++ b/jstests/sharding/index1.js @@ -4,181 +4,180 @@ s = new ShardingTest( "shard_index", 2, 0, 1 ) // Regenerate fully because of SERVER-2782 for ( var i = 0; i < 19; i++ ) { - - var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i ) - coll.drop() - - var bulk = coll.initializeUnorderedBulkOp(); - for ( var j = 0; j < 300; j++ ) { - bulk.insert({ num: j, x: 1 }); - } - assert.writeOK(bulk.execute()); - - if(i == 0) s.adminCommand( { enablesharding : "" + coll._db } ); - - print("\n\n\n\n\nTest # " + i) - - if ( i == 0 ) { - - // Unique index exists, but not the right one. - coll.ensureIndex( { num : 1 }, { unique : true } ) - coll.ensureIndex( { x : 1 } ) - - passed = false - try { - s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } ) - passed = true - } catch (e) { - print( e ) - } - assert( !passed, "Should not shard collection when another unique index exists!") - - } - if ( i == 1 ) { - - // Unique index exists as prefix, also index exists - coll.ensureIndex( { x : 1 } ) - coll.ensureIndex( { x : 1, num : 1 }, { unique : true } ) - - try{ - s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } }) - } - catch(e){ - print(e) - assert( false, "Should be able to shard non-unique index without unique option.") - } - - } - if ( i == 2 ) { - // Non-unique index exists as prefix, also index exists. No unique index. - coll.ensureIndex( { x : 1 } ) - coll.ensureIndex( { x : 1, num : 1 } ) + + var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i ) + coll.drop() + + for ( var j = 0; j < 300; j++ ) { + coll.insert( { num : j, x : 1 } ) + } + assert.eq( null, coll.getDB().getLastError() ); + + if(i == 0) s.adminCommand( { enablesharding : "" + coll._db } ); + + print("\n\n\n\n\nTest # " + i) + + if ( i == 0 ) { + + // Unique index exists, but not the right one. 
+ coll.ensureIndex( { num : 1 }, { unique : true } ) + coll.ensureIndex( { x : 1 } ) + + passed = false + try { + s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } ) + passed = true + } catch (e) { + print( e ) + } + assert( !passed, "Should not shard collection when another unique index exists!") + + } + if ( i == 1 ) { + + // Unique index exists as prefix, also index exists + coll.ensureIndex( { x : 1 } ) + coll.ensureIndex( { x : 1, num : 1 }, { unique : true } ) + + try{ + s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } }) + } + catch(e){ + print(e) + assert( false, "Should be able to shard non-unique index without unique option.") + } + + } + if ( i == 2 ) { + // Non-unique index exists as prefix, also index exists. No unique index. + coll.ensureIndex( { x : 1 } ) + coll.ensureIndex( { x : 1, num : 1 } ) passed = false; - try{ - s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } }) + try{ + s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } }) passed = true; - } - catch( e ){ - print(e) - assert( !passed, "Should be able to shard collection with no unique index if unique not specified.") - } - } - if ( i == 3 ) { - - // Unique index exists as prefix, also unique index exists - coll.ensureIndex( { num : 1 }, { unique : true }) - coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } ) - - try{ - s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true }) - } - catch( e ){ - print(e) - assert( false, "Should be able to shard collection with unique prefix index.") - } - - } - if ( i == 4 ) { - - // Unique index exists as id, also unique prefix index exists - coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } ) - - try{ - s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true }) - } - catch( e ){ - print(e) - assert( false, "Should be able to shard collection with unique id index.") - } - - } - if ( i == 5 ) { - - // Unique index exists as id, also unique prefix index exists - coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } ) - - try{ - s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true }) - } - catch( e ){ - print(e) - assert( false, "Should be able to shard collection with unique combination id index.") - } - - } - if ( i == 6 ) { - - coll.remove({}) - - // Unique index does not exist, also unique prefix index exists - coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } ) - - try{ - s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true }) - } - catch( e ){ - print(e) - assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.") - } - + } + catch( e ){ + print(e) + assert( !passed, "Should be able to shard collection with no unique index if unique not specified.") + } + } + if ( i == 3 ) { + + // Unique index exists as prefix, also unique index exists + coll.ensureIndex( { num : 1 }, { unique : true }) + coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } ) + + try{ + s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true }) + } + catch( e ){ + print(e) + assert( false, "Should be able to shard collection with unique prefix index.") + } + + } + if ( i == 4 ) { + + // Unique index exists as id, also unique prefix index exists + coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } ) + + try{ + s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true }) + } + catch( e ){ + print(e) + assert( false, "Should be 
able to shard collection with unique id index.") + } + + } + if ( i == 5 ) { + + // Unique index exists as id, also unique prefix index exists + coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } ) + + try{ + s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true }) + } + catch( e ){ + print(e) + assert( false, "Should be able to shard collection with unique combination id index.") + } + + } + if ( i == 6 ) { + + coll.remove({}) + + // Unique index does not exist, also unique prefix index exists + coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } ) + + try{ + s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true }) + } + catch( e ){ + print(e) + assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.") + } + printjson( coll.getIndexes() ) // Make sure the index created is unique! assert.eq( 1, coll.getDB().getCollection( "system.indexes" ).find( { ns : "" + coll, key : { num : 1 }, unique : true } ).itcount() ) - - } + + } if ( i == 7 ) { - coll.remove({}) + coll.remove({}) - // No index exists + // No index exists - try{ - assert.eq( coll.find().itcount(), 0 ) - s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } }) - } - catch( e ){ - print(e) + try{ + assert.eq( coll.find().itcount(), 0 ) + s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } }) + } + catch( e ){ + print(e) assert( false, "Should be able to shard collection with no index on shard key.") - } - } + } + } if ( i == 8 ) { - coll.remove({}) + coll.remove({}) - // No index exists + // No index exists passed = false - try{ - assert.eq( coll.find().itcount(), 0 ) - s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true }) + try{ + assert.eq( coll.find().itcount(), 0 ) + s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true }) passed = true - } - catch( e ){ - print(e) - } + } + catch( e ){ + print(e) + } assert( passed, "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.") printjson( coll.getIndexes() ) // Make sure the index created is unique! assert.eq( 1, coll.getDB().getCollection( "system.indexes" ).find( { ns : "" + coll, key : { num : 1 }, unique : true } ).itcount() ) - } + } if ( i == 9 ) { - // Unique index exists on a different field as well - coll.ensureIndex( { num : 1 }, { unique : true } ) - coll.ensureIndex( { x : 1 } ) + // Unique index exists on a different field as well + coll.ensureIndex( { num : 1 }, { unique : true } ) + coll.ensureIndex( { x : 1 } ) passed = false - try { - s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } ) + try { + s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } ) passed = true - } catch (e) { - print( e ) - } + } catch (e) { + print( e ) + } assert( !passed, "Should not shard collection when another unique index exists!" 
) - } + } if ( i == 10 ){ //try sharding non-empty collection without any index @@ -195,14 +194,14 @@ for ( var i = 0; i < 19; i++ ) { //now add containing index and try sharding by prefix coll.ensureIndex( {num : 1, x : 1} ); - try{ - s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } ); - passed = true; - } - catch( e ){ - print(e); - } - assert( passed , "Should be able to shard collection with prefix of existing index"); + try{ + s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } ); + passed = true; + } + catch( e ){ + print(e); + } + assert( passed , "Should be able to shard collection with prefix of existing index"); printjson( coll.getIndexes() ); diff --git a/jstests/sharding/inserts_consistent.js b/jstests/sharding/inserts_consistent.js index 288cb7a9b44..59fb5d39a98 100644 --- a/jstests/sharding/inserts_consistent.js +++ b/jstests/sharding/inserts_consistent.js @@ -1,4 +1,5 @@ -// Test write re-routing on version mismatch. +// Shows how the WBL / getLastError logic depends on which chunks are actually moved. +// We should probably either always wait for writebacks, or not at all. var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 2, other : { separateConfig : true } }) @@ -38,22 +39,45 @@ print( "Other shard : " + otherShard ) printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : otherShard }) ) -jsTest.log( "Inserting docs that needs to be retried..." ) +jsTest.log( "Inserting docs to be written back..." ) var nextId = -1 +// Create writebacks, could add more here for( var i = 0; i < 2; i++ ){ printjson( "Inserting " + nextId ) - assert.writeOK(collB.insert({ _id : nextId--, hello : "world" })); + collB.insert({ _id : nextId--, hello : "world" }) } +// Refresh server +printjson( adminB.runCommand({ flushRouterConfig : 1 }) ) + jsTest.log( "Inserting doc which successfully goes through..." ) // Do second write -assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" })) +collB.insert({ _id : nextId--, goodbye : "world" }) + +printjson( collB.getDB().getLastErrorObj() ) + +// Get error for last write +//assert.eq( null, collB.getDB().getLastError() ) -// Assert that write went through +jsTest.log( "GLE waited for the writebacks." ) + +// Assert that we waited for the writebacks... assert.eq( coll.find().itcount(), 3 ) +/* + +jsTest.log( "Waiting for the writeback..." ) + +assert.soon(function(){ + var count = coll.find().itcount() + print( "Count is : " + count ) + return count == 3 +}) + +*/ + jsTest.log( "Now try moving the actual chunk we're writing to..." ) // Now move the actual chunk we're writing to @@ -61,7 +85,7 @@ printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : o jsTest.log( "Inserting second docs to get written back..." ) -// Will fail entirely if too many of these, waiting for write to get applied can get too long. +// Will fail entirely if too many of these; GLE will wait too long. for( var i = 0; i < 2; i++ ){ collB.insert({ _id : nextId--, hello : "world" }) } @@ -72,13 +96,18 @@ printjson( adminB.runCommand({ flushRouterConfig : 1 }) ) jsTest.log( "Inserting second doc which successfully goes through..." ) // Do second write -assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" })); +collB.insert({ _id : nextId--, goodbye : "world" }) + +jsTest.log( "GLE is now waiting for the writeback!" ) + +// Get error for last write +assert.eq( null, collB.getDB().getLastError() ) jsTest.log( "All docs written this time!" ) -// Assert that writes went through. 
+// Assert that we now waited for the writeback assert.eq( coll.find().itcount(), 6 ) jsTest.log( "DONE" ) -st.stop() +st.stop()
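The inserts_consistent.js change above treats getLastError as a barrier for writebacks: an insert routed with a stale chunk map is queued by the WBL, and the next GLE on that connection blocks until it has been reapplied. A minimal sketch of that pattern, assuming a ShardingTest `st` started with two mongoses and a sharded collection test.foo (names hypothetical, not part of the patch):

    // Write through a possibly-stale mongos, then use GLE as a writeback barrier.
    var staleColl = new Mongo(st.s1.host).getCollection("test.foo");
    staleColl.insert({ _id: -100, hello: "world" });    // may be queued as a writeback
    assert.eq(null, staleColl.getDB().getLastError());  // blocks until the writeback applies
    assert.neq(null, staleColl.findOne({ _id: -100 })); // doc is visible afterwards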
\ No newline at end of file diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js index ccf31e83f2a..a344ffd481d 100644 --- a/jstests/sharding/jumbo1.js +++ b/jstests/sharding/jumbo1.js @@ -14,17 +14,17 @@ while ( big.length < 10000 ) big += "." x = 0; -var bulk = db.foo.initializeUnorderedBulkOp(); for ( ; x < 500; x++ ) - bulk.insert( { x : x , big : big } ); + db.foo.insert( { x : x , big : big } ) for ( i=0; i<500; i++ ) - bulk.insert( { x : x , big : big } ); + db.foo.insert( { x : x , big : big } ) for ( ; x < 2000; x++ ) - bulk.insert( { x : x , big : big } ); + db.foo.insert( { x : x , big : big } ) -assert.writeOK( bulk.execute() ); + +db.getLastError(); sh.status(true) diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js index 4becd6746ce..f40d0aa3e8b 100644 --- a/jstests/sharding/key_many.js +++ b/jstests/sharding/key_many.js @@ -137,12 +137,12 @@ for ( var i=0; i<types.length; i++ ){ assert.eq( 1 , c.find( { xx : { $exists : true } } ).count() , curT.name + " xx 2 " ); assert.eq( curT.values[3] , getKey( c.findOne( { xx : 17 } ) ) , curT.name + " xx 3 " ); - assert.writeOK(c.update( makeObjectDotted( curT.values[3] ), - { $set: { xx: 17 }}, - { upsert: true })); - - assert.commandWorked(c.ensureIndex( { _id: 1 } , { unique: true } )); + c.update( makeObjectDotted( curT.values[3] ) , { $set : { xx : 17 } } , {upsert: true}); + assert.eq( null , db.getLastError() , curT.name + " upserts should work if they include the shard key in the query" ); + c.ensureIndex( { _id : 1 } , { unique : true } ); + assert.eq( null , db.getLastError() , curT.name + " creating _id index should be ok" ); + // multi update var mysum = 0; c.find().forEach( function(z){ mysum += z.xx || 0; } ); diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js index 45a9784788f..e32ab5ab82b 100644 --- a/jstests/sharding/listDatabases.js +++ b/jstests/sharding/listDatabases.js @@ -14,9 +14,11 @@ var getDBSection = function (dbsArray, dbToFind) { return null; } -assert.writeOK(mongos.getDB("blah").foo.insert({ _id: 1 })); -assert.writeOK(mongos.getDB("foo").foo.insert({ _id: 1 })); -assert.writeOK(mongos.getDB("raw").foo.insert({ _id: 1 })); +mongos.getDB("blah").foo.insert({_id:1}) +mongos.getDB("foo").foo.insert({_id:1}) +mongos.getDB("raw").foo.insert({_id:1}) +//wait for writes to finish +mongos.getDB("raw").getLastError() //verify that the config db is not on a shard var res = mongos.adminCommand("listDatabases"); @@ -41,4 +43,4 @@ assert(!getDBSection(dbArray, "config").shards, "config db is on a shard! 2") assert(getDBSection(dbArray, "admin"), "admin db not found! 2") assert(!getDBSection(dbArray, "admin").shards, "admin db is on a shard! 2") -test.stop() +test.stop()
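A note on the listDatabases.js hunk above: three databases are written, but only one getLastError call follows. That is enough as a barrier because the legacy shell sends these fire-and-forget writes over a single ordered connection to the mongos, so a GLE on any database drains the whole pipeline; it only reports the last operation's error, though, so a failure in one of the earlier inserts would go unnoticed. The idea, reduced to a sketch with hypothetical names:

    // One GLE after several fire-and-forget inserts acts as a barrier for all of
    // them, but only surfaces an error from the last operation on the connection.
    var conn = new Mongo(mongos.host);
    conn.getDB("dbA").coll.insert({ _id: 1 });
    conn.getDB("dbB").coll.insert({ _id: 1 });
    assert.eq(null, conn.getDB("dbB").getLastError()); // both inserts applied by now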
\ No newline at end of file diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js index 33b4d167361..28f3a963239 100644 --- a/jstests/sharding/localhostAuthBypass.js +++ b/jstests/sharding/localhostAuthBypass.js @@ -41,10 +41,18 @@ var assertCannotRunCommands = function(mongo, st) { // CRUD var test = mongo.getDB("test"); assert.throws( function() { test.system.users.findOne(); }); - assert.writeError(test.foo.save({ _id: 0 })); + + test.foo.save({_id:0}); + assert(test.getLastError()); + assert.throws( function() { test.foo.findOne({_id:0}); }); - assert.writeError(test.foo.update({ _id: 0 }, { $set: { x: 20 }})); - assert.writeError(test.foo.remove({ _id: 0 })); + + test.foo.update({_id:0}, {$set:{x:20}}); + assert(test.getLastError()); + + test.foo.remove({_id:0}); + assert(test.getLastError()); + // Multi-shard assert.throws(function() { @@ -78,10 +86,15 @@ var assertCanRunCommands = function(mongo, st) { // this will throw if it fails test.system.users.findOne(); - assert.writeOK(test.foo.save({ _id: 0 })); - assert.writeOK(test.foo.update({ _id: 0 }, { $set: { x: 20 }})); - assert.writeOK(test.foo.remove({ _id: 0 })); - + test.foo.save({_id: 0}); + assert(test.getLastError() == null); + + test.foo.update({_id: 0}, {$set:{x:20}}); + assert(test.getLastError() == null); + + test.foo.remove({_id: 0}); + assert(test.getLastError() == null); + // Multi-shard test.foo.mapReduce( function() { emit(1, 1); }, diff --git a/jstests/sharding/mapReduce.js b/jstests/sharding/mapReduce.js index 60b0d6c0a68..55168036fa7 100644 --- a/jstests/sharding/mapReduce.js +++ b/jstests/sharding/mapReduce.js @@ -14,21 +14,9 @@ s.adminCommand( { enablesharding : "mrShard" } ) s.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } ) db = s.getDB( "mrShard" ); -var bulk = db.srcNonSharded.initializeUnorderedBulkOp(); -for (j = 0; j < 100; j++) { - for (i = 0; i < 512; i++) { - bulk.insert({ j: j, i: i }); - } -} -assert.writeOK(bulk.execute()); - -bulk = db.srcSharded.initializeUnorderedBulkOp(); -for (j = 0; j < 100; j++) { - for (i = 0; i < 512; i++) { - bulk.insert({ j: j, i: i }); - } -} -assert.writeOK(bulk.execute()); +for (j=0; j<100; j++) for (i=0; i<512; i++){ db.srcNonSharded.save({j:j, i:i})} +for (j=0; j<100; j++) for (i=0; i<512; i++){ db.srcSharded.save({j:j, i:i})} +db.getLastError(); function map() { emit(this.i, 1); } function reduce(key, values) { return Array.sum(values) } diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js index 76f8277cad2..cc6098f3eea 100644 --- a/jstests/sharding/max_time_ms_sharded.js +++ b/jstests/sharding/max_time_ms_sharded.js @@ -50,11 +50,10 @@ assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(), // // Insert 100 documents into sharded collection, such that each shard owns 50. 
// -var bulk = coll.initializeUnorderedBulkOp(); for (i=-50; i<50; i++) { - bulk.insert({ _id: i }); + coll.insert({_id: i}); } -assert.writeOK(bulk.execute()); +assert.eq(null, coll.getDB().getLastError()); assert.eq(50, shards[0].getCollection(coll.getFullName()).count()); assert.eq(50, shards[1].getCollection(coll.getFullName()).count()); diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js index a74822d1952..22d0e8fc0fa 100644 --- a/jstests/sharding/merge_chunks_test.js +++ b/jstests/sharding/merge_chunks_test.js @@ -35,9 +35,10 @@ assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 90 }, to : shar st.printShardingStatus(); // Insert some data into each of the consolidated ranges -assert.writeOK(coll.insert({ _id : 0 })); -assert.writeOK(coll.insert({ _id : 40 })); -assert.writeOK(coll.insert({ _id : 110 })); +coll.insert({ _id : 0 }); +coll.insert({ _id : 40 }); +coll.insert({ _id : 110 }); +assert.eq( null, coll.getDB().getLastError() ); var staleCollection = staleMongos.getCollection( coll + "" ); diff --git a/jstests/sharding/merge_chunks_test_with_data.js b/jstests/sharding/merge_chunks_test_with_data.js index 3520ea760da..0f057787454 100644 --- a/jstests/sharding/merge_chunks_test_with_data.js +++ b/jstests/sharding/merge_chunks_test_with_data.js @@ -31,10 +31,11 @@ assert( admin.runCommand({ split : coll + "", middle : { _id : 60 } }).ok ); st.printShardingStatus(); // Insert data to allow 0->20 and 40->60 to be merged, but too much for 20->40 -assert.writeOK(coll.insert({ _id : 0 })); -assert.writeOK(coll.insert({ _id : 20 })); -assert.writeOK(coll.insert({ _id : 30 })); -assert.writeOK(coll.insert({ _id : 40 })); +coll.insert({ _id : 0 }); +coll.insert({ _id : 20 }); +coll.insert({ _id : 30 }); +coll.insert({ _id : 40 }); +assert.eq( null, coll.getDB().getLastError() ); jsTest.log( "Merging chunks with another empty chunk..." ); diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js index bf28cad6ea7..2c4d156d760 100644 --- a/jstests/sharding/migrateBig.js +++ b/jstests/sharding/migrateBig.js @@ -11,11 +11,9 @@ big = "" while ( big.length < 10000 ) big += "eliot" -var bulk = coll.initializeUnorderedBulkOp(); -for ( x=0; x<100; x++ ) { - bulk.insert( { x : x , big : big } ); -} -assert.writeOK(bulk.execute()); +for ( x=0; x<100; x++ ) + coll.insert( { x : x , big : big } ) +db.getLastError(); db.printShardingStatus() @@ -32,7 +30,8 @@ print( "direct : " + direct ) directDB = direct.getDB( "test" ) for ( done=0; done<2*1024*1024; done+=big.length ){ - assert.writeOK(directDB.foo.insert( { x : 50 + Math.random() , big : big } )); + directDB.foo.insert( { x : 50 + Math.random() , big : big } ) + directDB.getLastError(); } db.printShardingStatus() diff --git a/jstests/sharding/migrateMemory.js b/jstests/sharding/migrateMemory.js index be8b76ad272..5791ddabff5 100644 --- a/jstests/sharding/migrateMemory.js +++ b/jstests/sharding/migrateMemory.js @@ -1,4 +1,3 @@ -// TODO: move back to sharding suite after SERVER-13402 is fixed s = new ShardingTest( "migrateMemory" , 2 , 1 , 1 , { chunksize : 1 }); diff --git a/jstests/sharding/migrate_overwrite_id.js b/jstests/sharding/migrate_overwrite_id.js index 21439247772..0a8b2252179 100644 --- a/jstests/sharding/migrate_overwrite_id.js +++ b/jstests/sharding/migrate_overwrite_id.js @@ -24,8 +24,11 @@ var id = 12345; jsTest.log( "Inserting a document with id : 12345 into both shards with diff shard key..." 
); -assert.writeOK(coll.insert({ _id : id, skey : -1 })); -assert.writeOK(coll.insert({ _id : id, skey : 1 })); +coll.insert({ _id : id, skey : -1 }); +assert.eq( null, coll.getDB().getLastError() ); + +coll.insert({ _id : id, skey : 1 }); +assert.eq( null, coll.getDB().getLastError() ); printjson( shards[0].conn.getCollection( coll + "" ).find({ _id : id }).toArray() ); printjson( shards[1].conn.getCollection( coll + "" ).find({ _id : id }).toArray() ); @@ -40,4 +43,4 @@ printjson( shards[0].conn.getCollection( coll + "" ).find({ _id : id }).toArray( printjson( shards[1].conn.getCollection( coll + "" ).find({ _id : id }).toArray() ); assert.eq( 2, coll.find({ _id : id }).itcount() ); -st.stop(); +st.stop();
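The migrate_overwrite_id.js test above relies on _id uniqueness being enforced per shard only: with a non-_id shard key, the same _id can legitimately exist once on each shard. The invariant being exercised, as a sketch (assumes the collection is sharded on skey with a chunk boundary at 0, as in the test):

    // Both inserts succeed because skey routes them to different shards.
    coll.insert({ _id: 12345, skey: -1 });
    coll.insert({ _id: 12345, skey: 1 });
    assert.eq(null, coll.getDB().getLastError());
    assert.eq(2, coll.find({ _id: 12345 }).itcount()); // one copy per shard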
\ No newline at end of file diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js index 6a9d1dc63b8..d5e9b53bf34 100644 --- a/jstests/sharding/mongos_no_detect_sharding.js +++ b/jstests/sharding/mongos_no_detect_sharding.js @@ -29,15 +29,14 @@ print( "Seeing if data gets inserted unsharded..." ) print( "No splits occur here!" ) // Insert a bunch of data which should trigger a split -var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < 100; i++ ){ - bulk.insert({ i : i + 1 }); + coll.insert({ i : i + 1 }) } -assert.writeOK(bulk.execute()); +coll.getDB().getLastError() config.printShardingStatus( true ) assert.eq( coll.getShardVersion().ok, 1 ) assert.eq( 101, coll.find().itcount() ) -st.stop() +st.stop()
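In the mongos_no_detect_sharding.js hunk above, coll.getDB().getLastError() is called without checking its result, so it serves purely as a barrier that flushes the inserts before the shard-version check. If the writes themselves should also be verified, a stricter variant would look like this sketch (not what the test does):

    for (var i = 0; i < 100; i++) {
        coll.insert({ i: i + 1 });
    }
    // Barrier plus check; note GLE only reports the last operation's error.
    assert.eq(null, coll.getDB().getLastError(), "inserts did not complete cleanly");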
\ No newline at end of file diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js index 5b8f930f8bd..f961f00d5b8 100644 --- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js +++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js @@ -29,8 +29,10 @@ var collSharded = mongos.getCollection( "fooSharded.barSharded" ); var collUnsharded = mongos.getCollection( "fooUnsharded.barUnsharded" ); // Create the unsharded database with shard0 primary -assert.writeOK(collUnsharded.insert({ some : "doc" })); -assert.writeOK(collUnsharded.remove({})); +collUnsharded.insert({ some : "doc" }); +assert.eq( null, collUnsharded.getDB().getLastError() ); +collUnsharded.remove({}); +assert.eq( null, collUnsharded.getDB().getLastError() ); printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(), to : shards[0]._id }) ); @@ -76,6 +78,21 @@ function authDBUsers( conn ) { return conn; } +// Needed b/c the GLE command itself can fail if the shard is down ("write result unknown") - we +// don't care if this happens in this test, we only care that we did not get "write succeeded". +// Depending on the connection pool state, we could get either. +function gleErrorOrThrow(database, msg) { + var gle; + try { + gle = database.getLastErrorObj(); + } + catch (ex) { + return; + } + if (!gle.err) doassert("getLastError is null: " + tojson(gle) + " :" + msg); + return; +}; + // // Setup is complete // @@ -87,9 +104,12 @@ authDBUsers(mongosConnActive); var mongosConnIdle = null; var mongosConnNew = null; -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 })); -assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }); +assert.eq(null, mongosConnActive.getCollection( collSharded.toString() ).getDB().getLastError()); + +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }); +assert.eq(null, mongosConnActive.getCollection( collUnsharded.toString() ).getDB().getLastError()); jsTest.log("Stopping primary of third shard..."); @@ -103,15 +123,21 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 })); assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 })); -assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }); +assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }); +assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }); +assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection with third primary down..."); 
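gleErrorOrThrow(), defined in the hunk above, is used whenever a write targets a downed shard: depending on connection pool state the GLE command may itself fail on the broken connection, and either that or a GLE document with err set counts as the expected outcome; only a clean GLE fails the test. A usage sketch (the _id value is hypothetical and assumes a chunk hosted by the stopped shard):

    var shardedDB = mongosConnActive.getCollection(collSharded.toString()).getDB();
    mongosConnActive.getCollection(collSharded.toString()).insert({ _id: 99 });
    // Passes if GLE reports an error or throws; fails only on a successful GLE.
    gleErrorOrThrow(shardedDB, "insert routed to a downed shard must not succeed");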
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 })); -assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 })); -assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 })); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }); +assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }); +assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }); +assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) ); @@ -127,11 +153,14 @@ mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }); +assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }); +assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 })); +mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }); +assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB()); gc(); // Clean up new connections @@ -152,15 +181,21 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 })); assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 })); -assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }); +assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }); +assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection with second primary down..."); -assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 })); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 })); -assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 })); 
+mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }); +assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }); +assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) ); @@ -179,11 +214,14 @@ mongosConnNew.setSlaveOk(); assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 }); +assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 })); +mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 }); +assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB()); gc(); // Clean up new connections @@ -201,15 +239,21 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 })); assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 })); -assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 }); +gleErrorOrThrow(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection with first primary down..."); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 })); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 })); -assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 })); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() 
).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) ); @@ -228,11 +272,14 @@ mongosConnNew.setSlaveOk(); assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 })); +mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 }); +gleErrorOrThrow(mongosConnNew.getCollection( collUnsharded.toString() ).getDB()); gc(); // Clean up new connections @@ -249,15 +296,21 @@ jsTest.log("Testing active connection with second shard down..."); assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 })); assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 })); -assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 }); +gleErrorOrThrow(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection with second shard down..."); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 })); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 })); -assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 })); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); assert.neq(null, 
mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); @@ -272,11 +325,14 @@ mongosConnNew.setSlaveOk(); assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = authDBUsers( new Mongo( mongos.host ) ); -assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 })); +mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 }); +gleErrorOrThrow(mongosConnNew.getCollection( collUnsharded.toString() ).getDB()); gc(); // Clean up new connections diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js index 730860c2bf7..23ae95e857f 100644 --- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js +++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js @@ -28,8 +28,10 @@ var collSharded = mongos.getCollection( "fooSharded.barSharded" ); var collUnsharded = mongos.getCollection( "fooUnsharded.barUnsharded" ); // Create the unsharded database -assert.writeOK(collUnsharded.insert({ some : "doc" })); -assert.writeOK(collUnsharded.remove({})); +collUnsharded.insert({ some : "doc" }); +assert.eq( null, collUnsharded.getDB().getLastError() ); +collUnsharded.remove({}); +assert.eq( null, collUnsharded.getDB().getLastError() ); printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(), to : shards[0]._id }) ); @@ -45,6 +47,21 @@ assert.commandWorked( admin.runCommand({ moveChunk : collSharded.toString(), st.printShardingStatus(); +// Needed b/c the GLE command itself can fail if the shard is down ("write result unknown") - we +// don't care if this happens in this test, we only care that we did not get "write succeeded". +// Depending on the connection pool state, we could get either. 
+function gleErrorOrThrow(database, msg) { + var gle; + try { + gle = database.getLastErrorObj(); + } + catch (ex) { + return; + } + if (!gle.err) doassert("getLastError is null: " + tojson(gle) + " :" + msg); + return; +}; + // // Setup is complete // @@ -55,9 +72,12 @@ var mongosConnActive = new Mongo( mongos.host ); var mongosConnIdle = null; var mongosConnNew = null; -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 })); -assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }); +assert.eq(null, mongosConnActive.getCollection( collSharded.toString() ).getDB().getLastError()); + +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }); +assert.eq(null, mongosConnActive.getCollection( collUnsharded.toString() ).getDB().getLastError()); jsTest.log("Stopping primary of third shard..."); @@ -71,15 +91,21 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 })); assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 })); -assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }); +assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }); +assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }); +assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection with third primary down..."); -assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 })); -assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 })); -assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 })); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }); +assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }); +assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }); +assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) ); @@ -95,11 +121,14 @@ mongosConnNew = new Mongo( mongos.host ); assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); mongosConnNew = new Mongo( mongos.host ); -assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 
-4 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }); +assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }); +assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 })); +mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }); +assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB()); gc(); // Clean up new connections @@ -159,16 +188,22 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 })); // Writes -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 })); -assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }); +assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }); +assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection with second primary down..."); // Writes -assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 })); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 })); -assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 })); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }); +assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }); +assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); // Reads with read prefs mongosConnIdle.setSlaveOk(); @@ -297,11 +332,14 @@ assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne // Writes mongosConnNew = new Mongo( mongos.host ); -assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 }); +assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 })); 
+mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 }); +assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB()); gc(); // Clean up new connections @@ -318,15 +356,21 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 })); assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 })); -assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 }); +gleErrorOrThrow(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection with first primary down..."); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 })); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 })); -assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 })); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); mongosConnIdle.setSlaveOk(); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); @@ -346,11 +390,14 @@ mongosConnNew.setSlaveOk(); assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); mongosConnNew = new Mongo( mongos.host ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 })); +mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 }); +gleErrorOrThrow(mongosConnNew.getCollection( collUnsharded.toString() ).getDB()); gc(); // Clean up new connections @@ -366,15 +413,21 @@ mongosConnActive.setSlaveOk(); assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 })); assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() 
).findOne({ _id : 1 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 })); -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 })); -assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 }); +gleErrorOrThrow(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection with second shard down..."); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 })); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 })); -assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 })); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); mongosConnIdle.setSlaveOk(); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); @@ -390,14 +443,21 @@ mongosConnNew.setSlaveOk(); assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); mongosConnNew = new Mongo( mongos.host ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 }); +gleErrorOrThrow(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 })); +mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 }); +gleErrorOrThrow(mongosConnNew.getCollection( collUnsharded.toString() ).getDB()); gc(); // Clean up new connections jsTest.log("DONE!"); st.stop(); + + + + diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js index bfec2a4b863..3cf1c1dc788 100644 --- a/jstests/sharding/mongos_shard_failure_tolerance.js +++ b/jstests/sharding/mongos_shard_failure_tolerance.js @@ -34,12 +34,29 @@ assert.commandWorked( admin.runCommand({ moveChunk : collSharded.toString(), to : shards[1]._id }) ); // Create the unsharded database -assert.writeOK(collUnsharded.insert({ some : "doc" })); -assert.writeOK(collUnsharded.remove({})); +collUnsharded.insert({ some : "doc" }); +assert.eq( null, collUnsharded.getDB().getLastError() ); 
+collUnsharded.remove({}); +assert.eq( null, collUnsharded.getDB().getLastError() ); printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(), to : shards[0]._id }) ); st.printShardingStatus(); +// Needed b/c the GLE command itself can fail if the shard is down ("write result unknown") - we +// don't care if this happens in this test, we only care that we did not get "write succeeded". +// Depending on the connection pool state, we could get either. +function gleErrorOrThrow(database, msg) { + var gle; + try { + gle = database.getLastErrorObj(); + } + catch (ex) { + return; + } + if (!gle.err) doassert("getLastError is null: " + tojson(gle) + " :" + msg); + return; +}; + // // Setup is complete // @@ -50,9 +67,12 @@ var mongosConnActive = new Mongo( mongos.host ); var mongosConnIdle = null; var mongosConnNew = null; -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 })); -assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }); +assert.eq(null, mongosConnActive.getCollection( collSharded.toString() ).getDB().getLastError()); + +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }); +assert.eq(null, mongosConnActive.getCollection( collUnsharded.toString() ).getDB().getLastError()); jsTest.log("Stopping third shard..."); @@ -66,15 +86,21 @@ assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOn assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 })); assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 })); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 })); -assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }); +assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }); +assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }); +assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection..."); -assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 })); -assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 })); -assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 })); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }); +assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }); +assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }); +assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); assert.neq(null, 
mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) ); @@ -90,11 +116,14 @@ mongosConnNew = new Mongo( mongos.host ); assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); mongosConnNew = new Mongo( mongos.host ); -assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }); +assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 })); +mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }); +assert.gleSuccess(mongosConnNew.getCollection( collSharded.toString() ).getDB()); mongosConnNew = new Mongo( mongos.host ); -assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 })); +mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }); +assert.gleSuccess(mongosConnNew.getCollection( collUnsharded.toString() ).getDB()); gc(); // Clean up new connections @@ -109,16 +138,21 @@ jsTest.log("Testing active connection..."); assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); -assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 })); - -assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 })); -assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 })); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }); +assert.gleSuccess(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }); +gleErrorOrThrow(mongosConnActive.getCollection( collSharded.toString() ).getDB()); +mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }); +assert.gleSuccess(mongosConnActive.getCollection( collUnsharded.toString() ).getDB()); jsTest.log("Testing idle connection..."); -assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 })); -assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 })); -assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 })); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }); +assert.gleSuccess(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }); +gleErrorOrThrow(mongosConnIdle.getCollection( collSharded.toString() ).getDB()); +mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }); +assert.gleSuccess(mongosConnIdle.getCollection( collUnsharded.toString() ).getDB()); assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) ); assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); @@ -131,14 +165,21 @@ mongosConnNew = new Mongo( mongos.host ); assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) ); mongosConnNew = new Mongo( mongos.host ); 
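The `mongosConnNew = new Mongo( mongos.host )` line above begins the fresh-connection pattern these failure-tolerance tests repeat: each write goes through a brand-new mongos connection so routing state is rebuilt from scratch, and the trailing gc() releases the abandoned sockets. Reduced to a sketch (the _id is hypothetical):

    var freshConn = new Mongo(mongos.host); // new connection, no cached routing state
    freshConn.getCollection(collSharded.toString()).insert({ _id: -20 });
    assert.gleSuccess(freshConn.getCollection(collSharded.toString()).getDB());
    freshConn = null;
    gc(); // drop the dead reference, as the tests above do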
diff --git a/jstests/sharding/mongos_validate_backoff.js b/jstests/sharding/mongos_validate_backoff.js
index 877ab808dcc..6462dc48ff8 100644
--- a/jstests/sharding/mongos_validate_backoff.js
+++ b/jstests/sharding/mongos_validate_backoff.js
@@ -18,7 +18,8 @@ var timeBadInsert = function(){
     var start = new Date().getTime()
 
     // Bad insert, no shard key
-    assert.writeError(coll.insert({ hello : "world" }));
+    coll.insert({ hello : "world" })
+    assert.neq( null, coll.getDB().getLastError() )
 
     var end = new Date().getTime()
 
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index 4bfbb2048f3..6a6fb36eeef 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -39,10 +39,12 @@ coll.ensureIndex({ b : 1 })
 printjson( admin.runCommand({ shardCollection : coll + "", key : { b : 1 } }) )
 
 // Make sure that we can successfully insert, even though we have stale state
-assert.writeOK(staleCollA.insert({ b : "b" }));
+staleCollA.insert({ b : "b" })
+assert.eq( null, staleCollA.getDB().getLastError() )
 
 // Make sure we unsuccessfully insert with old info
-assert.writeError(staleCollB.insert({ a : "a" }));
+staleCollB.insert({ a : "a" })
+assert.neq( null, staleCollB.getDB().getLastError() )
 
 // Change the collection sharding state
 coll.drop()
@@ -50,10 +52,12 @@ coll.ensureIndex({ c : 1 })
 printjson( admin.runCommand({ shardCollection : coll + "", key : { c : 1 } }) )
 
 // Make sure we can successfully upsert, even though we have stale state
-assert.writeOK(staleCollA.update({ c : "c" }, { c : "c" }, true ));
+staleCollA.update({ c : "c" }, { c : "c" }, true )
+assert.eq( null, staleCollA.getDB().getLastError() )
 
 // Make sure we unsuccessfully upsert with old info
-assert.writeError(staleCollB.update({ b : "b" }, { b : "b" }, true ));
+staleCollB.update({ b : "b" }, { b : "b" }, true )
+assert.neq( null, staleCollB.getDB().getLastError() )
 
 // Change the collection sharding state
 coll.drop()
@@ -61,13 +65,16 @@ coll.ensureIndex({ d : 1 })
 printjson( admin.runCommand({ shardCollection : coll + "", key : { d : 1 } }) )
 
 // Make sure we can successfully update, even though we have stale state
-assert.writeOK(coll.insert({ d : "d" }));
+coll.insert({ d : "d" })
+coll.getDB().getLastError();
 
-assert.writeOK(staleCollA.update({ d : "d" }, { $set : { x : "x" } }, false, false ));
+staleCollA.update({ d : "d" }, { $set : { x : "x" } }, false, false )
+assert.eq( null, staleCollA.getDB().getLastError() )
 assert.eq( staleCollA.findOne().x, "x" )
 
 // Make sure we unsuccessfully update with old info
-assert.writeError(staleCollB.update({ c : "c" }, { $set : { x : "y" } }, false, false ));
+staleCollB.update({ c : "c" }, { $set : { x : "y" } }, false, false )
+assert.neq( null, staleCollB.getDB().getLastError() )
 assert.eq( staleCollB.findOne().x, "x" )
 
 // Change the collection sharding state
@@ -80,12 +87,16 @@ printjson( admin.runCommand({ split : coll + "", middle : { e : 0 } }) )
 printjson( admin.runCommand({ moveChunk : coll + "", find : { e : 0 }, to : "shard0001" }) )
 
 // Make sure we can successfully remove, even though we have stale state
-assert.writeOK(coll.insert({ e : "e" }));
+coll.insert({ e : "e" })
+// Need to make sure the insert makes it to the shard
+assert.eq( null, coll.getDB().getLastError() )
 
-assert.writeOK(staleCollA.remove({ e : "e" }, true));
+staleCollA.remove({ e : "e" }, true)
+assert.eq( null, staleCollA.getDB().getLastError() )
 assert.eq( null, staleCollA.findOne() )
 
 // Make sure we unsuccessfully remove with old info
-assert.writeError(staleCollB.remove({ d : "d" }, true ));
+staleCollB.remove({ d : "d" }, true )
+assert.neq( null, staleCollB.getDB().getLastError() )
 
 st.stop()
diff --git a/jstests/sharding/movechunk_with_def_paranoia.js b/jstests/sharding/movechunk_with_def_paranoia.js
index 97feb0b4ac3..9adcfbe7137 100644
--- a/jstests/sharding/movechunk_with_def_paranoia.js
+++ b/jstests/sharding/movechunk_with_def_paranoia.js
@@ -1,5 +1,3 @@
-// TODO: move back to sharding suite after SERVER-13402 is fixed
-
 /**
  * This test checks that the moveChunk directory is not created
  */
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index 4091792d27f..5a485e0122b 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -1,5 +1,3 @@
-// TODO: move back to sharding suite after SERVER-13402 is fixed
-
 /**
  * This test sets moveParanoia flag and then check that the directory is created with the moved data
  */
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index 1844528b225..262c014a1d6 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -1,5 +1,3 @@
-// TODO: move back to sharding suite after SERVER-13402 is fixed
-
 /**
  * This test sets moveParanoia flag and then check that the directory is created with the moved data
  */
diff --git a/jstests/sharding/moveprimary_ignore_sharded.js b/jstests/sharding/moveprimary_ignore_sharded.js
index 6f8895851c8..ff4141fb1c0 100644
--- a/jstests/sharding/moveprimary_ignore_sharded.js
+++ b/jstests/sharding/moveprimary_ignore_sharded.js
@@ -32,9 +32,12 @@ for( var i = 0; i < 3; i++ ){
     collsFooB.push( mongosB.getCollection( "foo.coll" + i ) )
     collsBarA.push( mongosA.getCollection( "bar.coll" + i ) )
     collsBarB.push( mongosB.getCollection( "bar.coll" + i ) )
-    
-    assert.writeOK(collsFooA[i].insert({ hello : "world" }));
-    assert.writeOK(collsBarA[i].insert({ hello : "world" }));
+    
+    collsFooA[i].insert({ hello : "world" })
+    assert.eq( null, collsFooA[i].getDB().getLastError() )
+    collsBarA[i].insert({ hello : "world" })
+    assert.eq( null, collsBarA[i].getDB().getLastError() )
+    
 }
 
 // Enable sharding
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index d80ef710d52..2e04c091773 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -33,11 +33,12 @@ for (var splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
     testDB.adminCommand({ split: 'test.foo', middle: { a: splitPoint }});
 }
 
-var bulk = testDB.foo.initializeUnorderedBulkOp();
 for (var i = 0; i < numBatch; ++i) {
-    bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
+    testDB.foo.save({ a: numDocs + i, y: str, i: numDocs + i });
 }
-assert.writeOK(bulk.execute());
+
+var GLE = testDB.getLastError();
+assert.eq(null, GLE, "Setup FAILURE: testDB.getLastError() returned " + GLE);
 
 numDocs += numBatch;
 
@@ -93,11 +94,12 @@ for (splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
     testDB.adminCommand({ split: 'test.foo', middle: { a: numDocs + splitPoint }});
 }
 
-bulk = testDB.foo.initializeUnorderedBulkOp();
 for (var i = 0; i < numBatch; ++i) {
-    bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
+    testDB.foo.save({ a: numDocs + i, y: str, i: numDocs + i });
 }
-assert.writeOK(bulk.execute());
+
+GLE = testDB.getLastError();
+assert.eq(null, GLE, "Setup FAILURE: testDB.getLastError() returned " + GLE);
 
 jsTest.log("No errors on insert batch.");
 numDocs += numBatch;
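One caveat with the reverted form in mrShardedOutput.js: getLastError() reports only the outcome of the most recent write on the connection, so a single check after the save loop can miss failures of earlier saves, whereas the bulk API it replaces surfaced per-operation errors. A stricter per-write variant, if that were ever needed (a sketch, not part of this commit):

for (var i = 0; i < numBatch; ++i) {
    testDB.foo.save({ a: numDocs + i, y: str, i: numDocs + i });
    // check each write instead of only the last one in the batch
    assert.eq(null, testDB.getLastError(), "insert " + i + " failed");
}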
diff --git a/jstests/sharding/noUpdateButN1inAnotherCollection.js b/jstests/sharding/noUpdateButN1inAnotherCollection.js
index b1ea81f0710..c8b7ef50f7e 100644
--- a/jstests/sharding/noUpdateButN1inAnotherCollection.js
+++ b/jstests/sharding/noUpdateButN1inAnotherCollection.js
@@ -45,14 +45,16 @@ debug("Inserted docs, now split chunks");
 adminSA.runCommand( { split: ns, find : { _id : 3} });
 adminSA.runCommand( { movechunk: ns, find : { _id : 10}, to: "shard0001" });
 
-var command = 'printjson(db.coll.update({ _id: 9 }, { $set: { a: "9" }}, true));';
+var command = 'db.coll.update({_id:9},{$set:{"a":"9"}},true);printjson(db.getLastErrorObj())';
 
 // without this first query through mongo, the second time doesn't "fail"
 debug("Try query first time");
-runMongoProgram( "mongo", "--quiet", "--port", "" + s._mongos[1].port, "--eval", command );
+var GLE2=runMongoProgram( "mongo", "--quiet", "--port", "" + s._mongos[1].port, "--eval", command );
+
+mongosB.getDB("test").coll2.update({_id:0}, {$set:{"c":"333"}});
+var GLE3=mongosB.getDB("test").getLastErrorObj();
+assert.eq( 0, GLE3.n );
 
-var res = mongosB.getDB("test").coll2.update({ _id: 0 }, { $set: { c: "333" }});
-assert.eq( 0, res.nModified );
 
 s.stop();
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index ffaa967dba6..fd86f9627a3 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -16,10 +16,10 @@ for ( i=0; i<N; i+=(N/12) ) {
 }
 
 s.setBalancer( true )
 
-var bulk = db.foo.initializeUnorderedBulkOp();
 for ( i=0; i<N; i++ )
-    bulk.insert({ _id: i });
-assert.writeOK(bulk.execute());
+    db.foo.insert( { _id : i } )
+db.getLastError();
+
 
 doCommand = function( dbname , cmd ) {
     x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index 542463af203..4eed2e72159 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -24,25 +24,27 @@ var coll = db.foo;
 
 var longStr = 'a';
 while ( longStr.length < 1024 * 128 ) { longStr += longStr; }
-var bulk = coll.initializeUnorderedBulkOp();
 for( i=0 ; i<100; i++){
-    bulk.insert({ num: i, str: longStr });
-    bulk.insert({ num: i+100, x: i, str: longStr });
+    coll.save( {num : i, str : longStr} );
+    coll.save( {num : i+100 , x : i, str : longStr})
 }
-assert.writeOK(bulk.execute());
+db.getLastError();
 
 //no usable index yet, should throw
 assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ) } )
 
 //create usable index
-assert.commandWorked(coll.ensureIndex({ num: 1, x: 1 }));
+coll.ensureIndex({num : 1, x : 1});
+db.getLastError();
 
 //usable index, but doc with empty 'num' value, so still should throw
-assert.writeOK(coll.insert({ x: -5 }));
+coll.save({x : -5});
+assert( ! db.getLastError() , "save bad value didn't succeed");
 assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ) } )
 
 //remove the bad doc. now should finally succeed
-assert.writeOK(coll.remove({ x: -5 }));
+coll.remove( {x : -5});
+assert( ! db.getLastError() , "remove bad value didn't succeed");
 
 var result1 = admin.runCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } );
 printjson( result1 );
 assert.eq( 1, result1.ok , "sharding didn't succeed");
@@ -141,27 +143,27 @@ for( i=0; i < 3; i++ ){
 
     // declare a longer index
     if ( i == 0 ) {
-        assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : 1 } ));
+        coll2.ensureIndex( { skey : 1, extra : 1 } );
    }
    else if ( i == 1 ) {
-        assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : -1 } ));
+        coll2.ensureIndex( { skey : 1, extra : -1 } );
    }
    else if ( i == 2 ) {
-        assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : 1 , superfluous : -1 } ));
+        coll2.ensureIndex( { skey : 1, extra : 1 , superfluous : -1 } );
    }
+    db.getLastError();
 
     // then shard collection on prefix
     var shardRes = admin.runCommand( { shardCollection : coll2 + "", key : { skey : 1 } } );
     assert.eq( shardRes.ok , 1 , "collection not sharded" );
 
     // insert docs with same value for skey
-    bulk = coll2.initializeUnorderedBulkOp();
     for( var i = 0; i < 5; i++ ){
         for( var j = 0; j < 5; j++ ){
-            bulk.insert( { skey : 0, extra : i , superfluous : j } );
+            coll2.insert( { skey : 0, extra : i , superfluous : j } );
         }
     }
-    assert.writeOK( bulk.execute() );
+    assert.eq( null, coll2.getDB().getLastError() , "inserts didn't work" );
 
     // split on that key, and check it makes 2 chunks
     var splitRes = admin.runCommand( { split : coll2 + "", middle : { skey : 0 } } );
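Note that several call sites above (parallel.js, and the ensureIndex paths in prefix_shard_key.js) invoke getLastError() without asserting on the result. In those spots the call is a barrier, not a check: it forces the shell to wait until the preceding writes have been applied before the test proceeds. Schematically (a sketch):

db.foo.insert( { _id : i } )
db.getLastError();   // barrier only: waits for the write; the return value is deliberately ignored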
\ No newline at end of file
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index e2a46c7aba5..9b97fb8daf3 100755
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -80,12 +80,11 @@ var doTest = function(useDollarQuerySyntax) {
     var coll = conn.getDB( 'test' ).user;
 
     assert.soon(function() {
-        var res = coll.insert({ x: 1 }, { writeConcern: { w: NODES }});
-        if (!res.hasWriteError()) {
+        coll.insert({ x: 1 });
+        var err = coll.getDB().getLastError(NODES);
+        if (err == null) {
             return true;
         }
-
-        var err = res.getWriteError().errmsg;
         // Transient transport errors may be expected b/c of the replSetReconfig
         if (err.indexOf("transport error") == -1) {
             throw err;
diff --git a/jstests/sharding/read_pref_multi_mongos_stale_config.js b/jstests/sharding/read_pref_multi_mongos_stale_config.js
index d3c6cd3f53f..1556adef9e8 100644
--- a/jstests/sharding/read_pref_multi_mongos_stale_config.js
+++ b/jstests/sharding/read_pref_multi_mongos_stale_config.js
@@ -20,6 +20,8 @@ for (var x = 0; x < 200; x++) {
     testDB2.user.insert({ x: x });
 }
 
+testDB2.runCommand({ getLastError: 1 });
+
 var cursor = testDB1.user.find({ x: 30 }).readPref('primary');
 assert(cursor.hasNext());
 assert.eq(30, cursor.next().x);
diff --git a/jstests/noPassthrough/refresh_syncclusterconn.js b/jstests/sharding/refresh_syncclusterconn.js
index b12cf504d75..b12cf504d75 100644
--- a/jstests/noPassthrough/refresh_syncclusterconn.js
+++ b/jstests/sharding/refresh_syncclusterconn.js
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
index 58da5ba919f..db9fe5bcdfc 100644
--- a/jstests/sharding/remove1.js
+++ b/jstests/sharding/remove1.js
@@ -2,12 +2,9 @@ s = new ShardingTest( "remove_shard1", 2 );
 
 assert.eq( 2, s.config.shards.count() , "initial server count wrong" );
 
-assert.writeOK(s.config.databases.insert({ _id: 'local',
-                                           partitioned: false,
-                                           primary: 'shard0000'}));
-assert.writeOK(s.config.databases.insert({ _id: 'needToMove',
-                                           partitioned: false,
-                                           primary: 'shard0000'}));
+s.config.databases.insert({_id: 'local', partitioned: false, primary: 'shard0000'});
+s.config.databases.insert({_id: 'needToMove', partitioned: false, primary: 'shard0000'});
+s.config.getLastError();
 
 // first remove puts in draining mode, the second tells me a db needs to move, the third actually removes
 assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to start draining shard" );
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index f6fc93c4faf..8fb81aeb222 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -110,12 +110,11 @@ var str = 'a';
 while( str.length < 1024 * 16 ) {
     str += str;
 }
-
-var bulk = coll.initializeUnorderedBulkOp();
 for( var i = 0; i < 300; i++ ){
-    bulk.insert({ i: i % 10, str: str });
+    coll.insert( { i : i % 10, str : str } );
 }
-assert.writeOK(bulk.execute());
+
+coll.getDB().getLastError();
 
 assert.eq( 300, coll.find().itcount() );
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index 43602ae26ed..b5cf17368c8 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -37,6 +37,7 @@ var coll = mongos.getDB('test').user;
 var verifyInsert = function() {
     var beforeCount = coll.find().count();
     coll.insert({ x: 1 });
+    coll.getDB().getLastError();
     var afterCount = coll.find().count();
 
     assert.eq(beforeCount + 1, afterCount);
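In read_pref.js above, the integer passed to getLastError is a write concern: getLastError(NODES) blocks until the insert has replicated to NODES members, returning null on success and an error string otherwise. The shell also accepts a wtimeout as a second argument; a sketch of both forms (the w:2/30s values are illustrative):

coll.insert({ x: 1 });
var err = coll.getDB().getLastError(NODES);       // wait for w: NODES; null on success
// var err = coll.getDB().getLastError(2, 30000); // w: 2 with a 30s wtimeout, same idea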
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index 548805c4f76..5990db82fe2 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -53,7 +53,9 @@ var inserts = [{_id : -1}, {_id : 1000}];
 
 collOneShard.insert(inserts);
-assert.writeOK(collAllShards.insert(inserts));
+collAllShards.insert(inserts);
+
+assert.eq(null, collOneShard.getDB().getLastError());
 
 var returnPartialFlag = 1 << 7;
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index ba42f53349a..468d611271b 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -43,9 +43,7 @@ assert.eq( 3 , db.foo.find().length() , "after sharding, no split count failed"
 var invalidDB = s.getDB( "foobar" );
 // hack to bypass invalid database name checking at the DB constructor
 invalidDB._name = "foo bar";
-assert.throws(function() {
-    invalidDB.blah.insert({ x: 1 });
-});
+invalidDB.blah.insert( { x : 1 } );
 assert.isnull( s.config.databases.findOne( { _id : "foo bar" } ) );
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index 006a9340682..ff03bf7b24b 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -40,6 +40,8 @@ db.foo.save( { num : 1 , name : "eliot" } );
 db.foo.save( { num : 2 , name : "sara" } );
 db.foo.save( { num : -1 , name : "joe" } );
 
+db.getLastError();
+
 assert.eq( 3 , s.getServer( "test" ).getDB( "test" ).foo.find().length() , "not right directly to db A" );
 assert.eq( 3 , db.foo.find().length() , "not right on shard" );
 
@@ -70,16 +72,19 @@ placeCheck( 3 );
 
 // test inserts go to right server/shard
 
-assert.writeOK(db.foo.save( { num : 3 , name : "bob" } ));
+db.foo.save( { num : 3 , name : "bob" } );
+db.getLastError();
 assert.eq( 1 , primary.foo.find().length() , "after move insert go wrong place?" );
 assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
 
-assert.writeOK(db.foo.save( { num : -2 , name : "funny man" } ));
+db.foo.save( { num : -2 , name : "funny man" } );
+db.getLastError();
 assert.eq( 2 , primary.foo.find().length() , "after move insert go wrong place?" );
 assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
 
-assert.writeOK(db.foo.save( { num : 0 , name : "funny guy" } ));
+db.foo.save( { num : 0 , name : "funny guy" } );
+db.getLastError();
 assert.eq( 2 , primary.foo.find().length() , "boundary A" );
 assert.eq( 4 , secondary.foo.find().length() , "boundary B" );
 
@@ -193,17 +198,22 @@ assert.isnull( db.foo.findOne( { num : 3 } ) , "remove test E" );
 
 placeCheck( 8 );
 
+// TODO: getLastError
+db.getLastError();
+db.getPrevError();
+
 // more update stuff
 
 printAll();
 total = db.foo.find().count();
-var res = assert.writeOK(db.foo.update( {}, { $inc: { x: 1 } }, false, true ));
+db.foo.update( {} , { $inc : { x : 1 } } , false , true );
+x = db.getLastErrorObj();
 printAll();
-assert.eq( total , res.nModified, res.toString() );
+assert.eq( total , x.n , "getLastError n A: " + tojson( x ) );
 
-res = db.foo.update( { num : -1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( 1, res.nModified, res.toString() );
+db.foo.update( { num : -1 } , { $inc : { x : 1 } } , false , true );
+assert.eq( 1 , db.getLastErrorObj().n , "getLastErrorObj n B" );
 
 // ---- move all to the secondary
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 5e8a8df4a4b..d202b0f37c4 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -61,7 +61,8 @@ function doCounts( name , total , onlyItCounts ){
 }
 
 var total = doCounts( "before wrong save" )
-assert.writeOK(secondary.insert( { _id : 111 , num : -3 } ));
+secondary.save( { _id : 111 , num : -3 } );
+printjson( secondary.getDB().getLastError() )
 doCounts( "after wrong save" , total , true )
 e = a.find().explain();
 assert.eq( 3 , e.n , "ex1" )
@@ -142,6 +143,7 @@ dbb = s2.getDB( "test2" );
 dba.foo.save( { num : 1 } );
 dba.foo.save( { num : 2 } );
 dba.foo.save( { num : 3 } );
+dba.getLastError();
 
 assert.eq( 1 , s.onNumShards( "foo" , "test2" ) , "B on 1 shards" );
 assert.eq( 3 , dba.foo.count() , "Ba" );
diff --git a/jstests/sharding/shard7.js b/jstests/sharding/shard7.js
index 7d37fdf60f6..18adbda3f0a 100644
--- a/jstests/sharding/shard7.js
+++ b/jstests/sharding/shard7.js
@@ -37,9 +37,11 @@ assert.eq( 0, aggregate.toArray().length );
 c.save( {a:null,b:null} );
 c.save( {a:1,b:1} );
 
-assert.writeOK( c.remove( unsatisfiable ));
+c.remove( unsatisfiable );
+assert( !db.getLastError() );
 assert.eq( 2, c.count() );
-assert.writeOK( c.update( unsatisfiable, {$set:{c:1}}, false, true ));
+c.update( unsatisfiable, {$set:{c:1}}, false, true );
+assert( !db.getLastError() );
 assert.eq( 2, c.count() );
 assert.eq( 0, c.count( {c:1} ) );
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index bb221fe3f11..b16c1796cd9 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -15,11 +15,10 @@ print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSiz
 
 // turn off powerOf2Sizes as this tests regular allocation
 db.createCollection('data', {usePowerOf2Sizes: false});
-var bulk = db.data.initializeUnorderedBulkOp();
 for (i=0; i<numDocs; i++) {
-    bulk.insert({_id: i, s: bigString});
+    db.data.insert({_id: i, s: bigString});
 }
-assert.writeOK(bulk.execute());
+db.getLastError();
 
 assert.lt(totalSize, db.data.stats().size);
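shard2.js above trades the write result's nModified for getLastErrorObj().n. For an update, GLE's n is the number of documents affected, which for these $inc multi-updates (every matched document gets modified) should line up with the old nModified assertions. A sketch of the pattern:

db.foo.update( {} , { $inc : { x : 1 } } , false , true );  // multi-update over all docs
var gle = db.getLastErrorObj();
assert.eq( total , gle.n , "expected every doc to be updated: " + tojson( gle ) );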
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 48630a0ca58..200cc009ee9 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -1,4 +1,5 @@
 // replica set as solo shard
+// getLastError(2) fails on about every 170 inserts on my Macbook laptop -Tony
 // TODO: Add assertion code that catches hang
 
 load('jstests/libs/grid.js')
@@ -17,12 +18,11 @@ function go() {
 
     // Add data to it
     var conn1a = repset1.getMaster()
-    var db1a = conn1a.getDB('test');
-    var bulk = db1a.foo.initializeUnorderedBulkOp();
+    var db1a = conn1a.getDB('test')
     for (var i = 0; i < N; i++) {
-        bulk.insert({ x: i, text: Text });
+        db1a['foo'].insert({x: i, text: Text})
+        db1a.getLastError(2) // wait to be copied to at least one secondary
    }
-    assert.writeOK(bulk.execute({ w: 2 }));
 
     // Create 3 sharding config servers
     var configsetSpec = new ConfigSet(3)
@@ -49,15 +49,18 @@ function go() {
 
     // Test case where GLE should return an error
     db.foo.insert({_id:'a', x:1});
-    assert.writeError(db.foo.insert({ _id: 'a', x: 1 },
-                                    { writeConcern: { w: 2, wtimeout: 30000 }}));
+    db.foo.insert({_id:'a', x:1});
+    var x = db.getLastErrorObj(2, 30000)
+    assert.neq(x.err, null, "C1 " + tojson(x));
 
     // Add more data
-    bulk = db.foo.initializeUnorderedBulkOp();
     for (var i = N; i < 2*N; i++) {
-        bulk.insert({ x: i, text: Text});
+        db['foo'].insert({x: i, text: Text})
+        var x = db.getLastErrorObj(2, 30000) // wait to be copied to at least one secondary
+        if (i % 30 == 0) print(i)
+        if (i % 100 == 0 || x.err != null) printjson(x);
+        assert.eq(x.err, null, "C2 " + tojson(x));
    }
-    assert.writeOK(bulk.execute({ w: 2, wtimeout: 30000 }));
 
     // take down the slave and make sure it fails over
     repset1.stop(1);
@@ -80,6 +83,8 @@ function go() {
     routerSpec.end()
     configsetSpec.end()
     repset1.stopSet()
+
+    print('shard_insert_getlasterror_w2.js SUCCESS')
 }
 
 //Uncomment below to execute
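The two-argument calls in shard_insert_getlasterror_w2.js above are getLastErrorObj(w, wtimeout): wait for the last write to replicate to w members, giving up after wtimeout milliseconds, and return the full GLE document (err, n, and related fields) rather than just the error string. A sketch of how the result is typically inspected:

var gle = db.getLastErrorObj(2, 30000);  // w: 2, wtimeout: 30s
if (gle.err != null) {
    printjson(gle);                      // e.g. a wtimeout leaves err non-null
}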
diff --git a/jstests/sharding/shard_key_immutable.js b/jstests/sharding/shard_key_immutable.js
index 0cd79d4252d..90cb38b5e4b 100644
--- a/jstests/sharding/shard_key_immutable.js
+++ b/jstests/sharding/shard_key_immutable.js
@@ -53,7 +53,9 @@ var dotColl = db.getCollection('col1');
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { a: 1 }, false));
+compoundColl.update({}, { a: 1 }, false);
+var gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 var doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -61,14 +63,18 @@ doc = compoundColl.findOne();
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { a: 1, b: 1 }, false));
+compoundColl.update({}, { a: 1, b: 1 }, false);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({}, { a: 100, b: 100 }, false));
+compoundColl.update({}, { a: 100, b: 100 }, false);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -76,21 +82,27 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 // Cannot modify _id!
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { a: 100, b: 100, _id: 1 }, false));
+compoundColl.update({}, { a: 100, b: 100, _id: 1 }, false);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1 }}, false, true));
+compoundColl.update({}, { $set: { a: 1, b: 1 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({}, { $set: { a: 100, b: 100 }}, false, true));
+compoundColl.update({}, { $set: { a: 100, b: 100 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -98,14 +110,18 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 // Cannot modify _id
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, false, true));
+compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({}, { $set: { c: 1 }}, false, true));
+compoundColl.update({}, { $set: { c: 1 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
@@ -115,41 +131,55 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + to
 //
 
 compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { a: 1 }, true));
+compoundColl.update({}, { a: 1 }, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 assert(doc == null, 'doc upserted: ' + tojson(doc));
 
 compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({}, { a: 1, b: 1 }, true));
+compoundColl.update({}, { a: 1, b: 1 }, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 1, b: 1 }), 'doc not upserted properly: ' + tojson(doc));
 
 // Cannot modify _id!
 compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({}, { a: 1, b: 1, _id: 1 }, true));
+compoundColl.update({}, { a: 1, b: 1, _id: 1 }, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 1, b: 1 }), 'doc not upserted properly: ' + tojson(doc));
 
 compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { a: 1 }}, true, true));
+compoundColl.update({}, { $set: { a: 1 }}, true, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 assert(doc == null, 'doc upserted: ' + tojson(doc));
 
 compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1 }}, true, true));
+compoundColl.update({}, { $set: { a: 1, b: 1 }}, true, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 assert(doc == null, 'doc upserted: ' + tojson(doc));
 
 // Cannot modify _id!
 compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, true, true));
+compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, true, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 assert(doc == null, 'doc upserted: ' + tojson(doc));
 
 compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { c: 1 }}, true, true));
+compoundColl.update({}, { $set: { c: 1 }}, true, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 assert(doc == null, 'doc upserted: ' + tojson(doc));
 
@@ -159,21 +189,27 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100 }, false));
+compoundColl.update({ a: 100 }, { a: 100 }, false);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 2 }, false));
+compoundColl.update({ a: 100 }, { a: 2 }, false);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100, b: 1 }, false));
+compoundColl.update({ a: 100 }, { a: 100, b: 1 }, false);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
@@ -181,7 +217,9 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 // Inspecting query and update alone is not enough to tell whether a shard key will change.
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { a: 100, b: 100 }, false));
+compoundColl.update({ a: 100 }, { a: 100, b: 100 }, false);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
@@ -189,21 +227,27 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 // Cannot modify _id!
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100, b: 100, _id: 1 }, false));
+compoundColl.update({ a: 100 }, { a: 100, b: 100, _id: 1 }, false);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { a: 100 }}, false, true));
+compoundColl.update({ a: 100 }, { $set: { a: 100 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { b: 200 }}, false, true));
+compoundColl.update({ a: 100 }, { $set: { b: 200 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
@@ -211,14 +255,18 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 // Inspecting query and update alone is not enough to tell whether a shard key will change.
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { b: 100 }}, false, true));
+compoundColl.update({ a: 100 }, { $set: { b: 100 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 200 }}, false, true));
+compoundColl.update({ a: 100 }, { $set: { a: 100, b: 200 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
@@ -226,7 +274,9 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 // Inspecting query and update alone is not enough to tell whether a shard key will change.
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100 }}, false, true));
+compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
@@ -234,21 +284,27 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 // Cannot modify _id!
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
+compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { c: 1 }}, false, true));
+compoundColl.update({ a: 100 }, { $set: { c: 1 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
 
 compoundColl.remove({}, false);
 compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, false, true));
+compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 delete doc._id;
 assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 
@@ -258,54 +314,74 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
 //
 
 compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100 }, true));
+compoundColl.update({ a: 100 }, { a: 100 }, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 assert(doc == null, 'doc upserted: ' + tojson(doc));
 
 compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 2 }, true));
+compoundColl.update({ a: 100 }, { a: 2 }, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 assert(doc == null, 'doc upserted: ' + tojson(doc));
 
 compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 1, b: 1 }, true));
+compoundColl.update({ a: 100 }, { a: 1, b: 1 }, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
 doc = compoundColl.findOne();
 assert(doc == null, 'doc upserted: ' + tojson(doc));
 
 // Cannot modify _id!
compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100 }, { a: 1, b: 1, _id: 1 }, true)); +compoundColl.update({ a: 100 }, { a: 1, b: 1, _id: 1 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 1 }}, true, true)); +compoundColl.update({ a: 100 }, { $set: { a: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100 }, { $set: { b: 1 }}, true, true)); +compoundColl.update({ a: 100 }, { $set: { b: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1 }}, true, true)); +compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); // Cannot modify _id! compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1, _id: 1 }}, true, true)); +compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1, _id: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100 }, { $set: { c: 1 }}, true, true)); +compoundColl.update({ a: 100 }, { $set: { c: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, true, true)); +compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); @@ -315,21 +391,27 @@ assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { b: 100 }, false)); +compoundColl.update({ b: 100 }, { b: 100 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { b: 2 }, false)); +compoundColl.update({ b: 100 }, { b: 2 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 
100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { a: 1 }, false)); +compoundColl.update({ b: 100 }, { a: 1 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); @@ -337,14 +419,18 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); // Inspecting query and update alone is not enough to tell whether a shard key will change. compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { a: 100 }, false)); +compoundColl.update({ b: 100 }, { a: 100 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 100 }, false)); +compoundColl.update({ b: 100 }, { a: 1, b: 100 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); @@ -352,7 +438,9 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); // Inspecting query and update alone is not enough to tell whether a shard key will change. compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeOK(compoundColl.update({ b: 100 }, { a: 100, b: 100 }, false)); +compoundColl.update({ b: 100 }, { a: 100, b: 100 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); @@ -360,35 +448,45 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); // Cannot modify _id! 
compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, false)); +compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { $set: { b: 1 }}, false, true)); +compoundColl.update({ b: 100 }, { $set: { b: 1 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1 }}, false, true)); +compoundColl.update({ b: 100 }, { $set: { a: 1 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeOK(compoundColl.update({ b: 100 }, { $set: { a: 100 }}, false, true)); +compoundColl.update({ b: 100 }, { $set: { a: 100 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 100 }}, false, true)); +compoundColl.update({ b: 100 }, { $set: { a: 1, b: 100 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); @@ -396,7 +494,9 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); // Inspecting query and update alone is not enough to tell whether a shard key will change. compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeOK(compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100 }}, false, true)); +compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); @@ -404,14 +504,18 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); // Cannot modify _id! 
compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true)); +compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeOK(compoundColl.update({ b: 100 }, { $set: { c: 1 }}, false, true)); +compoundColl.update({ b: 100 }, { $set: { c: 1 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc)); @@ -421,55 +525,74 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + to // compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { b: 100 }, true)); +compoundColl.update({ b: 100 }, { b: 100 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { b: 2 }, true)); - +compoundColl.update({ b: 100 }, { b: 2 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { a: 1 }, true)); +compoundColl.update({ b: 100 }, { a: 1 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1 }, true)); +compoundColl.update({ b: 100 }, { a: 1, b: 1 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); // Cannot modify _id! 
compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, true)); +compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { $set: { b: 1 }}, true, true)); +compoundColl.update({ b: 100 }, { $set: { b: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1 }}, true, true)); +compoundColl.update({ b: 100 }, { $set: { a: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1 }}, true, true)); +compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); // Cannot modify _id! compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1, _id: 1 }}, true, true)); +compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1, _id: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ b: 100 }, { $set: { c: 1 }}, true, true)); +compoundColl.update({ b: 100 }, { $set: { c: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc upserted: ' + tojson(doc)); @@ -479,14 +602,18 @@ assert(doc == null, 'doc upserted: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100 }, false)); +compoundColl.update({ a: 100, b: 100 }, { a: 100 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 100 }, false)); +compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 100 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100, c: 100 }), 'doc did not change: ' + tojson(doc)); @@ -494,28 +621,36 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 100 }), 'doc did not change: ' + // Cannot modify _id! 
compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, false)); +compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ a: 100, b: 100 }, { b: 100 }, false)); +compoundColl.update({ a: 100, b: 100 }, { b: 100 }, false); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, false, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, false, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc)); @@ -523,21 +658,27 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + to // Cannot modify _id! 
compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, false, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); compoundColl.remove({}, false); compoundColl.insert({ a: 100, b: 100 }); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, false, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, false, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc)); @@ -547,50 +688,68 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + to // compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100 }, true)); +compoundColl.update({ a: 100, b: 100 }, { a: 100 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc was upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 1 }, true)); +compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 1 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc)); // Cannot modify _id! 
compoundColl.remove({}, false); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, true)); +compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(friendlyEqual(doc, { _id: 100, a: 100, b: 100 }), 'wrong doc: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100, b: 100 }, { b: 100 }, true)); +compoundColl.update({ a: 100, b: 100 }, { b: 100 }, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc was upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, true, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc != null, 'doc was not upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, true, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc != null, 'doc was not upserted: ' + tojson(doc)); // Can upsert with new _id compoundColl.remove({}, false); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, true, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc != null, 'doc was not upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, true, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); assert(doc == null, 'doc was upserted: ' + tojson(doc)); compoundColl.remove({}, false); -assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, true, true)); +compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, true, true); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err == null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc)); @@ -601,7 +760,9 @@ assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc) compoundColl.remove({}, false); compoundColl.insert({ _id: 1, a: 100, b: 100 }); -assert.writeError(compoundColl.update({ _id: 1 }, { a: 1 })); +compoundColl.update({ _id: 1 }, { a: 1 }); +gle = db.runCommand({ getLastError: 1 }); +assert(gle.err != null, 'gleObj: ' + tojson(gle)); doc = compoundColl.findOne(); delete doc._id; assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); @@ -609,49 +770,63 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc)); // Special case for _id. This is for making save method work. 
compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ _id: 1 }, { a: 100, b: 100 }));
+compoundColl.update({ _id: 1 }, { a: 100, b: 100 });
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));

compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { a: 1, b: 1 }));
+compoundColl.update({ _id: 1 }, { a: 1, b: 1 });
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));

compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, false, true));
+compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));

compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ _id: 1 }, { $set: { a: 100 }}, false, true));
+compoundColl.update({ _id: 1 }, { $set: { a: 100 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));

compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, false, true));
+compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));

compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ _id: 1 }, { $set: { b: 100 }}, false, true));
+compoundColl.update({ _id: 1 }, { $set: { b: 100 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));

compoundColl.remove({}, false);
compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, false, true));
+compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
@@ -661,28 +836,38 @@ assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { a: 1 }, true));
+compoundColl.update({ _id: 1 }, { a: 1 }, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ _id: 1 }, { a: 1, b: 1 }, true));
+compoundColl.update({ _id: 1 }, { a: 1, b: 1 }, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { a: 1, b: 1 }), 'bad doc: ' + tojson(doc));

compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, true, true));
+compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, true, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, true, true));
+compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, true, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, true, true));
+compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, true, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
assert.eq(0, compoundColl.count(), 'doc should not be inserted');
@@ -691,7 +876,9 @@ assert.eq(0, compoundColl.count(), 'doc should not be inserted');
//
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}));
+dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }});
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'doc did not change: ' + tojson(doc));
@@ -722,70 +909,90 @@ assert.throws(function() {
dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: 100 }));
+dotColl.update({ 'x.a': 100 }, { x: 100 });
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));

dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}));
+dotColl.update({ 'x.a': 100 }, { x: { b: 100 }});
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));

dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, false, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'doc did not change: ' + tojson(doc));

dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, false, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));

dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, false, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));

dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 2 }}, false, true));
+dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 2 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }, b: 2 }), 'doc did not change: ' + tojson(doc));

dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, false, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));

dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, false, true));
+dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));

dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, false, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));

dotColl.remove({}, false);
dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 200 }}, false, true));
+dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 200 }}, false, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 200 }}), 'doc did not change: ' + tojson(doc));
@@ -795,7 +1002,9 @@ assert(friendlyEqual(doc, { x: { a: 100, b: 200 }}), 'doc did not change: ' + to
//
dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}, true));
+dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
@@ -821,59 +1030,81 @@ assert.throws(function() {
});

dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: 100 }, true));
+dotColl.update({ 'x.a': 100 }, { x: 100 }, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}, true));
+dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'bad doc: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 3 }}, true));
+dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 3 }}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100 }, b: 3 }), 'bad doc: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 2 }}, true));
+dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 2 }}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, true));
+dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, true));
+dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err != null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));

dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 2 }}, true));
+dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 2 }}, true);
+gle = db.runCommand({ getLastError: 1 });
+assert(gle.err == null, 'gleObj: ' + tojson(gle));
doc = dotColl.findOne();
delete doc._id;
assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'bad doc: ' + tojson(doc));
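For reference, the positional arguments in the legacy update calls throughout this file are (query, updateSpec, upsert, multi). A minimal sketch of the two flag combinations exercised above; the lines are illustrative, not taken from the file:

    // coll.update(query, updateSpec, upsert, multi)
    dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 2 }}, true,  false); // upsert, single doc
    dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 2 }}, false, true);  // no upsert, all matches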
diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js
index 843d48209a1..9bd68c740fb 100644
--- a/jstests/sharding/shard_kill_and_pooling.js
+++ b/jstests/sharding/shard_kill_and_pooling.js
@@ -29,9 +29,10 @@ if ( is32Bits && _isWindows() ) {
else {
    // Non-Win32 platform
-
-    assert.writeOK(coll.insert({ hello: "world" }));
-
+
+    coll.insert({ hello : "world" })
+    assert.eq( null, coll.getDB().getLastError() );
+
    jsTest.log("Creating new connections...");

    // Create a bunch of connections to the primary node through mongos.
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index b24aa229dc6..b1b2c3d3107 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -26,7 +26,8 @@ var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
var inserts = [{ _id : 0 }, { _id : 1 }, { _id : 2 }];
var staleColl = st.s1.getCollection(coll.toString());

-assert.writeOK(staleColl.insert(inserts));
+staleColl.insert(inserts);
+assert.gleOK(staleColl.getDB().getLastErrorObj());

printjson(profileColl.find().toArray());
diff --git a/jstests/sharding/sharding_with_keyfile_auth.js b/jstests/sharding/sharding_with_keyfile_auth.js
index 2fe594544d8..27572f82d46 100644
--- a/jstests/sharding/sharding_with_keyfile_auth.js
+++ b/jstests/sharding/sharding_with_keyfile_auth.js
@@ -45,33 +45,33 @@ coll.ensureIndex({ insert : 1 })
print( "INSERT!" )

// Insert a bunch of data
-var toInsert = 2000;
-var bulk = coll.initializeUnorderedBulkOp();
+var toInsert = 2000
for( var i = 0; i < toInsert; i++ ){
-    bulk.insert({ my : "test", data : "to", insert : i });
+    coll.insert({ my : "test", data : "to", insert : i })
}
-assert.writeOK(bulk.execute());
+
+assert.eq( coll.getDB().getLastError(), null )

print( "UPDATE!" )
// Update a bunch of data
-var toUpdate = toInsert;
-bulk = coll.initializeUnorderedBulkOp();
+var toUpdate = toInsert
for( var i = 0; i < toUpdate; i++ ){
-    var id = coll.findOne({ insert : i })._id;
-    bulk.find({ insert : i, _id : id }).updateOne({ $inc : { counter : 1 } });
+    var id = coll.findOne({ insert : i })._id
+    coll.update({ insert : i, _id : id }, { $inc : { counter : 1 } })
}
-assert.writeOK(bulk.execute());
+
+assert.eq( coll.getDB().getLastError(), null )

print( "DELETE" )

// Remove a bunch of data
-var toDelete = toInsert / 2;
-bulk = coll.initializeUnorderedBulkOp();
+var toDelete = toInsert / 2
for( var i = 0; i < toDelete; i++ ){
-    bulk.find({ insert : i }).remove();
+    coll.remove({ insert : i })
}
-assert.writeOK(bulk.execute());
+
+assert.eq( coll.getDB().getLastError(), null )

// Make sure the right amount of data is there
assert.eq( coll.find().count(), toInsert / 2 )
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index aea55741251..130e71d9020 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -16,6 +16,7 @@ for ( i=0; i<N; i++ ){
    forward.push( i )
    backward.push( ( N - 1 ) - i )
}
+db.getLastError();

s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 33 } } )
s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 66 } } )
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index edeb395c767..9033abc2402 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -22,19 +22,17 @@ assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );

jsTest.log( "Insert a bunch of data into a chunk of the collection..." );

-var bulk = coll.initializeUnorderedBulkOp();
for ( var i = 0; i < (250 * 1000) + 10; i++ ) {
-    bulk.insert({ _id : i });
+    coll.insert({ _id : i });
}
-assert.writeOK(bulk.execute());
+assert.eq( null, coll.getDB().getLastError() );

jsTest.log( "Insert a bunch of data into the rest of the collection..." );

-bulk = coll.initializeUnorderedBulkOp();
for ( var i = 1; i <= (250 * 1000); i++ ) {
-    bulk.insert({ _id: -i });
+    coll.insert({ _id : -i });
}
-assert.writeOK(bulk.execute());
+assert.eq( null, coll.getDB().getLastError() );

jsTest.log( "Get split points of the chunk using force : true..." );
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 54caaa46e29..02abfe6230e 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -26,19 +26,17 @@ jsTest.log( "Insert a bunch of data into the low chunk of a collection," +

var data128k = "x";
for ( var i = 0; i < 7; i++ ) data128k += data128k;

-var bulk = coll.initializeUnorderedBulkOp();
for ( var i = 0; i < 1024; i++ ) {
-    bulk.insert({ _id : -(i + 1) });
+    coll.insert({ _id : -(i + 1) });
}
-assert.writeOK(bulk.execute());
+assert.eq( null, coll.getDB().getLastError() );

jsTest.log( "Insert 32 docs into the high chunk of a collection" );

-bulk = coll.initializeUnorderedBulkOp();
for ( var i = 0; i < 32; i++ ) {
-    bulk.insert({ _id : i });
+    coll.insert({ _id : i });
}
-assert.writeOK(bulk.execute());
+assert.eq( null, coll.getDB().getLastError() );

jsTest.log( "Split off MaxKey chunk..." );
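These conversions trade the bulk API's per-batch result for the older idiom: fire unacknowledged writes, then issue one getLastError that acts as a barrier on the connection. One caveat worth keeping in mind: getLastError only reports the most recent operation, so an early failed insert followed by successful ones goes unnoticed. A sketch of the idiom with that assumption spelled out:

    // Sketch of the legacy insert-then-GLE idiom used in these hunks.
    // Each insert is sent fire-and-forget; the single getLastError at the
    // end blocks until the connection drains and reports only the error
    // (if any) of the *last* write on this connection.
    for (var i = 0; i < 1000; i++) {
        coll.insert({ _id : i });
    }
    assert.eq(null, coll.getDB().getLastError());  // barrier + last-op check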
diff --git a/jstests/sharding/ssv_nochunk.js b/jstests/sharding/ssv_nochunk.js
index 7d3f8bfdbb2..20f3ea27f45 100644
--- a/jstests/sharding/ssv_nochunk.js
+++ b/jstests/sharding/ssv_nochunk.js
@@ -13,7 +13,8 @@ configDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});

var testDB = st.s.getDB('test');
-assert.writeOK(testDB.user.insert({ x: 1 }));
+testDB.user.insert({ x: 1 });
+testDB.runCommand({ getLastError: 1 });

var doc = testDB.user.findOne();
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index 8660c782a41..7d551128cab 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -26,10 +26,9 @@ s.adminCommand( { split : "test.foo" , middle : { _id : N/2 } } )
s.adminCommand({ moveChunk: "test.foo", find: { _id: 3 }, to: s.getNonPrimaries("test")[0], _waitForDelete: true });

-var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<N; i++ )
-    bulk.insert( { _id : i } );
-assert.writeOK(bulk.execute());
+    db.foo.insert( { _id : i } )
+db.getLastError();

x = db.foo.stats();
assert.eq( N , x.count , "coll total count expected" )
diff --git a/jstests/noPassthrough/sync1.js b/jstests/sharding/sync1.js
index 490d2a53c5a..490d2a53c5a 100644
--- a/jstests/noPassthrough/sync1.js
+++ b/jstests/sharding/sync1.js
diff --git a/jstests/sharding/sync2.js b/jstests/sharding/sync2.js
index cba7faafd89..04a6f420768 100644
--- a/jstests/sharding/sync2.js
+++ b/jstests/sharding/sync2.js
@@ -54,6 +54,7 @@ assert.eq( 0 , s.config.big.find().itcount() , "C1" );
for ( i=0; i<50; i++ ){
    s.config.big.insert( { _id : i } );
}
+s.config.getLastError();
assert.eq( 50 , s.config.big.find().itcount() , "C2" );
assert.eq( 50 , s.config.big.find().count() , "C3" );
assert.eq( 50 , s.config.big.find().batchSize(5).itcount() , "C4" );
diff --git a/jstests/noPassthrough/sync4.js b/jstests/sharding/sync4.js
index 6733f07089d..6733f07089d 100644
--- a/jstests/noPassthrough/sync4.js
+++ b/jstests/sharding/sync4.js
diff --git a/jstests/noPassthrough/sync8.js b/jstests/sharding/sync8.js
index 241ad655569..241ad655569 100644
--- a/jstests/noPassthrough/sync8.js
+++ b/jstests/sharding/sync8.js
diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index 40b989ac95c..4effb1f9cb5 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -26,11 +26,10 @@ for (var i = 0; i < numChunks; i++) {
jsTest.log("Inserting a lot of small documents...")

// Insert a lot of small documents to make multiple cursor batches
-var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10 * 1000; i++) {
-    bulk.insert({ _id : i });
+    coll.insert({ _id : i })
}
-assert.writeOK(bulk.execute());
+assert.eq(null, coll.getDB().getLastError());

jsTest.log("Opening a mongod cursor...");
@@ -48,11 +47,10 @@ for (var i = 0; i < numChunks; i++) {
jsTest.log("Dropping and re-creating collection...")

coll.drop()
-bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < numChunks; i++) {
-    bulk.insert({ _id : i });
+    coll.insert({ _id : i })
}
-assert.writeOK(bulk.execute());
+assert.eq(null, coll.getDB().getLastError());

sleep(10 * 1000);
diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js
index 03877ad4125..04d818869db 100644
--- a/jstests/sharding/trace_missing_docs_test.js
+++ b/jstests/sharding/trace_missing_docs_test.js
@@ -24,9 +24,10 @@ printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._i
coll.ensureIndex({ sk : 1 });
assert( admin.runCommand({ shardCollection : coll + "", key : { sk : 1 } }).ok );

-assert.writeOK(coll.insert({ _id : 12345, sk : 67890, hello : "world" }));
-assert.writeOK(coll.update({ _id : 12345 }, { $set : { baz : 'biz' } }));
-assert.writeOK(coll.update({ sk : 67890 }, { $set : { baz : 'boz' } }));
+coll.insert({ _id : 12345, sk : 67890, hello : "world" });
+coll.update({ _id : 12345 }, { $set : { baz : 'biz' } });
+coll.update({ sk : 67890 }, { $set : { baz : 'boz' } });
+assert.eq( null, coll.getDB().getLastError() );

assert( admin.runCommand({ moveChunk : coll + "", find : { sk : 0 },
diff --git a/jstests/sharding/update1.js b/jstests/sharding/update1.js
index d555331bc7a..96a1df2d861 100644
--- a/jstests/sharding/update1.js
+++ b/jstests/sharding/update1.js
@@ -41,10 +41,13 @@ for(i=0; i < 2; i++){
        assert.eq(x._id, x.other, "_id == other");
    });

-    assert.writeError(coll.update({ _id: 1, key: 1 }, { $set: { key: 2 }}));
+
+    coll.update({_id:1, key:1}, {$set: {key:2}});
+    err = db.getLastErrorObj();
    assert.eq(coll.findOne({_id:1}).key, 1, 'key unchanged');

-    assert.writeOK(coll.update({ _id: 1, key: 1 }, { $set: { foo: 2 }}));
+    coll.update({_id:1, key:1}, {$set: {foo:2}});
+    assert.isnull(db.getLastError(), 'getLastError reset');

    coll.update( { key : 17 } , { $inc : { x : 5 } } , true );
    assert.eq( 5 , coll.findOne( { key : 17 } ).x , "up1" )
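The update1.js hunk mixes db.getLastErrorObj() and db.getLastError(): the first returns the full GLE document (including n and updatedExisting), the second just its err string, which is null on success. An illustrative pairing, not taken verbatim from the test:

    coll.update({ _id: 1, key: 1 }, { $set: { key: 2 }});
    var gleObj = db.getLastErrorObj();   // whole GLE doc, e.g. { err: "...", n: 0, ok: 1 }
    assert.neq(null, gleObj.err, 'expected shard-key change to be rejected');

    coll.update({ _id: 1, key: 1 }, { $set: { foo: 2 }});
    assert.isnull(db.getLastError());    // err string only; null means success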
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index 25fb489e39e..35c0c77b721 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -40,38 +40,58 @@ var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll

// No shard key
shard0Coll.remove({})
-assert.writeError(shard0Coll.save({ _id: 3 }));
+shard0Coll.save({_id:3})
+assert.gleError(shard0Coll.getDB(), function(gle) {
+    return "save without shard key passed - " + tojson(gle) + " doc: " + tojson(shard0Coll.findOne())
+});

// Full shard key in save
-assert.writeOK(shard0Coll.save({ _id: 1, a: 1 }));
+shard0Coll.save({_id: 1, a: 1})
+assert.gleSuccess(shard0Coll.getDB(), "save with shard key failed");

// Full shard key on replacement (basically the same as above)
shard0Coll.remove({})
-assert.writeOK(shard0Coll.update({ _id: 1 }, { a: 1 }, true));
+shard0Coll.update({_id: 1}, {a:1}, true)
+assert.gleSuccess(shard0Coll.getDB(), "update + upsert (replacement) with shard key failed");

// Full shard key after $set
shard0Coll.remove({})
-assert.writeOK(shard0Coll.update({ _id: 1 }, { $set: { a: 1 }}, true));
+shard0Coll.update({_id: 1}, {$set: {a: 1}}, true)
+assert.gleSuccess(shard0Coll.getDB(), "update + upsert ($set) with shard key failed");

// Update existing doc (replacement), same shard key value
-assert.writeOK(shard0Coll.update({ _id: 1 }, { a: 1 }));
+shard0Coll.update({_id: 1}, {a:1})
+assert.gleSuccess(shard0Coll.getDB(), "update (replacement) with shard key failed");

//Update existing doc ($set), same shard key value
-assert.writeOK(shard0Coll.update({ _id: 1 }, { $set: { a: 1 }}));
+shard0Coll.update({_id: 1}, {$set: {a: 1}})
+assert.gleSuccess(shard0Coll.getDB(), "update ($set) with shard key failed");

// Error due to mutating the shard key (replacement)
-assert.writeError(shard0Coll.update({ _id: 1 }, { b: 1 }));
+shard0Coll.update({_id: 1}, {b:1})
+assert.gleError(shard0Coll.getDB(), "update (replacement) removes shard key");

// Error due to mutating the shard key ($set)
-assert.writeError(shard0Coll.update({ _id: 1 }, { $unset: { a: 1 }}));
+shard0Coll.update({_id: 1}, {$unset: {a: 1}})
+assert.gleError(shard0Coll.getDB(), "update ($unset) removes shard key");

// Error due to removing all the embedded fields.
shard0Coll.remove({})
-assert.writeOK(shard0Coll.save({ _id: 2, a: { c: 1, b: 1 }}));
+shard0Coll.save({_id: 2, a:{c:1, b:1}})
+assert.gleSuccess(shard0Coll.getDB(), "save with shard key failed -- 1");

-assert.writeError(shard0Coll.update({}, { $unset: { "a.c": 1 }}));
-assert.writeError(shard0Coll.update({}, { $unset: { "a.b": 1, "a.c": 1 }}));
+shard0Coll.update({}, {$unset: {"a.c": 1}})
+assert.gleError(shard0Coll.getDB(), function(gle) {
+    return "unsetting part of shard key passed - " + tojson(gle) +
+           " doc: " + tojson(shard0Coll.findOne())
+});
+
+shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}})
+assert.gleError(shard0Coll.getDB(), function(gle) {
+    return "unsetting nested fields of shard key passed - " + tojson(gle) +
+           " doc: " + tojson(shard0Coll.findOne())
+});

jsTest.log("DONE!"); // distinguishes shutdown failures
st.stop();
diff --git a/jstests/gle/updated_existing.js b/jstests/sharding/updated_existing.js
index bd03c535099..bd03c535099 100644
--- a/jstests/gle/updated_existing.js
+++ b/jstests/sharding/updated_existing.js
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index 292cfe9f3f9..6378b53e85d 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -26,9 +26,8 @@ assert.eq( res.ok , 1 , "collMod failed" );

// and insert some stuff, for the hell of it
var numdocs = 20;
-for( i=0; i < numdocs; i++){
-    assert.writeOK(db1.getCollection( coll ).insert({ _id : i }));
-}
+for( i=0; i < numdocs; i++){ db1.getCollection( coll ).insert( {_id : i} ); }
+db1.getLastError()

// Next verify that userFlags has changed to 0
collstats = db1.getCollection( coll ).stats()
diff --git a/jstests/sharding/wbl_not_cleared.js b/jstests/sharding/wbl_not_cleared.js
index 12dbfca842f..a3b353225ca 100644
--- a/jstests/sharding/wbl_not_cleared.js
+++ b/jstests/sharding/wbl_not_cleared.js
@@ -26,7 +26,8 @@ st.printShardingStatus();

jsTest.log("Making mongos stale...");

-assert.writeOK(coll.insert({ _id : 0 }));
+coll.insert({ _id : 0 });
+coll.getDB().getLastErrorObj();

// Make sure the stale mongos knows about the collection at the original version
assert.neq(null, staleMongos.getCollection(coll + "").findOne());
@@ -36,16 +37,27 @@ printjson(admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : sha

jsTest.log("Running a stale insert...");

-// duplicate _id
-assert.writeError(staleMongos.getCollection(coll + "").insert({ _id : 0, dup : "key" }));
+staleMongos.getCollection(coll + "").insert({ _id : 0, dup : "key" });
+
+jsTest.log("Getting initial GLE result...");
+
+printjson(staleMongos.getDB(coll.getDB() + "").getLastErrorObj());
+printjson(staleMongos.getDB(coll.getDB() + "").getLastErrorObj());

st.printShardingStatus();

jsTest.log("Performing insert op on the same shard...");

-assert.writeOK(staleMongos.getCollection(coll + "").insert({ _id : 1, key : "isOk" }));
+staleMongos.getCollection(coll + "").insert({ _id : 1, key : "isOk" })
+
+jsTest.log("Getting GLE result...");
+
+printjson(staleMongos.getDB(coll.getDB() + "").getLastErrorObj());
+assert.eq(null, staleMongos.getDB(coll.getDB() + "").getLastError());

jsTest.log("DONE!");

st.stop();
+
+
+
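The doubled printjson(...getLastErrorObj()) in wbl_not_cleared.js appears deliberate: the first call surfaces the duplicate-key error carried back through the writeback path, and the second shows what, if anything, remains on the connection afterwards. A sketch of the failure shape being inspected; the field values are illustrative, not captured from a run:

    staleMongos.getCollection(coll + "").insert({ _id : 0, dup : "key" });
    var gle = staleMongos.getDB(coll.getDB() + "").getLastErrorObj();
    // Expected shape on a duplicate-key failure:
    //   { err: "E11000 duplicate key error ...", code: 11000, n: 0, ok: 1 }
    printjson(gle);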
diff --git a/jstests/sharding/writeback_bulk_insert.js b/jstests/sharding/writeback_bulk_insert.js
new file mode 100644
index 00000000000..9f22875d046
--- /dev/null
+++ b/jstests/sharding/writeback_bulk_insert.js
@@ -0,0 +1,91 @@
+//
+// Tests whether a writeback error during bulk insert hangs GLE
+//
+
+jsTest.log("Starting sharded cluster...")
+
+var st = new ShardingTest({shards : 1,
+                           mongos : 3,
+                           verbose : 2,
+                           other : {separateConfig : true,
+                                    mongosOptions : {noAutoSplit : ""}}})
+
+st.stopBalancer()
+
+var mongosA = st.s0
+var mongosB = st.s1
+var mongosC = st.s2
+
+jsTest.log("Adding new collection...")
+
+var collA = mongosA.getCollection(jsTestName() + ".coll")
+collA.insert({hello : "world"})
+assert.eq(null, collA.getDB().getLastError())
+
+var collB = mongosB.getCollection("" + collA)
+collB.insert({hello : "world"})
+assert.eq(null, collB.getDB().getLastError())
+
+jsTest.log("Enabling sharding...")
+
+printjson(mongosA.getDB("admin").runCommand({enableSharding : collA.getDB() + ""}))
+printjson(mongosA.getDB("admin").runCommand({shardCollection : collA + "",
+                                             key : {_id : 1}}))
+
+// MongoD doesn't know about the config shard version *until* MongoS tells it
+collA.findOne()
+
+jsTest.log("Preparing bulk insert...")
+
+var data1MB = "x"
+while (data1MB.length < 1024 * 1024)
+    data1MB += data1MB;
+
+var data7MB = ""
+// Data now at 7MB
+for ( var i = 0; i < 7; i++)
+    data7MB += data1MB;
+
+print("7MB object size is : " + Object.bsonsize({_id : 0, d : data7MB}))
+
+var dataCloseTo8MB = data7MB;
+// WARNING - MAGIC NUMBERS HERE
+// The idea is to just hit the 16MB limit so that the message gets passed in the
+// shell, but adding additional writeback information could fail.
+for ( var i = 0; i < 1024 * 1024 - 70; i++) {
+    dataCloseTo8MB += "x"
+}
+
+var data8MB = "";
+for ( var i = 0; i < 8; i++) {
+    data8MB += data1MB;
+}
+
+print("Object size is: " + Object.bsonsize([{_id : 0, d : dataCloseTo8MB},
+                                            {_id : 1, d : data8MB}]))
+
+jsTest.log("Trigger wbl for mongosB...")
+
+collB.insert([{_id : 0, d : dataCloseTo8MB},
+              {_id : 1, d : data8MB}])
+
+// Should succeed since our insert size is 16MB (plus very small overhead)
+jsTest.log("Waiting for GLE...")
+
+assert.eq(null, collB.getDB().getLastError())
+
+print("GLE Successful...")
+
+// Check that the counts via both mongoses are the same
+assert.eq(4, collA.find().itcount())
+assert.eq(4, collB.find().itcount())
+
+st.stop()
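The magic numbers in writeback_bulk_insert.js are easier to audit with the arithmetic written out; this only restates what the test builds and is not part of the file:

    // Restating the size arithmetic from the test. data1MB doubles from
    // "x" until it reaches 1MB, so data7MB is 7 * 1MB. dataCloseTo8MB then
    // pads it with (1MB - 70) more bytes, so the two-document batch is:
    //   (7MB + 1MB - 70) + 8MB  ~=  16MB
    // i.e. just under the 16MB wire-message limit, leaving almost no slack
    // for extra writeback metadata.
    var oneMB = 1024 * 1024;
    var batchBytes = (7 * oneMB + oneMB - 70) + 8 * oneMB;
    print("approx batch payload: " + batchBytes + " bytes");  // ~16MB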
diff --git a/jstests/sharding/writeback_server7958.js b/jstests/sharding/writeback_server7958.js
new file mode 100644
index 00000000000..20064ef53d1
--- /dev/null
+++ b/jstests/sharding/writeback_server7958.js
@@ -0,0 +1,94 @@
+jsTest.log("Starting sharded cluster for wrong duplicate error setup");
+
+s = new ShardingTest( name="writeback_server7958", shards = 2, verbose=0, mongos = 4 );
+
+var mongosA=s.s0;
+var mongosB=s.s1;
+var mongosC=s.s2;
+var mongosD=s.s3;
+
+ns1 = "test.trans";
+ns2 = "test.node";
+
+adminSA = mongosA.getDB( "admin" );
+adminSB = mongosB.getDB( "admin" );
+adminSD = mongosD.getDB( "admin" );
+adminSA.runCommand({ enableSharding : "test"});
+adminSA.runCommand({ shardCollection : ns1, key : { owner : 1 }, unique: true });
+//adminSA.runCommand({ shardCollection : ns1, key : { owner : 1 } });
+
+try {
+    s.stopBalancer();
+} catch (e) {
+    print("couldn't stop balancer via command");
+}
+
+adminSA.settings.update({ _id: 'balancer' }, { $set: { stopped: true }});
+
+var db = mongosA.getDB( "test" );
+var dbB = mongosB.getDB( "test" );
+var dbC = mongosC.getDB( "test" );
+var dbD = mongosD.getDB( "test" );
+var trans = db.trans;
+var node = db.node;
+var transB = dbB.trans;
+var nodeB = dbB.node;
+var transC = dbC.trans;
+var nodeC = dbC.node;
+var transD = dbD.trans;
+var nodeD = dbD.node;
+
+var primary = s.getServerName("test");
+var shard1 = s._shardNames[0];
+var shard2 = s._shardNames[1];
+if (primary == shard1) {
+    other = shard2;
+} else {
+    other = shard1;
+}
+
+trans.insert({"owner":NumberLong("1234567890"),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
+db.runCommand({getLastError:1, j:1});
+
+node.insert({"owner":NumberLong("1234567890"),"parent":NumberLong("0"),_id:NumberLong("1234567890"), "counts":0});
+db.runCommand({getLastError:1, j:1});
+for (var i=0; i<1000; i++) {
+    trans.insert({"owner":NumberLong(i),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
+    node.insert({"owner":NumberLong(i),"parent":NumberLong(i+1000),_id:NumberLong(i+1234567890), "counts":0});
+}
+
+transB.insert({"owner":NumberLong("1234567890"),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
+var r1=dbB.runCommand( { getLastError: 1, w: 1 } );
+assert( r1.n == 0 && r1.err.length > 0 && r1.hasOwnProperty("code"), tojson( r1 ) );
+
+jsTest.log("Inserted dup (failed), now split chunks and move data");
+
+adminSD.runCommand( { split: ns1, middle : { owner : 100} });
+adminSD.runCommand( { movechunk: ns1, find : { owner : 105}, to: other});
+
+jsTest.log("Kicking off dup inserts and updates");
+
+errors=[];
+i=0;
+trans.insert({"owner":NumberLong("1234567890"),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
+var r1=db.runCommand( { getLastError: 1, w: 1 } );
+assert( r1.n == 0 && r1.err.length > 0 && r1.hasOwnProperty("code"), tojson( r1 ) );
+transB.insert({"owner":NumberLong("1234567890"),"tid":"2c4ba280-450a-11e2-bcfd-0800200c9a66"});
+var rB1=dbB.runCommand( { getLastError: 1, w: 1 } );
+assert( rB1.n == 0 && rB1.err.length > 0 && rB1.hasOwnProperty("code"), tojson( rB1 ) );
+
+nodeB.update({"owner":NumberLong("1234567890"),"parent":NumberLong("0"),_id:NumberLong("1234567890")},{"$inc":{"counts":1}});
+var resultB = dbB.runCommand( { getLastError: 1, w: 1 } )
+node.update({"owner":NumberLong("1234567890"),"parent":NumberLong("0"),_id:NumberLong("1234567890")},{"$inc":{"counts":1}});
+var result = db.runCommand( { getLastError: 1, w: 1 } )
+
+assert.eq( 2, node.findOne().counts );
+
+printjson( result )
+printjson( resultB )
+
+assert( result.n==1 && result.updatedExisting==true && result.err == null, "update succeeded on collection node on mongos A but GLE was\nn=" + result.n + ",\nupdatedExisting=" + result.updatedExisting + ",\nerr=" + result.err);
+assert( resultB.n==1 && resultB.updatedExisting==true && resultB.err == null, "update succeeded on collection node on mongos B but GLE was\nn=" + resultB.n + ",\nupdatedExisting=" + resultB.updatedExisting + ",\nerr=" + resultB.err);
+
+s.stop();
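The duplicate-insert checks in writeback_server7958.js assert the same GLE failure shape several times: a failed insert reports n == 0, a non-empty err string, and an error code. A hypothetical helper naming that intent (not part of the test):

    // Hypothetical helper for the GLE shape asserted repeatedly above.
    function assertGLEFailed(gleDoc) {
        assert(gleDoc.n == 0 && gleDoc.err.length > 0 && gleDoc.hasOwnProperty("code"),
               tojson(gleDoc));
    }

    transB.insert({ owner: NumberLong("1234567890"), tid: "2c4ba280-450a-11e2-bcfd-0800200c9a66" });
    assertGLEFailed(dbB.runCommand({ getLastError: 1, w: 1 }));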
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/writeback_shard_version.js
index 1d665b6c65e..f896ed47bb9 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/writeback_shard_version.js
@@ -12,10 +12,12 @@ var mongosB = st.s1
jsTest.log( "Adding new collections...")

var collA = mongosA.getCollection( jsTestName() + ".coll" )
-assert.writeOK(collA.insert({ hello : "world" }));
+collA.insert({ hello : "world" })
+assert.eq( null, collA.getDB().getLastError() )

var collB = mongosB.getCollection( "" + collA )
-assert.writeOK(collB.insert({ hello : "world" }));
+collB.insert({ hello : "world" })
+assert.eq( null, collB.getDB().getLastError() )

jsTest.log( "Enabling sharding..." )
@@ -25,13 +27,14 @@ printjson( mongosA.getDB( "admin" ).runCommand({ shardCollection : "" + collA, k
// MongoD doesn't know about the config shard version *until* MongoS tells it
collA.findOne()

-jsTest.log( "Trigger shard version mismatch..." );
+jsTest.log( "Trigger wbl..." )

-assert.writeOK(collB.insert({ goodbye : "world" }));
+collB.insert({ goodbye : "world" })
+assert.eq( null, collB.getDB().getLastError() )

print( "Inserted..." )

assert.eq( 3, collA.find().itcount() )
assert.eq( 3, collB.find().itcount() )

-st.stop()
+st.stop()
\ No newline at end of file
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index 07219374a66..b40fe4a9c94 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -31,15 +31,12 @@ else {
    for (var i = 0; i < 4*1024; i++) str += "a";
}

-var bulk = db.foo.initializeUnorderedBulkOp();
-for (j=0; j<100; j++) {
-    for (i=0; i<512; i++){
-        bulk.insert({ i: idInc++, val: valInc++, y:str });
-    }
-}
-assert.writeOK(bulk.execute());
+for (j=0; j<100; j++) for (i=0; i<512; i++){ db.foo.save({ i : idInc++, val: valInc++, y:str})}
+
jsTest.log( "Documents inserted, waiting for error..." )

+db.getLastError();
+
jsTest.log( "Doing double-checks of insert..." )

// Collect some useful stats to figure out what happened
@@ -121,15 +118,17 @@ jsTest.log( )
valInc = 0;
for (j=0; j<100; j++){
    print( "Inserted document: " + (j * 100) );
-    bulk = db.foo.initializeUnorderedBulkOp();
-    for (i=0; i<512; i++){
-        bulk.insert({ i : idInc++, val: valInc++, y: str });
-    }
+    for (i=0; i<512; i++){ db.foo.save({ i : idInc++, val: valInc++, y:str}) }
    // wait for replication to catch up
-    assert.writeOK(bulk.execute({ w: 2 }));
+    db.runCommand({getLastError:1, w:2, wtimeout:10000});
}

-jsTest.log( "No errors..." );
+jsTest.log( "Waiting for errors..." )
+
+assert.eq( null, db.getLastError() )
+
+jsTest.log( "No errors..." )
+

map2 = function() { emit(this.val, 1); }
reduce2 = function(key, values) { return Array.sum(values); }
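A final note on the zbigMapReduce.js hunk: getLastError with a write concern doubles as a replication barrier. A minimal sketch of the idiom, assuming a replica set with at least two members behind the shard, as this test's w:2 implies; the result handling is illustrative:

    // getLastError as a replication barrier: with w:2 the command blocks
    // until two replica-set members have applied the preceding writes, or
    // until wtimeout (in milliseconds) expires.
    db.foo.save({ i: 0, val: 0, y: "payload" });
    var res = db.runCommand({ getLastError: 1, w: 2, wtimeout: 10000 });
    // On timeout, GLE reports wtimeout: true rather than failing the write.
    if (res.err != null || res.wtimeout) {
        print("replication lagging: " + tojson(res));
    }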