From eb41492c6f1228077b92239524e4a607b70cd8e3 Mon Sep 17 00:00:00 2001 From: Randolph Tan Date: Mon, 3 Mar 2014 11:27:18 -0500 Subject: SERVER-13190 migrate replset jstest suite to use write commands api --- buildscripts/smoke.py | 2 +- jstests/replsets/auth1.js | 17 ++- jstests/replsets/auth2.js | 6 +- jstests/replsets/auth3.js | 5 +- jstests/replsets/cloneDb.js | 8 +- jstests/replsets/fastsync.js | 9 +- jstests/replsets/index_delete.js | 14 ++- jstests/replsets/index_restart_secondary.js | 4 +- jstests/replsets/indexbg_drop.js | 12 ++- jstests/replsets/indexbg_interrupts.js | 14 ++- jstests/replsets/indexbg_restart_secondary.js | 12 ++- .../indexbg_restart_sigkill_secondary_noretry.js | 12 ++- jstests/replsets/initial_sync1.js | 13 ++- jstests/replsets/initial_sync3.js | 6 +- jstests/replsets/initial_sync4.js | 9 +- jstests/replsets/localhostAuthBypass.js | 37 +++---- jstests/replsets/maintenance.js | 14 ++- jstests/replsets/oplog_format.js | 114 ++++++++------------- jstests/replsets/optime.js | 9 +- jstests/replsets/regex.js | 18 +--- jstests/replsets/replset1.js | 5 +- jstests/replsets/replset2.js | 84 ++++++--------- jstests/replsets/replset3.js | 3 +- jstests/replsets/replset5.js | 24 ++--- jstests/replsets/replset7.js | 19 ++-- jstests/replsets/replset8.js | 19 ++-- jstests/replsets/replset9.js | 20 ++-- jstests/replsets/resync.js | 8 +- jstests/replsets/rollback.js | 12 ++- jstests/replsets/rollback4.js | 18 ++-- jstests/replsets/rollback5.js | 12 +-- jstests/replsets/server_status_metrics.js | 29 +++--- jstests/replsets/single_server_majority.js | 8 +- jstests/replsets/slaveDelay2.js | 7 +- jstests/replsets/slavedelay1.js | 6 +- jstests/replsets/stepdown3.js | 4 +- jstests/replsets/sync2.js | 12 +-- jstests/replsets/tags.js | 71 ++++++------- jstests/replsets/tags2.js | 12 +-- jstests/replsets/tags_with_reconfig.js | 30 ++---- jstests/replsets/temp_namespace.js | 3 +- jstests/replsets/toostale.js | 11 +- 42 files changed, 359 insertions(+), 393 deletions(-) diff --git a/buildscripts/smoke.py b/buildscripts/smoke.py index d8a4ec39006..109e485d90c 100755 --- a/buildscripts/smoke.py +++ b/buildscripts/smoke.py @@ -468,7 +468,7 @@ def runTest(test, result): path = argv[1] elif ext == ".js": argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism] - if use_write_commands or "aggregation" in path: + if use_write_commands or "aggregation" in path or "replsets" in path: argv += ["--writeMode", "commands"] else: argv += ["--writeMode", shell_write_mode] diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js index 37f9da92488..b65085c5702 100644 --- a/jstests/replsets/auth1.js +++ b/jstests/replsets/auth1.js @@ -39,7 +39,6 @@ m = startMongodTest( port[0], name+"-0", 0 ); m.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}); m.getDB("test").createUser({user: "bar", pwd: "baz", roles: jsTest.basicUserRoles}); print("make sure user is written before shutting down"); -m.getDB("test").getLastError(); stopMongod(port[0]); print("start up rs"); @@ -63,9 +62,7 @@ wait(function() { return status.members && status.members[1].state == 2 && status.members[2].state == 2; }); -master.foo.insert({x:1}); -master.runCommand({getlasterror:1, w:3, wtimeout:60000}); - +master.foo.insert({ x: 1 }, { writeConcern: { w:3, wtimeout:60000 }}); print("try some legal and illegal reads"); var r = master.foo.findOne(); @@ -108,11 +105,11 @@ assert.eq(r.x, 1); print("add some data"); master.auth("bar", "baz"); +var bulk = 
master.foo.initializeUnorderedBulkOp(); for (var i=0; i<1000; i++) { - master.foo.insert({x:i, foo : "bar"}); + bulk.insert({ x: i, foo: "bar" }); } -master.runCommand({getlasterror:1, w:3, wtimeout:60000}); - +assert.writeOK(bulk.execute({ w: 3, wtimeout: 60000 })); print("fail over"); rs.stop(0); @@ -149,11 +146,11 @@ rs.restart(0, {"keyFile" : path+"key1"}); print("add some more data 2"); +var bulk = master.foo.initializeUnorderedBulkOp(); for (var i=0; i<1000; i++) { - master.foo.insert({x:i, foo : "bar"}); + bulk.insert({ x: i, foo: "bar" }); } -master.runCommand({getlasterror:1, w:3, wtimeout:60000}); - +bulk.execute({ w:3, wtimeout:60000 }); print("add member with wrong key"); var conn = new MongodRunner(port[3], MongoRunner.dataPath+name+"-3", null, null, ["--replSet","rs_auth1","--rest","--oplogSize","2", "--keyFile", path+"key2"], {no_bind : true}); diff --git a/jstests/replsets/auth2.js b/jstests/replsets/auth2.js index 8287928c008..cd20f1ceae6 100644 --- a/jstests/replsets/auth2.js +++ b/jstests/replsets/auth2.js @@ -28,11 +28,7 @@ var setupReplSet = function() { var checkNoAuth = function() { print("without an admin user, things should work"); - master.getDB("foo").bar.insert({x:1}); - var result = master.getDB("admin").runCommand({getLastError:1}); - - printjson(result); - assert.eq(result.err, null); + assert.writeOK(master.getDB("foo").bar.insert({ x: 1 })); } var checkInvalidAuthStates = function() { diff --git a/jstests/replsets/auth3.js b/jstests/replsets/auth3.js index d15a5603cc8..846debb626c 100644 --- a/jstests/replsets/auth3.js +++ b/jstests/replsets/auth3.js @@ -20,10 +20,7 @@ var checkValidState = function(i) { var safeInsert = function() { master = rs.getMaster(); master.getDB("admin").auth("foo", "bar"); - master.getDB("foo").bar.insert({x:1}); - var insertWorked = master.getDB("foo").runCommand({getlasterror:1}); - printjson(insertWorked); - assert.eq(insertWorked.ok, 1); + assert.writeOK(master.getDB("foo").bar.insert({ x: 1 })); } print("authing"); diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js index a92dd195edc..5f58805e427 100644 --- a/jstests/replsets/cloneDb.js +++ b/jstests/replsets/cloneDb.js @@ -18,11 +18,8 @@ doTest = function( signal ) { print("Insert data"); for (var i = 0; i < N; i++) { - db1['foo'].insert({x: i, text: Text}) - var le = db1.getLastErrorObj(2, 1000); // wait to be copied to at least one secondary - if (le.err) { - printjson(le); - } + var option = { writeConcern: { w: 2, wtimeout: 1000}}; + assert.writeOK(db1['foo'].insert({ x: i, text: Text }, option)); } print("Create single server"); @@ -43,7 +40,6 @@ doTest = function( signal ) { db2 = soloConn.getDB('test2') for (var i = 0; i < N; i++) { db2['foo'].insert({x: i, text: Text}) - db2.getLastError() } db1.cloneDatabase (solo.host()) assert.eq (Text, db2['foo'] .findOne({x: N-1}) ['text'], 'cloneDatabase failed (test2)') diff --git a/jstests/replsets/fastsync.js b/jstests/replsets/fastsync.js index 9ae04187f37..bd615675904 100644 --- a/jstests/replsets/fastsync.js +++ b/jstests/replsets/fastsync.js @@ -1,5 +1,5 @@ /* - * 1. insert 100000 objects + * 1. insert 10000 objects * 2. export to two dbpaths * 3. add one node w/fastsync * 4. 
check that we never get "errmsg" : "initial sync cloning db: whatever" @@ -74,12 +74,13 @@ assert.soon(function() { result = false; }); print("1"); -for (var i=0; i<100000; i++) { - foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"}); +var bulk = foo.bar.initializeUnorderedBulkOp(); +for (var i=0; i<10000; i++) { + bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" }); } +assert.writeOK(bulk.execute()); print("total in foo: "+foo.bar.count()); - print("2"); admin.runCommand( {fsync:1,lock:1} ); copyDbpath( basePath + "-p", basePath + "-s"+1 ); diff --git a/jstests/replsets/index_delete.js b/jstests/replsets/index_delete.js index adbd4e994f1..018387d99c6 100644 --- a/jstests/replsets/index_delete.js +++ b/jstests/replsets/index_delete.js @@ -1,3 +1,11 @@ +/** + * TODO: SERVER-13204 + * This test inserts a huge number of documents, initiates a background index build + * and tries to perform another task in parallel while the background index task is + * active. The problem is that this is timing dependent and the current test setup + * tries to achieve this by inserting an insane number of documents. + */ + /** * Starts a replica set with arbiter, build an index * drop index once secondary starts building index, * @@ -36,12 +44,14 @@ var second = replTest.getSecondary(); var masterDB = master.getDB('fgIndexSec'); var secondDB = second.getDB('fgIndexSec'); -var size = 500000; +var size = 50000; jsTest.log("creating test data " + size + " documents"); +var bulk = masterDB.jstests_fgsec.initializeUnorderedBulkOp(); for(var i = 0; i < size; ++i) { - masterDB.jstests_fgsec.save( {i:i} ); + bulk.insert({ i: i }); } +assert.writeOK(bulk.execute()); jsTest.log("Creating index"); masterDB.jstests_fgsec.ensureIndex( {i:1} ); diff --git a/jstests/replsets/index_restart_secondary.js b/jstests/replsets/index_restart_secondary.js index 1273d2a6e2f..92b43d2bfd5 100644 --- a/jstests/replsets/index_restart_secondary.js +++ b/jstests/replsets/index_restart_secondary.js @@ -26,9 +26,11 @@ var secondDB = second.getDB('fgIndexSec'); var size = 500000; jsTest.log("creating test data " + size + " documents"); +var bulk = masterDB.jstests_fgsec.initializeUnorderedBulkOp(); for(var i = 0; i < size; ++i) { - masterDB.jstests_fgsec.save( {i:i} ); + bulk.insert({ i: i }); } +assert.writeOK(bulk.execute()); jsTest.log("Creating index"); masterDB.jstests_fgsec.ensureIndex( {i:1} );
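The hunks above show the pattern this patch applies across the whole suite: a loop of fire-and-forget save()/insert() calls followed by a getLastError round trip becomes one unordered bulk operation whose aggregated result is asserted on. A minimal sketch of the two styles, assuming a hypothetical shell collection handle named coll:

    // Legacy write path: each insert is fire-and-forget; errors only surface
    // through a separate getLastError call on the same connection.
    for (var i = 0; i < 1000; i++) {
        coll.insert({ i: i });
    }
    printjson(coll.getDB().getLastErrorObj());

    // Write commands path: queue the documents client-side, ship them in
    // batches with a single execute(), and fail fast on any rejected write.
    var bulk = coll.initializeUnorderedBulkOp();
    for (var i = 0; i < 1000; i++) {
        bulk.insert({ i: i });
    }
    assert.writeOK(bulk.execute());

An unordered bulk lets the server keep applying the batch after an individual failure, which is all these data-loading loops need and is why the tests prefer it over the ordered variant.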
diff --git a/jstests/replsets/indexbg_drop.js b/jstests/replsets/indexbg_drop.js index 77c13fb3e5a..ab69d6c5455 100644 --- a/jstests/replsets/indexbg_drop.js +++ b/jstests/replsets/indexbg_drop.js @@ -1,3 +1,11 @@ +/** + * TODO: SERVER-13204 + * This test inserts a huge number of documents, initiates a background index build + * and tries to perform another task in parallel while the background index task is + * active. The problem is that this is timing dependent and the current test setup + * tries to achieve this by inserting an insane number of documents. + */ + // Index drop race var dbname = 'dropbgindex'; @@ -32,9 +40,11 @@ var dc = {dropIndexes: collection, index: "i_1"}; // set up collections masterDB.dropDatabase(); jsTest.log("creating test data " + size + " documents"); +var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp(); for( i = 0; i < size; ++i ) { - masterDB.getCollection(collection).save( {i: Random.rand()} ); + bulk.insert( {i: Random.rand()} ); } +assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing for test of: " + tojson(dc)); masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} ); diff --git a/jstests/replsets/indexbg_interrupts.js b/jstests/replsets/indexbg_interrupts.js index c903f981794..d7814d2a025 100644 --- a/jstests/replsets/indexbg_interrupts.js +++ b/jstests/replsets/indexbg_interrupts.js @@ -1,3 +1,11 @@ +/** + * TODO: SERVER-13204 + * This test inserts a huge number of documents, initiates a background index build + * and tries to perform another task in parallel while the background index task is + * active. The problem is that this is timing dependent and the current test setup + * tries to achieve this by inserting an insane number of documents. + */ + /** * Starts a replica set with arbiter, builds an index in background, * run through drop indexes, drop collection, drop database. * @@ -45,7 +53,7 @@ var dropAction = [ {dropIndexes: collection, index: "i_1"}, {drop: collection}, {dropDatabase: 1 }, - {convertToCapped: collection, size: 20000} + {convertToCapped: collection, size: 20} ]; @@ -56,9 +64,11 @@ for (var idx = 0; idx < dropAction.length; idx++) { // set up collections masterDB.dropDatabase(); jsTest.log("creating test data " + size + " documents"); + var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp(); for(var i = 0; i < size; ++i ) { - masterDB.getCollection(collection).save( {i:i} ); + bulk.insert({ i: i }); } + assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc)); masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} );
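The other half of the migration, visible from auth1.js above through the tags tests below, moves replication acknowledgement out of a separate getLastError command and into the write itself. A sketch of the two forms the patch uses, again with a hypothetical collection handle coll:

    // Per-operation: the options document carries the write concern.
    var options = { writeConcern: { w: 3, wtimeout: 60000 }};
    assert.writeOK(coll.insert({ x: 1 }, options));

    // Bulk: the write concern is handed to execute() and covers the batch.
    var bulk = coll.initializeUnorderedBulkOp();
    bulk.insert({ x: 2 });
    assert.writeOK(bulk.execute({ w: 3, wtimeout: 60000 }));

assert.writeOK throws if the result carries either a write error or a write concern error, replacing the old hand-written checks of gle.err.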
diff --git a/jstests/replsets/indexbg_restart_secondary.js b/jstests/replsets/indexbg_restart_secondary.js index 42a31991393..0867c47c2bf 100644 --- a/jstests/replsets/indexbg_restart_secondary.js +++ b/jstests/replsets/indexbg_restart_secondary.js @@ -1,3 +1,11 @@ +/** + * TODO: SERVER-13204 + * This test inserts a huge number of documents, initiates a background index build + * and tries to perform another task in parallel while the background index task is + * active. The problem is that this is timing dependent and the current test setup + * tries to achieve this by inserting an insane number of documents. + */ + /** * Starts a replica set with arbiter, builds an index in background * restart secondary once it starts building index, secondary should * @@ -28,9 +36,11 @@ var secondDB = second.getDB('bgIndexSec'); var size = 500000; jsTest.log("creating test data " + size + " documents"); +var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp(); for(var i = 0; i < size; ++i) { - masterDB.jstests_bgsec.save( {i:i} ); + bulk.insert({ i: i }); } +assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing"); masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} ); diff --git a/jstests/replsets/indexbg_restart_sigkill_secondary_noretry.js b/jstests/replsets/indexbg_restart_sigkill_secondary_noretry.js index 18fb4cb9e72..a15b330ec2d 100644 --- a/jstests/replsets/indexbg_restart_sigkill_secondary_noretry.js +++ b/jstests/replsets/indexbg_restart_sigkill_secondary_noretry.js @@ -1,3 +1,11 @@ +/** + * TODO: SERVER-13204 + * This test inserts a huge number of documents, initiates a background index build + * and tries to perform another task in parallel while the background index task is + * active. The problem is that this is timing dependent and the current test setup + * tries to achieve this by inserting an insane number of documents. + */ + /** * Starts a replica set, builds an index in background * restart secondary once it starts building index. Secondary is issued SIGKILL * @@ -54,9 +62,11 @@ var size = 500000; jsTest.log("creating test data " + size + " documents"); + var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp(); for( i = 0; i < size; ++i ) { - masterDB.jstests_bgsec.save( {i:i} ); + bulk.insert({ i: i }); } + assert.writeOK(bulk.execute()); jsTest.log("Starting background indexing"); masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} ); diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js index f6c3e9dfecf..205d0b7531a 100644 --- a/jstests/replsets/initial_sync1.js +++ b/jstests/replsets/initial_sync1.js @@ -35,9 +35,11 @@ var admin_s1 = slave1.getDB("admin"); var local_s1 = slave1.getDB("local"); print("2. Insert some data"); -for (var i=0; i<10000; i++) { - foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"}); +var bulk = foo.bar.initializeUnorderedBulkOp(); +for (var i = 0; i < 100; i++) { + bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" }); } +assert.writeOK(bulk.execute()); print("total in foo: "+foo.bar.count()); @@ -119,10 +121,11 @@ wait(function() { print("10. Insert some stuff"); master = replTest.getMaster(); -for (var i=0; i<10000; i++) { - foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"}); +bulk = foo.bar.initializeUnorderedBulkOp(); +for (var i = 0; i < 100; i++) { + bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" }); } - +assert.writeOK(bulk.execute()); print("11. 
Everyone happy eventually"); replTest.awaitReplication(300000); diff --git a/jstests/replsets/initial_sync3.js b/jstests/replsets/initial_sync3.js index b13ad486388..40c23053c3a 100644 --- a/jstests/replsets/initial_sync3.js +++ b/jstests/replsets/initial_sync3.js @@ -97,10 +97,8 @@ rs2.awaitReplication(); master = rs2.getMaster(); -master.getDB("foo").bar.baz.insert({x:2}); -var x = master.getDB("foo").runCommand({getLastError : 1, w : 3, wtimeout : 60000}); -printjson(x); -assert.eq(null, x.err); +var option = { writeConcern: { w : 3, wtimeout : 60000 }}; +assert.writeOK(master.getDB("foo").bar.baz.insert({ x: 2 }, option)); rs2.stopSet(); diff --git a/jstests/replsets/initial_sync4.js b/jstests/replsets/initial_sync4.js index 0b556fe0dfd..9e0d869d62f 100644 --- a/jstests/replsets/initial_sync4.js +++ b/jstests/replsets/initial_sync4.js @@ -13,12 +13,13 @@ md = m.getDB("d"); mc = m.getDB("d")["c"]; print("2. Insert some data"); -N = 50000; -mc.ensureIndex({x:1}) +N = 5000; +mc.ensureIndex({x:1}); +var bulk = mc.initializeUnorderedBulkOp(); for( i = 0; i < N; ++i ) { - mc.save( {_id:i,x:i,a:{}} ); + bulk.insert({ _id: i, x: i, a: {} }); } -md.getLastError(); +assert.writeOK(bulk.execute()); print("3. Make sure synced"); replTest.awaitReplication(); diff --git a/jstests/replsets/localhostAuthBypass.js b/jstests/replsets/localhostAuthBypass.js index ba48c3fd7dd..6f00002a61b 100644 --- a/jstests/replsets/localhostAuthBypass.js +++ b/jstests/replsets/localhostAuthBypass.js @@ -19,17 +19,19 @@ var assertCannotRunCommands = function(mongo) { var test = mongo.getDB("test"); assert.throws( function() { test.system.users.findOne(); }); + assert.throws( function() { test.foo.findOne({ _id: 0 }); }); - test.foo.save({_id:0}); - assert(test.getLastError()); - - assert.throws( function() { test.foo.findOne({_id:0}); }); - - test.foo.update({_id:0}, {$set:{x:20}}); - assert(test.getLastError()); - - test.foo.remove({_id:0}); - assert(test.getLastError()); + assert.throws(function() { + test.foo.save({ _id: 0 }) + }); + + assert.throws(function() { + test.foo.update({ _id: 0 }, { $set: { x: 20 }}) + }); + + assert.throws(function() { + test.foo.remove({ _id: 0 }) + }); assert.throws(function() { test.foo.mapReduce( @@ -46,15 +48,10 @@ var assertCanRunCommands = function(mongo) { // will throw on failure test.system.users.findOne(); - test.foo.save({_id: 0}); - assert(test.getLastError() == null); - - test.foo.update({_id: 0}, {$set:{x:20}}); - assert(test.getLastError() == null); - - test.foo.remove({_id: 0}); - assert(test.getLastError() == null); - + assert.writeOK(test.foo.save({_id: 0 })); + assert.writeOK(test.foo.update({ _id: 0 }, { $set: { x: 20 }})); + assert.writeOK(test.foo.remove({ _id: 0 })); + test.foo.mapReduce( function() { emit(1, 1); }, function(id, count) { return Array.sum(count); }, @@ -121,4 +118,4 @@ var runTest = function(useHostName) { } runTest(false); -runTest(true); \ No newline at end of file +runTest(true); diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js index 0bfeff4752c..7259e6b9acb 100644 --- a/jstests/replsets/maintenance.js +++ b/jstests/replsets/maintenance.js @@ -1,14 +1,18 @@ var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 2} ); -var conns = replTest.startSet(); +var conns = replTest.startSet({ verbose: 1 }); replTest.initiate(); // Make sure we have a master var master = replTest.getMaster(); -for (i=0;i<10000; i++) { master.getDB("bar").foo.insert({x:1,y:i,abc:123,str:"foo bar baz"}); } -for (i=0;i<1000; i++) { 
master.getDB("bar").foo.update({y:i},{$push :{foo : "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}}); } +for (i = 0; i < 20; i++) { + master.getDB("bar").foo.insert({x:1,y:i,abc:123,str:"foo bar baz"}); +} +for (i = 0; i < 20; i++) { + master.getDB("bar").foo.update({ y: i }, { $push: { foo: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}}); +} replTest.awaitReplication(); @@ -46,8 +50,8 @@ result = master.getDB("admin").runCommand({replSetMaintenance : 1}); assert.eq(result.ok, 0, tojson(result)); print("check getMore works on a secondary, not on a recovering node"); -var cursor = conns[1].getDB("bar").foo.find(); -for (var i=0; i<50; i++) { +var cursor = conns[1].getDB("bar").foo.find().batchSize(2); +for (var i = 0; i < 5; i++) { cursor.next(); } diff --git a/jstests/replsets/oplog_format.js b/jstests/replsets/oplog_format.js index a1054256af3..c07d4f66cef 100644 --- a/jstests/replsets/oplog_format.js +++ b/jstests/replsets/oplog_format.js @@ -34,40 +34,32 @@ assertLastOplog({_id:1}, null, "save -- setup "); var msg = "IncRewriteExistingField: $inc $set" coll.save({_id:1, a:2}); assertLastOplog({_id:1, a:2}, {_id:1}, "save " + msg); -coll.update({}, {$inc:{a:1}, $set:{b:2}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +var res = assert.writeOK(coll.update({}, { $inc: { a: 1 }, $set: { b: 2 }})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a:3, b:2}, coll.findOne({}), msg); assertLastOplog({$set:{a:3, b:2}}, {_id:1}, msg); var msg = "IncRewriteNonExistingField: $inc $set" coll.save({_id:1, c:0}); assertLastOplog({_id:1, c:0}, {_id:1}, "save " + msg); -coll.update({}, {$inc:{a:1}, $set:{b:2}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $inc: { a: 1 }, $set: { b: 2 }})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, c:0, a:1, b:2}, coll.findOne({}), msg); assertLastOplog({$set:{a:1, b:2}}, {_id:1}, msg); var msg = "TwoNestedPulls: two $pull" coll.save({_id:1, a:{ b:[ 1, 2 ], c:[ 1, 2 ] }}); assertLastOplog({_id:1, a:{ b:[ 1, 2 ], c:[ 1, 2 ] }}, {_id:1}, "save " + msg); -coll.update({}, {$pull:{ 'a.b':2, 'a.c':2 }}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $pull: { 'a.b': 2, 'a.c': 2 }})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a:{ b:[ 1 ], c:[ 1 ] }}, coll.findOne({}), msg); assertLastOplog({$set:{'a.b':[1], 'a.c':[1]}}, {_id:1}, msg); var msg = "MultiSets: two $set" coll.save({_id:1, a:1, b:1}); assertLastOplog({_id:1, a:1, b:1}, {_id:1}, "save " + msg); -coll.update({}, {$set: {a:2, b:2}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $set: { a: 2, b: 2 }})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a:2, b:2}, coll.findOne({}), msg); assertLastOplog({$set:{a:2, b:2}}, {_id:1}, msg); @@ -76,48 +68,37 @@ assertLastOplog({$set:{a:2, b:2}}, {_id:1}, msg); var msg = "bad single $set" coll.save({_id:1, a:1}); assertLastOplog({_id:1, a:1}, {_id:1}, 
"save " + msg); -coll.update({}, {$set:{a:2}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $set: { a: 2 }})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a:2}, coll.findOne({}), msg); assertLastOplog({$set:{a:2}}, {_id:1}, msg); var msg = "bad single $inc"; -coll.update({}, {$inc:{a:1}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $inc: { a: 1 }})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a:3}, coll.findOne({}), msg); assertLastOplog({$set:{a:3}}, {_id:1}, msg); var msg = "bad double $set"; -coll.update({}, {$set:{a:2, b:2}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $set: { a: 2, b: 2 }})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a:2, b:2}, coll.findOne({}), msg); assertLastOplog({$set:{a:2, b:2}}, {_id:1}, msg); var msg = "bad save"; -coll.save({_id:1, a:[2]}); -assert.isnull(gle.err, msg); +assert.writeOK(coll.save({ _id: 1, a: [2] })); assert.docEq({_id:1, a:[2]}, coll.findOne({}), msg); assertLastOplog({_id:1, a:[2]}, {_id:1}, msg); var msg = "bad array $inc"; -coll.update({}, {$inc:{"a.0":1}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $inc: { "a.0": 1 }})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a:[3]}, coll.findOne({}), msg); var lastTS = assertLastOplog({$set:{"a.0": 3}}, {_id:1}, msg); var msg = "bad $setOnInsert"; -coll.update({}, {$setOnInsert:{"a":-1}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $setOnInsert: { a: -1 }})); +assert.eq(res.nMatched, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a:[3]}, coll.findOne({}), msg); // No-op var otherTS = assertLastOplog({$set:{"a.0": 3}}, {_id:1}, msg); // Nothing new assert.eq(lastTS, otherTS, "new oplog was not expected -- " + msg) // No new oplog entry @@ -126,13 +107,11 @@ coll.remove({}) assert.eq(coll.count(), 0, "collection not empty") var msg = "bad $setOnInsert w/upsert"; -coll.update({}, {$setOnInsert:{"a":200}}, {upsert:true}); // upsert -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); -assert(gle.upserted, "not upserted"); -assert.docEq({_id:gle.upserted, a:200}, coll.findOne({}), msg); // No-op -assertLastOplog({_id:gle.upserted, "a": 200}, null, msg); // No new oplog entry +res = assert.writeOK(coll.update({}, { $setOnInsert: { a: 200 }}, { upsert: true })); // upsert +assert.eq(res.nUpserted, 1, "update failed for '" + msg + "': " + res.toString()); +var id = res.getUpsertedId()._id; +assert.docEq({_id: id, a: 200 }, coll.findOne({}), msg); // No-op +assertLastOplog({ _id: id, a: 200 }, null, msg); // No new oplog entry coll.remove({}) assert.eq(coll.count(), 0, "collection not empty-2") @@ -152,55 
+131,52 @@ assertLastOplog({$set:{"a": [1,2,3]}}, {_id:1}, msg); // new format var msg = "bad array $push 2"; coll.save({_id:1, a:"foo"}) -coll.update({}, {$push:{c:18}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $push: { c: 18 }})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a:"foo", c:[18]}, coll.findOne({}), msg); assertLastOplog({$set:{"c": [18]}}, {_id:1}, msg); var msg = "bad array $push $slice"; coll.save({_id:1, a:{b:[18]}}) -coll.update({_id:{$gt:0}}, {$push:{"a.b":{$each:[1,2], $slice:-2}}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, + { $push: { "a.b": { $each: [1, 2], $slice: -2 }}})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a: {b:[1,2]}}, coll.findOne({}), msg); assertLastOplog({$set:{"a.b": [1,2]}}, {_id:1}, msg); var msg = "bad array $push $sort ($slice -100)"; coll.save({_id:1, a:{b:[{c:2}, {c:1}]}}) -coll.update({}, {$push:{"a.b":{$each:[{c:-1}], $sort:{"c":1}, $slice:-100}}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({}, { $push: { "a.b": { $each: [{ c: -1 }], + $sort: { c: 1 }, + $slice: -100 }}})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a: {b:[{c:-1}, {c:1}, {c:2}]}}, coll.findOne({}), msg); assertLastOplog({$set:{"a.b": [{c:-1},{c:1}, {c:2}]}}, {_id:1}, msg); var msg = "bad array $push $slice $sort"; coll.save({_id:1, a:[{b:2}, {b:1}]}) -coll.update({_id:{$gt:0}}, {$push:{"a":{$each:[{b:-1}], $slice:-2, $sort:{b:1}}}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, { $push: { a: { $each: [{ b: -1 }], + $slice:-2, + $sort: { b: 1 }}}})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a: [{b:1}, {b:2}]}, coll.findOne({}), msg); assertLastOplog({$set:{a: [{b:1},{b:2}]}}, {_id:1}, msg); var msg = "bad array $push $slice $sort first two"; coll.save({_id:1, a:{b:[{c:2}, {c:1}]}}) -coll.update({_id:{$gt:0}}, {$push:{"a.b":{$each:[{c:-1}], $slice:-2, $sort:{"c":1}}}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, { $push: { "a.b": { $each: [{ c: -1 }], + $slice: -2, + $sort: { c: 1 }}}})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a: {b:[{c:1}, {c:2}]}}, coll.findOne({}), msg); assertLastOplog({$set:{"a.b": [{c:1},{c:2}]}}, {_id:1}, msg); var msg = "bad array $push $slice $sort reversed first two"; coll.save({_id:1, a:{b:[{c:1}, {c:2}]}}) -coll.update({_id:{$gt:0}}, {$push:{"a.b":{$each:[{c:-1}], $slice:-2, $sort:{"c":-1}}}}); -var gle = cdb.getLastErrorObj(); -assert.isnull(gle.err, msg); -assert.eq(gle.n, 1, "update failed for '" + msg +"': "+ tojson(gle)); +res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, { $push: { "a.b": { $each: [{ c: -1 }], + $slice: -2, + $sort: { c: -1 
}}}})); +assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); assert.docEq({_id:1, a: {b:[{c:1}, {c:-1}]}}, coll.findOne({}), msg); assertLastOplog({$set:{"a.b": [{c:1},{c:-1}]}}, {_id:1}, msg); diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js index de9e50d8add..87393234c51 100644 --- a/jstests/replsets/optime.js +++ b/jstests/replsets/optime.js @@ -43,8 +43,8 @@ var initialInfo = master.getDB('admin').serverStatus({oplog:true}).oplog; // Do an insert to increment optime, but without rolling the oplog // latestOptime should be updated, but earliestOptime should be unchanged -master.getDB('test').foo.insert({a:1}); -master.getDB('test').getLastError(replTest.nodes.length); +var options = { writeConcern: { w: replTest.nodes.length }}; +assert.writeOK(master.getDB('test').foo.insert({ a: 1 }, options)); assert(optimesAreEqual(replTest)); var info = master.getDB('admin').serverStatus({oplog:true}).oplog; @@ -54,8 +54,7 @@ assert.eq(timestampCompare(info.earliestOptime, initialInfo.earliestOptime), 0); // Insert some large documents to force the oplog to roll over var largeString = new Array(1024*100).toString(); for (var i = 0; i < 15; i++) { - master.getDB('test').foo.insert({largeString: largeString}); - master.getDB('test').getLastError(replTest.nodes.length); + master.getDB('test').foo.insert({ largeString: largeString }, options); } assert(optimesAreEqual(replTest)); @@ -64,4 +63,4 @@ info = master.getDB('admin').serverStatus({oplog:true}).oplog; assert.gt(timestampCompare(info.latestOptime, initialInfo.latestOptime), 0); assert.gt(timestampCompare(info.earliestOptime, initialInfo.earliestOptime), 0); -replTest.stopSet(); \ No newline at end of file +replTest.stopSet(); diff --git a/jstests/replsets/regex.js b/jstests/replsets/regex.js index 34484f99c43..c94159e85a9 100644 --- a/jstests/replsets/regex.js +++ b/jstests/replsets/regex.js @@ -1,23 +1,15 @@ // don't allow regex as _id: SERVER-9502 -function assertNotOkForStorage() { - var gle = master.getDB("test").runCommand({getLastError : 1, w : 2, wtimeout : 60000}); - failed = gle.code > 0; - assert(failed, tojson(gle)); -} - var replTest = new ReplSetTest( {name: "server9502", nodes: 2} ); var nodes = replTest.startSet(); replTest.initiate(); var master = replTest.getMaster(); var mdb = master.getDB("test"); -mdb.foo.insert({ _id: "ABCDEF" }); -var gle = master.getDB("test").runCommand({getLastError : 1, w : 2, wtimeout : 60000}); -assert(gle.err === null); +mdb.setWriteConcern({ w: 2, wtimeout: 60000 }); +assert.writeOK(mdb.foo.insert({ _id: "ABCDEF" })); -mdb.foo.insert({ _id: /^A/ }); -assertNotOkForStorage(); +assert.writeError(mdb.foo.insert({ _id: /^A/ })); // _id doesn't have to be first; still disallowed -mdb.foo.insert({ xxx: "ABCDEF", _id: /ABCDEF/ }); -assertNotOkForStorage(); +assert.writeError(mdb.foo.insert({ xxx: "ABCDEF", _id: /ABCDEF/ })); + diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js index f156c8ec6a9..fb5d163cae5 100644 --- a/jstests/replsets/replset1.js +++ b/jstests/replsets/replset1.js @@ -68,10 +68,11 @@ doTest = function( signal ) { assert.eq( 1000 , cppconn.foo.findOne().a , "cppconn 2" ); // Now let's write some documents to the new master + var bulk = new_master.getDB("bar").bar.initializeUnorderedBulkOp(); for(var i=0; i<1000; i++) { - new_master.getDB("bar").bar.save({a: i}); + bulk.insert({ a: i }); } - new_master.getDB("admin").runCommand({getlasterror: 1}); + bulk.execute(); // Here's how to restart the old master node: slave = 
replTest.restart(master_id); diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js index 9588567ea50..b0302d1058f 100644 --- a/jstests/replsets/replset2.js +++ b/jstests/replsets/replset2.js @@ -2,10 +2,7 @@ load("jstests/replsets/rslib.js"); doTest = function (signal) { - // FAILING TEST - // See below: - - // Test replication with getLastError + // Test replication with write concern. // Replica set testing API // Create a new replica set test. Specify set name and the number of nodes you want. @@ -34,37 +31,21 @@ doTest = function (signal) { var slaves = replTest.liveNodes.slaves; slaves.forEach(function (slave) { slave.setSlaveOk(); }); - var failed = false; - var callGetLastError = function (w, timeout, db) { - try { - var result = master.getDB(db).getLastErrorObj(w, timeout); - print("replset2.js getLastError result: " + tojson(result)); - if (result['ok'] != 1) { - print("replset2.js FAILURE getlasterror not ok"); - failed = true; - } - } - catch (e) { - print("\nreplset2.js exception in getLastError: " + e + '\n'); - throw e; - } - } - - // Test getlasterror with multiple inserts - // TEST FAILS HEREg + // Test write concern with multiple inserts. print("\n\nreplset2.js **** Try inserting a multiple records -- first insert ****") printjson(master.getDB("admin").runCommand("replSetGetStatus")); - master.getDB(testDB).foo.insert({ n: 1 }); - master.getDB(testDB).foo.insert({ n: 2 }); - master.getDB(testDB).foo.insert({ n: 3 }); + var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp(); + bulk.insert({ n: 1 }); + bulk.insert({ n: 2 }); + bulk.insert({ n: 3 }); print("\nreplset2.js **** TEMP 1 ****") printjson(master.getDB("admin").runCommand("replSetGetStatus")); - callGetLastError(3, 25000, testDB); + assert.writeOK(bulk.execute({ w: 3, wtimeout: 25000 })); print("replset2.js **** TEMP 1a ****") @@ -80,11 +61,11 @@ doTest = function (signal) { var s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 }); assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1 on multiple inserts"); - // Test getlasterror with a simple insert + // Test write concern with a simple insert print("replset2.js **** Try inserting a single record ****") master.getDB(testDB).dropDatabase(); - master.getDB(testDB).foo.insert({ n: 1 }); - callGetLastError(3, 10000, testDB); + var options = { writeConcern: { w: 3, wtimeout: 10000 }}; + assert.writeOK(master.getDB(testDB).foo.insert({ n: 1 }, options)); m1 = master.getDB(testDB).foo.findOne({ n: 1 }); printjson(m1); @@ -99,28 +80,27 @@ doTest = function (signal) { // Test getlasterror with large insert print("replset2.js **** Try inserting many records ****") try { - bigData = new Array(2000).toString() - for (var n = 0; n < 1000; n++) { - master.getDB(testDB).baz.insert({ n: n, data: bigData }); - } - callGetLastError(3, 60000, testDB); - - print("replset2.js **** V1 ") - - var verifyReplication = function (nodeName, collection) { - data = collection.findOne({ n: 1 }); - assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName); - data = collection.findOne({ n: 999 }); - assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName); - } - - print("replset2.js **** V2 ") - - verifyReplication("master", master.getDB(testDB).baz); - verifyReplication("slave 0", slaves[0].getDB(testDB).baz); - verifyReplication("slave 1", slaves[1].getDB(testDB).baz); - - assert(failed == false, "replset2.js Replication with getLastError failed. 
See errors."); + var bigData = new Array(2000).toString(); + bulk = master.getDB(testDB).baz.initializeUnorderedBulkOp(); + for (var n = 0; n < 1000; n++) { + bulk.insert({ n: n, data: bigData }); + } + assert.writeOK(bulk.execute({ w: 3, wtimeout: 60000 })); + + print("replset2.js **** V1 ") + + var verifyReplication = function (nodeName, collection) { + data = collection.findOne({ n: 1 }); + assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName); + data = collection.findOne({ n: 999 }); + assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName); + }; + + print("replset2.js **** V2 "); + + verifyReplication("master", master.getDB(testDB).baz); + verifyReplication("slave 0", slaves[0].getDB(testDB).baz); + verifyReplication("slave 1", slaves[1].getDB(testDB).baz); } catch(e) { print("ERROR: " + e); @@ -130,9 +110,9 @@ doTest = function (signal) { printjson(slaves[0].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next()); print("Slave 1 oplog findOne:"); printjson(slaves[1].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next()); + // TODO: SERVER-13203 } - replTest.stopSet(signal); } diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js index ba08eac1cd0..4f97b816c8d 100644 --- a/jstests/replsets/replset3.js +++ b/jstests/replsets/replset3.js @@ -20,8 +20,7 @@ doTest = function (signal) { // Write some data to master // NOTE: this test fails unless we write some data. - master.getDB("foo").foo.save({ a: 1 }); - master.getDB("foo").runCommand({ getlasterror: 1, w: 3, wtimeout: 20000 }); + master.getDB("foo").foo.insert({ a: 1 }, { writeConcern: { w: 3, wtimeout: 20000 }}); var phase = 1; diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js index 3e04c7c9bc5..7f86899d350 100644 --- a/jstests/replsets/replset5.js +++ b/jstests/replsets/replset5.js @@ -2,12 +2,12 @@ doTest = function (signal) { - // Test getLastError defaults + // Test write concern defaults var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 }); var nodes = replTest.startSet(); - // Initiate set with default for getLastError + // Initiate set with default for write concern var config = replTest.getReplSetConfig(); config.settings = {}; config.settings.getLastErrorDefaults = { 'w': 3, 'wtimeout': 20000 }; @@ -26,24 +26,24 @@ doTest = function (signal) { // These writes should be replicated immediately var docNum = 5000; + var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp(); for (var n = 0; n < docNum; n++) { - master.getDB(testDB).foo.insert({ n: n }); + bulk.insert({ n: n }); } - + // should use the configured last error defaults from above, that's what we're testing. // // If you want to test failure, just add values for w and wtimeout (e.g. w=1) // to the following command. This will override the default set above and // prevent replication from happening in time for the count tests below. // - var result = master.getDB("admin").runCommand({ getlasterror: 1 }); - print("replset5.js getlasterror result:"); - printjson(result); - - if (result.err == "timeout") { - print("\WARNING getLastError timed out and should not have.\nThis machine seems extremely slow. Stopping test without failing it\n") - replTest.stopSet(signal); - print("\WARNING getLastError timed out and should not have.\nThis machine seems extremely slow. 
Stopping test without failing it\n") + var result = bulk.execute(); + var wcError = result.getWriteConcernError(); + + if (wcError != null) { + print("WARNING: getLastError timed out and should not have: " + result.toString()); + print("This machine seems extremely slow. Stopping test without failing it\n"); + replTest.stopSet(signal); return; } diff --git a/jstests/replsets/replset7.js b/jstests/replsets/replset7.js index 0c4cbe2b841..c6ee3d6f943 100644 --- a/jstests/replsets/replset7.js +++ b/jstests/replsets/replset7.js @@ -11,14 +11,14 @@ var md = master.getDB( 'd' ); var mdc = md[ 'c' ]; // prep the data -var doccount = 50000; +var doccount = 5000; +var bulk = mdc.initializeUnorderedBulkOp(); for( i = 0; i < doccount; ++i ) { - mdc.insert( { _id:i, x:i } ); + bulk.insert( { _id:i, x:i } ); } -md.getLastError(); +assert.writeOK(bulk.execute()); -mdc.ensureIndex( { x : 1 }, { unique: true } ); -md.getLastError(); +assert.writeOK(mdc.ensureIndex( { x : 1 }, { unique: true } )); // add a secondary var slave = rt.add(); @@ -32,11 +32,14 @@ slave.setSlaveOk(); // Move all documents to the end by growing it +bulk = mdc.initializeUnorderedBulkOp(); +var bigStr = "ayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayay" + + "ayayayayayayayayayayayay"; for (i = 0; i < doccount; ++i) { - mdc.remove( { _id:i, x:i } ); - mdc.insert( { _id:doccount+i, x:i, bigstring: "ayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayay" } ); - md.getLastError(); + bulk.find({ _id: i, x: i }).remove(); + bulk.insert({ _id: doccount + i, x: i, bigstring: bigStr }); } +assert.writeOK(bulk.execute()); // Wait for replication to catch up. rt.awaitSecondaryNodes();
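Several of the rewritten tests (replset5.js above, tags.js further down) need to tell a rejected write apart from a write that was applied but missed its replication target. The result object keeps the two separate; a rough sketch, again with a hypothetical coll:

    var bulk = coll.initializeUnorderedBulkOp();
    bulk.insert({ x: 1 });
    var result = bulk.execute({ w: 3, wtimeout: 20000 });

    // A write concern error means the insert was applied on the primary but
    // replication did not satisfy w: 3 within wtimeout milliseconds.
    var wcError = result.getWriteConcernError();
    if (wcError != null) {
        printjson(wcError);
    }

This is what lets the replset5.js rewrite stop the set and bail out on a slow machine instead of failing the whole test.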
diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js index cc900195cde..51cae86670a 100644 --- a/jstests/replsets/replset8.js +++ b/jstests/replsets/replset8.js @@ -17,28 +17,31 @@ var mdc = md[ 'c' ]; // documents to be increasing size. // this should result in the updates moving the docs backwards. -var doccount = 10000; +var doccount = 5000; // Avoid empty extent issues mdc.insert( { _id:-1, x:"dummy" } ); print ("inserting bigstrings"); +var bulk = mdc.initializeUnorderedBulkOp(); for( i = 0; i < doccount; ++i ) { - mdc.insert( { _id:i, x:bigstring } ); + bulk.insert( { _id:i, x:bigstring } ); bigstring += "a"; } -md.getLastError(); +assert.writeOK(bulk.execute()); print ("inserting x"); +bulk = mdc.initializeUnorderedBulkOp(); for( i = doccount; i < doccount*2; ++i ) { - mdc.insert( { _id:i, x:i } ); + bulk.insert( { _id:i, x:i } ); } -md.getLastError(); +assert.writeOK(bulk.execute()); print ("deleting bigstrings"); +bulk = mdc.initializeUnorderedBulkOp(); for( i = 0; i < doccount; ++i ) { - mdc.remove( { _id:i } ); + bulk.find({ _id: i }).remove(); } -md.getLastError(); +assert.writeOK(bulk.execute()); // add a secondary var slave = rt.add(); @@ -50,9 +53,9 @@ sleep(25000); print ("updating documents backwards"); // Move all documents to the beginning by growing them to sizes that should // fit the holes we made in phase 1 +bulk = mdc.initializeUnorderedBulkOp(); for (i = doccount*2; i > doccount; --i) { mdc.update( { _id:i, x:i }, { _id:i, x:bigstring } ); - md.getLastError(); bigstring = bigstring.slice(0, -1); // remove last char } print ("finished"); diff --git a/jstests/replsets/replset9.js b/jstests/replsets/replset9.js index 401d71faac1..d2317780bcb 100644 --- a/jstests/replsets/replset9.js +++ b/jstests/replsets/replset9.js @@ -18,17 +18,19 @@ mdc.insert( { _id:-1, x:"dummy" } ); // Make this db big so that cloner takes a while. print ("inserting bigstrings"); +var bulk = mdc.initializeUnorderedBulkOp(); for( i = 0; i < doccount; ++i ) { - mdc.insert( { _id:i, x:bigstring } ); + bulk.insert({ _id: i, x: bigstring }); } -md.getLastError(); +assert.writeOK(bulk.execute()); // Insert some docs to update and remove print ("inserting x"); +bulk = mdc.initializeUnorderedBulkOp(); for( i = doccount; i < doccount*2; ++i ) { - mdc.insert( { _id:i, bs:bigstring, x:i } ); + bulk.insert({ _id: i, bs: bigstring, x: i }); } -md.getLastError(); +assert.writeOK(bulk.execute()); // add a secondary; start cloning var slave = rt.add(); @@ -57,12 +59,14 @@ var sc = slave.getDB( 'd' )[ 'c' ]; slave.setSlaveOk(); print ("updating and deleting documents"); +bulk = mdc.initializeUnorderedBulkOp(); for (i = doccount*4; i > doccount; --i) { - mdc.update( { _id:i }, { $inc: { x : 1 } } ); - mdc.remove( { _id:i } ); - mdc.insert( { bs:bigstring } ); + bulk.find({ _id: i }).update({ $inc: { x: 1 }}); + bulk.find({ _id: i }).remove(); + bulk.insert({ bs: bigstring }); } -md.getLastError(); +assert.writeOK(bulk.execute()); + print ("finished"); // Wait for replication to catch up. 
rt.awaitReplication(640000); diff --git a/jstests/replsets/resync.js b/jstests/replsets/resync.js index e890a9947c1..a1611be6ac8 100644 --- a/jstests/replsets/resync.js +++ b/jstests/replsets/resync.js @@ -26,16 +26,16 @@ assert(master == conns[0], "conns[0] assumed to be master"); assert(a_conn.host == master.host); // create an oplog entry with an insert -A.foo.insert({x:1}); -A.foo.runCommand({getLastError : 1, w : 3, wtimeout : 60000}); +assert.writeOK( A.foo.insert({ x: 1 }, { writeConcern: { w: 3, wtimeout: 60000 }})); replTest.stop(BID); // insert enough to cycle oplog +var bulk = A.foo.initializeUnorderedBulkOp(); for (i=2; i < 10000; i++) { - A.foo.insert({x:i}); + bulk.insert({x:i}); } // wait for secondary to also have its oplog cycle -A.foo.runCommand({getLastError : 1, w : 2, wtimeout : 60000}); +assert.writeOK(bulk.execute({ w: 2, wtimeout : 60000 })); // bring node B and it will enter recovery mode because its newest oplog entry is too old replTest.restart(BID); diff --git a/jstests/replsets/rollback.js b/jstests/replsets/rollback.js index 7ec3810fbdc..fd174ff4703 100644 --- a/jstests/replsets/rollback.js +++ b/jstests/replsets/rollback.js @@ -51,7 +51,7 @@ function wait(f) { doTest = function (signal) { - var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3 }); + var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3, oplogSize: 1 }); var nodes = replTest.nodeList(); //print(tojson(nodes)); @@ -93,8 +93,13 @@ doTest = function (signal) { var first = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0]; a.roll.insert({ x: 1 }); while (1) { - for (var i = 0; i < 10000; i++) - a.roll.update({}, { $inc: { x: 1} }); + var bulk = a.roll.initializeUnorderedBulkOp(); + for (var i = 0; i < 1000; i++) { + bulk.find({}).update({ $inc: { x: 1 }}); + } + // unlikely secondary isn't keeping up, but let's avoid possible intermittent issues with that. + bulk.execute({ w: 2 }); + var op = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0]; if (tojson(op.h) != tojson(first.h)) { printjson(op); @@ -102,7 +107,6 @@ doTest = function (signal) { break; } pass++; - a.getLastError(2); // unlikely secondary isn't keeping up, but let's avoid possible intermittent issues with that. 
} print("PASSES FOR OPLOG ROLL: " + pass); } diff --git a/jstests/replsets/rollback4.js b/jstests/replsets/rollback4.js index 2fc1386a66f..c835b7f1bd5 100644 --- a/jstests/replsets/rollback4.js +++ b/jstests/replsets/rollback4.js @@ -24,9 +24,8 @@ printjson(master.adminCommand("replSetGetStatus")); var mColl = master.getCollection('test.foo'); -mColl.insert({}); +assert.writeOK(mColl.insert({}, { writeConcern: { w: 7, wtimeout: 30*1000 }})); printjson(master.adminCommand("replSetGetStatus")); -printjson(master.adminCommand({getLastError:1, w:7, wtimeout:30*1000})); // partition 012 | 3456 with 0 and 6 the old and new master @@ -49,16 +48,15 @@ printjson({endPartition: new Date()}); var gotThrough = 0 try { while (true){ - mColl.insert({}) - out = master.adminCommand({getLastError:1, w:3}); - if (out.err) + res = mColl.insert({}, { writeConcern: { w: 3 }}); + if (res.hasWriteErrors()) break; gotThrough++; } } catch (e) { - print("caught exception"); + print("caught exception: " + tojson(e)); } printjson({gotThrough: gotThrough}); @@ -76,8 +74,7 @@ printjson(master2.adminCommand("replSetGetStatus")); var m2Coll = master2.getCollection('test.foo'); var sentinel = {_id: 'sentinel'} // used to detect which master's data is used -m2Coll.insert(sentinel); -printjson(master2.adminCommand({getLastError:1, w:4, wtimeout:30*1000})); +assert.writeOK(m2Coll.insert(sentinel, { writeConcern: { w: 4, wtimeout: 30*1000 }})); printjson(master2.adminCommand("replSetGetStatus")); m2Coll.insert({}); // this shouldn't be necessary but the next GLE doesn't work without it @@ -99,12 +96,11 @@ printjson({endUnPartition: new Date()}); assert.soon(function() { try { - printjson(master2.adminCommand({getLastError:1, w:7, wtimeout:30*1000})); + m2Coll.insert({}, { writeConcern: { w: 7, wtimeout: 10*1000 }}); return true; } catch (e) { - print("getLastError returned an exception; retrying"); - print(e); + print("getLastError returned an exception; retrying: " + tojson(e)); return false; } }); diff --git a/jstests/replsets/rollback5.js b/jstests/replsets/rollback5.js index a5dcd0f55f7..1d325cb1f3e 100644 --- a/jstests/replsets/rollback5.js +++ b/jstests/replsets/rollback5.js @@ -34,20 +34,20 @@ assert.soon(function () { return res.myState == 7; }, "Arbiter failed to initialize."); -A.foo.update({key:'value1'}, {$set: {req: 'req'}}, true); -A.foo.runCommand({getLastError : 1, w : 2, wtimeout : 60000}); +var options = { writeConcern: { w: 2, wtimeout: 60000 }, upsert: true }; +assert.writeOK(A.foo.update({ key: 'value1' }, { $set: { req: 'req' }}, options)); replTest.stop(AID); master = replTest.getMaster(); assert(b_conn.host == master.host); -B.foo.update({key:'value1'}, {$set: {res: 'res'}}, true); -B.foo.runCommand({getLastError : 1, w : 1, wtimeout : 60000}); +options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true }; +assert.writeOK(B.foo.update({key:'value1'}, {$set: {res: 'res'}}, options)); replTest.stop(BID); replTest.restart(AID); master = replTest.getMaster(); assert(a_conn.host == master.host); -A.foo.update({key:'value2'}, {$set: {req: 'req'}}, true); -A.foo.runCommand({getLastError : 1, w : 1, wtimeout : 60000}); +options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true }; +assert.writeOK(A.foo.update({ key: 'value2' }, { $set: { req: 'req' }}, options)); replTest.restart(BID); // should rollback reconnect(B); diff --git a/jstests/replsets/server_status_metrics.js b/jstests/replsets/server_status_metrics.js index 2147e144987..b5327fc2fe1 100644 --- 
a/jstests/replsets/server_status_metrics.js +++ b/jstests/replsets/server_status_metrics.js @@ -35,24 +35,24 @@ var secondary = rt.getSecondary(); var primary = rt.getPrimary(); var testDB = primary.getDB("test"); -testDB.b.insert( {} ); -printjson( testDB.getLastErrorObj(2) ); +assert.writeOK(testDB.b.insert({}, { writeConcern: { w: 2 }})); var ss = secondary.getDB("test").serverStatus(); var secondaryBaseOplogInserts = ss.metrics.repl.apply.ops; //add test docs -for(x=0;x<10000;x++){ testDB.a.insert({}) } - -testDB.getLastError(2); - -testSecondaryMetrics(secondary, 10000, secondaryBaseOplogInserts ); +var bulk = testDB.a.initializeUnorderedBulkOp(); +for(x = 0; x < 1000; x++) { + bulk.insert({}); +} +assert.writeOK(bulk.execute({ w: 2 })); -testDB.a.update({}, {$set:{d:new Date()}},true, true) -testDB.getLastError(2); +testSecondaryMetrics(secondary, 1000, secondaryBaseOplogInserts ); -testSecondaryMetrics(secondary, 20000, secondaryBaseOplogInserts ); +var options = { writeConcern: { w: 2 }, multi: true, upsert: true }; +assert.writeOK(testDB.a.update({}, { $set: { d: new Date() }}, options)); +testSecondaryMetrics(secondary, 2000, secondaryBaseOplogInserts ); // Test getLastError.wtime and that it only records stats for w > 1, see SERVER-9005 var startMillis = testDB.serverStatus().metrics.getLastError.wtime.totalMillis @@ -60,19 +60,20 @@ var startNum = testDB.serverStatus().metrics.getLastError.wtime.num printjson(primary.getDB("test").serverStatus().metrics); -testDB.getLastError(1, 50 ); +assert.writeOK(testDB.a.insert({ x: 1 }, { writeConcern: { w: 1, wtimeout: 50 }})); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum); -testDB.getLastError(-11, 50 ); +assert.writeOK(testDB.a.insert({ x: 1 }, { writeConcern: { w: -11, wtimeout: 50 }})); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum); -testDB.getLastError(2, 50 ); +assert.writeOK(testDB.a.insert({ x: 1 }, { writeConcern: { w: 2, wtimeout: 50 }})); assert(testDB.serverStatus().metrics.getLastError.wtime.totalMillis >= startMillis); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum + 1); -testDB.getLastError(3, 50 ); +// Write will fail because there are only 2 nodes +assert.writeError(testDB.a.insert({ x: 1 }, { writeConcern: { w: 3, wtimeout: 50 }})); assert(testDB.serverStatus().metrics.getLastError.wtime.totalMillis >= startMillis + 50); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum + 2); diff --git a/jstests/replsets/single_server_majority.js b/jstests/replsets/single_server_majority.js index b329bb126af..136b1213001 100644 --- a/jstests/replsets/single_server_majority.js +++ b/jstests/replsets/single_server_majority.js @@ -3,15 +3,13 @@ // set up a mongod and connect a mongo port = allocatePorts(1)[0]; var baseName = "single_server_majority"; -var mongod = startMongod("--port", port, "--dbpath", MongoRunner.dataPath + baseName); +var mongod = startMongod("--port", port, "--dbpath", MongoRunner.dataPath + baseName, "-v"); var mongo = startMongoProgram("mongo", "--port", port); // get db and collection, then preform a trivial insert db = mongo.getDB("test") col = db.getCollection("single_server_majority"); col.drop(); -col.save({a: "test"}); - // see if we can get a majority write on this single server -result = db.getLastErrorObj("majority"); 
-assert(result.err === null); +assert.writeOK(col.save({ a: "test" }, { writeConcern: { w: 'majority' }})); + diff --git a/jstests/replsets/slaveDelay2.js b/jstests/replsets/slaveDelay2.js index 3ef968f3a53..2fe54e677ae 100644 --- a/jstests/replsets/slaveDelay2.js +++ b/jstests/replsets/slaveDelay2.js @@ -20,11 +20,12 @@ var initialize = function() { var populate = function(master) { // insert records + var bulk = master.foo.initializeUnorderedBulkOp(); for (var i =0; i<1000; i++) { - master.foo.insert({_id:1}); + bulk.insert({ _id: i }); } - - master.runCommand({getlasterror:1}); + + assert.writeOK(bulk.execute()); } doTest = function( signal ) { diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js index 623fa8a5ab4..f81dd6d8895 100644 --- a/jstests/replsets/slavedelay1.js +++ b/jstests/replsets/slavedelay1.js @@ -27,8 +27,7 @@ doTest = function( signal ) { waitForAllMembers(master); // insert a record - master.foo.insert({x:1}); - master.runCommand({getlasterror:1, w:2}); + assert.writeOK(master.foo.insert({ x: 1 }, { writeConcern: { w: 2 }})); var doc = master.foo.findOne(); assert.eq(doc.x, 1); @@ -69,8 +68,7 @@ doTest = function( signal ) { // wait for the node to catch up replTest.awaitReplication(); - master.foo.insert({_id : 123, "x" : "foo"}); - master.runCommand({getlasterror:1,w:2}); + assert.writeOK(master.foo.insert({ _id: 123, x: 'foo' }, { writeConcern: { w: 2 }})); for (var i=0; i<8; i++) { assert.eq(conn.getDB(name).foo.findOne({_id:123}), null); diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js index 33e90fd1af5..9f9925b2b9e 100644 --- a/jstests/replsets/stepdown3.js +++ b/jstests/replsets/stepdown3.js @@ -14,8 +14,8 @@ replTest.awaitReplication(); // do another write, because the first one might be longer than 10 seconds ago // on the secondary (due to starting up), and we need to be within 10 seconds // to step down. 
-master.getDB("test").foo.insert({x:2}); -master.getDB("test").runCommand({getLastError : 1, w : 2, wtimeout : 30000 }); +var options = { writeConcern: { w: 2, wtimeout: 30000 }}; +assert.writeOK(master.getDB("test").foo.insert({ x: 2 }, options)); // lock secondary, to pause replication print("\nlock secondary"); var locked = replTest.liveNodes.slaves[0]; diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js index 6890d85dc98..bcccea88d1a 100644 --- a/jstests/replsets/sync2.js +++ b/jstests/replsets/sync2.js @@ -34,18 +34,14 @@ replTest.partition(4,1); replTest.partition(4,3); jsTestLog("Checking that ops still replicate correctly"); -master.getDB("foo").bar.insert({x:1}); - -var result = master.getDB("admin").runCommand({getLastError:1,w:5,wtimeout:30000}); -assert.eq(null, result.err, tojson(result)); +var option = { writeConcern: { w: 5, wtimeout: 30000 }}; +assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, option)); // 4 is connected to 3 replTest.partition(4,2); replTest.unPartition(4,3); -master.getDB("foo").bar.insert({x:1}); - -result = master.getDB("admin").runCommand({getLastError:1,w:5,wtimeout:30000}); -assert.eq(null, result.err, tojson(result)); +option = { writeConcern: { w: 5, wtimeout: 30000 }}; +assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, option)); replTest.stopSet(); diff --git a/jstests/replsets/tags.js b/jstests/replsets/tags.js index 405820443f4..d4d6e2a9cea 100644 --- a/jstests/replsets/tags.js +++ b/jstests/replsets/tags.js @@ -98,27 +98,26 @@ master = replTest.getMaster(); printjson(master.getDB("admin").runCommand({replSetGetStatus:1})); -var timeout = 20000; +var timeout = 3000; -master.getDB("foo").bar.insert({x:1}); -var result = master.getDB("foo").runCommand({getLastError:1,w:"3 or 4",wtimeout:timeout}); -printjson(result); -assert.eq(result.err, "timeout"); +var options = { writeConcern: { w: "3 or 4", wtimeout: timeout }}; +var result = master.getDB("foo").bar.insert({ x: 1 }, options); +assert.neq(null, result.getWriteConcernError()); +assert(result.getWriteConcernError().errInfo.wtimeout); replTest.unPartition(1,4); myprint("partitions: [1-4] [0-1-2-0] [3]"); myprint("test2"); -master.getDB("foo").bar.insert({x:1}); -result = master.getDB("foo").runCommand({getLastError:1,w:"3 or 4",wtimeout:timeout}); -printjson(result); -assert.eq(result.err, null); +options = { writeConcern: { w: "3 or 4", wtimeout: timeout }}; +assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, options)); myprint("partitions: [1-4] [0-1-2-0] [3]"); myprint("test3"); -result = master.getDB("foo").runCommand({getLastError:1,w:"3 and 4",wtimeout:timeout}); -printjson(result); -assert.eq(result.err, "timeout"); +options = { writeConcern: { w: "3 and 4", wtimeout: timeout }}; +result = assert.writeError(master.getDB("foo").bar.insert({ x: 1 }, options)); +assert.neq(null, result.getWriteConcernError()); +assert(result.getWriteConcernError().errInfo.wtimeout, tojson(result.getWriteConcernError())); replTest.unPartition(3,4); @@ -126,33 +125,26 @@ myprint("partitions: [0-4-3] [0-1-2-0]"); myprint("31004 should sync from 31001 (31026)"); myprint("31003 should sync from 31004 (31024)"); myprint("test4"); -result = master.getDB("foo").runCommand({getLastError:1,w:"3 and 4",wtimeout:timeout}); -printjson(result); -assert.eq(result.err, null); +options = { writeConcern: { w: "3 and 4", wtimeout: timeout }}; +assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, options)); myprint("non-existent w"); -result = 
master.getDB("foo").runCommand({getLastError:1,w:"blahblah",wtimeout:timeout}); -printjson(result); -assert.eq(result.code, 79); -assert.eq(result.ok, 0); +options = { writeConcern: { w: "blahblah", wtimeout: timeout }}; +result = assert.writeError(master.getDB("foo").bar.insert({ x: 1 }, options)); +assert.neq(null, result.getWriteConcernError()); +assert.eq(79, result.getWriteConcernError().code, tojson(result.getWriteConcernError())); myprint("test mode 2"); -master.getDB("foo").bar.insert({x:1}); -result = master.getDB("foo").runCommand({getLastError:1,w:"2",wtimeout:0}); -printjson(result); -assert.eq(result.err, null); +options = { writeConcern: { w: "2", wtimeout: 0 }}; +assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, options)); myprint("test two on the primary"); -master.getDB("foo").bar.insert({x:1}); -result = master.getDB("foo").runCommand({getLastError:1,w:"1 and 2",wtimeout:0}); -printjson(result); -assert.eq(result.err, null); +options = { writeConcern: { w: "1 and 2", wtimeout: 0 }}; +assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, options)); myprint("test5"); -master.getDB("foo").bar.insert({x:1}); -result = master.getDB("foo").runCommand({getLastError:1,w:"2 dc and 3 server",wtimeout:timeout}); -printjson(result); -assert.eq(result.err, null); +options = { writeConcern: { w: "2 dc and 3 server", wtimeout: timeout }}; +assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, options)); replTest.unPartition(1,3); @@ -160,20 +152,19 @@ replTest.partition(2, 0); replTest.partition(2, 1); replTest.stop(2); -myprint("1 must become primary here because otherwise the other members will take too long timing out their old sync threads"); +myprint("1 must become primary here because otherwise the other members will take too long " + + "timing out their old sync threads"); master = replTest.getMaster(); myprint("test6"); -master.getDB("foo").bar.insert({x:1}); -result = master.getDB("foo").runCommand({getLastError:1,w:"3 and 4",wtimeout:timeout}); -printjson(result); -assert.eq(result.err, null); +options = { writeConcern: { w: "3 and 4", wtimeout: timeout }}; +assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, options)); myprint("test mode 2"); -master.getDB("foo").bar.insert({x:1}); -result = master.getDB("foo").runCommand({getLastError:1,w:"2",wtimeout:timeout}); -printjson(result); -assert.eq(result.err, "timeout"); +options = { writeConcern: { w: "2", wtimeout: timeout }}; +result = assert.writeError(master.getDB("foo").bar.insert({ x: 1 }, options)); +assert.neq(null, result.getWriteConcernError()); +assert(result.getWriteConcernError().errInfo.wtimeout); replTest.stopSet(); myprint("\n\ntags.js SUCCESS\n\n"); diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js index 068574bc9c9..55e68768eee 100644 --- a/jstests/replsets/tags2.js +++ b/jstests/replsets/tags2.js @@ -26,9 +26,7 @@ replTest.awaitReplication(); master = replTest.getMaster(); var db = master.getDB("test"); -db.foo.insert( {x:1} ); -var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} ); -assert.eq (result.err, null); +assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }})); conf.version = 2; conf.settings.getLastErrorModes.backedUp.backup = 3; @@ -37,9 +35,7 @@ replTest.awaitReplication(); master = replTest.getMaster(); var db = master.getDB("test"); -db.foo.insert( {x:2} ); -var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} ); -assert.eq (result.err, null); +assert.writeOK(db.foo.insert({ x: 2 }, { 
diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js
index 068574bc9c9..55e68768eee 100644
--- a/jstests/replsets/tags2.js
+++ b/jstests/replsets/tags2.js
@@ -26,9 +26,7 @@ replTest.awaitReplication();
 
 master = replTest.getMaster();
 var db = master.getDB("test");
-db.foo.insert( {x:1} );
-var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} );
-assert.eq (result.err, null);
+assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }}));
 
 conf.version = 2;
 conf.settings.getLastErrorModes.backedUp.backup = 3;
@@ -37,9 +35,7 @@ replTest.awaitReplication();
 
 master = replTest.getMaster();
 var db = master.getDB("test");
-db.foo.insert( {x:2} );
-var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} );
-assert.eq (result.err, null);
+assert.writeOK(db.foo.insert({ x: 2 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }}));
 
 conf.version = 3;
 conf.members[0].priorty = 3;
@@ -48,8 +44,6 @@ master.getDB("admin").runCommand( {replSetReconfig: conf} );
 
 master = replTest.getMaster();
 var db = master.getDB("test");
-db.foo.insert( {x:3} );
-var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} );
-assert.eq (result.err, null);
+assert.writeOK(db.foo.insert({ x: 3 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }}));
 
 replTest.stopSet();
diff --git a/jstests/replsets/tags_with_reconfig.js b/jstests/replsets/tags_with_reconfig.js
index 8de023f5d4e..755fd744de5 100644
--- a/jstests/replsets/tags_with_reconfig.js
+++ b/jstests/replsets/tags_with_reconfig.js
@@ -27,18 +27,11 @@ replTest.awaitReplication();
 
 master = replTest.getMaster();
 var db = master.getDB("test");
 
-// Insert a document and getLastError with write concern : anydc
-db.foo.insert( {x:1} );
-var result = db.runCommand( {getLastError:1, w:"anydc", wtimeout:20000} );
-printjson (result)
-assert.eq (result.err, null);
-
-// Insert a document and getLastError with write concern : alldc
-db.foo.insert( {x:2} );
-var result = db.runCommand( {getLastError:1, w:"alldc", wtimeout:20000} );
-printjson (result)
-assert.eq (result.err, null);
+// Insert a document with write concern : anydc
+assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'anydc', wtimeout: 20000 }}));
+// Insert a document with write concern : alldc
+assert.writeOK(db.foo.insert({ x: 2 }, { writeConcern: { w: 'alldc', wtimeout: 20000 }}));
 
 // Add a new tag to the replica set
 var config = master.getDB("local").system.replset.findOne();
@@ -64,17 +57,10 @@ printjson(config);
 
 master = replTest.getMaster();
 var db = master.getDB("test");
 
-// Insert a document and getLastError with write concern : anydc
-db.foo.insert( {x:3} );
-var result = db.runCommand( {getLastError:1, w:"anydc", wtimeout:20000} );
-printjson (result)
-assert.eq (result.err, null);
-
-// Insert a document and getLastError with write concern : alldc
-db.foo.insert( {x:4} );
-var result = db.runCommand( {getLastError:1, w:"alldc", wtimeout:20000} );
-printjson (result)
-assert.eq (result.err, null);
+// Insert a document with write concern : anydc
+assert.writeOK(db.foo.insert({ x: 3 }, { writeConcern: { w: 'anydc', wtimeout: 20000 }}));
+// Insert a document with write concern : alldc
+assert.writeOK(db.foo.insert({ x: 4 }, { writeConcern: { w: 'alldc', wtimeout: 20000 }}));
 
 replTest.stopSet();
diff --git a/jstests/replsets/temp_namespace.js b/jstests/replsets/temp_namespace.js
index 019163e1ca0..90d72400518 100644
--- a/jstests/replsets/temp_namespace.js
+++ b/jstests/replsets/temp_namespace.js
@@ -30,8 +30,7 @@ masterDB.temp2.ensureIndex({x:1});
 masterDB.runCommand({create: 'keep1', temp: false});
 masterDB.runCommand({create: 'keep2', temp: 0});
 masterDB.runCommand({create: 'keep3'});
-masterDB.keep4.insert({});
-masterDB.getLastError(2);
+assert.writeOK(masterDB.keep4.insert({}, { writeConcern: { w: 2 }}));
 
 // make sure they exist on primary and secondary
 assert.eq(masterDB.system.namespaces.count({name: /temp\d$/}) , 2); // collections
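
The tag-based tests carry over because custom getLastErrorModes are referenced from a
writeConcern exactly as they were from getLastError: the mode name simply goes in w. A sketch
of the wiring, condensed from the tags2.js hunks above (the settings object is abbreviated and
the tag counts are illustrative, so treat this as a shape rather than a drop-in config):

    // define a mode 'backedUp' that is satisfied once a write has reached
    // two members carrying the tag backup (abbreviated, illustrative config)
    var conf = master.getDB("local").system.replset.findOne();
    conf.version++;
    conf.settings = { getLastErrorModes: { backedUp: { backup: 2 }}};
    master.getDB("admin").runCommand({ replSetReconfig: conf });

    // the mode name is then usable anywhere a numeric w would be
    var db = master.getDB("test");
    assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }}));
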
diff --git a/jstests/replsets/toostale.js b/jstests/replsets/toostale.js
index 08b1a9c2c6f..9aaffd210cc 100644
--- a/jstests/replsets/toostale.js
+++ b/jstests/replsets/toostale.js
@@ -45,7 +45,7 @@ var reconnect = function(a) {
 
 var name = "toostale"
 
-var replTest = new ReplSetTest( {name: name, nodes: 3});
+var replTest = new ReplSetTest({ name: name, nodes: 3, oplogSize: 5 });
 var host = getHostName();
 var nodes = replTest.startSet();
 
@@ -76,10 +76,13 @@ reconnect(master.getDB("local"));
 var count = master.getDB("local").oplog.rs.count();
 var prevCount = -1;
 while (count != prevCount) {
-    print("inserting 10000");
-    for (var i = 0; i < 10000; i++) {
-        mdb.bar.insert({x:i, date : new Date(), str : "safkaldmfaksndfkjansfdjanfjkafa"});
+    print("inserting 1000");
+    var bulk = mdb.bar.initializeUnorderedBulkOp();
+    for (var i = 0; i < 1000; i++) {
+        bulk.insert({ x: i, date: new Date(), str: "safkaldmfaksndfkjansfdjanfjkafa" });
     }
+    assert.writeOK(bulk.execute());
+
     prevCount = count;
     replTest.awaitReplication();
     count = master.getDB("local").oplog.rs.count();
-- 
cgit v1.2.1
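
One detail of the final hunk worth spelling out: with the oplog capped at 5MB, toostale.js
keeps bulk-inserting until the oplog's document count stops growing between passes, which is
the observable sign that the capped oplog.rs collection has wrapped and is overwriting old
entries, the precondition for making a stopped secondary too stale to resync. The loop,
reduced to a runnable skeleton (names as in the test; the string is just oplog padding):

    var count = master.getDB("local").oplog.rs.count();
    var prevCount = -1;
    while (count != prevCount) {     // a full capped collection stops growing
        var bulk = mdb.bar.initializeUnorderedBulkOp();
        for (var i = 0; i < 1000; i++) {
            bulk.insert({ x: i, date: new Date(), str: "safkaldmfaksndfkjansfdjanfjkafa" });
        }
        assert.writeOK(bulk.execute());
        prevCount = count;
        replTest.awaitReplication();
        count = master.getDB("local").oplog.rs.count();
    }
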