Diffstat (limited to 'jstests/replsets')
133 files changed, 4431 insertions, 4384 deletions
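Every hunk below follows the same pattern: whitespace and layout changes with no behavioral edits, consistent with a tree-wide run of an automatic formatter over the shell tests. For readers skimming the diff, here is a minimal sketch of the style the changes converge on — body indented four spaces inside the IIFE, compact object literals that break one key per line past the column limit, and cuddled `} catch (e) {`. The test below is hypothetical (the names `styleExample` and `test.style` are made up for illustration); the shell helpers it uses (`ReplSetTest`, `assert.writeOK`, `assert.commandWorked`) are the same ones exercised throughout the diff.

(function() {
    "use strict";

    // One-node set is enough to illustrate formatting; real tests above use 2-5 nodes.
    var rst = new ReplSetTest({name: "styleExample", nodes: 1});
    rst.startSet();
    rst.initiate();

    var coll = rst.getPrimary().getCollection("test.style");

    // Short object literals stay on one line after the reformat.
    assert.writeOK(coll.insert({_id: 1, x: "a"}, {writeConcern: {w: 1}}));

    // Literals that exceed the line limit break with one key per line,
    // as in the batch_write_command_wc.js hunks below.
    var req = {
        insert: "style",
        documents: [{_id: 2, x: "b"}],
        writeConcern: {w: 1, wtimeout: 30000}
    };
    assert.commandWorked(rst.getPrimary().getDB("test").runCommand(req));

    // try/catch is cuddled onto one line rather than split across lines.
    try {
        assert.eq("a", coll.findOne({_id: 1}).x);
    } catch (e) {
        print("lookup failed: " + e);
    }

    rst.stopSet();
})();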
diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js index e380c981566..4744fcf4342 100644 --- a/jstests/replsets/apply_batch_only_goes_forward.js +++ b/jstests/replsets/apply_batch_only_goes_forward.js @@ -35,7 +35,10 @@ var sLocal = slave.getDB("local"); var sMinvalid = sLocal["replset.minvalid"]; var stepDownSecs = 30; - var stepDownCmd = {replSetStepDown: stepDownSecs, force: true}; + var stepDownCmd = { + replSetStepDown: stepDownSecs, + force: true + }; // Write op assert.writeOK(mTest.foo.save({}, {writeConcern: {w: 3}})); @@ -44,21 +47,20 @@ // Set minvalid to something far in the future for the current primary, to simulate recovery. // Note: This is so far in the future (5 days) that it will never become secondary. - var farFutureTS = new Timestamp(Math.floor(new Date().getTime()/1000) + - (60 * 60 * 24 * 5 /* in five days*/), 0); + var farFutureTS = new Timestamp( + Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days*/), 0); var rsgs = assert.commandWorked(mLocal.adminCommand("replSetGetStatus")); - var primaryOpTime = rsgs.members.filter( function (member) { - return member.self;} - )[0].optime; + var primaryOpTime = rsgs.members.filter(function(member) { + return member.self; + })[0].optime; jsTest.log("future TS: " + tojson(farFutureTS) + ", date:" + tsToDate(farFutureTS)); // We do an update in case there is a minvalid document on the primary already. // If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures // that update returns details of the write, like whether an update or insert was performed. - printjson(assert.writeOK(mMinvalid.update({}, - { ts: farFutureTS, - t: NumberLong(-1), - begin: primaryOpTime}, - { upsert: true, writeConcern: {w: 1}}))); + printjson( + assert.writeOK(mMinvalid.update({}, + {ts: farFutureTS, t: NumberLong(-1), begin: primaryOpTime}, + {upsert: true, writeConcern: {w: 1}}))); jsTest.log("restart primary"); replTest.restart(master); @@ -70,9 +72,13 @@ assert.soon(function() { var mv; - try {mv = mMinvalid.findOne();} catch (e) { return false; } - var msg = "ts !=, " + farFutureTS + - "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) + " - " + tsToDate(mv.ts); + try { + mv = mMinvalid.findOne(); + } catch (e) { + return false; + } + var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) + + " - " + tsToDate(mv.ts); assert.eq(farFutureTS, mv.ts, msg); return true; }); diff --git a/jstests/replsets/apply_ops_lastop.js b/jstests/replsets/apply_ops_lastop.js index 7a0faadd72b..1e7df9a9035 100644 --- a/jstests/replsets/apply_ops_lastop.js +++ b/jstests/replsets/apply_ops_lastop.js @@ -3,66 +3,58 @@ // lastOp is used as the optime to wait for when write concern waits for replication. 
// -(function () { -"use strict"; - -var rs = new ReplSetTest({name: "applyOpsOptimeTest", nodes: 3}); -rs.startSet(); -var nodes = rs.nodeList(); -rs.initiate({"_id": "applyOpsOptimeTest", - "members": [ - {"_id" : 0, "host" : nodes[0]}, - {"_id" : 1, "host" : nodes[1]}, - {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true} ]}); -var primary = rs.getPrimary(); -var db = primary.getDB('foo'); -var coll = primary.getCollection('foo.bar'); -// Two connections -var m1 = new Mongo(primary.host); -var m2 = new Mongo(primary.host); - -var insertApplyOps = [ - { - op: "i", - ns: 'foo.bar', - o: { _id: 1, a: "b" } - } - ]; -var deleteApplyOps = [ - { - op: "d", - ns: 'foo.bar', - o: { _id: 1, a: "b" } - } - ]; -var badPreCondition = [ - { - ns: 'foo.bar', - q: { _id: 10, a: "aaa" }, - res: { a: "aaa" } - } - ]; -var majorityWriteConcern = { w: 'majority', wtimeout: 30000 }; - -// Set up some data -assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works -assert.commandWorked(m1.getDB('foo').runCommand({ applyOps: insertApplyOps, - writeConcern: majorityWriteConcern })); -var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp; - -// No-op applyOps -var res = m2.getDB('foo').runCommand({ applyOps: deleteApplyOps, - preCondition: badPreCondition, - writeConcern: majorityWriteConcern }); -assert.commandFailed(res, "The applyOps command was expected to fail, but instead succeeded."); -assert.eq(res.errmsg, "pre-condition failed", "The applyOps command failed for the wrong reason."); -var noOp = m2.getDB('foo').getLastErrorObj('majority', 30000).lastOp; - -// Check that each connection has the same last optime -assert.eq(noOp, insertOp, "The connections' last optimes do " + - "not match: applyOps failed to update lastop on no-op"); - -rs.stopSet(); +(function() { + "use strict"; + + var rs = new ReplSetTest({name: "applyOpsOptimeTest", nodes: 3}); + rs.startSet(); + var nodes = rs.nodeList(); + rs.initiate({ + "_id": "applyOpsOptimeTest", + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], "arbiterOnly": true} + ] + }); + var primary = rs.getPrimary(); + var db = primary.getDB('foo'); + var coll = primary.getCollection('foo.bar'); + // Two connections + var m1 = new Mongo(primary.host); + var m2 = new Mongo(primary.host); + + var insertApplyOps = [{op: "i", ns: 'foo.bar', o: {_id: 1, a: "b"}}]; + var deleteApplyOps = [{op: "d", ns: 'foo.bar', o: {_id: 1, a: "b"}}]; + var badPreCondition = [{ns: 'foo.bar', q: {_id: 10, a: "aaa"}, res: {a: "aaa"}}]; + var majorityWriteConcern = { + w: 'majority', + wtimeout: 30000 + }; + + // Set up some data + assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works + assert.commandWorked(m1.getDB('foo').runCommand( + {applyOps: insertApplyOps, writeConcern: majorityWriteConcern})); + var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp; + + // No-op applyOps + var res = m2.getDB('foo').runCommand({ + applyOps: deleteApplyOps, + preCondition: badPreCondition, + writeConcern: majorityWriteConcern + }); + assert.commandFailed(res, "The applyOps command was expected to fail, but instead succeeded."); + assert.eq( + res.errmsg, "pre-condition failed", "The applyOps command failed for the wrong reason."); + var noOp = m2.getDB('foo').getLastErrorObj('majority', 30000).lastOp; + + // Check that each connection has the same last optime + assert.eq(noOp, + insertOp, + "The connections' last optimes do " + + "not match: applyOps failed 
to update lastop on no-op"); + + rs.stopSet(); })(); - diff --git a/jstests/replsets/apply_ops_wc.js b/jstests/replsets/apply_ops_wc.js index cb50b9b9070..0b8a49e19bd 100644 --- a/jstests/replsets/apply_ops_wc.js +++ b/jstests/replsets/apply_ops_wc.js @@ -12,7 +12,7 @@ (function() { "use strict"; var nodeCount = 3; - var replTest = new ReplSetTest({ name: 'applyOpsWCSet', nodes: nodeCount}); + var replTest = new ReplSetTest({name: 'applyOpsWCSet', nodes: nodeCount}); replTest.startSet(); var cfg = replTest.getReplSetConfig(); cfg.settings = {}; @@ -34,32 +34,13 @@ dropTestCollection(); // Set up the applyOps command. - var applyOpsReq = { applyOps: [ - { - op: "i", - ns: coll.getFullName(), - o: { - _id: 2, - x: "b" - } - }, - { - op: "i", - ns: coll.getFullName(), - o: { - _id: 3, - x: "c" - } - }, - { - op: "i", - ns: coll.getFullName(), - o: { - _id: 4, - x: "d" - } - }, - ]}; + var applyOpsReq = { + applyOps: [ + {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "b"}}, + {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "c"}}, + {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "d"}}, + ] + }; function assertApplyOpsCommandWorked(res) { assert.eq(3, res.applied); @@ -73,10 +54,7 @@ assert(res.writeConcernError.errmsg); } - var invalidWriteConcerns = [ - { w: 'invalid' }, - { w: nodeCount + 1 } - ]; + var invalidWriteConcerns = [{w: 'invalid'}, {w: nodeCount + 1}]; function testInvalidWriteConcern(wc) { jsTest.log("Testing invalid write concern " + tojson(wc)); @@ -85,28 +63,24 @@ var res = coll.runCommand(applyOpsReq); assertApplyOpsCommandWorked(res); assertWriteConcernError(res); - } // Verify that invalid write concerns yield an error. - coll.insert({ _id: 1, x: "a" }); + coll.insert({_id: 1, x: "a"}); invalidWriteConcerns.forEach(testInvalidWriteConcern); var secondaries = replTest.getSecondaries(); - var majorityWriteConcerns = [ - { w: 2, wtimeout: 30000 }, - { w: 'majority', wtimeout: 30000 }, - ]; + var majorityWriteConcerns = [{w: 2, wtimeout: 30000}, {w: 'majority', wtimeout: 30000}, ]; function testMajorityWriteConcerns(wc) { jsTest.log("Testing " + tojson(wc)); // Reset secondaries to ensure they can replicate. - secondaries[0].getDB('admin').runCommand({ configureFailPoint: 'rsSyncApplyStop', - mode: 'off' }); - secondaries[1].getDB('admin').runCommand({ configureFailPoint: 'rsSyncApplyStop', - mode: 'off' }); + secondaries[0].getDB('admin').runCommand( + {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}); + secondaries[1].getDB('admin').runCommand( + {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}); // Set the writeConcern of the applyOps command. applyOpsReq.writeConcern = wc; @@ -114,36 +88,37 @@ dropTestCollection(); // applyOps with a full replica set should succeed. - coll.insert({ _id: 1, x: "a" }); + coll.insert({_id: 1, x: "a"}); var res = db.runCommand(applyOpsReq); assertApplyOpsCommandWorked(res); - assert(!res.writeConcernError, 'applyOps on a full replicaset had writeConcern error ' + - tojson(res.writeConcernError)); + assert(!res.writeConcernError, + 'applyOps on a full replicaset had writeConcern error ' + + tojson(res.writeConcernError)); dropTestCollection(); // Stop replication at one secondary. - secondaries[0].getDB('admin').runCommand({ configureFailPoint: 'rsSyncApplyStop', - mode: 'alwaysOn' }); + secondaries[0].getDB('admin').runCommand( + {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}); // applyOps should succeed with only 1 node not replicating. 
- coll.insert({ _id: 1, x: "a" }); + coll.insert({_id: 1, x: "a"}); res = db.runCommand(applyOpsReq); assertApplyOpsCommandWorked(res); assert(!res.writeConcernError, - 'applyOps on a replicaset with 2 working nodes had writeConcern error ' + - tojson(res.writeConcernError)); + 'applyOps on a replicaset with 2 working nodes had writeConcern error ' + + tojson(res.writeConcernError)); dropTestCollection(); // Stop replication at a second secondary. - secondaries[1].getDB('admin').runCommand({ configureFailPoint: 'rsSyncApplyStop', - mode: 'alwaysOn' }); + secondaries[1].getDB('admin').runCommand( + {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}); // applyOps should fail after two nodes have stopped replicating. - coll.insert({ _id: 1, x: "a" }); + coll.insert({_id: 1, x: "a"}); applyOpsReq.writeConcern.wtimeout = 5000; res = db.runCommand(applyOpsReq); diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js index b665eec2d90..d41ef9ba5ef 100644 --- a/jstests/replsets/auth1.js +++ b/jstests/replsets/auth1.js @@ -10,30 +10,31 @@ var port = allocatePorts(5); var path = "jstests/libs/"; // These keyFiles have their permissions set to 600 later in the test. -var key1_600 = path+"key1"; -var key2_600 = path+"key2"; +var key1_600 = path + "key1"; +var key2_600 = path + "key2"; // This keyFile has its permissions set to 644 later in the test. -var key1_644 = path+"key1_644"; +var key1_644 = path + "key1_644"; print("try starting mongod with auth"); -var m = MongoRunner.runMongod({auth : "", port : port[4], dbpath : MongoRunner.dataDir + "/wrong-auth"}); +var m = + MongoRunner.runMongod({auth: "", port: port[4], dbpath: MongoRunner.dataDir + "/wrong-auth"}); assert.eq(m.getDB("local").auth("__system", ""), 0); MongoRunner.stopMongod(m); - print("reset permissions"); run("chmod", "644", key1_644); - print("try starting mongod"); -m = runMongoProgram( "mongod", "--keyFile", key1_644, "--port", port[0], "--dbpath", MongoRunner.dataPath + name); - +m = runMongoProgram( + "mongod", "--keyFile", key1_644, "--port", port[0], "--dbpath", MongoRunner.dataPath + name); print("should fail with wrong permissions"); -assert.eq(m, _isWindows()? 100 : 1, "mongod should exit w/ 1 (EXIT_FAILURE): permissions too open"); +assert.eq(m, + _isWindows() ? 
100 : 1, + "mongod should exit w/ 1 (EXIT_FAILURE): permissions too open"); MongoRunner.stopMongod(port[0]); print("add a user to server0: foo"); @@ -44,27 +45,27 @@ print("make sure user is written before shutting down"); MongoRunner.stopMongod(m); print("start up rs"); -var rs = new ReplSetTest({"name" : name, "nodes" : 3}); +var rs = new ReplSetTest({"name": name, "nodes": 3}); print("restart 0 with keyFile"); -m = rs.restart(0, {"keyFile" : key1_600}); +m = rs.restart(0, {"keyFile": key1_600}); print("restart 1 with keyFile"); -rs.start(1, {"keyFile" : key1_600}); +rs.start(1, {"keyFile": key1_600}); print("restart 2 with keyFile"); -rs.start(2, {"keyFile" : key1_600}); +rs.start(2, {"keyFile": key1_600}); var result = m.getDB("admin").auth("foo", "bar"); assert.eq(result, 1, "login failed"); print("Initializing replSet with config: " + tojson(rs.getReplSetConfig())); -result = m.getDB("admin").runCommand({replSetInitiate : rs.getReplSetConfig()}); -assert.eq(result.ok, 1, "couldn't initiate: "+tojson(result)); -m.getDB('admin').logout(); // In case this node doesn't become primary, make sure its not auth'd +result = m.getDB("admin").runCommand({replSetInitiate: rs.getReplSetConfig()}); +assert.eq(result.ok, 1, "couldn't initiate: " + tojson(result)); +m.getDB('admin').logout(); // In case this node doesn't become primary, make sure its not auth'd var master = rs.getPrimary(); rs.awaitSecondaryNodes(); var mId = rs.getNodeId(master); var slave = rs.liveNodes.slaves[0]; assert.eq(1, master.getDB("admin").auth("foo", "bar")); -assert.writeOK(master.getDB("test").foo.insert({ x: 1 }, { writeConcern: { w:3, wtimeout:60000 }})); +assert.writeOK(master.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: 60000}})); print("try some legal and illegal reads"); var r = master.getDB("test").foo.findOne(); @@ -73,7 +74,7 @@ assert.eq(r.x, 1); slave.setSlaveOk(); function doQueryOn(p) { - var error = assert.throws( function() { + var error = assert.throws(function() { r = p.getDB("test").foo.findOne(); }, [], "find did not throw, returned: " + tojson(r)).toString(); printjson(error); @@ -81,28 +82,26 @@ function doQueryOn(p) { } doQueryOn(slave); -master.adminCommand({logout:1}); +master.adminCommand({logout: 1}); print("unauthorized:"); -printjson(master.adminCommand({replSetGetStatus : 1})); +printjson(master.adminCommand({replSetGetStatus: 1})); doQueryOn(master); - result = slave.getDB("test").auth("bar", "baz"); assert.eq(result, 1); r = slave.getDB("test").foo.findOne(); assert.eq(r.x, 1); - print("add some data"); master.getDB("test").auth("bar", "baz"); var bulk = master.getDB("test").foo.initializeUnorderedBulkOp(); -for (var i=0; i<1000; i++) { - bulk.insert({ x: i, foo: "bar" }); +for (var i = 0; i < 1000; i++) { + bulk.insert({x: i, foo: "bar"}); } -assert.writeOK(bulk.execute({ w: 3, wtimeout: 60000 })); +assert.writeOK(bulk.execute({w: 3, wtimeout: 60000})); print("fail over"); rs.stop(mId); @@ -112,86 +111,84 @@ master = rs.getPrimary(); print("add some more data 1"); master.getDB("test").auth("bar", "baz"); bulk = master.getDB("test").foo.initializeUnorderedBulkOp(); -for (var i=0; i<1000; i++) { - bulk.insert({ x: i, foo: "bar" }); +for (var i = 0; i < 1000; i++) { + bulk.insert({x: i, foo: "bar"}); } -assert.writeOK(bulk.execute({ w: 2 })); +assert.writeOK(bulk.execute({w: 2})); print("resync"); -rs.restart(mId, {"keyFile" : key1_600}); +rs.restart(mId, {"keyFile": key1_600}); master = rs.getPrimary(); print("add some more data 2"); bulk = 
master.getDB("test").foo.initializeUnorderedBulkOp(); -for (var i=0; i<1000; i++) { - bulk.insert({ x: i, foo: "bar" }); +for (var i = 0; i < 1000; i++) { + bulk.insert({x: i, foo: "bar"}); } -bulk.execute({ w:3, wtimeout:60000 }); +bulk.execute({w: 3, wtimeout: 60000}); print("add member with wrong key"); -var conn = MongoRunner.runMongod({dbpath: MongoRunner.dataPath + name + "-3", - port: port[3], - replSet: "rs_auth1", - oplogSize: 2, - keyFile: key2_600}); - +var conn = MongoRunner.runMongod({ + dbpath: MongoRunner.dataPath + name + "-3", + port: port[3], + replSet: "rs_auth1", + oplogSize: 2, + keyFile: key2_600 +}); master.getDB("admin").auth("foo", "bar"); var config = master.getDB("local").system.replset.findOne(); -config.members.push({_id : 3, host : rs.host+":"+port[3]}); +config.members.push({_id: 3, host: rs.host + ":" + port[3]}); config.version++; try { - master.adminCommand({replSetReconfig:config}); -} -catch (e) { - print("error: "+e); + master.adminCommand({replSetReconfig: config}); +} catch (e) { + print("error: " + e); } master = rs.getPrimary(); master.getDB("admin").auth("foo", "bar"); - print("shouldn't ever sync"); -for (var i = 0; i<10; i++) { - print("iteration: " +i); - var results = master.adminCommand({replSetGetStatus:1}); +for (var i = 0; i < 10; i++) { + print("iteration: " + i); + var results = master.adminCommand({replSetGetStatus: 1}); printjson(results); assert(results.members[3].state != 2); sleep(1000); } - print("stop member"); MongoRunner.stopMongod(conn); - print("start back up with correct key"); -var conn = MongoRunner.runMongod({dbpath: MongoRunner.dataPath + name + "-3", - port: port[3], - replSet: "rs_auth1", - oplogSize: 2, - keyFile: key1_600}); +var conn = MongoRunner.runMongod({ + dbpath: MongoRunner.dataPath + name + "-3", + port: port[3], + replSet: "rs_auth1", + oplogSize: 2, + keyFile: key1_600 +}); wait(function() { try { - var results = master.adminCommand({replSetGetStatus:1}); + var results = master.adminCommand({replSetGetStatus: 1}); printjson(results); return results.members[3].state == 2; - } - catch (e) { + } catch (e) { print(e); } return false; - }); +}); print("make sure it has the config, too"); assert.soon(function() { - for (var i in rs.nodes) { - rs.nodes[i].setSlaveOk(); - rs.nodes[i].getDB("admin").auth("foo","bar"); - config = rs.nodes[i].getDB("local").system.replset.findOne(); - if (config.version != 2) { - return false; - } + for (var i in rs.nodes) { + rs.nodes[i].setSlaveOk(); + rs.nodes[i].getDB("admin").auth("foo", "bar"); + config = rs.nodes[i].getDB("local").system.replset.findOne(); + if (config.version != 2) { + return false; } - return true; - }); + } + return true; +}); diff --git a/jstests/replsets/auth2.js b/jstests/replsets/auth2.js index b7776d94572..f7b8d8ab468 100644 --- a/jstests/replsets/auth2.js +++ b/jstests/replsets/auth2.js @@ -18,39 +18,41 @@ var testInvalidAuthStates = function() { rs.waitForState(rs.nodes[0], ReplSetTest.State.SECONDARY); - rs.restart(1, {"keyFile" : key1}); - rs.restart(2, {"keyFile" : key1}); + rs.restart(1, {"keyFile": key1}); + rs.restart(2, {"keyFile": key1}); }; var name = "rs_auth2"; var path = "jstests/libs/"; // These keyFiles have their permissions set to 600 later in the test. 
-var key1 = path+"key1"; -var key2 = path+"key2"; +var key1 = path + "key1"; +var key2 = path + "key2"; var rs = new ReplSetTest({name: name, nodes: 3}); var nodes = rs.startSet(); var hostnames = rs.nodeList(); -rs.initiate({ "_id" : name, - "members" : [ - {"_id" : 0, "host" : hostnames[0], "priority" : 2}, - {"_id" : 1, "host" : hostnames[1], priority: 0}, - {"_id" : 2, "host" : hostnames[2], priority: 0} - ]}); +rs.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": hostnames[0], "priority": 2}, + {"_id": 1, "host": hostnames[1], priority: 0}, + {"_id": 2, "host": hostnames[2], priority: 0} + ] +}); var master = rs.getPrimary(); print("add an admin user"); -master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}, - {w: 3, wtimeout: 30000}); +master.getDB("admin") + .createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}, {w: 3, wtimeout: 30000}); var m = rs.nodes[0]; print("starting 1 and 2 with key file"); rs.stop(1); -rs.restart(1, {"keyFile" : key1}); +rs.restart(1, {"keyFile": key1}); rs.stop(2); -rs.restart(2, {"keyFile" : key1}); +rs.restart(2, {"keyFile": key1}); // auth to all nodes with auth rs.nodes[1].getDB("admin").auth("foo", "bar"); @@ -60,15 +62,15 @@ testInvalidAuthStates(); print("restart mongod with bad keyFile"); rs.stop(0); -m = rs.restart(0, {"keyFile" : key2}); +m = rs.restart(0, {"keyFile": key2}); -//auth to all nodes +// auth to all nodes rs.nodes[0].getDB("admin").auth("foo", "bar"); rs.nodes[1].getDB("admin").auth("foo", "bar"); rs.nodes[2].getDB("admin").auth("foo", "bar"); testInvalidAuthStates(); rs.stop(0); -m = rs.restart(0, {"keyFile" : key1}); +m = rs.restart(0, {"keyFile": key1}); print("0 becomes a secondary"); diff --git a/jstests/replsets/auth3.js b/jstests/replsets/auth3.js index 504bfeffe9c..3ac812bcfa1 100644 --- a/jstests/replsets/auth3.js +++ b/jstests/replsets/auth3.js @@ -8,14 +8,11 @@ // run on ephemeral storage engines. 
// @tags: [requires_persistence] -(function () { +(function() { "use strict"; var keyfile = "jstests/libs/key1"; var master; - var rs = new ReplSetTest({ - nodes : { node0 : {}, node1 : {}, arbiter : {}}, - keyFile : keyfile - }); + var rs = new ReplSetTest({nodes: {node0: {}, node1: {}, arbiter: {}}, keyFile: keyfile}); rs.startSet(); rs.initiate(); @@ -27,11 +24,11 @@ var safeInsert = function() { master = rs.getPrimary(); master.getDB("admin").auth("foo", "bar"); - assert.writeOK(master.getDB("foo").bar.insert({ x: 1 })); + assert.writeOK(master.getDB("foo").bar.insert({x: 1})); }; jsTest.log("authing"); - for (var i=0; i<2; i++) { + for (var i = 0; i < 2; i++) { assert(rs.nodes[i].getDB("admin").auth("foo", "bar"), "could not log into " + rs.nodes[i].host); } @@ -39,7 +36,11 @@ jsTest.log("make common point"); safeInsert(); - authutil.asCluster(rs.nodes, keyfile, function() { rs.awaitReplication(); }); + authutil.asCluster(rs.nodes, + keyfile, + function() { + rs.awaitReplication(); + }); jsTest.log("write stuff to 0&2"); rs.stop(1); @@ -48,7 +49,7 @@ master.getDB("admin").auth("foo", "bar"); master.getDB("foo").bar.drop(); jsTest.log("last op: " + - tojson(master.getDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next())); + tojson(master.getDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next())); jsTest.log("write stuff to 1&2"); rs.stop(0); @@ -56,12 +57,16 @@ safeInsert(); jsTest.log("last op: " + - tojson(master.getDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next())); + tojson(master.getDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next())); rs.restart(0); jsTest.log("doing rollback!"); - authutil.asCluster(rs.nodes, keyfile, function () { rs.awaitSecondaryNodes(); }); + authutil.asCluster(rs.nodes, + keyfile, + function() { + rs.awaitSecondaryNodes(); + }); }()); diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js index cce4e8020d6..d35d0ec2919 100644 --- a/jstests/replsets/auth_no_pri.js +++ b/jstests/replsets/auth_no_pri.js @@ -1,32 +1,32 @@ // Test that you can still authenticate a replset connection to a RS with no primary (SERVER-6665). -(function () { -'use strict'; +(function() { + 'use strict'; -var NODE_COUNT = 3; -var rs = new ReplSetTest({"nodes" : NODE_COUNT, keyFile : "jstests/libs/key1"}); -var nodes = rs.startSet(); -rs.initiate(); + var NODE_COUNT = 3; + var rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"}); + var nodes = rs.startSet(); + rs.initiate(); -// Add user -var master = rs.getPrimary(); -master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT}); + // Add user + var master = rs.getPrimary(); + master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT}); -// Can authenticate replset connection when whole set is up. -var conn = new Mongo(rs.getURL()); -assert(conn.getDB('admin').auth('admin', 'pwd')); -assert.writeOK(conn.getDB('admin').foo.insert({a:1}, { writeConcern: { w: NODE_COUNT } })); + // Can authenticate replset connection when whole set is up. 
+ var conn = new Mongo(rs.getURL()); + assert(conn.getDB('admin').auth('admin', 'pwd')); + assert.writeOK(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}})); -// Make sure there is no primary -rs.stop(0); -rs.stop(1); -rs.waitForState(nodes[2], ReplSetTest.State.SECONDARY); + // Make sure there is no primary + rs.stop(0); + rs.stop(1); + rs.waitForState(nodes[2], ReplSetTest.State.SECONDARY); -// Make sure you can still authenticate a replset connection with no primary -var conn2 = new Mongo(rs.getURL()); -conn2.setSlaveOk(true); -assert(conn2.getDB('admin').auth({user:'admin', pwd:'pwd', mechanism:"SCRAM-SHA-1"})); -assert.eq(1, conn2.getDB('admin').foo.findOne().a); + // Make sure you can still authenticate a replset connection with no primary + var conn2 = new Mongo(rs.getURL()); + conn2.setSlaveOk(true); + assert(conn2.getDB('admin').auth({user: 'admin', pwd: 'pwd', mechanism: "SCRAM-SHA-1"})); + assert.eq(1, conn2.getDB('admin').foo.findOne().a); -rs.stopSet(); + rs.stopSet(); }()); diff --git a/jstests/replsets/await_replication_timeout.js b/jstests/replsets/await_replication_timeout.js index 03ebfa5f8a9..b0fb605567d 100644 --- a/jstests/replsets/await_replication_timeout.js +++ b/jstests/replsets/await_replication_timeout.js @@ -1,47 +1,49 @@ // Tests timeout behavior of waiting for write concern as well as its interaction with maxTimeMs (function() { -"use strict"; + "use strict"; -var exceededTimeLimitCode = 50; -var writeConcernFailedCode = 64; -var replTest = new ReplSetTest({ nodes: 3 }); -replTest.startSet(); -replTest.initiate(); -replTest.stop(0); // Make sure that there are only 2 nodes up so w:3 writes will always time out -var primary = replTest.getPrimary(); -var testDB = primary.getDB('test'); + var exceededTimeLimitCode = 50; + var writeConcernFailedCode = 64; + var replTest = new ReplSetTest({nodes: 3}); + replTest.startSet(); + replTest.initiate(); + replTest.stop( + 0); // Make sure that there are only 2 nodes up so w:3 writes will always time out + var primary = replTest.getPrimary(); + var testDB = primary.getDB('test'); -// Test wtimeout -var res = testDB.runCommand({insert: 'foo', - documents: [{a:1}], - writeConcern: {w: 3, wtimeout: 1000}}); -assert.commandWorked(res); // Commands with write concern errors still report success. -assert.eq(writeConcernFailedCode, res.writeConcernError.code); + // Test wtimeout + var res = testDB.runCommand( + {insert: 'foo', documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1000}}); + assert.commandWorked(res); // Commands with write concern errors still report success. + assert.eq(writeConcernFailedCode, res.writeConcernError.code); -// Test maxTimeMS timeout -res = testDB.runCommand({insert: 'foo', - documents: [{a:1}], - writeConcern: {w: 3}, - maxTimeMS: 1000}); -assert.commandWorked(res); // Commands with write concern errors still report success. -assert.eq(exceededTimeLimitCode, res.writeConcernError.code); + // Test maxTimeMS timeout + res = testDB.runCommand( + {insert: 'foo', documents: [{a: 1}], writeConcern: {w: 3}, maxTimeMS: 1000}); + assert.commandWorked(res); // Commands with write concern errors still report success. + assert.eq(exceededTimeLimitCode, res.writeConcernError.code); -// Test with wtimeout < maxTimeMS -res = testDB.runCommand({insert: 'foo', - documents: [{a:1}], - writeConcern: {w: 3, wtimeout: 1000}, - maxTimeMS: 10 * 1000}); -assert.commandWorked(res); // Commands with write concern errors still report success. 
-assert.eq(writeConcernFailedCode, res.writeConcernError.code); + // Test with wtimeout < maxTimeMS + res = testDB.runCommand({ + insert: 'foo', + documents: [{a: 1}], + writeConcern: {w: 3, wtimeout: 1000}, + maxTimeMS: 10 * 1000 + }); + assert.commandWorked(res); // Commands with write concern errors still report success. + assert.eq(writeConcernFailedCode, res.writeConcernError.code); -// Test with wtimeout > maxTimeMS -res = testDB.runCommand({insert: 'foo', - documents: [{a:1}], - writeConcern: {w: 3, wtimeout: 10* 1000}, - maxTimeMS: 1000}); -assert.commandWorked(res); // Commands with write concern errors still report success. -assert.eq(exceededTimeLimitCode, res.writeConcernError.code); -replTest.stopSet(); + // Test with wtimeout > maxTimeMS + res = testDB.runCommand({ + insert: 'foo', + documents: [{a: 1}], + writeConcern: {w: 3, wtimeout: 10 * 1000}, + maxTimeMS: 1000 + }); + assert.commandWorked(res); // Commands with write concern errors still report success. + assert.eq(exceededTimeLimitCode, res.writeConcernError.code); + replTest.stopSet(); })(); diff --git a/jstests/replsets/background_index.js b/jstests/replsets/background_index.js index 9c92f8ca8f4..6d891a66a85 100644 --- a/jstests/replsets/background_index.js +++ b/jstests/replsets/background_index.js @@ -17,8 +17,8 @@ var coll = primary.getCollection("test.foo"); var adminDB = primary.getDB("admin"); - for (var i=0; i<100; i++) { - assert.writeOK(coll.insert({_id: i, x: i*3, str: "hello world"})); + for (var i = 0; i < 100; i++) { + assert.writeOK(coll.insert({_id: i, x: i * 3, str: "hello world"})); } // Add a background index. @@ -26,9 +26,8 @@ // Rename the collection. assert.commandWorked( - adminDB.runCommand({renameCollection: "test.foo", to: "bar.test", dropTarget: true}), - "Call to renameCollection failed." - ); + adminDB.runCommand({renameCollection: "test.foo", to: "bar.test", dropTarget: true}), + "Call to renameCollection failed."); // Await replication. 
rst.awaitReplication(); diff --git a/jstests/replsets/batch_write_command_wc.js b/jstests/replsets/batch_write_command_wc.js index c71fa18bb45..d6f83c08e3a 100644 --- a/jstests/replsets/batch_write_command_wc.js +++ b/jstests/replsets/batch_write_command_wc.js @@ -13,8 +13,8 @@ jsTest.log("Starting no journal/repl set tests..."); // Start a single-node replica set with no journal // Allows testing immediate write concern failures and wc application failures -var rst = new ReplSetTest({ nodes : 2 }); -rst.startSet({ nojournal : "" }); +var rst = new ReplSetTest({nodes: 2}); +rst.startSet({nojournal: ""}); rst.initiate(); var mongod = rst.getPrimary(); var coll = mongod.getCollection("test.batch_write_command_wc"); @@ -22,9 +22,8 @@ var coll = mongod.getCollection("test.batch_write_command_wc"); // // Basic insert, default WC coll.remove({}); -printjson( request = {insert : coll.getName(), - documents: [{a:1}]}); -printjson( result = coll.runCommand(request) ); +printjson(request = {insert: coll.getName(), documents: [{a: 1}]}); +printjson(result = coll.runCommand(request)); assert(result.ok); assert.eq(1, result.n); assert.eq(1, coll.count()); @@ -32,10 +31,8 @@ assert.eq(1, coll.count()); // // Basic insert, majority WC coll.remove({}); -printjson( request = {insert : coll.getName(), - documents: [{a:1}], - writeConcern: {w: 'majority'}}); -printjson( result = coll.runCommand(request) ); +printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'majority'}}); +printjson(result = coll.runCommand(request)); assert(result.ok); assert.eq(1, result.n); assert.eq(1, coll.count()); @@ -43,10 +40,8 @@ assert.eq(1, coll.count()); // // Basic insert, w:2 WC coll.remove({}); -printjson( request = {insert : coll.getName(), - documents: [{a:1}], - writeConcern: {w:2}}); -printjson( result = coll.runCommand(request) ); +printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 2}}); +printjson(result = coll.runCommand(request)); assert(result.ok); assert.eq(1, result.n); assert.eq(1, coll.count()); @@ -54,20 +49,17 @@ assert.eq(1, coll.count()); // // Basic insert, immediate nojournal error coll.remove({}); -printjson( request = {insert : coll.getName(), - documents: [{a:1}], - writeConcern: {j:true}}); -printjson( result = coll.runCommand(request) ); +printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {j: true}}); +printjson(result = coll.runCommand(request)); assert(!result.ok); assert.eq(0, coll.count()); // // Basic insert, timeout wc error coll.remove({}); -printjson( request = {insert : coll.getName(), - documents: [{a:1}], - writeConcern: {w:3, wtimeout: 1}}); -printjson( result = coll.runCommand(request) ); +printjson( + request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1}}); +printjson(result = coll.runCommand(request)); assert(result.ok); assert.eq(1, result.n); assert(result.writeConcernError); @@ -77,10 +69,8 @@ assert.eq(1, coll.count()); // // Basic insert, wmode wc error coll.remove({}); -printjson( request = {insert : coll.getName(), - documents: [{a:1}], - writeConcern: {w: 'invalid'}}); -printjson( result = coll.runCommand(request) ); +printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'invalid'}}); +printjson(result = coll.runCommand(request)); assert(result.ok); assert.eq(1, result.n); assert(result.writeConcernError); @@ -89,10 +79,12 @@ assert.eq(1, coll.count()); // // Two ordered inserts, write error and wc error both reported 
coll.remove({}); -printjson( request = {insert : coll.getName(), - documents: [{a:1},{$invalid:'doc'}], - writeConcern: {w: 'invalid'}}); -printjson( result = coll.runCommand(request) ); +printjson(request = { + insert: coll.getName(), + documents: [{a: 1}, {$invalid: 'doc'}], + writeConcern: {w: 'invalid'} +}); +printjson(result = coll.runCommand(request)); assert(result.ok); assert.eq(1, result.n); assert.eq(result.writeErrors.length, 1); @@ -103,11 +95,13 @@ assert.eq(1, coll.count()); // // Two unordered inserts, write error and wc error reported coll.remove({}); -printjson( request = {insert : coll.getName(), - documents: [{a:1},{$invalid:'doc'}], - writeConcern: {w: 'invalid'}, - ordered: false}); -printjson( result = coll.runCommand(request) ); +printjson(request = { + insert: coll.getName(), + documents: [{a: 1}, {$invalid: 'doc'}], + writeConcern: {w: 'invalid'}, + ordered: false +}); +printjson(result = coll.runCommand(request)); assert(result.ok); assert.eq(1, result.n); assert.eq(result.writeErrors.length, 1); @@ -118,10 +112,12 @@ assert.eq(1, coll.count()); // // Write error with empty writeConcern object. coll.remove({}); -request = { insert: coll.getName(), - documents: [{ _id: 1 }, { _id: 1 }], - writeConcern: {}, - ordered: false }; +request = { + insert: coll.getName(), + documents: [{_id: 1}, {_id: 1}], + writeConcern: {}, + ordered: false +}; result = coll.runCommand(request); assert(result.ok); assert.eq(1, result.n); @@ -133,10 +129,12 @@ assert.eq(1, coll.count()); // // Write error with unspecified w. coll.remove({}); -request = { insert: coll.getName(), - documents: [{ _id: 1 }, { _id: 1 }], - writeConcern: { wTimeout: 1 }, - ordered: false }; +request = { + insert: coll.getName(), + documents: [{_id: 1}, {_id: 1}], + writeConcern: {wTimeout: 1}, + ordered: false +}; result = coll.runCommand(request); assert(result.ok); assert.eq(1, result.n); @@ -147,4 +145,3 @@ assert.eq(1, coll.count()); jsTest.log("DONE no journal/repl tests"); rst.stopSet(); - diff --git a/jstests/replsets/buildindexes.js b/jstests/replsets/buildindexes.js index a114011c3a0..f6a8a781014 100644 --- a/jstests/replsets/buildindexes.js +++ b/jstests/replsets/buildindexes.js @@ -2,65 +2,65 @@ (function() { - var name = "buildIndexes"; - var host = getHostName(); - - var replTest = new ReplSetTest( {name: name, nodes: 3} ); - - var nodes = replTest.startSet(); - - var config = replTest.getReplSetConfig(); - config.members[2].priority = 0; - config.members[2].buildIndexes = false; - - replTest.initiate(config); - - var master = replTest.getPrimary().getDB(name); - var slaveConns = replTest.liveNodes.slaves; - var slave = []; - for (var i in slaveConns) { - slaveConns[i].setSlaveOk(); - slave.push(slaveConns[i].getDB(name)); - } - replTest.awaitReplication(); - - master.x.ensureIndex({y : 1}); - - for (i = 0; i < 100; i++) { - master.x.insert({x:1,y:"abc",c:1}); - } - - replTest.awaitReplication(); - - assert.commandWorked(slave[0].runCommand({count: "x"})); - - var indexes = slave[0].stats().indexes; - assert.eq(indexes, 2, 'number of indexes'); - - indexes = slave[1].stats().indexes; - assert.eq(indexes, 1); - - indexes = slave[0].x.stats().indexSizes; - - var count = 0; - for (i in indexes) { - count++; - if (i == "_id_") { - continue; + var name = "buildIndexes"; + var host = getHostName(); + + var replTest = new ReplSetTest({name: name, nodes: 3}); + + var nodes = replTest.startSet(); + + var config = replTest.getReplSetConfig(); + config.members[2].priority = 0; + config.members[2].buildIndexes 
= false; + + replTest.initiate(config); + + var master = replTest.getPrimary().getDB(name); + var slaveConns = replTest.liveNodes.slaves; + var slave = []; + for (var i in slaveConns) { + slaveConns[i].setSlaveOk(); + slave.push(slaveConns[i].getDB(name)); } - assert(i.match(/y_/)); - } + replTest.awaitReplication(); + + master.x.ensureIndex({y: 1}); + + for (i = 0; i < 100; i++) { + master.x.insert({x: 1, y: "abc", c: 1}); + } + + replTest.awaitReplication(); - assert.eq(count, 2); - - indexes = slave[1].x.stats().indexSizes; + assert.commandWorked(slave[0].runCommand({count: "x"})); - count = 0; - for (i in indexes) { - count++; - } + var indexes = slave[0].stats().indexes; + assert.eq(indexes, 2, 'number of indexes'); + + indexes = slave[1].stats().indexes; + assert.eq(indexes, 1); + + indexes = slave[0].x.stats().indexSizes; + + var count = 0; + for (i in indexes) { + count++; + if (i == "_id_") { + continue; + } + assert(i.match(/y_/)); + } + + assert.eq(count, 2); + + indexes = slave[1].x.stats().indexSizes; + + count = 0; + for (i in indexes) { + count++; + } - assert.eq(count, 1); + assert.eq(count, 1); - replTest.stopSet(); + replTest.stopSet(); }()); diff --git a/jstests/replsets/bulk_api_wc.js b/jstests/replsets/bulk_api_wc.js index a92b536dda1..f08e4df64b9 100644 --- a/jstests/replsets/bulk_api_wc.js +++ b/jstests/replsets/bulk_api_wc.js @@ -7,9 +7,9 @@ jsTest.log("Starting bulk api write concern tests..."); // Start a 2-node replica set with no journal -//Allows testing immediate write concern failures and wc application failures -var rst = new ReplSetTest({ nodes : 2 }); -rst.startSet({ nojournal : "" }); +// Allows testing immediate write concern failures and wc application failures +var rst = new ReplSetTest({nodes: 2}); +rst.startSet({nojournal: ""}); rst.initiate(); var mongod = rst.getPrimary(); var coll = mongod.getCollection("test.bulk_api_wc"); @@ -18,7 +18,7 @@ var executeTests = function() { // Create a unique index, legacy writes validate too early to use invalid documents for write // error testing - coll.ensureIndex({ a : 1 }, { unique : true }); + coll.ensureIndex({a: 1}, {unique: true}); // // Ordered @@ -28,18 +28,22 @@ var executeTests = function() { // Fail due to nojournal coll.remove({}); var bulk = coll.initializeOrderedBulkOp(); - bulk.insert({a:1}); - bulk.insert({a:2}); - assert.throws( function(){ bulk.execute({ j : true }); } ); + bulk.insert({a: 1}); + bulk.insert({a: 2}); + assert.throws(function() { + bulk.execute({j: true}); + }); // // Fail with write error, no write concern error even though it would fail on apply for ordered coll.remove({}); var bulk = coll.initializeOrderedBulkOp(); - bulk.insert({a:1}); - bulk.insert({a:2}); - bulk.insert({a:2}); - var result = assert.throws( function() { bulk.execute({ w : 'invalid' }); } ); + bulk.insert({a: 1}); + bulk.insert({a: 2}); + bulk.insert({a: 2}); + var result = assert.throws(function() { + bulk.execute({w: 'invalid'}); + }); assert.eq(result.nInserted, 2); assert.eq(result.getWriteErrors()[0].index, 2); assert(!result.getWriteConcernError()); @@ -53,10 +57,12 @@ var executeTests = function() { // Fail with write error, write concern error reported when unordered coll.remove({}); var bulk = coll.initializeUnorderedBulkOp(); - bulk.insert({a:1}); - bulk.insert({a:2}); - bulk.insert({a:2}); - var result = assert.throws( function(){ bulk.execute({ w : 'invalid' }); } ); + bulk.insert({a: 1}); + bulk.insert({a: 2}); + bulk.insert({a: 2}); + var result = assert.throws(function() { + bulk.execute({w: 
'invalid'}); + }); assert.eq(result.nInserted, 2); assert.eq(result.getWriteErrors()[0].index, 2); assert(result.getWriteConcernError()); @@ -68,10 +74,12 @@ var executeTests = function() { // multiple wc errors coll.remove({}); var bulk = coll.initializeUnorderedBulkOp(); - bulk.insert({a:1}); - bulk.insert({a:2}); - bulk.insert({a:2}); - var result = assert.throws( function() { bulk.execute({ w : 3, wtimeout : 1 }); } ); + bulk.insert({a: 1}); + bulk.insert({a: 2}); + bulk.insert({a: 2}); + var result = assert.throws(function() { + bulk.execute({w: 3, wtimeout: 1}); + }); assert.eq(result.nInserted, 2); assert.eq(result.getWriteErrors()[0].index, 2); assert.eq(100, result.getWriteConcernError().code); @@ -81,11 +89,13 @@ var executeTests = function() { // Fail with write error and upserted, write concern error reported when unordered coll.remove({}); var bulk = coll.initializeUnorderedBulkOp(); - bulk.insert({a:1}); - bulk.insert({a:2}); - bulk.find({a:3}).upsert().updateOne({a:3}); - bulk.insert({a:3}); - var result = assert.throws( function(){ bulk.execute({ w : 'invalid' }); } ); + bulk.insert({a: 1}); + bulk.insert({a: 2}); + bulk.find({a: 3}).upsert().updateOne({a: 3}); + bulk.insert({a: 3}); + var result = assert.throws(function() { + bulk.execute({w: 'invalid'}); + }); assert.eq(result.nInserted, 2); assert.eq(result.nUpserted, 1); assert.eq(result.getUpsertedIds()[0].index, 2); @@ -95,12 +105,16 @@ var executeTests = function() { }; // Use write commands -coll.getMongo().useWriteCommands = function() { return true; }; +coll.getMongo().useWriteCommands = function() { + return true; +}; executeTests(); // FAILING currently due to incorrect batch api reading of GLE // Use legacy opcodes -coll.getMongo().useWriteCommands = function() { return false; }; +coll.getMongo().useWriteCommands = function() { + return false; +}; executeTests(); jsTest.log("DONE bulk api wc tests"); diff --git a/jstests/replsets/capped_id.js b/jstests/replsets/capped_id.js index 8ba37ea7c14..8708f5752f8 100644 --- a/jstests/replsets/capped_id.js +++ b/jstests/replsets/capped_id.js @@ -8,7 +8,7 @@ // and check it got created on secondaries. 
// Create a new replica set test with name 'testSet' and 3 members -var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} ); +var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); // call startSet() to start each mongod in the replica set // this returns a list of nodes @@ -31,90 +31,86 @@ var slave2 = replTest.liveNodes.slaves[1]; // Calling getPrimary made available the liveNodes structure, // which looks like this: // liveNodes = {master: masterNode, slaves: [slave1, slave2] } -printjson( replTest.liveNodes ); +printjson(replTest.liveNodes); // define db names to use for this test var dbname = "dbname"; -var masterdb = master.getDB( dbname ); -var slave1db = slave1.getDB( dbname ); -var slave2db = slave2.getDB( dbname ); +var masterdb = master.getDB(dbname); +var slave1db = slave1.getDB(dbname); +var slave2db = slave2.getDB(dbname); function countIdIndexes(theDB, coll) { - return theDB[coll].getIndexes().filter(function(idx) { - return friendlyEqual(idx.key, {_id: 1}); - }).length; + return theDB[coll].getIndexes().filter(function(idx) { + return friendlyEqual(idx.key, {_id: 1}); + }).length; } var numtests = 4; -for( testnum=0; testnum < numtests; testnum++ ){ - - //define collection name +for (testnum = 0; testnum < numtests; testnum++) { + // define collection name coll = "coll" + testnum; // drop the coll on the master (just in case it already existed) // and wait for the drop to replicate - masterdb.getCollection( coll ).drop(); + masterdb.getCollection(coll).drop(); replTest.awaitReplication(); - if ( testnum == 0 ){ + if (testnum == 0) { // create a capped collection on the master // insert a bunch of things in it // wait for it to replicate - masterdb.runCommand( {create : coll , capped : true , size : 1024} ); - for(i=0; i < 500 ; i++){ - masterdb.getCollection( coll ).insert( {a: 1000} ); + masterdb.runCommand({create: coll, capped: true, size: 1024}); + for (i = 0; i < 500; i++) { + masterdb.getCollection(coll).insert({a: 1000}); } replTest.awaitReplication(); - } - else if ( testnum == 1 ){ + } else if (testnum == 1) { // create a non-capped collection on the master // insert a bunch of things in it // wait for it to replicate - masterdb.runCommand( {create : coll } ); - for(i=0; i < 500 ; i++){ - masterdb.getCollection( coll ).insert( {a: 1000} ); + masterdb.runCommand({create: coll}); + for (i = 0; i < 500; i++) { + masterdb.getCollection(coll).insert({a: 1000}); } replTest.awaitReplication(); // make sure _id index exists on primary - assert.eq( 1 , - countIdIndexes(masterdb, coll), - "master does not have _id index on normal collection"); + assert.eq(1, + countIdIndexes(masterdb, coll), + "master does not have _id index on normal collection"); // then convert it to capped - masterdb.runCommand({convertToCapped: coll , size: 1024 } ); + masterdb.runCommand({convertToCapped: coll, size: 1024}); replTest.awaitReplication(); - } - else if ( testnum == 2 ){ + } else if (testnum == 2) { // similar to first test, but check that a bunch of updates instead // of inserts triggers the _id index creation on secondaries. 
- masterdb.runCommand( {create : coll , capped : true , size : 1024} ); - masterdb.getCollection( coll ).insert( {a : 0} ); - for(i=0; i < 500 ; i++){ - masterdb.getCollection( coll ).update( {} , {$inc : {a : 1} } ); + masterdb.runCommand({create: coll, capped: true, size: 1024}); + masterdb.getCollection(coll).insert({a: 0}); + for (i = 0; i < 500; i++) { + masterdb.getCollection(coll).update({}, {$inc: {a: 1}}); } replTest.awaitReplication(); - } - else if ( testnum == 3 ){ + } else if (testnum == 3) { // explicitly set autoIndexId : false - masterdb.runCommand( {create : coll , capped : true , size : 1024 , autoIndexId : false } ); - for(i=0; i < 500 ; i++){ - masterdb.getCollection( coll ).insert( {a: 1000} ); + masterdb.runCommand({create: coll, capped: true, size: 1024, autoIndexId: false}); + for (i = 0; i < 500; i++) { + masterdb.getCollection(coll).insert({a: 1000}); } replTest.awaitReplication(); - assert.eq( 0 , - countIdIndexes(masterdb, coll), - "master has an _id index on capped collection when autoIndexId is false"); - assert.eq( 0 , - countIdIndexes(slave1db, coll), - "slave1 has an _id index on capped collection when autoIndexId is false"); - assert.eq( 0 , - countIdIndexes(slave2db, coll), - "slave2 has an _id index on capped collection when autoIndexId is false"); + assert.eq(0, + countIdIndexes(masterdb, coll), + "master has an _id index on capped collection when autoIndexId is false"); + assert.eq(0, + countIdIndexes(slave1db, coll), + "slave1 has an _id index on capped collection when autoIndexId is false"); + assert.eq(0, + countIdIndexes(slave2db, coll), + "slave2 has an _id index on capped collection when autoIndexId is false"); // now create the index and make sure it works - masterdb.getCollection( coll ).ensureIndex( { "_id" : 1 } ); + masterdb.getCollection(coll).ensureIndex({"_id": 1}); replTest.awaitReplication(); } @@ -132,20 +128,14 @@ for( testnum=0; testnum < numtests; testnum++ ){ print(""); // ensure all nodes have _id index - assert.eq( 1 , - countIdIndexes(masterdb, coll), - "master has an _id index on capped collection"); - assert.eq( 1 , - countIdIndexes(slave1db, coll), - "slave1 does not have _id index on capped collection"); - assert.eq( 1 , - countIdIndexes(slave2db, coll), - "slave2 does not have _id index on capped collection"); + assert.eq(1, countIdIndexes(masterdb, coll), "master has an _id index on capped collection"); + assert.eq( + 1, countIdIndexes(slave1db, coll), "slave1 does not have _id index on capped collection"); + assert.eq( + 1, countIdIndexes(slave2db, coll), "slave2 does not have _id index on capped collection"); print("capped_id.js Test # " + testnum + " SUCCESS"); } -//Finally, stop set +// Finally, stop set replTest.stopSet(); - - diff --git a/jstests/replsets/capped_insert_order.js b/jstests/replsets/capped_insert_order.js index 9b39021732c..0b17f9ff144 100644 --- a/jstests/replsets/capped_insert_order.js +++ b/jstests/replsets/capped_insert_order.js @@ -20,7 +20,7 @@ var slaveColl = slaveDb[collectionName]; // Making a large capped collection to ensure that every document fits. - masterDb.createCollection(collectionName, {capped: true, size: 1024*1024}); + masterDb.createCollection(collectionName, {capped: true, size: 1024 * 1024}); // Insert 1000 docs with _id from 0 to 999 inclusive. 
const nDocuments = 1000; diff --git a/jstests/replsets/chaining_removal.js b/jstests/replsets/chaining_removal.js index 79fc89c8ecd..29b50609754 100644 --- a/jstests/replsets/chaining_removal.js +++ b/jstests/replsets/chaining_removal.js @@ -9,15 +9,16 @@ var replTest = new ReplSetTest({name: name, nodes: numNodes}); var nodes = replTest.startSet(); var port = replTest.ports; - replTest.initiate({_id: name, members: - [ - {_id: 0, host: nodes[0].host, priority: 3}, - {_id: 1, host: nodes[1].host, priority: 0}, - {_id: 2, host: nodes[2].host, priority: 0}, - {_id: 3, host: nodes[3].host, priority: 0}, - {_id: 4, host: nodes[4].host, priority: 0}, - ], - }); + replTest.initiate({ + _id: name, + members: [ + {_id: 0, host: nodes[0].host, priority: 3}, + {_id: 1, host: nodes[1].host, priority: 0}, + {_id: 2, host: nodes[2].host, priority: 0}, + {_id: 3, host: nodes[3].host, priority: 0}, + {_id: 4, host: nodes[4].host, priority: 0}, + ], + }); replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var primary = replTest.getPrimary(); replTest.awaitReplication(); @@ -25,21 +26,31 @@ // Force node 1 to sync directly from node 0. assert.commandWorked(nodes[1].getDB("admin").runCommand({"replSetSyncFrom": nodes[0].host})); var res; - assert.soon(function() { - res = nodes[1].getDB("admin").runCommand({"replSetGetStatus": 1}); - return res.syncingTo === nodes[0].host; - }, function() { return "node 1 failed to start syncing from node 0: " + tojson(res); } ); + assert.soon( + function() { + res = nodes[1].getDB("admin").runCommand({"replSetGetStatus": 1}); + return res.syncingTo === nodes[0].host; + }, + function() { + return "node 1 failed to start syncing from node 0: " + tojson(res); + }); // Force node 4 to sync through node 1. assert.commandWorked(nodes[4].getDB("admin").runCommand({"replSetSyncFrom": nodes[1].host})); - assert.soon(function() { - res = nodes[4].getDB("admin").runCommand({"replSetGetStatus": 1}); - return res.syncingTo === nodes[1].host; - }, function() { return "node 4 failed to start chaining through node 1: " + tojson(res); } ); + assert.soon( + function() { + res = nodes[4].getDB("admin").runCommand({"replSetGetStatus": 1}); + return res.syncingTo === nodes[1].host; + }, + function() { + return "node 4 failed to start chaining through node 1: " + tojson(res); + }); // write that should reach all nodes var timeout = 15 * 1000; - var options = {writeConcern: {w: numNodes, wtimeout: timeout}}; + var options = { + writeConcern: {w: numNodes, wtimeout: timeout} + }; assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options)); var config = primary.getDB("local").system.replset.findOne(); @@ -48,10 +59,9 @@ // remove node 4 replTest.stop(4); try { - primary.adminCommand({replSetReconfig:config}); - } - catch (e) { - print("error: "+e); + primary.adminCommand({replSetReconfig: config}); + } catch (e) { + print("error: " + e); } // ensure writing to all four nodes still works @@ -59,6 +69,6 @@ replTest.awaitReplication(); options.writeConcern.w = 4; assert.writeOK(primary.getDB(name).foo.insert({x: 2}, options)); - + replTest.stopSet(); }()); diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js index ca53d370f4a..f0eb6a2171e 100644 --- a/jstests/replsets/cloneDb.js +++ b/jstests/replsets/cloneDb.js @@ -4,95 +4,104 @@ (function() { "use strict"; -if (jsTest.options().keyFile) { - jsTest.log("Skipping test because clone command doesn't work with authentication enabled:" + - " SERVER-4245"); -} else { - var numDocs = 2000; - - // 1kb string - var 
str = new Array(1000).toString(); - - var replsetDBName = 'cloneDBreplset'; - var standaloneDBName = 'cloneDBstandalone'; - var testColName = 'foo'; - - jsTest.log("Create replica set"); - var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); - replTest.startSet(); - replTest.initiate(); - var master = replTest.getPrimary(); - var secondary = replTest.liveNodes.slaves[0]; - var masterDB = master.getDB(replsetDBName); - masterDB.dropDatabase(); - - jsTest.log("Create standalone server"); - var standalone = MongoRunner.runMongod(); - standalone.getDB("admin").runCommand({setParameter:1,logLevel:5}); - var standaloneDB = standalone.getDB(replsetDBName); - standaloneDB.dropDatabase(); - - jsTest.log("Insert data into replica set"); - var bulk = masterDB[testColName].initializeUnorderedBulkOp(); - for (var i = 0; i < numDocs; i++) { - bulk.insert({x: i, text: str}); - } - assert.writeOK(bulk.execute({w: 3})); - - jsTest.log("Clone db from replica set to standalone server"); - standaloneDB.cloneDatabase(replTest.getURL()); - assert.eq(numDocs, standaloneDB[testColName].count(), - 'cloneDatabase from replset to standalone failed (document counts do not match)'); - - jsTest.log("Clone db from replica set PRIMARY to standalone server"); - standaloneDB.dropDatabase(); - standaloneDB.cloneDatabase(master.host); - assert.eq(numDocs, standaloneDB[testColName].count(), - 'cloneDatabase from PRIMARY to standalone failed (document counts do not match)'); - - jsTest.log("Clone db from replica set SECONDARY to standalone server (should not copy)"); - standaloneDB.dropDatabase(); - standaloneDB.cloneDatabase(secondary.host); - assert.eq(0, standaloneDB[testColName].count(), - 'cloneDatabase from SECONDARY to standalone copied documents without slaveOk: true'); - - jsTest.log("Clone db from replica set SECONDARY to standalone server using slaveOk"); - standaloneDB.dropDatabase(); - standaloneDB.runCommand({clone: secondary.host, slaveOk: true}); - assert.eq(numDocs, standaloneDB[testColName].count(), - 'cloneDatabase from SECONDARY to standalone failed (document counts do not match)'); - - jsTest.log("Switch db and insert data into standalone server"); - masterDB = master.getDB(standaloneDBName); - var secondaryDB = secondary.getDB(standaloneDBName); - standaloneDB = standalone.getDB(standaloneDBName); - masterDB.dropDatabase(); - secondaryDB.dropDatabase(); - standaloneDB.dropDatabase(); - - bulk = standaloneDB[testColName].initializeUnorderedBulkOp(); - for (var i = 0; i < numDocs; i++) { - bulk.insert({x: i, text: str}); + if (jsTest.options().keyFile) { + jsTest.log("Skipping test because clone command doesn't work with authentication enabled:" + + " SERVER-4245"); + } else { + var numDocs = 2000; + + // 1kb string + var str = new Array(1000).toString(); + + var replsetDBName = 'cloneDBreplset'; + var standaloneDBName = 'cloneDBstandalone'; + var testColName = 'foo'; + + jsTest.log("Create replica set"); + var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); + replTest.startSet(); + replTest.initiate(); + var master = replTest.getPrimary(); + var secondary = replTest.liveNodes.slaves[0]; + var masterDB = master.getDB(replsetDBName); + masterDB.dropDatabase(); + + jsTest.log("Create standalone server"); + var standalone = MongoRunner.runMongod(); + standalone.getDB("admin").runCommand({setParameter: 1, logLevel: 5}); + var standaloneDB = standalone.getDB(replsetDBName); + standaloneDB.dropDatabase(); + + jsTest.log("Insert data into replica set"); + var bulk = 
masterDB[testColName].initializeUnorderedBulkOp(); + for (var i = 0; i < numDocs; i++) { + bulk.insert({x: i, text: str}); + } + assert.writeOK(bulk.execute({w: 3})); + + jsTest.log("Clone db from replica set to standalone server"); + standaloneDB.cloneDatabase(replTest.getURL()); + assert.eq(numDocs, + standaloneDB[testColName].count(), + 'cloneDatabase from replset to standalone failed (document counts do not match)'); + + jsTest.log("Clone db from replica set PRIMARY to standalone server"); + standaloneDB.dropDatabase(); + standaloneDB.cloneDatabase(master.host); + assert.eq(numDocs, + standaloneDB[testColName].count(), + 'cloneDatabase from PRIMARY to standalone failed (document counts do not match)'); + + jsTest.log("Clone db from replica set SECONDARY to standalone server (should not copy)"); + standaloneDB.dropDatabase(); + standaloneDB.cloneDatabase(secondary.host); + assert.eq( + 0, + standaloneDB[testColName].count(), + 'cloneDatabase from SECONDARY to standalone copied documents without slaveOk: true'); + + jsTest.log("Clone db from replica set SECONDARY to standalone server using slaveOk"); + standaloneDB.dropDatabase(); + standaloneDB.runCommand({clone: secondary.host, slaveOk: true}); + assert.eq( + numDocs, + standaloneDB[testColName].count(), + 'cloneDatabase from SECONDARY to standalone failed (document counts do not match)'); + + jsTest.log("Switch db and insert data into standalone server"); + masterDB = master.getDB(standaloneDBName); + var secondaryDB = secondary.getDB(standaloneDBName); + standaloneDB = standalone.getDB(standaloneDBName); + masterDB.dropDatabase(); + secondaryDB.dropDatabase(); + standaloneDB.dropDatabase(); + + bulk = standaloneDB[testColName].initializeUnorderedBulkOp(); + for (var i = 0; i < numDocs; i++) { + bulk.insert({x: i, text: str}); + } + assert.writeOK(bulk.execute()); + + jsTest.log("Clone db from standalone server to replica set PRIMARY"); + masterDB.cloneDatabase(standalone.host); + replTest.awaitReplication(); + assert.eq(numDocs, + masterDB[testColName].count(), + 'cloneDatabase from standalone to PRIMARY failed (document counts do not match)'); + + jsTest.log("Clone db from standalone server to replica set SECONDARY"); + masterDB.dropDatabase(); + replTest.awaitReplication(); + secondaryDB.cloneDatabase(standalone.host); + assert.eq( + 0, + secondaryDB[testColName].count(), + 'cloneDatabase from standalone to SECONDARY succeeded and should not accept writes'); + + jsTest.log("Shut down replica set and standalone server"); + MongoRunner.stopMongod(standalone.port); + + replTest.stopSet(); } - assert.writeOK(bulk.execute()); - - jsTest.log("Clone db from standalone server to replica set PRIMARY"); - masterDB.cloneDatabase(standalone.host); - replTest.awaitReplication(); - assert.eq(numDocs, masterDB[testColName].count(), - 'cloneDatabase from standalone to PRIMARY failed (document counts do not match)'); - - jsTest.log("Clone db from standalone server to replica set SECONDARY"); - masterDB.dropDatabase(); - replTest.awaitReplication(); - secondaryDB.cloneDatabase(standalone.host); - assert.eq(0, secondaryDB[testColName].count(), - 'cloneDatabase from standalone to SECONDARY succeeded and should not accept writes'); - - jsTest.log("Shut down replica set and standalone server"); - MongoRunner.stopMongod(standalone.port); - - replTest.stopSet(); -} })(); diff --git a/jstests/replsets/config_server_checks.js b/jstests/replsets/config_server_checks.js index 2c6128d75e8..66d30535677 100644 --- a/jstests/replsets/config_server_checks.js +++ 
b/jstests/replsets/config_server_checks.js @@ -5,149 +5,157 @@ function expectState(rst, state) { assert.soon(function() { - var status = rst.status(); - if (status.myState != state) { - print("Waiting for state " + state + - " in replSetGetStatus output: " + tojson(status)); - } - return status.myState == state; - }); + var status = rst.status(); + if (status.myState != state) { + print("Waiting for state " + state + " in replSetGetStatus output: " + tojson(status)); + } + return status.myState == state; + }); } (function() { -"use strict"; - -(function() { -// Test that node with --configsvr cmd line and configsvr in replset config goes -// into REMOVED state if storage engine is not WiredTiger -jsTestLog("configsvr in rs config and --configsvr cmd line, but mmapv1"); -var rst = new ReplSetTest({name: "configrs3", nodes: 1, nodeOptions: {configsvr: "", - journal: "", - storageEngine: "mmapv1"}}); - -rst.startSet(); -var conf = rst.getReplSetConfig(); -conf.configsvr = true; -try { - rst.nodes[0].adminCommand({replSetInitiate: conf}); -} catch (e) { - // expected since we close all connections after going into REMOVED -} -expectState(rst, ReplSetTest.State.REMOVED); -rst.stopSet(); -})(); - -(function() { -// Test that node with --configsvr cmd line and configsvr in replset config does NOT go -// into REMOVED state if storage engine is not WiredTiger but we're running in SCCC mode -jsTestLog("configsvr in rs config and --configsvr cmd line, but mmapv1 with configSvrMode=sccc"); -var rst = new ReplSetTest({name: "configrs4", nodes: 1, nodeOptions: {configsvr: "", - journal: "", - storageEngine: "mmapv1", - configsvrMode: "sccc"}}); - -rst.startSet(); -var conf = rst.getReplSetConfig(); -conf.configsvr = true; -assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); - -rst.getPrimary(); -expectState(rst, ReplSetTest.State.PRIMARY); -rst.stopSet(); -})(); - -(function() { -// Test that node with --configsvr cmd line and configsvr in replset config and using wiredTiger -// does NOT go into REMOVED state. -jsTestLog("configsvr in rs config and --configsvr cmd line, normal case"); -var rst = new ReplSetTest({name: "configrs5", - nodes: 1, - nodeOptions: {configsvr: "", - journal: "", - storageEngine: "wiredTiger"}}); - -rst.startSet(); -var conf = rst.getReplSetConfig(); -conf.configsvr = true; -assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); - -rst.getPrimary(); -expectState(rst, ReplSetTest.State.PRIMARY); - -var conf = rst.getPrimary().getDB('local').system.replset.findOne(); -assert(conf.configsvr, tojson(conf)); - -rst.stopSet(); -})(); - -(function() { -// Test that node with --configsvr cmd line and initiated with an empty replset config -// will result in configsvr:true getting automatically added to the config (SERVER-20247). -jsTestLog("--configsvr cmd line, empty config to replSetInitiate"); -var rst = new ReplSetTest({name: "configrs6", - nodes: 1, - nodeOptions: {configsvr: "", - journal: "", - storageEngine: "wiredTiger"}}); - -rst.startSet(); -assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: 1})); - -rst.getPrimary(); -expectState(rst, ReplSetTest.State.PRIMARY); -rst.stopSet(); -})(); - -(function() { -// Test that a set initialized without --configsvr but then restarted with --configsvr will fail to -// start up and won't automatically add "configsvr" to the replset config (SERVER-21236). 
-jsTestLog("set initiated without configsvr, restarted adding --configsvr cmd line"); -var rst = new ReplSetTest({name: "configrs7", - nodes: 1, - nodeOptions: {journal: "", - storageEngine: "wiredTiger"}}); - -rst.startSet(); -var conf = rst.getReplSetConfig(); -assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); - -rst.getPrimary(); -expectState(rst, ReplSetTest.State.PRIMARY); -assert.throws(function() { - rst.restart(0, {configsvr: ""}); - }); - -rst.stopSet(); -})(); - -(function() { -// Test that a set initialized with --configsvr but then restarted without --configsvr will fail to -// start up. -jsTestLog("set initiated with configsvr, restarted without --configsvr cmd line"); -var rst = new ReplSetTest({name: "configrs8", - nodes: 1, - nodeOptions: {configsvr: "", - journal: "", - storageEngine: "wiredTiger"}}); - -rst.startSet(); -var conf = rst.getReplSetConfig(); -conf.configsvr = true; -assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); - -rst.getPrimary(); -expectState(rst, ReplSetTest.State.PRIMARY); - -var node = rst.nodes[0]; -var options = node.savedOptions; -delete options.configsvr; -options.noCleanData = true; - -MongoRunner.stopMongod(node); -var conn = MongoRunner.runMongod(options); -assert.eq(null, conn, "Mongod should have failed to start, but didn't"); - -rst.stopSet(); -})(); + "use strict"; + + (function() { + // Test that node with --configsvr cmd line and configsvr in replset config goes + // into REMOVED state if storage engine is not WiredTiger + jsTestLog("configsvr in rs config and --configsvr cmd line, but mmapv1"); + var rst = new ReplSetTest({ + name: "configrs3", + nodes: 1, + nodeOptions: {configsvr: "", journal: "", storageEngine: "mmapv1"} + }); + + rst.startSet(); + var conf = rst.getReplSetConfig(); + conf.configsvr = true; + try { + rst.nodes[0].adminCommand({replSetInitiate: conf}); + } catch (e) { + // expected since we close all connections after going into REMOVED + } + expectState(rst, ReplSetTest.State.REMOVED); + rst.stopSet(); + })(); + + (function() { + // Test that node with --configsvr cmd line and configsvr in replset config does NOT go + // into REMOVED state if storage engine is not WiredTiger but we're running in SCCC mode + jsTestLog( + "configsvr in rs config and --configsvr cmd line, but mmapv1 with configSvrMode=sccc"); + var rst = new ReplSetTest({ + name: "configrs4", + nodes: 1, + nodeOptions: + {configsvr: "", journal: "", storageEngine: "mmapv1", configsvrMode: "sccc"} + }); + + rst.startSet(); + var conf = rst.getReplSetConfig(); + conf.configsvr = true; + assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); + + rst.getPrimary(); + expectState(rst, ReplSetTest.State.PRIMARY); + rst.stopSet(); + })(); + + (function() { + // Test that node with --configsvr cmd line and configsvr in replset config and using + // wiredTiger + // does NOT go into REMOVED state. 
+ jsTestLog("configsvr in rs config and --configsvr cmd line, normal case"); + var rst = new ReplSetTest({ + name: "configrs5", + nodes: 1, + nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"} + }); + + rst.startSet(); + var conf = rst.getReplSetConfig(); + conf.configsvr = true; + assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); + + rst.getPrimary(); + expectState(rst, ReplSetTest.State.PRIMARY); + + var conf = rst.getPrimary().getDB('local').system.replset.findOne(); + assert(conf.configsvr, tojson(conf)); + + rst.stopSet(); + })(); + + (function() { + // Test that node with --configsvr cmd line and initiated with an empty replset config + // will result in configsvr:true getting automatically added to the config (SERVER-20247). + jsTestLog("--configsvr cmd line, empty config to replSetInitiate"); + var rst = new ReplSetTest({ + name: "configrs6", + nodes: 1, + nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"} + }); + + rst.startSet(); + assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: 1})); + + rst.getPrimary(); + expectState(rst, ReplSetTest.State.PRIMARY); + rst.stopSet(); + })(); + + (function() { + // Test that a set initialized without --configsvr but then restarted with --configsvr will + // fail to + // start up and won't automatically add "configsvr" to the replset config (SERVER-21236). + jsTestLog("set initiated without configsvr, restarted adding --configsvr cmd line"); + var rst = new ReplSetTest({ + name: "configrs7", + nodes: 1, + nodeOptions: {journal: "", storageEngine: "wiredTiger"} + }); + + rst.startSet(); + var conf = rst.getReplSetConfig(); + assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); + + rst.getPrimary(); + expectState(rst, ReplSetTest.State.PRIMARY); + assert.throws(function() { + rst.restart(0, {configsvr: ""}); + }); + + rst.stopSet(); + })(); + + (function() { + // Test that a set initialized with --configsvr but then restarted without --configsvr will + // fail to + // start up. 
+ jsTestLog("set initiated with configsvr, restarted without --configsvr cmd line"); + var rst = new ReplSetTest({ + name: "configrs8", + nodes: 1, + nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"} + }); + + rst.startSet(); + var conf = rst.getReplSetConfig(); + conf.configsvr = true; + assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); + + rst.getPrimary(); + expectState(rst, ReplSetTest.State.PRIMARY); + + var node = rst.nodes[0]; + var options = node.savedOptions; + delete options.configsvr; + options.noCleanData = true; + + MongoRunner.stopMongod(node); + var conn = MongoRunner.runMongod(options); + assert.eq(null, conn, "Mongod should have failed to start, but didn't"); + + rst.stopSet(); + })(); })(); diff --git a/jstests/replsets/copydb.js b/jstests/replsets/copydb.js index 59730f70084..dcbe1deefc2 100644 --- a/jstests/replsets/copydb.js +++ b/jstests/replsets/copydb.js @@ -27,14 +27,16 @@ assert.commandWorked(primarySourceDB.foo.ensureIndex({a: 1}), 'failed to create index in source collection on primary'); - assert.eq(1, primarySourceDB.foo.find().itcount(), + assert.eq(1, + primarySourceDB.foo.find().itcount(), 'incorrect number of documents in source collection on primary before copy'); - assert.eq(0, primaryTargetDB.foo.find().itcount(), + assert.eq(0, + primaryTargetDB.foo.find().itcount(), 'target collection on primary should be empty before copy'); - assert.commandWorked(primarySourceDB.copyDatabase(primarySourceDB.getName(), - primaryTargetDB.getName()), - 'failed to copy database'); + assert.commandWorked( + primarySourceDB.copyDatabase(primarySourceDB.getName(), primaryTargetDB.getName()), + 'failed to copy database'); assert.eq(primarySourceDB.foo.find().itcount(), primaryTargetDB.foo.find().itcount(), diff --git a/jstests/replsets/disallow_adding_initialized_node1.js b/jstests/replsets/disallow_adding_initialized_node1.js index fe348a81e54..8d4491975b6 100644 --- a/jstests/replsets/disallow_adding_initialized_node1.js +++ b/jstests/replsets/disallow_adding_initialized_node1.js @@ -3,20 +3,16 @@ // Initialize two replica sets A and B with the same name: A_0; B_0 // Add B_0 to the replica set A. This operation should fail on replica set A should fail on // detecting an inconsistent replica set ID in the heartbeat response metadata from B_0. 
-(function () { +(function() { 'use strict'; var name = 'disallow_adding_initialized_node1'; - var replSetA = new ReplSetTest({name: name, nodes: [ - {rsConfig: {_id: 10}}, - ]}); - replSetA.startSet({dbpath : "$set-A-$node"}); + var replSetA = new ReplSetTest({name: name, nodes: [{rsConfig: {_id: 10}}, ]}); + replSetA.startSet({dbpath: "$set-A-$node"}); replSetA.initiate(); - var replSetB = new ReplSetTest({name: name, nodes: [ - {rsConfig: {_id: 20}}, - ]}); - replSetB.startSet({dbpath : "$set-B-$node"}); + var replSetB = new ReplSetTest({name: name, nodes: [{rsConfig: {_id: 20}}, ]}); + replSetB.startSet({dbpath: "$set-B-$node"}); replSetB.initiate(); var primaryA = replSetA.getPrimary(); @@ -34,12 +30,11 @@ jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config"); configA.version++; configA.members.push({_id: 11, host: primaryB.host}); - var reconfigResult = assert.commandFailedWithCode( - primaryA.adminCommand({replSetReconfig: configA}), - ErrorCodes.NewReplicaSetConfigurationIncompatible); - var msgA = - 'Our replica set ID of ' + configA.settings.replicaSetId + ' did not match that of ' + - primaryB.host + ', which is ' + configB.settings.replicaSetId; + var reconfigResult = + assert.commandFailedWithCode(primaryA.adminCommand({replSetReconfig: configA}), + ErrorCodes.NewReplicaSetConfigurationIncompatible); + var msgA = 'Our replica set ID of ' + configA.settings.replicaSetId + + ' did not match that of ' + primaryB.host + ', which is ' + configB.settings.replicaSetId; assert.neq(-1, reconfigResult.errmsg.indexOf(msgA)); var newPrimaryA = replSetA.getPrimary(); @@ -61,8 +56,7 @@ return false; }, 'Did not see a log entry containing the following message: ' + msg, 10000, 1000); }; - var msgB = - "replica set IDs do not match, ours: " + configB.settings.replicaSetId + + var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId + "; remote node's: " + configA.settings.replicaSetId; checkLog(primaryB, msgB); diff --git a/jstests/replsets/disallow_adding_initialized_node2.js b/jstests/replsets/disallow_adding_initialized_node2.js index cc1cd09bf1f..c4125f7c069 100644 --- a/jstests/replsets/disallow_adding_initialized_node2.js +++ b/jstests/replsets/disallow_adding_initialized_node2.js @@ -8,21 +8,17 @@ // This test requires users to persist across a restart. 
// @tags: [requires_persistence] -(function () { +(function() { 'use strict'; var name = 'disallow_adding_initialized_node2'; - var replSetA = new ReplSetTest({name: name, nodes: [ - {rsConfig: {_id: 10}}, - {rsConfig: {_id: 11, arbiterOnly: true}}, - ]}); - replSetA.startSet({dbpath : "$set-A-$node"}); + var replSetA = new ReplSetTest( + {name: name, nodes: [{rsConfig: {_id: 10}}, {rsConfig: {_id: 11, arbiterOnly: true}}, ]}); + replSetA.startSet({dbpath: "$set-A-$node"}); replSetA.initiate(); - var replSetB = new ReplSetTest({name: name, nodes: [ - {rsConfig: {_id: 20}}, - ]}); - replSetB.startSet({dbpath : "$set-B-$node"}); + var replSetB = new ReplSetTest({name: name, nodes: [{rsConfig: {_id: 20}}, ]}); + replSetB.startSet({dbpath: "$set-B-$node"}); replSetB.initiate(); var primaryA = replSetA.getPrimary(); @@ -46,7 +42,7 @@ assert.commandWorked(primaryA.adminCommand({replSetReconfig: configA})); jsTestLog("Restarting B's primary " + primaryB.host); - primaryB = replSetB.start(0, {dbpath : "$set-B-$node", restart: true}); + primaryB = replSetB.start(0, {dbpath: "$set-B-$node", restart: true}); var newPrimaryA = replSetA.getPrimary(); var newPrimaryB = replSetB.getPrimary(); @@ -67,11 +63,9 @@ return false; }, 'Did not see a log entry containing the following message: ' + msg, 10000, 1000); }; - var msgA = - "replica set IDs do not match, ours: " + configA.settings.replicaSetId + + var msgA = "replica set IDs do not match, ours: " + configA.settings.replicaSetId + "; remote node's: " + configB.settings.replicaSetId; - var msgB = - "replica set IDs do not match, ours: " + configB.settings.replicaSetId + + var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId + "; remote node's: " + configA.settings.replicaSetId; checkLog(primaryA, msgA); checkLog(primaryB, msgB); diff --git a/jstests/replsets/drain.js b/jstests/replsets/drain.js index 95472471f48..5d20ff6a9d6 100644 --- a/jstests/replsets/drain.js +++ b/jstests/replsets/drain.js @@ -9,17 +9,19 @@ // 7. Enable applying ops. // 8. Ensure the ops in queue are applied and that the PRIMARY begins to accept writes as usual. 
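// A minimal sketch, assuming `secondary` is a connection to the SECONDARY above, of
// the fail point this test toggles: while 'rsSyncApplyStop' is 'alwaysOn' the oplog
// applier is paused, so fetched ops pile up in metrics.repl.buffer.count instead of
// being applied, which is what later forces the new primary through drain mode.
var adminDB = secondary.getDB('admin');
assert.commandWorked(
    adminDB.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
// ... writes on the primary now accumulate in the secondary's buffer ...
assert.commandWorked(
    adminDB.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));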
-(function () { +(function() { "use strict"; var replSet = new ReplSetTest({name: 'testSet', nodes: 3}); var nodes = replSet.nodeList(); replSet.startSet(); - replSet.initiate({"_id" : "testSet", - "members" : [ - {"_id" : 0, "host" : nodes[0]}, - {"_id" : 1, "host" : nodes[1]}, - {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); - + replSet.initiate({ + "_id": "testSet", + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], "arbiterOnly": true} + ] + }); var primary = replSet.getPrimary(); var secondary = replSet.getSecondary(); @@ -28,18 +30,16 @@ // Do an initial insert to prevent the secondary from going into recovery var numDocuments = 20; var bulk = primary.getDB("foo").foo.initializeUnorderedBulkOp(); - var bigString = Array(1024*1024).toString(); - assert.writeOK(primary.getDB("foo").foo.insert({ big: bigString})); + var bigString = Array(1024 * 1024).toString(); + assert.writeOK(primary.getDB("foo").foo.insert({big: bigString})); replSet.awaitReplication(); - assert.commandWorked( - secondary.getDB("admin").runCommand({ - configureFailPoint: 'rsSyncApplyStop', - mode: 'alwaysOn'}), - 'failed to enable fail point on secondary'); + assert.commandWorked(secondary.getDB("admin").runCommand( + {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}), + 'failed to enable fail point on secondary'); var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count; for (var i = 1; i < numDocuments; ++i) { - bulk.insert({ big: bigString}); + bulk.insert({big: bigString}); } assert.writeOK(bulk.execute()); jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments); @@ -50,19 +50,19 @@ var bufferCount = serverStatus.metrics.repl.buffer.count; var bufferCountChange = bufferCount - bufferCountBefore; jsTestLog('Number of operations buffered on secondary since stopping applier: ' + - bufferCountChange); + bufferCountChange); return bufferCountChange >= numDocuments - 1; }, 'secondary did not buffer operations for new inserts on primary', 30000, 1000); // Kill primary; secondary will enter drain mode to catch up - primary.getDB("admin").shutdownServer({force:true}); + primary.getDB("admin").shutdownServer({force: true}); - var electionTimeout = (isPV0 ? 60 : 20 ) * 1000; // Timeout in milliseconds + var electionTimeout = (isPV0 ? 60 : 20) * 1000; // Timeout in milliseconds replSet.waitForState(secondary, ReplSetTest.State.PRIMARY, electionTimeout); // Ensure new primary is not yet writable jsTestLog('New primary should not be writable yet'); - assert.writeError(secondary.getDB("foo").flag.insert({sentinel:2})); + assert.writeError(secondary.getDB("foo").flag.insert({sentinel: 2})); assert(!secondary.getDB("admin").runCommand({"isMaster": 1}).ismaster); // Ensure new primary is not yet readable without slaveOk bit. @@ -70,14 +70,16 @@ jsTestLog('New primary should not be readable yet, without slaveOk bit'); var res = secondary.getDB("foo").runCommand({find: "foo"}); assert.commandFailed(res); - assert.eq(ErrorCodes.NotMasterNoSlaveOk, res.code, - "find failed with unexpected error code: " + tojson(res)); + assert.eq(ErrorCodes.NotMasterNoSlaveOk, + res.code, + "find failed with unexpected error code: " + tojson(res)); // Nor should it be readable with the slaveOk bit. 
secondary.slaveOk = true; res = secondary.getDB("foo").runCommand({find: "foo"}); assert.commandFailed(res); - assert.eq(ErrorCodes.NotMasterOrSecondary, res.code, - "find failed with unexpected error code: " + tojson(res)); + assert.eq(ErrorCodes.NotMasterOrSecondary, + res.code, + "find failed with unexpected error code: " + tojson(res)); secondary.slaveOk = false; assert.commandFailedWithCode( @@ -86,8 +88,7 @@ waitForDrainFinish: 5000, }), ErrorCodes.ExceededTimeLimit, - 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete' - ); + 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete'); // Allow draining to complete jsTestLog('Disabling fail point on new primary to allow draining to complete'); @@ -95,18 +96,17 @@ secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}), 'failed to disable fail point on new primary'); primary = replSet.getPrimary(); - + assert.commandWorked( secondary.adminCommand({ replSetTest: 1, waitForDrainFinish: 5000, }), - 'replSetTest waitForDrainFinish should work when draining is allowed to complete' - ); + 'replSetTest waitForDrainFinish should work when draining is allowed to complete'); // Ensure new primary is writable jsTestLog('New primary should be writable after draining is complete'); - assert.writeOK(primary.getDB("foo").flag.insert({sentinel:1})); + assert.writeOK(primary.getDB("foo").flag.insert({sentinel: 1})); // Check for at least two entries. There was one prior to freezing op application on the // secondary and we cannot guarantee all writes reached the secondary's op queue prior to // shutting down the original primary. diff --git a/jstests/replsets/drop_oplog.js b/jstests/replsets/drop_oplog.js index 8a84bb2050e..ddac3904457 100644 --- a/jstests/replsets/drop_oplog.js +++ b/jstests/replsets/drop_oplog.js @@ -1,17 +1,17 @@ // Test that dropping either the replset oplog or the local database is prohibited in a replset. -(function () { +(function() { "use strict"; - var rt = new ReplSetTest( { name : "drop_oplog" , nodes: 1, oplogSize: 30 } ); + var rt = new ReplSetTest({name: "drop_oplog", nodes: 1, oplogSize: 30}); var nodes = rt.startSet(); rt.initiate(); var master = rt.getPrimary(); - var ml = master.getDB( 'local' ); + var ml = master.getDB('local'); var threw = false; - var ret = assert.commandFailed(ml.runCommand({ drop: 'oplog.rs' })); + var ret = assert.commandFailed(ml.runCommand({drop: 'oplog.rs'})); assert.eq('can\'t drop live oplog while replicating', ret.errmsg); var dropOutput = ml.dropDatabase(); @@ -20,13 +20,11 @@ var renameOutput = ml.oplog.rs.renameCollection("poison"); assert.eq(renameOutput.ok, 0); - assert.eq(renameOutput.errmsg, - "can't rename live oplog while replicating"); + assert.eq(renameOutput.errmsg, "can't rename live oplog while replicating"); - assert.writeOK(ml.foo.insert( {a:1} )); + assert.writeOK(ml.foo.insert({a: 1})); renameOutput = ml.foo.renameCollection("oplog.rs"); assert.eq(renameOutput.ok, 0); - assert.eq(renameOutput.errmsg, - "can't rename to live oplog while replicating"); + assert.eq(renameOutput.errmsg, "can't rename to live oplog while replicating"); }()); diff --git a/jstests/replsets/election_id.js b/jstests/replsets/election_id.js index 917be72d561..ff079ca1172 100644 --- a/jstests/replsets/election_id.js +++ b/jstests/replsets/election_id.js @@ -8,94 +8,93 @@ load("jstests/replsets/rslib.js"); // any PV0 election id. On downgrade, the election id will be updated to old PV0 format. 
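// A minimal sketch of the PV1 election id layout that checkPV1ElectionId() below
// asserts on: the 24-digit ObjectId string opens with the fixed word "7fffffff"
// and, for the small terms this test produces, ends with the term's decimal digits.
// `idStr` here is constructed by hand for illustration, not read from a live node.
var term = 7;
var idStr = "7fffffff" + "000000000000000" + term;  // 8 + 15 + 1 = 24 hex digits
assert.eq("7fffffff", idStr.slice(0, 8));                 // fixed high word
assert.eq("" + term, idStr.slice(-("" + term).length));   // term in the low digits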
(function() { -"use strict"; - -function checkPV1ElectionId(electionId) { - var electionIdStr = electionId.valueOf(); - assert.eq(electionIdStr.slice(0, 8), "7fffffff"); - var res = assert.commandWorked(rst.getPrimary().adminCommand({replSetGetStatus: 1})); - var termStr = "" + res.term; - assert.eq(electionIdStr.slice(-termStr.length), termStr); -} - -var name = "election_id"; -var rst = new ReplSetTest({name: name, nodes: 3}); - -rst.startSet(); -// Initiate the replset in protocol version 0. -var conf = rst.getReplSetConfig(); -conf.protocolVersion = 0; -rst.initiate(conf); -rst.awaitSecondaryNodes(); - -var primary = rst.getPrimary(); -var primaryColl = primary.getDB("test").coll; - -// Do a write, this will set up sync sources on secondaries. -assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: 3}})); - -var res = assert.commandWorked(primary.adminCommand({serverStatus: 1})); -var oldElectionId = res.repl.electionId; - -// Upgrade protocol version -// -conf = rst.getReplSetConfigFromNode(); -conf.protocolVersion = 1; -conf.version++; -reconfig(rst, conf); -// This write will block until all nodes finish upgrade. -assert.writeOK(primaryColl.insert({x: 2}, {writeConcern: {w: 3}})); - -// Check election id after upgrade -res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1})); -var newElectionId = res.repl.electionId; -assert.lt(oldElectionId.valueOf(), newElectionId.valueOf()); -checkPV1ElectionId(newElectionId); -oldElectionId = newElectionId; - -// Step down -assert.throws(function() { - var res = primary.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 30}); - // Error out if stepdown command failed to run and throw. - printjson(res); -}); -rst.awaitSecondaryNodes(); -res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1})); -var newElectionId = res.repl.electionId; - -// Compare the string of ObjectId -assert.lt(oldElectionId.valueOf(), newElectionId.valueOf()); -checkPV1ElectionId(newElectionId); -oldElectionId = newElectionId; - - -// Downgrade protocol version -// -conf = rst.getReplSetConfigFromNode(); -conf.protocolVersion = 0; -conf.version++; -reconfig(rst, conf); -// This write will block until all nodes finish upgrade. -assert.writeOK(rst.getPrimary().getDB("test").coll.insert({x: 2}, {writeConcern: {w: 3}})); - -// Check election id after downgrade -res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1})); -var newElectionId = res.repl.electionId; -// new election id in PV0 is less than the old one in PV1. -assert.gt(oldElectionId.valueOf(), newElectionId.valueOf()); -oldElectionId = newElectionId; - - -// Step down -assert.throws(function() { - var res = rst.getPrimary().adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 30}); - // Error out if stepdown command failed to run and throw. 
- printjson(res); -}); -rst.awaitSecondaryNodes(); -res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1})); -var newElectionId = res.repl.electionId; -assert.lt(oldElectionId.valueOf(), newElectionId.valueOf()); -oldElectionId = newElectionId; + "use strict"; + + function checkPV1ElectionId(electionId) { + var electionIdStr = electionId.valueOf(); + assert.eq(electionIdStr.slice(0, 8), "7fffffff"); + var res = assert.commandWorked(rst.getPrimary().adminCommand({replSetGetStatus: 1})); + var termStr = "" + res.term; + assert.eq(electionIdStr.slice(-termStr.length), termStr); + } + + var name = "election_id"; + var rst = new ReplSetTest({name: name, nodes: 3}); + + rst.startSet(); + // Initiate the replset in protocol version 0. + var conf = rst.getReplSetConfig(); + conf.protocolVersion = 0; + rst.initiate(conf); + rst.awaitSecondaryNodes(); + + var primary = rst.getPrimary(); + var primaryColl = primary.getDB("test").coll; + + // Do a write, this will set up sync sources on secondaries. + assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: 3}})); + + var res = assert.commandWorked(primary.adminCommand({serverStatus: 1})); + var oldElectionId = res.repl.electionId; + + // Upgrade protocol version + // + conf = rst.getReplSetConfigFromNode(); + conf.protocolVersion = 1; + conf.version++; + reconfig(rst, conf); + // This write will block until all nodes finish upgrade. + assert.writeOK(primaryColl.insert({x: 2}, {writeConcern: {w: 3}})); + + // Check election id after upgrade + res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1})); + var newElectionId = res.repl.electionId; + assert.lt(oldElectionId.valueOf(), newElectionId.valueOf()); + checkPV1ElectionId(newElectionId); + oldElectionId = newElectionId; + + // Step down + assert.throws(function() { + var res = primary.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 30}); + // Error out if stepdown command failed to run and throw. + printjson(res); + }); + rst.awaitSecondaryNodes(); + res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1})); + var newElectionId = res.repl.electionId; + + // Compare the string of ObjectId + assert.lt(oldElectionId.valueOf(), newElectionId.valueOf()); + checkPV1ElectionId(newElectionId); + oldElectionId = newElectionId; + + // Downgrade protocol version + // + conf = rst.getReplSetConfigFromNode(); + conf.protocolVersion = 0; + conf.version++; + reconfig(rst, conf); + // This write will block until all nodes finish upgrade. + assert.writeOK(rst.getPrimary().getDB("test").coll.insert({x: 2}, {writeConcern: {w: 3}})); + + // Check election id after downgrade + res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1})); + var newElectionId = res.repl.electionId; + // new election id in PV0 is less than the old one in PV1. + assert.gt(oldElectionId.valueOf(), newElectionId.valueOf()); + oldElectionId = newElectionId; + + // Step down + assert.throws(function() { + var res = + rst.getPrimary().adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 30}); + // Error out if stepdown command failed to run and throw. 
+ printjson(res); + }); + rst.awaitSecondaryNodes(); + res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1})); + var newElectionId = res.repl.electionId; + assert.lt(oldElectionId.valueOf(), newElectionId.valueOf()); + oldElectionId = newElectionId; })(); diff --git a/jstests/replsets/election_not_blocked.js b/jstests/replsets/election_not_blocked.js index ec916f72cf7..95b53be1ebc 100644 --- a/jstests/replsets/election_not_blocked.js +++ b/jstests/replsets/election_not_blocked.js @@ -1,5 +1,5 @@ /* Check that the fsyncLock'ed secondary will not veto an election - * + * * 1. start a three node set with a hidden, priority:0 node which we will fsyncLock * 2. do a write to master * 3. fsyncLock the hidden, priority:0 node @@ -10,24 +10,26 @@ (function() { "use strict"; var name = "electionNotBlocked"; - var replTest = new ReplSetTest({ name: name, nodes: 3 }); + var replTest = new ReplSetTest({name: name, nodes: 3}); var host = replTest.host; var nodes = replTest.startSet(); var port = replTest.ports; - replTest.initiate({_id: name, members: - [ - {_id: 0, host: host+":"+port[0], priority: 3}, - {_id: 1, host: host+":"+port[1]}, - {_id: 2, host: host+":"+port[2], hidden: true, priority: 0}, - ], - // In PV1, a voter writes the last vote to disk before granting the vote, - // so it cannot vote while fsync locked in PV1. Use PV0 explicitly here. - protocolVersion: 0}); + replTest.initiate({ + _id: name, + members: [ + {_id: 0, host: host + ":" + port[0], priority: 3}, + {_id: 1, host: host + ":" + port[1]}, + {_id: 2, host: host + ":" + port[2], hidden: true, priority: 0}, + ], + // In PV1, a voter writes the last vote to disk before granting the vote, + // so it cannot vote while fsync locked in PV1. Use PV0 explicitly here. + protocolVersion: 0 + }); replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); // do a write - assert.writeOK(master.getDB("foo").bar.insert({x:1}, {writeConcern: {w: 3}})); + assert.writeOK(master.getDB("foo").bar.insert({x: 1}, {writeConcern: {w: 3}})); var slave = replTest.liveNodes.slaves[0]; // lock secondary @@ -37,7 +39,7 @@ // take down master replTest.stop(0); - replTest.waitForState(slave, ReplSetTest.State.PRIMARY, 90*1000); + replTest.waitForState(slave, ReplSetTest.State.PRIMARY, 90 * 1000); locked.getDB("admin").fsyncUnlock(); replTest.stopSet(); diff --git a/jstests/replsets/explain_slaveok.js b/jstests/replsets/explain_slaveok.js index 93069e6ac01..8cd715af648 100644 --- a/jstests/replsets/explain_slaveok.js +++ b/jstests/replsets/explain_slaveok.js @@ -38,23 +38,13 @@ assert.eq(1, secondary.getDB("test").explain_slaveok.findOne({a: 1})["a"]); // // Explain a count on the primary. -var explainOut = primary.getDB("test").runCommand({ - explain: { - count: "explain_slaveok", - query: {a: 1} - }, - verbosity: "executionStats" -}); +var explainOut = primary.getDB("test").runCommand( + {explain: {count: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"}); assert.commandWorked(explainOut, "explain read op on primary"); // Explain an update on the primary. 
explainOut = primary.getDB("test").runCommand({ - explain: { - update: "explain_slaveok", - updates: [ - {q: {a: 1}, u: {$set: {a: 5}}} - ] - }, + explain: {update: "explain_slaveok", updates: [{q: {a: 1}, u: {$set: {a: 5}}}]}, verbosity: "executionStats" }); assert.commandWorked(explainOut, "explain write op on primary"); @@ -78,67 +68,54 @@ assert.eq(1, secondary.getDB("test").explain_slaveok.findOne({a: 1})["a"]); // Explain a count on the secondary with slaveOk off. Should fail because // slaveOk is required for explains on a secondary. secondary.getDB("test").getMongo().setSlaveOk(false); -explainOut = secondary.getDB("test").runCommand({ - explain: { - count: "explain_slaveok", - query: {a: 1} - }, - verbosity: "executionStats" -}); +explainOut = secondary.getDB("test").runCommand( + {explain: {count: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"}); assert.commandFailed(explainOut, "explain read op on secondary, slaveOk false"); // Explain of count should succeed once slaveOk is true. secondary.getDB("test").getMongo().setSlaveOk(true); -explainOut = secondary.getDB("test").runCommand({ - explain: { - count: "explain_slaveok", - query: {a: 1} - }, - verbosity: "executionStats" -}); +explainOut = secondary.getDB("test").runCommand( + {explain: {count: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"}); assert.commandWorked(explainOut, "explain read op on secondary, slaveOk true"); // Explain .find() on a secondary, setting slaveOk directly on the query. secondary.getDB("test").getMongo().setSlaveOk(false); assert.throws(function() { - secondary.getDB("test").explain_slaveok.explain("executionStats") - .find({a: 1}) - .finish(); + secondary.getDB("test").explain_slaveok.explain("executionStats").find({a: 1}).finish(); }); secondary.getDB("test").getMongo().setSlaveOk(false); -explainOut = secondary.getDB("test").explain_slaveok.explain("executionStats") - .find({a: 1}) - .addOption(DBQuery.Option.slaveOk) - .finish(); +explainOut = secondary.getDB("test") + .explain_slaveok.explain("executionStats") + .find({a: 1}) + .addOption(DBQuery.Option.slaveOk) + .finish(); assert.commandWorked(explainOut, "explain read op on secondary, slaveOk set to true on query"); secondary.getDB("test").getMongo().setSlaveOk(true); -explainOut = secondary.getDB("test").explain_slaveok.explain("executionStats") - .find({a: 1}) - .finish(); +explainOut = + secondary.getDB("test").explain_slaveok.explain("executionStats").find({a: 1}).finish(); assert.commandWorked(explainOut, "explain .find() on secondary, slaveOk set to true"); // Explain .find() on a secondary, setting slaveOk to false with various read preferences. var readPrefModes = ["secondary", "secondaryPreferred", "primaryPreferred", "nearest"]; readPrefModes.forEach(function(prefString) { secondary.getDB("test").getMongo().setSlaveOk(false); - explainOut = secondary.getDB("test").explain_slaveok.explain("executionStats") - .find({a: 1}) - .readPref(prefString) - .finish(); - assert.commandWorked(explainOut, "explain .find() on secondary, '" - + prefString - + "' read preference on query"); + explainOut = secondary.getDB("test") + .explain_slaveok.explain("executionStats") + .find({a: 1}) + .readPref(prefString) + .finish(); + assert.commandWorked( + explainOut, "explain .find() on secondary, '" + prefString + "' read preference on query"); // Similarly should succeed if a read preference is set on the connection. 
secondary.setReadPref(prefString); - explainOut = secondary.getDB("test").explain_slaveok.explain("executionStats") - .find({a: 1}) - .finish(); - assert.commandWorked(explainOut, "explain .find() on secondary, '" - + prefString - + "' read preference on connection"); + explainOut = + secondary.getDB("test").explain_slaveok.explain("executionStats").find({a: 1}).finish(); + assert.commandWorked( + explainOut, + "explain .find() on secondary, '" + prefString + "' read preference on connection"); // Unset read pref on the connection. secondary.setReadPref(); }); @@ -146,24 +123,14 @@ readPrefModes.forEach(function(prefString) { // Fail explain find() on a secondary, setting slaveOk to false with read preference set to primary. var prefStringPrimary = "primary"; secondary.getDB("test").getMongo().setSlaveOk(false); -explainOut = secondary.getDB("test").runCommand({ - explain: { - find: "explain_slaveok", - query: {a: 1} - }, - verbosity: "executionStats" -}); +explainOut = secondary.getDB("test").runCommand( + {explain: {find: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"}); assert.commandFailed(explainOut, "not master and slaveOk=false"); // Similarly should fail if a read preference is set on the connection. secondary.setReadPref(prefStringPrimary); -explainOut = secondary.getDB("test").runCommand({ - explain: { - find: "explain_slaveok", - query: {a: 1} - }, - verbosity: "executionStats" -}); +explainOut = secondary.getDB("test").runCommand( + {explain: {find: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"}); assert.commandFailed(explainOut, "not master and slaveOk=false"); // Unset read pref on the connection. secondary.setReadPref(); @@ -172,12 +139,7 @@ secondary.setReadPref(); // slaveOk is required for explains on a secondary. secondary.getDB("test").getMongo().setSlaveOk(false); explainOut = secondary.getDB("test").runCommand({ - explain: { - update: "explain_slaveok", - updates: [ - {q: {a: 1}, u: {$set: {a: 5}}} - ] - }, + explain: {update: "explain_slaveok", updates: [{q: {a: 1}, u: {$set: {a: 5}}}]}, verbosity: "executionStats" }); assert.commandFailed(explainOut, "explain write op on secondary, slaveOk false"); @@ -185,12 +147,7 @@ assert.commandFailed(explainOut, "explain write op on secondary, slaveOk false") // Explain of the update should also fail with slaveOk on. 
secondary.getDB("test").getMongo().setSlaveOk(true); explainOut = secondary.getDB("test").runCommand({ - explain: { - update: "explain_slaveok", - updates: [ - {q: {a: 1}, u: {$set: {a: 5}}} - ] - }, + explain: {update: "explain_slaveok", updates: [{q: {a: 1}, u: {$set: {a: 5}}}]}, verbosity: "executionStats" }); assert.commandFailed(explainOut, "explain write op on secondary, slaveOk true"); diff --git a/jstests/replsets/find_and_modify_wc.js b/jstests/replsets/find_and_modify_wc.js index 21725c0e6d8..f6cdb092697 100644 --- a/jstests/replsets/find_and_modify_wc.js +++ b/jstests/replsets/find_and_modify_wc.js @@ -5,8 +5,8 @@ 'use strict'; var nodeCount = 3; - var rst = new ReplSetTest({ nodes: nodeCount }); - rst.startSet({ nojournal: "" }); + var rst = new ReplSetTest({nodes: nodeCount}); + rst.startSet({nojournal: ""}); rst.initiate(); var primary = rst.getPrimary(); @@ -16,20 +16,19 @@ // insert some documents var docs = []; for (var i = 1; i <= 5; ++i) { - docs.push({ i: i, j: 2*i }); + docs.push({i: i, j: 2 * i}); } - var res = coll.runCommand({ insert: coll.getName(), - documents: docs, - writeConcern: { w: nodeCount } }); + var res = + coll.runCommand({insert: coll.getName(), documents: docs, writeConcern: {w: nodeCount}}); assert(res.ok); assert.eq(5, coll.count()); // use for updates in subsequent runCommand calls var reqUpdate = { findAndModify: coll.getName(), - query: { i: 3 }, - update: { $inc: { j: 1 } }, - writeConcern: { w: 'majority' } + query: {i: 3}, + update: {$inc: {j: 1}}, + writeConcern: {w: 'majority'} }; // Verify findAndModify returns old document new: false @@ -50,22 +49,15 @@ assert(!res.writeConcernError); // Verify findAndModify remove works - res = coll.runCommand({ - findAndModify: coll.getName(), - sort: { i: 1 }, - remove: true, - writeConcern: { w: nodeCount } - }); + res = coll.runCommand( + {findAndModify: coll.getName(), sort: {i: 1}, remove: true, writeConcern: {w: nodeCount}}); assert.eq(res.value.i, 1); assert.eq(coll.count(), 4); assert(!res.writeConcernError); // Verify findAndModify returns writeConcernError // when given invalid writeConcerns - [ - { w: 'invalid' }, - { w: nodeCount + 1 } - ].forEach(function(wc) { + [{w: 'invalid'}, {w: nodeCount + 1}].forEach(function(wc) { reqUpdate.writeConcern = wc; res = coll.runCommand(reqUpdate); diff --git a/jstests/replsets/fsync_lock_read_secondaries.js b/jstests/replsets/fsync_lock_read_secondaries.js index b8f8c0aa149..8b9127c8c43 100644 --- a/jstests/replsets/fsync_lock_read_secondaries.js +++ b/jstests/replsets/fsync_lock_read_secondaries.js @@ -1,5 +1,5 @@ /* @file : jstests/fsync_lock_read_secondaries.js - * + * * SERVER 4243 : If there is a pending write due to an fsync lock, all reads are blocked * * This test validates part of SERVER-4243 ticket. Allow reading on secondaries with fsyncLock @@ -22,52 +22,53 @@ * witness as an increase in the count of documents stored on the secondary. */ (function() { -"use strict"; -// Load utility methods for replica set tests -load("jstests/replsets/rslib.js"); + "use strict"; + // Load utility methods for replica set tests + load("jstests/replsets/rslib.js"); -var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5}); -// Start each mongod in the replica set. Returns a list of nodes -var nodes = replTest.startSet(); -// This will wait for initiation -replTest.initiate(); -var master = replTest.getPrimary(); + var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5}); + // Start each mongod in the replica set. 
Returns a list of nodes + var nodes = replTest.startSet(); + // This will wait for initiation + replTest.initiate(); + var master = replTest.getPrimary(); -var ret = master.getDB("admin").fsyncLock(); -if (!ret.ok) { - assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported); - jsTestLog("Storage Engine does not support fsyncLock, so bailing"); - return; -} -master.getDB("admin").fsyncUnlock(); + var ret = master.getDB("admin").fsyncLock(); + if (!ret.ok) { + assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported); + jsTestLog("Storage Engine does not support fsyncLock, so bailing"); + return; + } + master.getDB("admin").fsyncUnlock(); -var docNum = 100; -for(var i=0; i<docNum; i++) { - master.getDB("foo").bar.save({a: i}); -} -waitForAllMembers(master.getDB("foo")); -replTest.awaitReplication(); + var docNum = 100; + for (var i = 0; i < docNum; i++) { + master.getDB("foo").bar.save({a: i}); + } + waitForAllMembers(master.getDB("foo")); + replTest.awaitReplication(); -// Calling getPrimary also makes available the liveNodes structure, which looks like this: -// liveNodes = {master: masterNode, slaves: [slave1, slave2] } -var slaves = replTest.liveNodes.slaves; -slaves[0].setSlaveOk(); + // Calling getPrimary also makes available the liveNodes structure, which looks like this: + // liveNodes = {master: masterNode, slaves: [slave1, slave2] } + var slaves = replTest.liveNodes.slaves; + slaves[0].setSlaveOk(); -assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync:1, lock: 1})); -var docNum = 1000; -for (var i=0; i<docNum; i++) { - master.getDB("foo").bar.save({a: i}); -} -// Issue a read query on the secondary while holding the fsync lock. -// This is what we are testing. Previously this would block. After the fix -// this should work just fine. -var slave0count = slaves[0].getDB("foo").bar.count(); -assert.eq(slave0count, 100, "Doc count in fsync lock wrong. Expected (=100), found " + slave0count); -assert(slaves[0].getDB("admin").fsyncUnlock().ok); + assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1})); + var docNum = 1000; + for (var i = 0; i < docNum; i++) { + master.getDB("foo").bar.save({a: i}); + } + // Issue a read query on the secondary while holding the fsync lock. + // This is what we are testing. Previously this would block. After the fix + // this should work just fine. + var slave0count = slaves[0].getDB("foo").bar.count(); + assert.eq( + slave0count, 100, "Doc count in fsync lock wrong. Expected (=100), found " + slave0count); + assert(slaves[0].getDB("admin").fsyncUnlock().ok); -// The secondary should have equal or more documents than what it had before. -assert.soon(function() { + // The secondary should have equal or more documents than what it had before. + assert.soon(function() { return slaves[0].getDB("foo").bar.count() > 100; }, "count of documents stored on the secondary did not increase"); -replTest.stopSet(); + replTest.stopSet(); }()); diff --git a/jstests/replsets/get_replication_info_helper.js b/jstests/replsets/get_replication_info_helper.js index c031fb58779..cd6ef7d8a10 100644 --- a/jstests/replsets/get_replication_info_helper.js +++ b/jstests/replsets/get_replication_info_helper.js @@ -1,6 +1,6 @@ // Tests the output of db.getReplicationInfo() and tests db.printSlaveReplicationInfo(). 
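// A minimal sketch, assuming a shell `db` on any set member, of the two helpers this
// test drives; the fields shown are the usual db.getReplicationInfo() output and are
// illustrative rather than exhaustive.
var info = db.getSiblingDB('admin').getReplicationInfo();
print('oplog allocated (MB): ' + info.logSizeMB);
print('oplog window (hours): ' + info.timeDiffHours);  // span between first and last oplog entry
// Prints one lag line per secondary, e.g. "... behind the primary", which the
// assertions below grep for in the shell output.
db.getSiblingDB('admin').printSlaveReplicationInfo();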
-(function () { +(function() { "use strict"; var name = "getReplicationInfo"; var replSet = new ReplSetTest({name: name, nodes: 3, oplogSize: 50}); @@ -10,7 +10,7 @@ var primary = replSet.getPrimary(); for (var i = 0; i < 100; i++) { - primary.getDB('test').foo.insert({a:i}); + primary.getDB('test').foo.insert({a: i}); } replSet.awaitReplication(); @@ -28,8 +28,8 @@ // calling this function with and without a primary, should provide sufficient code coverage // to catch any JS errors - var mongo = startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", - primary.port); + var mongo = + startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port); mongo(); assert.soon(function() { return rawMongoProgramOutput().match("behind the primary"); @@ -42,11 +42,11 @@ } try { primary.getDB('admin').runCommand({replSetStepDown: 120, force: true}); + } catch (e) { } - catch (e) {} - mongo = startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", - primary.port); + mongo = + startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port); mongo(); assert.soon(function() { return rawMongoProgramOutput().match("behind the freshest"); diff --git a/jstests/replsets/get_status.js b/jstests/replsets/get_status.js index c69764c3d9a..31a49dc1300 100644 --- a/jstests/replsets/get_status.js +++ b/jstests/replsets/get_status.js @@ -3,7 +3,7 @@ * functionality, so we'll just check that it succeeds and fails when it's supposed to. */ -(function () { +(function() { "use strict"; var name = "getstatus"; var numNodes = 4; @@ -12,15 +12,15 @@ var config = replTest.getReplSetConfig(); config.members[numNodes - 1].arbiterOnly = true; - //An invalid time to get status + // An invalid time to get status var statusBeforeInitCode = 94; assert.commandFailedWithCode(nodes[0].getDB("admin").runCommand({replSetGetStatus: 1}), statusBeforeInitCode, - "replSetGetStatus should fail before initializing." ); + "replSetGetStatus should fail before initializing."); replTest.initiate(config); replTest.awaitSecondaryNodes(); - //A valid status + // A valid status var primary = replTest.getPrimary(); assert.commandWorked(primary.getDB("admin").runCommand({replSetGetStatus: 1})); diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js index 9fcdcbeee0e..15dea43c231 100644 --- a/jstests/replsets/groupAndMapReduce.js +++ b/jstests/replsets/groupAndMapReduce.js @@ -1,6 +1,6 @@ load("jstests/replsets/rslib.js"); -doTest = function( signal ) { +doTest = function(signal) { // Test basic replica set functionality. // -- Replication @@ -8,7 +8,7 @@ doTest = function( signal ) { // Replica set testing API // Create a new replica set test. Specify set name and the number of nodes you want. 
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} ); + var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); // call startSet() to start each mongod in the replica set // this returns a list of nodes @@ -34,41 +34,59 @@ doTest = function( signal ) { replTest.awaitReplication(); slaves = replTest.liveNodes.slaves; - assert( slaves.length == 2, "Expected 2 slaves but length was " + slaves.length ); + assert(slaves.length == 2, "Expected 2 slaves but length was " + slaves.length); slaves.forEach(function(slave) { // try to read from slave slave.slaveOk = true; var count = slave.getDB("foo").foo.count(); - printjson( count ); - assert.eq( len , count , "slave count wrong: " + slave ); - - print("Doing a findOne to verify we can get a row"); + printjson(count); + assert.eq(len, count, "slave count wrong: " + slave); + + print("Doing a findOne to verify we can get a row"); var one = slave.getDB("foo").foo.findOne(); printjson(one); -// stats = slave.getDB("foo").adminCommand({replSetGetStatus:1}); -// printjson(stats); - + // stats = slave.getDB("foo").adminCommand({replSetGetStatus:1}); + // printjson(stats); + print("Calling group() with slaveOk=true, must succeed"); slave.slaveOk = true; - count = slave.getDB("foo").foo.group({initial: {n:0}, reduce: function(obj,out){out.n++;}}); - printjson( count ); - assert.eq( len , count[0].n , "slave group count wrong: " + slave ); - - print("Calling group() with slaveOk=false, must fail"); + count = slave.getDB("foo").foo.group({ + initial: {n: 0}, + reduce: function(obj, out) { + out.n++; + } + }); + printjson(count); + assert.eq(len, count[0].n, "slave group count wrong: " + slave); + + print("Calling group() with slaveOk=false, must fail"); slave.slaveOk = false; try { - count = slave.getDB("foo").foo.group({initial: {n:0}, reduce: function(obj,out){out.n++;}}); + count = slave.getDB("foo").foo.group({ + initial: {n: 0}, + reduce: function(obj, out) { + out.n++; + } + }); assert(false, "group() succeeded with slaveOk=false"); } catch (e) { print("Received exception: " + e); } - - print("Calling inline mr() with slaveOk=true, must succeed"); + + print("Calling inline mr() with slaveOk=true, must succeed"); slave.slaveOk = true; - map = function() { emit(this.a, 1); }; - reduce = function(key, vals) { var sum = 0; for (var i = 0; i < vals.length; ++i) { sum += vals[i]; } return sum; }; - slave.getDB("foo").foo.mapReduce(map, reduce, {out: { "inline" : 1}}); + map = function() { + emit(this.a, 1); + }; + reduce = function(key, vals) { + var sum = 0; + for (var i = 0; i < vals.length; ++i) { + sum += vals[i]; + } + return sum; + }; + slave.getDB("foo").foo.mapReduce(map, reduce, {out: {"inline": 1}}); print("Calling mr() to collection with slaveOk=true, must fail"); try { @@ -78,10 +96,10 @@ doTest = function( signal ) { print("Received exception: " + e); } - print("Calling inline mr() with slaveOk=false, must fail"); + print("Calling inline mr() with slaveOk=false, must fail"); slave.slaveOk = false; try { - slave.getDB("foo").foo.mapReduce(map, reduce, {out: { "inline" : 1}}); + slave.getDB("foo").foo.mapReduce(map, reduce, {out: {"inline": 1}}); assert(false, "mapReduce() succeeded on slave with slaveOk=false"); } catch (e) { print("Received exception: " + e); @@ -96,11 +114,9 @@ doTest = function( signal ) { }); - - // Shut down the set and finish the test. 
- replTest.stopSet( signal ); + replTest.stopSet(signal); }; -doTest( 15 ); +doTest(15); print("SUCCESS"); diff --git a/jstests/replsets/index_delete.js b/jstests/replsets/index_delete.js index e43e1e9a55d..9013f8d4ab0 100644 --- a/jstests/replsets/index_delete.js +++ b/jstests/replsets/index_delete.js @@ -7,19 +7,19 @@ */ /** - * Starts a replica set with arbiter, build an index - * drop index once secondary starts building index, + * Starts a replica set with arbiter, build an index + * drop index once secondary starts building index, * index should not exist on secondary afterwards */ var checkOp = function(checkDB) { var curOp = checkDB.currentOp(true); - for (var i=0; i < curOp.inprog.length; i++) { + for (var i = 0; i < curOp.inprog.length; i++) { try { - if (curOp.inprog[i].query.background){ - // should throw something when string contains > 90% + if (curOp.inprog[i].query.background) { + // should throw something when string contains > 90% printjson(curOp.inprog[i].msg); - return true; + return true; } } catch (e) { // catchem if you can @@ -28,16 +28,19 @@ var checkOp = function(checkDB) { return false; }; // Set up replica set -var replTest = new ReplSetTest({ name: 'fgIndex', nodes: 3 }); +var replTest = new ReplSetTest({name: 'fgIndex', nodes: 3}); var nodes = replTest.nodeList(); // We need an arbiter to ensure that the primary doesn't step down when we restart the secondary replTest.startSet(); -replTest.initiate({"_id" : "fgIndex", - "members" : [ - {"_id" : 0, "host" : nodes[0]}, - {"_id" : 1, "host" : nodes[1]}, - {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); +replTest.initiate({ + "_id": "fgIndex", + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], "arbiterOnly": true} + ] +}); var master = replTest.getPrimary(); var second = replTest.getSecondary(); @@ -48,30 +51,31 @@ var size = 50000; jsTest.log("creating test data " + size + " documents"); var bulk = masterDB.jstests_fgsec.initializeUnorderedBulkOp(); -for(var i = 0; i < size; ++i) { - bulk.insert({ i: i }); +for (var i = 0; i < size; ++i) { + bulk.insert({i: i}); } assert.writeOK(bulk.execute()); jsTest.log("Creating index"); -masterDB.jstests_fgsec.ensureIndex( {i:1} ); -assert.eq(2, masterDB.jstests_fgsec.getIndexes().length ); +masterDB.jstests_fgsec.ensureIndex({i: 1}); +assert.eq(2, masterDB.jstests_fgsec.getIndexes().length); // Wait for the secondary to get the index entry -assert.soon( function() { - return 2 == secondDB.jstests_fgsec.getIndexes().length; }, - "index not created on secondary", 1000*60*10, 50 ); +assert.soon(function() { + return 2 == secondDB.jstests_fgsec.getIndexes().length; +}, "index not created on secondary", 1000 * 60 * 10, 50); jsTest.log("Index created on secondary"); -masterDB.runCommand( {dropIndexes: "jstests_fgsec", index: "i_1"} ); +masterDB.runCommand({dropIndexes: "jstests_fgsec", index: "i_1"}); jsTest.log("Waiting on replication"); replTest.awaitReplication(); -assert.soon( function() {return !checkOp(secondDB);}, "index not cancelled on secondary", 30000, 50); +assert.soon(function() { + return !checkOp(secondDB); +}, "index not cancelled on secondary", 30000, 50); masterDB.jstests_fgsec.getIndexes().forEach(printjson); secondDB.jstests_fgsec.getIndexes().forEach(printjson); -assert.soon( function() { - return 1 == secondDB.jstests_fgsec.getIndexes().length; }, - "Index not dropped on secondary", 30000, 50 ); +assert.soon(function() { + return 1 == secondDB.jstests_fgsec.getIndexes().length; +}, "Index 
not dropped on secondary", 30000, 50); jsTest.log("index-restart-secondary.js complete"); - diff --git a/jstests/replsets/index_restart_secondary.js b/jstests/replsets/index_restart_secondary.js index 7308de83271..be328ede8bc 100644 --- a/jstests/replsets/index_restart_secondary.js +++ b/jstests/replsets/index_restart_secondary.js @@ -1,13 +1,13 @@ /** - * Starts a replica set with arbiter, build an index - * restart secondary once it starts building index, + * Starts a replica set with arbiter, build an index + * restart secondary once it starts building index, * index build restarts after secondary restarts */ var replTest = new ReplSetTest({ name: 'fgIndex', nodes: 3, - oplogSize: 100, // This test inserts enough data to wrap the default 40MB oplog. + oplogSize: 100, // This test inserts enough data to wrap the default 40MB oplog. }); var nodes = replTest.nodeList(); @@ -18,11 +18,14 @@ var conns = replTest.startSet(); // don't run on 32-bit builders since they are slow and single core, which leads to heartbeats // failing and loss of primary during the bulk write if (conns[0].getDB('test').serverBuildInfo().bits !== 32) { - replTest.initiate({"_id" : "fgIndex", - "members" : [ - {"_id" : 0, "host" : nodes[0]}, - {"_id" : 1, "host" : nodes[1]}, - {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); + replTest.initiate({ + "_id": "fgIndex", + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], "arbiterOnly": true} + ] + }); var master = replTest.getPrimary(); var second = replTest.getSecondary(); @@ -36,39 +39,39 @@ if (conns[0].getDB('test').serverBuildInfo().bits !== 32) { jsTest.log("creating test data " + size + " documents"); var bulk = masterDB.jstests_fgsec.initializeUnorderedBulkOp(); - for(var i = 0; i < size; ++i) { - bulk.insert({ i: i }); + for (var i = 0; i < size; ++i) { + bulk.insert({i: i}); } - assert.writeOK(bulk.execute( { w: "majority" } )); + assert.writeOK(bulk.execute({w: "majority"})); jsTest.log("Creating index"); - masterDB.jstests_fgsec.ensureIndex( {i:1} ); + masterDB.jstests_fgsec.ensureIndex({i: 1}); assert.eq(2, masterDB.jstests_fgsec.getIndexes().length); // Wait for the secondary to get the index entry - assert.soon( function() { - return 2 == secondDB.jstests_fgsec.getIndexes().length; }, - "index not created on secondary (prior to restart)", 800000, 50 ); + assert.soon(function() { + return 2 == secondDB.jstests_fgsec.getIndexes().length; + }, "index not created on secondary (prior to restart)", 800000, 50); jsTest.log("Index created on secondary"); // restart secondary and reconnect jsTest.log("Restarting secondary"); - replTest.restart(secondId, {}, /*wait=*/true); + replTest.restart(secondId, {}, /*wait=*/true); // Make sure secondary comes back - assert.soon( function() { + assert.soon(function() { try { - secondDB.isMaster(); // trigger a reconnect if needed + secondDB.isMaster(); // trigger a reconnect if needed return true; } catch (e) { - return false; + return false; } - } , "secondary didn't restart", 30000, 1000); + }, "secondary didn't restart", 30000, 1000); - assert.soon( function() { - return 2 == secondDB.jstests_fgsec.getIndexes().length; }, - "Index build not resumed after restart", 30000, 50 ); + assert.soon(function() { + return 2 == secondDB.jstests_fgsec.getIndexes().length; + }, "Index build not resumed after restart", 30000, 50); jsTest.log("index-restart-secondary.js complete"); } diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js index 
4047180d783..3977445743e 100644 --- a/jstests/replsets/initial_sync1.js +++ b/jstests/replsets/initial_sync1.js @@ -20,8 +20,8 @@ print("1. Bring up set"); // SERVER-7455, this test is called from ssl/auth_x509.js var x509_options1; var x509_options2; -var replTest = new ReplSetTest({name: basename, - nodes : {node0 : x509_options1, node1 : x509_options2}}); +var replTest = + new ReplSetTest({name: basename, nodes: {node0: x509_options1, node1: x509_options2}}); var conns = replTest.startSet(); replTest.initiate(); @@ -37,19 +37,16 @@ var local_s1 = slave1.getDB("local"); print("2. Insert some data"); var bulk = foo.bar.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { - bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" }); + bulk.insert({date: new Date(), x: i, str: "all the talk on the market"}); } assert.writeOK(bulk.execute()); -print("total in foo: "+foo.bar.count()); - +print("total in foo: " + foo.bar.count()); print("4. Make sure synced"); replTest.awaitReplication(); - print("5. Freeze #2"); -admin_s1.runCommand({replSetFreeze:999999}); - +admin_s1.runCommand({replSetFreeze: 999999}); print("6. Bring up #3"); var hostname = getHostName(); @@ -61,12 +58,11 @@ var admin_s2 = slave2.getDB("admin"); var config = replTest.getReplSetConfig(); config.version = 2; -config.members.push({_id:2, host: slave2.host}); +config.members.push({_id: 2, host: slave2.host}); try { - admin.runCommand({replSetReconfig:config}); -} -catch(e) { - print(e); + admin.runCommand({replSetReconfig: config}); +} catch (e) { + print(e); } reconnect(slave1); reconnect(slave2); @@ -78,17 +74,16 @@ wait(function() { printjson(config2); printjson(config3); - return config2.version == config.version && - (config3 && config3.version == config.version); - }); + return config2.version == config.version && (config3 && config3.version == config.version); +}); -replTest.waitForState( - slave2, [ReplSetTest.State.SECONDARY, ReplSetTest.State.RECOVERING], 60 * 1000); +replTest.waitForState(slave2, + [ReplSetTest.State.SECONDARY, ReplSetTest.State.RECOVERING], + 60 * 1000); print("7. Kill the secondary in the middle of syncing"); replTest.stop(slave1); - print("8. Eventually the new node should become a secondary"); print("if initial sync has started, this will cause it to fail and sleep for 5 minutes"); replTest.waitForState(slave2, ReplSetTest.State.SECONDARY, 60 * 1000); @@ -102,7 +97,7 @@ print("10. Insert some stuff"); master = replTest.getPrimary(); bulk = foo.bar.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { - bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" }); + bulk.insert({date: new Date(), x: i, str: "all the talk on the market"}); } assert.writeOK(bulk.execute()); diff --git a/jstests/replsets/initial_sync2.js b/jstests/replsets/initial_sync2.js index 9a913aeafc5..afff58d6336 100644 --- a/jstests/replsets/initial_sync2.js +++ b/jstests/replsets/initial_sync2.js @@ -20,136 +20,125 @@ var basename = "jstests_initsync2"; var doTest = function() { -print("1. Bring up set"); -var replTest = new ReplSetTest( {name: basename, nodes: 2} ); -var conns = replTest.startSet(); -replTest.initiate(); + print("1. 
Bring up set"); + var replTest = new ReplSetTest({name: basename, nodes: 2}); + var conns = replTest.startSet(); + replTest.initiate(); -var master = replTest.getPrimary(); -var origMaster = master; -var foo = master.getDB("foo"); -var admin = master.getDB("admin"); + var master = replTest.getPrimary(); + var origMaster = master; + var foo = master.getDB("foo"); + var admin = master.getDB("admin"); -var slave1 = replTest.liveNodes.slaves[0]; -var admin_s1 = slave1.getDB("admin"); -var local_s1 = slave1.getDB("local"); + var slave1 = replTest.liveNodes.slaves[0]; + var admin_s1 = slave1.getDB("admin"); + var local_s1 = slave1.getDB("local"); -print("2. Insert some data"); -for (var i=0; i<10000; i++) { - foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"}); -} -print("total in foo: "+foo.bar.count()); + print("2. Insert some data"); + for (var i = 0; i < 10000; i++) { + foo.bar.insert({date: new Date(), x: i, str: "all the talk on the market"}); + } + print("total in foo: " + foo.bar.count()); + print("4. Make sure synced"); + replTest.awaitReplication(); -print("4. Make sure synced"); -replTest.awaitReplication(); + print("5. Freeze #2"); + admin_s1.runCommand({replSetFreeze: 999999}); + print("6. Bring up #3"); + var hostname = getHostName(); -print("5. Freeze #2"); -admin_s1.runCommand({replSetFreeze:999999}); + var slave2 = MongoRunner.runMongod({replSet: basename, oplogSize: 2}); + var local_s2 = slave2.getDB("local"); + var admin_s2 = slave2.getDB("admin"); -print("6. Bring up #3"); -var hostname = getHostName(); + var config = replTest.getReplSetConfig(); + config.version = 2; -var slave2 = MongoRunner.runMongod({replSet: basename, oplogSize: 2}); + // Add #3 using rs.add() configuration document. + // Since 'db' currently points to slave2, reset 'db' to admin db on master before running + // rs.add(). + db = admin; -var local_s2 = slave2.getDB("local"); -var admin_s2 = slave2.getDB("admin"); + // If _id is not provided, rs.add() will generate _id for #3 based on existing members' _ids. + assert.commandWorked(rs.add({host: hostname + ":" + slave2.port}), + "failed to add #3 to replica set"); -var config = replTest.getReplSetConfig(); -config.version = 2; + reconnect(slave1); + reconnect(slave2); -// Add #3 using rs.add() configuration document. -// Since 'db' currently points to slave2, reset 'db' to admin db on master before running rs.add(). -db = admin; + wait(function() { + var config2 = local_s1.system.replset.findOne(); + var config3 = local_s2.system.replset.findOne(); -// If _id is not provided, rs.add() will generate _id for #3 based on existing members' _ids. -assert.commandWorked(rs.add({host:hostname+":"+slave2.port}), "failed to add #3 to replica set"); - -reconnect(slave1); -reconnect(slave2); - -wait(function() { - var config2 = local_s1.system.replset.findOne(); - var config3 = local_s2.system.replset.findOne(); - - printjson(config2); - printjson(config3); - - return config2.version == config.version && - (config3 && config3.version == config.version); - }); -admin_s2.runCommand({replSetFreeze:999999}); - - -wait(function() { - var status = admin_s2.runCommand({replSetGetStatus:1}); - printjson(status); - return status.members && - (status.members[2].state == 3 || status.members[2].state == 2); - }); + printjson(config2); + printjson(config3); + return config2.version == config.version && (config3 && config3.version == config.version); + }); + admin_s2.runCommand({replSetFreeze: 999999}); -print("7. 
Kill #1 in the middle of syncing"); -replTest.stop(0); + wait(function() { + var status = admin_s2.runCommand({replSetGetStatus: 1}); + printjson(status); + return status.members && (status.members[2].state == 3 || status.members[2].state == 2); + }); + print("7. Kill #1 in the middle of syncing"); + replTest.stop(0); -print("8. Check that #3 makes it into secondary state"); -wait(function() { - var status = admin_s2.runCommand({replSetGetStatus:1}); - occasionally(function() { printjson(status);}, 10); + print("8. Check that #3 makes it into secondary state"); + wait(function() { + var status = admin_s2.runCommand({replSetGetStatus: 1}); + occasionally(function() { + printjson(status); + }, 10); if (status.members[2].state == 2 || status.members[2].state == 1) { return true; } return false; }); + print("9. Bring #1 back up"); + replTest.start(0, {}, true); + reconnect(master); + wait(function() { + var status = admin.runCommand({replSetGetStatus: 1}); + printjson(status); + return status.members && (status.members[0].state == 1 || status.members[0].state == 2); + }); + + print("10. Initial sync should succeed"); + wait(function() { + var status = admin_s2.runCommand({replSetGetStatus: 1}); + printjson(status); + return status.members && status.members[2].state == 2 || status.members[2].state == 1; + }); + + print("11. Insert some stuff"); + // ReplSetTest doesn't find master correctly unless all nodes are defined by + // ReplSetTest + for (var i = 0; i < 30; i++) { + var result = admin.runCommand({isMaster: 1}); + if (result.ismaster) { + break; + } else if (result.primary) { + master = connect(result.primary + "/admin").getMongo(); + break; + } + sleep(1000); + } + + for (var i = 0; i < 10000; i++) { + foo.bar.insert({date: new Date(), x: i, str: "all the talk on the market"}); + } + + print("12. Everyone happy eventually"); + replTest.awaitReplication(2 * 60 * 1000); -print("9. Bring #1 back up"); -replTest.start(0, {}, true); -reconnect(master); -wait(function() { - var status = admin.runCommand({replSetGetStatus:1}); - printjson(status); - return status.members && - (status.members[0].state == 1 || status.members[0].state == 2); - }); - - -print("10. Initial sync should succeed"); -wait(function() { - var status = admin_s2.runCommand({replSetGetStatus:1}); - printjson(status); - return status.members && - status.members[2].state == 2 || status.members[2].state == 1; - }); - - -print("11. Insert some stuff"); -// ReplSetTest doesn't find master correctly unless all nodes are defined by -// ReplSetTest -for (var i = 0; i<30; i++) { - var result = admin.runCommand({isMaster : 1}); - if (result.ismaster) { - break; - } - else if (result.primary) { - master = connect(result.primary+"/admin").getMongo(); - break; - } - sleep(1000); -} - -for (var i=0; i<10000; i++) { - foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"}); -} - - -print("12. 
Everyone happy eventually"); -replTest.awaitReplication(2 * 60 * 1000); - -replTest.stopSet(); + replTest.stopSet(); }; doTest(); diff --git a/jstests/replsets/initial_sync3.js b/jstests/replsets/initial_sync3.js index 4456cfbd498..5dfbf60c455 100644 --- a/jstests/replsets/initial_sync3.js +++ b/jstests/replsets/initial_sync3.js @@ -10,13 +10,12 @@ * @tags: [requires_persistence] */ - load("jstests/replsets/rslib.js"); var name = "initialsync3"; var host = getHostName(); print("Start set with three nodes"); -var replTest = new ReplSetTest( {name: name, nodes: 3} ); +var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.startSet(); replTest.initiate({ _id: name, @@ -30,7 +29,7 @@ replTest.initiate({ var master = replTest.getPrimary(); print("Initial sync"); -master.getDB("foo").bar.baz.insert({x:1}); +master.getDB("foo").bar.baz.insert({x: 1}); replTest.awaitReplication(); replTest.stop(0); @@ -41,7 +40,7 @@ replTest.start(1); print("make sure 1 does not become a secondary (because it cannot clone from 2)"); sleep(10000); -var result = nodes[1].getDB("admin").runCommand({isMaster : 1}); +var result = nodes[1].getDB("admin").runCommand({isMaster: 1}); assert(!result.ismaster, tojson(result)); assert(!result.secondary, tojson(result)); @@ -52,7 +51,7 @@ master = replTest.getPrimary(); print("now 1 should be able to initial sync"); assert.soon(function() { - var result = nodes[1].getDB("admin").runCommand({isMaster : 1}); + var result = nodes[1].getDB("admin").runCommand({isMaster: 1}); printjson(result); return result.secondary; }); diff --git a/jstests/replsets/initial_sync4.js b/jstests/replsets/initial_sync4.js index c25bc94c4ed..c2fcede9969 100644 --- a/jstests/replsets/initial_sync4.js +++ b/jstests/replsets/initial_sync4.js @@ -4,7 +4,7 @@ load("jstests/replsets/rslib.js"); basename = "jstests_initsync4"; print("1. Bring up set"); -replTest = new ReplSetTest( {name: basename, nodes: 1} ); +replTest = new ReplSetTest({name: basename, nodes: 1}); replTest.startSet(); replTest.initiate(); @@ -14,10 +14,10 @@ mc = m.getDB("d")["c"]; print("2. Insert some data"); N = 5000; -mc.ensureIndex({x:1}); +mc.ensureIndex({x: 1}); var bulk = mc.initializeUnorderedBulkOp(); -for( i = 0; i < N; ++i ) { - bulk.insert({ _id: i, x: i, a: {} }); +for (i = 0; i < N; ++i) { + bulk.insert({_id: i, x: i, a: {}}); } assert.writeOK(bulk.execute()); @@ -31,11 +31,10 @@ s = MongoRunner.runMongod({replSet: basename, oplogSize: 2}); var config = replTest.getReplSetConfig(); config.version = 2; -config.members.push({_id:2, host:hostname+":"+s.port}); +config.members.push({_id: 2, host: hostname + ":" + s.port}); try { - m.getDB("admin").runCommand({replSetReconfig:config}); -} -catch(e) { + m.getDB("admin").runCommand({replSetReconfig: config}); +} catch (e) { print(e); } reconnect(s); @@ -45,39 +44,38 @@ print("5. Wait for new node to start cloning"); s.setSlaveOk(); sc = s.getDB("d")["c"]; -wait( function() { printjson( sc.stats() ); return sc.stats().count > 0; } ); +wait(function() { + printjson(sc.stats()); + return sc.stats().count > 0; +}); print("6. Start updating documents on primary"); -for( i = N-1; i >= N-10000; --i ) { +for (i = N - 1; i >= N - 10000; --i) { // If the document is cloned as {a:1}, the {$set:{'a.b':1}} modifier will uassert. 
- mc.update( {_id:i}, {$set:{'a.b':1}} ); - mc.update( {_id:i}, {$set:{a:1}} ); + mc.update({_id: i}, {$set: {'a.b': 1}}); + mc.update({_id: i}, {$set: {a: 1}}); } -for ( i = N; i < N*2; i++ ) { - mc.insert( { _id : i, x : i } ); +for (i = N; i < N * 2; i++) { + mc.insert({_id: i, x: i}); } -assert.eq( N*2, mc.count() ); +assert.eq(N * 2, mc.count()); print("7. Wait for new node to become SECONDARY"); wait(function() { - var status = s.getDB("admin").runCommand({replSetGetStatus:1}); - printjson(status); - return status.members && - (status.members[1].state == 2); - }); + var status = s.getDB("admin").runCommand({replSetGetStatus: 1}); + printjson(status); + return status.members && (status.members[1].state == 2); +}); print("8. Wait for new node to have all the data"); wait(function() { return sc.count() == mc.count(); -} ); - +}); -assert.eq( mc.getIndexKeys().length, - sc.getIndexKeys().length ); +assert.eq(mc.getIndexKeys().length, sc.getIndexKeys().length); -assert.eq( mc.find().sort( { x : 1 } ).itcount(), - sc.find().sort( { x : 1 } ).itcount() ); +assert.eq(mc.find().sort({x: 1}).itcount(), sc.find().sort({x: 1}).itcount()); -replTest.stopSet( 15 ); +replTest.stopSet(15); diff --git a/jstests/replsets/initial_sync_unsupported_auth_schema.js b/jstests/replsets/initial_sync_unsupported_auth_schema.js index 28c4d1e8826..e27d25aaac0 100644 --- a/jstests/replsets/initial_sync_unsupported_auth_schema.js +++ b/jstests/replsets/initial_sync_unsupported_auth_schema.js @@ -4,8 +4,7 @@ function checkedReInitiate(rst) { try { rst.reInitiate(); - } - catch (e) { + } catch (e) { // reInitiate can throw because it tries to run an ismaster command on // all secondaries, including the new one that may have already aborted var errMsg = tojson(e); @@ -49,10 +48,12 @@ function testInitialSyncAbortsWithUnsupportedAuthSchema(schema) { var assertFn = function() { return rawMongoProgramOutput().match(msg); }; - assert.soon(assertFn, 'Initial sync should have aborted due to an invalid or unsupported' + - ' authSchema version: ' + tojson(schema), 60000); + assert.soon(assertFn, + 'Initial sync should have aborted due to an invalid or unsupported' + + ' authSchema version: ' + tojson(schema), + 60000); - rst.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] }); + rst.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]}); } function testInitialSyncAbortsWithExistingUserAndNoAuthSchema() { @@ -81,10 +82,12 @@ function testInitialSyncAbortsWithExistingUserAndNoAuthSchema() { return rawMongoProgramOutput().match(msg); }; - assert.soon(assertFn, 'Initial sync should have aborted due to an existing user document and' + - ' a missing auth schema', 60000); + assert.soon(assertFn, + 'Initial sync should have aborted due to an existing user document and' + + ' a missing auth schema', + 60000); - rst.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] }); + rst.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]}); } testInitialSyncAbortsWithUnsupportedAuthSchema({_id: 'authSchema'}); diff --git a/jstests/replsets/initiate.js b/jstests/replsets/initiate.js index 41d53202f80..0afa0c85bcd 100644 --- a/jstests/replsets/initiate.js +++ b/jstests/replsets/initiate.js @@ -2,20 +2,19 @@ * Sanity check that initializing will fail with bad input. There are C++ unit tests for most bad * configs, so this is just seeing if it fails when it's supposed to. 
*/ -(function () { +(function() { "use strict"; - var replTest = new ReplSetTest({name : 'testSet2', nodes : 1}); + var replTest = new ReplSetTest({name: 'testSet2', nodes: 1}); var nodes = replTest.startSet(); assert.soon(function() { try { var result = nodes[0].getDB("admin").runCommand( - {replSetInitiate: {_id: "testSet2", members: [{_id : 0, tags : ["member0"]}]}}); + {replSetInitiate: {_id: "testSet2", members: [{_id: 0, tags: ["member0"]}]}}); printjson(result); return (result.errmsg.match(/bad or missing host field/) || result.errmsg.match(/Missing expected field \"host\"/)); - } - catch (e) { + } catch (e) { print(e); } return false; diff --git a/jstests/replsets/initiate_prohibits_w0.js b/jstests/replsets/initiate_prohibits_w0.js index 9bd5d4a599d..e0d100e7251 100644 --- a/jstests/replsets/initiate_prohibits_w0.js +++ b/jstests/replsets/initiate_prohibits_w0.js @@ -23,13 +23,11 @@ function testInitiate(gleDefaults) { /* * Try to initiate with w: 0 in getLastErrorDefaults. */ -testInitiate({ - getLastErrorDefaults: {w: 0}}); +testInitiate({getLastErrorDefaults: {w: 0}}); /* * Try to initiate with w: 0 and other options in getLastErrorDefaults. */ -testInitiate({ - getLastErrorDefaults: {w: 0, j: false, wtimeout: 100, fsync: true}}); +testInitiate({getLastErrorDefaults: {w: 0, j: false, wtimeout: 100, fsync: true}}); replTest.stopSet(); diff --git a/jstests/replsets/initiate_without_replset_name_at_startup.js b/jstests/replsets/initiate_without_replset_name_at_startup.js index 4deac03ca0b..cccb7137ba6 100644 --- a/jstests/replsets/initiate_without_replset_name_at_startup.js +++ b/jstests/replsets/initiate_without_replset_name_at_startup.js @@ -10,33 +10,28 @@ * will not persist across a restart and they will not transition to PRIMARY as described above. * @tags: [requires_persistence] */ -(function () { +(function() { "use strict"; var baseName = 'testInitiateWithoutReplSetNameAtStartup'; var port = allocatePorts(1)[0]; var dbpath = MongoRunner.dataPath + baseName + '/'; - var mongod = MongoRunner.runMongod({ - dbpath: dbpath, - port: port}); + var mongod = MongoRunner.runMongod({dbpath: dbpath, port: port}); var config = { _id: baseName, version: 1, - members: [ - {_id: 0, host: mongod.name}, - ], + members: [{_id: 0, host: mongod.name}, ], }; var result = assert.commandFailedWithCode( mongod.getDB('admin').runCommand({replSetInitiate: config}), ErrorCodes.NoReplicationEnabled, 'replSetInitiate should fail when both --configsvr and --replSet are missing.'); - assert( - result.errmsg.match(/This node was not started with the replSet option/), - 'unexpected error message when both --configsvr and --replSet are missing. ' + - 'configuration: ' + tojson(result)); + assert(result.errmsg.match(/This node was not started with the replSet option/), + 'unexpected error message when both --configsvr and --replSet are missing. ' + + 'configuration: ' + tojson(result)); // The rest of this test can only be run if the storageEngine supports committed reads. 
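// [Editorial aside, not part of the patch] A hedged sketch of the capability probe the
// comment above refers to, modeled on the serverStatus shape other tests in this
// directory use; it assumes a live shell connection 'db'.
var ss = db.serverStatus();
if (!(ss.storageEngine && ss.storageEngine.supportsCommittedReads)) {
    print("skipping: storage engine does not support committed reads");
}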
var supportsCommittedReads = @@ -49,96 +44,88 @@ return; } - mongod = MongoRunner.runMongod({ - configsvr: '', - dbpath: dbpath, - port: port, - restart: true}); + mongod = MongoRunner.runMongod({configsvr: '', dbpath: dbpath, port: port, restart: true}); - assert.commandWorked( - mongod.getDB('admin').runCommand({replSetInitiate: config}), - 'replSetInitiate should not fail when given a valid configuration'); + assert.commandWorked(mongod.getDB('admin').runCommand({replSetInitiate: config}), + 'replSetInitiate should not fail when given a valid configuration'); // Check saved config var systemReplsetCollection = mongod.getDB('local').system.replset; - assert.eq(1, systemReplsetCollection.count(), - 'replSetInitiate did not save configuration in ' + - systemReplsetCollection.getFullName()); + assert.eq( + 1, + systemReplsetCollection.count(), + 'replSetInitiate did not save configuration in ' + systemReplsetCollection.getFullName()); var savedConfig = systemReplsetCollection.findOne(); - assert.eq(config._id, savedConfig._id, + assert.eq(config._id, + savedConfig._id, 'config passed to replSetInitiate (left side) does not match config saved in ' + - systemReplsetCollection.getFullName() + ' (right side)'); + systemReplsetCollection.getFullName() + ' (right side)'); result = assert.commandFailedWithCode( - mongod.getDB('admin').runCommand({replSetInitiate: { - _id: baseName + '-2', - version: 1, - members: [ - {_id: 0, host: mongod.name}, - ], - }}), + mongod.getDB('admin').runCommand({ + replSetInitiate: { + _id: baseName + '-2', + version: 1, + members: [{_id: 0, host: mongod.name}, ], + } + }), ErrorCodes.AlreadyInitialized, 'expected AlreadyInitialized error code when configuration already exists in ' + - systemReplsetCollection.getFullName()); - assert(result.errmsg.match(/already initialized/), - 'unexpected error message when replica set configuration already exists ' + - tojson(result)); + systemReplsetCollection.getFullName()); + assert( + result.errmsg.match(/already initialized/), + 'unexpected error message when replica set configuration already exists ' + tojson(result)); systemReplsetCollection = mongod.getDB('local').system.replset; savedConfig = systemReplsetCollection.findOne(); - assert.eq(config._id, savedConfig._id, + assert.eq(config._id, + savedConfig._id, 'config passed to replSetInitiate (left side) does not match config saved in ' + - systemReplsetCollection.getFullName() + ' (right side)'); + systemReplsetCollection.getFullName() + ' (right side)'); var oplogCollection = mongod.getDB('local').oplog.rs; assert(oplogCollection.exists(), 'oplog collection ' + oplogCollection.getFullName() + - ' not created after successful replSetInitiate. Collections in local database: ' + - mongod.getDB('local').getCollectionNames().join(', ')); + ' not created after successful replSetInitiate. Collections in local database: ' + + mongod.getDB('local').getCollectionNames().join(', ')); assert(oplogCollection.isCapped(), 'oplog collection ' + oplogCollection.getFullName() + ' must be capped'); - assert.eq(1, oplogCollection.count(), + assert.eq(1, + oplogCollection.count(), 'oplog collection ' + oplogCollection.getFullName() + - ' is not initialized with first entry.'); + ' is not initialized with first entry.'); var oplogEntry = oplogCollection.findOne(); assert.eq('n', oplogEntry.op, 'unexpected first oplog entry type: ' + tojson(oplogEntry)); MongoRunner.stopMongod(port); // Restart server and attempt to save a different config. 
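// [Editorial aside, not part of the patch] The behavior the next hunk checks, in
// miniature: once a config is persisted in local.system.replset, a second
// replSetInitiate is rejected. The host string "h:27017" is hypothetical.
var reinit = db.adminCommand(
    {replSetInitiate: {_id: "other", version: 1, members: [{_id: 0, host: "h:27017"}]}});
assert.commandFailedWithCode(reinit, ErrorCodes.AlreadyInitialized);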
- mongod = MongoRunner.runMongod({ - configsvr: '', - dbpath: dbpath, - port: port, - restart: true}); + mongod = MongoRunner.runMongod({configsvr: '', dbpath: dbpath, port: port, restart: true}); result = assert.commandFailedWithCode( - mongod.getDB('admin').runCommand({replSetInitiate: { - _id: baseName + '-2', - version: 1, - members: [ - {_id: 0, host: mongod.name}, - ], - }}), + mongod.getDB('admin').runCommand({ + replSetInitiate: { + _id: baseName + '-2', + version: 1, + members: [{_id: 0, host: mongod.name}, ], + } + }), ErrorCodes.AlreadyInitialized, 'expected AlreadyInitialized error code when configuration already exists in ' + - systemReplsetCollection.getFullName() + ' after restarting'); + systemReplsetCollection.getFullName() + ' after restarting'); assert(result.errmsg.match(/already initialized/), 'unexpected error message when replica set configuration already exists ' + - '(after restarting without --replSet): ' + tojson(result)); + '(after restarting without --replSet): ' + tojson(result)); systemReplsetCollection = mongod.getDB('local').system.replset; savedConfig = systemReplsetCollection.findOne(); - assert.eq(config._id, savedConfig._id, + assert.eq(config._id, + savedConfig._id, 'config passed to replSetInitiate (left side) does not match config saved in ' + - systemReplsetCollection.getFullName() + ' (right side)'); + systemReplsetCollection.getFullName() + ' (right side)'); MongoRunner.stopMongod(port); // Restart server with --replSet and check own replica member state. - mongod = MongoRunner.runMongod({ - configsvr: '', - dbpath: dbpath, - port: port, - replSet: config._id, - restart: true}); + mongod = MongoRunner.runMongod( + {configsvr: '', dbpath: dbpath, port: port, replSet: config._id, restart: true}); // Wait for member state to become PRIMARY. assert.soon( @@ -146,8 +133,8 @@ result = assert.commandWorked( mongod.getDB('admin').runCommand({replSetGetStatus: 1}), 'failed to get replica set status after restarting server with --replSet option'); - assert.eq(1, result.members.length, - 'replica set status should contain exactly 1 member'); + assert.eq( + 1, result.members.length, 'replica set status should contain exactly 1 member'); var member = result.members[0]; print('Current replica member state = ' + member.state + ' (' + member.stateStr + ')'); return member.state == ReplSetTest.State.PRIMARY; @@ -158,27 +145,25 @@ // Write/read a single document to ensure basic functionality. var t = mongod.getDB('config').getCollection(baseName); - var doc = {_id: 0}; - assert.soon( - function() { - result = t.save(doc); - assert(result instanceof WriteResult); - if (result.hasWriteError()) { - print('Failed with write error saving document after transitioning to primary: ' + - tojson(result) + '. Retrying...'); - return false; - } - if (result.hasWriteConcernError()) { - print('Failed with write concern error saving document after transitioning to ' + - 'primary: ' + tojson(result) + '. Retrying...'); - return false; - } - print('Successfully saved document after transitioning to primary: ' + tojson(result)); - return true; - }, - 'failed to save document after transitioning to primary', - 5000, - 1000); + var doc = { + _id: 0 + }; + assert.soon(function() { + result = t.save(doc); + assert(result instanceof WriteResult); + if (result.hasWriteError()) { + print('Failed with write error saving document after transitioning to primary: ' + + tojson(result) + '. 
Retrying...'); + return false; + } + if (result.hasWriteConcernError()) { + print('Failed with write concern error saving document after transitioning to ' + + 'primary: ' + tojson(result) + '. Retrying...'); + return false; + } + print('Successfully saved document after transitioning to primary: ' + tojson(result)); + return true; + }, 'failed to save document after transitioning to primary', 5000, 1000); assert.eq(1, t.count(), 'incorrect collection size after successful write'); assert.eq(doc, t.findOne()); diff --git a/jstests/replsets/ismaster1.js b/jstests/replsets/ismaster1.js index 8acefc875d5..2d469f385a2 100644 --- a/jstests/replsets/ismaster1.js +++ b/jstests/replsets/ismaster1.js @@ -6,7 +6,7 @@ load("jstests/replsets/rslib.js"); // function create the error message if an assert fails -var generateErrorString = function (badFields, missingFields, badValues, result) { +var generateErrorString = function(badFields, missingFields, badValues, result) { var str = "\nThe result was:\n" + tojson(result); if (badFields.length !== 0) { str += "\nIt had the following fields which it shouldn't have: "; @@ -17,28 +17,28 @@ var generateErrorString = function (badFields, missingFields, badValues, result) str += missingFields; } if (badValues.length !== 0) { - for (i = 0; i < badValues.length; i+=3) { - str += "\nIts value for " + badValues[i] + " is " + badValues[i+1]; - str += " but should be " + badValues[i+2]; + for (i = 0; i < badValues.length; i += 3) { + str += "\nIts value for " + badValues[i] + " is " + badValues[i + 1]; + str += " but should be " + badValues[i + 2]; } } return str; }; // function to check a single result -var checkMember = function (memberInfo) { +var checkMember = function(memberInfo) { // run isMaster on the connection - result = memberInfo.conn.getDB("admin").runCommand({isMaster:1}); + result = memberInfo.conn.getDB("admin").runCommand({isMaster: 1}); // make sure result doesn't contain anything it shouldn't var badFields = []; for (field in result) { - if (!result.hasOwnProperty(field)){ - continue; - } - if (Array.contains(memberInfo.unwantedFields, field)) { - badFields.push(field); - } + if (!result.hasOwnProperty(field)) { + continue; + } + if (Array.contains(memberInfo.unwantedFields, field)) { + badFields.push(field); + } } // make sure result contains the fields we want @@ -52,7 +52,7 @@ var checkMember = function (memberInfo) { } // make sure the result has proper values for fields with known values - var badValues = []; // each mistake will be saved as three entries (key, badvalue, goodvalue) + var badValues = []; // each mistake will be saved as three entries (key, badvalue, goodvalue) for (field in memberInfo.goodValues) { if (typeof(memberInfo.goodValues[field]) === "object") { // assumes nested obj is disk in tags this is currently true, but may change @@ -61,8 +61,7 @@ var checkMember = function (memberInfo) { badValues.push(result[field].disk); badValues.push(memberInfo.goodValues[field].disk); } - } - else { + } else { if (result[field] !== memberInfo.goodValues[field]) { badValues.push(field); badValues.push(result[field]); @@ -71,8 +70,8 @@ var checkMember = function (memberInfo) { } } assert(badFields.length === 0 && missingFields.length === 0 && badValues.length === 0, - memberInfo.name + " had the following problems." - + generateErrorString(badFields, missingFields, badValues, result)); + memberInfo.name + " had the following problems." 
+ + generateErrorString(badFields, missingFields, badValues, result)); }; // start of test code @@ -89,192 +88,201 @@ config.members[2].buildIndexes = false; config.members[3].arbiterOnly = true; replTest.initiate(config); -var agreeOnPrimaryAndSetVersion = function( setVersion ) { - - print( "Waiting for primary and replica set version " + setVersion ); - +var agreeOnPrimaryAndSetVersion = function(setVersion) { + + print("Waiting for primary and replica set version " + setVersion); + var nodes = replTest.nodes; var primary = undefined; - var lastSetVersion = setVersion; - for ( var i = 0; i < nodes.length; i++ ) { + var lastSetVersion = setVersion; + for (var i = 0; i < nodes.length; i++) { try { - var isMasterResult = nodes[i].getDB( "admin" ).runCommand({ isMaster : 1 }); - } - catch (e) { + var isMasterResult = nodes[i].getDB("admin").runCommand({isMaster: 1}); + } catch (e) { // handle reconnect errors due to step downs print("Error while calling isMaster on " + nodes[i] + ": " + e); return false; } - printjson( isMasterResult ); - if ( !primary ) primary = isMasterResult.primary; - if ( !lastSetVersion ) lastSetVersion = isMasterResult.setVersion; - if ( isMasterResult.primary != primary || !primary ) return false; - if ( isMasterResult.setVersion != lastSetVersion ) return false; + printjson(isMasterResult); + if (!primary) + primary = isMasterResult.primary; + if (!lastSetVersion) + lastSetVersion = isMasterResult.setVersion; + if (isMasterResult.primary != primary || !primary) + return false; + if (isMasterResult.setVersion != lastSetVersion) + return false; } - + return true; }; var master = replTest.getPrimary(); -assert.soon( function() { return agreeOnPrimaryAndSetVersion( 1 ); }, - "Nodes did not initiate in less than a minute", 60000 ); +assert.soon(function() { + return agreeOnPrimaryAndSetVersion(1); +}, "Nodes did not initiate in less than a minute", 60000); // check to see if the information from isMaster() is correct at each node // the checker only checks that the field exists when its value is "has" -checkMember({ conn: master, - name: "master", - goodValues: { - setName: "ismaster", - setVersion: 1, - ismaster: true, - secondary: false, - ok: 1 - }, - wantedFields: ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", - "localTime"], - unwantedFields: ["arbiterOnly", "passive", "slaveDelay", "hidden", "tags", - "buildIndexes"] - }); +checkMember({ + conn: master, + name: "master", + goodValues: {setName: "ismaster", setVersion: 1, ismaster: true, secondary: false, ok: 1}, + wantedFields: + ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"], + unwantedFields: ["arbiterOnly", "passive", "slaveDelay", "hidden", "tags", "buildIndexes"] +}); -checkMember({ conn: replTest.liveNodes.slaves[0], - name: "slave", - goodValues: { - setName: "ismaster", - setVersion: 1, - ismaster: false, - secondary: true, - passive: true, - ok: 1 - }, - wantedFields: ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", - "localTime"], - unwantedFields: ["arbiterOnly", "slaveDelay", "hidden", "tags", "buildIndexes"] - }); +checkMember({ + conn: replTest.liveNodes.slaves[0], + name: "slave", + goodValues: { + setName: "ismaster", + setVersion: 1, + ismaster: false, + secondary: true, + passive: true, + ok: 1 + }, + wantedFields: + ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"], + unwantedFields: ["arbiterOnly", "slaveDelay", "hidden", "tags", "buildIndexes"] +}); -checkMember({ conn: 
replTest.liveNodes.slaves[1], - name: "delayed_slave", - goodValues: { - setName: "ismaster", - setVersion: 1, - ismaster: false, - secondary: true, - passive: true, - slaveDelay: 3, - buildIndexes: false, - ok: 1 - }, - wantedFields: ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", - "localTime"], - unwantedFields: ["arbiterOnly", "tags"] - }); +checkMember({ + conn: replTest.liveNodes.slaves[1], + name: "delayed_slave", + goodValues: { + setName: "ismaster", + setVersion: 1, + ismaster: false, + secondary: true, + passive: true, + slaveDelay: 3, + buildIndexes: false, + ok: 1 + }, + wantedFields: + ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"], + unwantedFields: ["arbiterOnly", "tags"] +}); -checkMember({ conn: replTest.liveNodes.slaves[2], - name: "arbiter", - goodValues: { - setName: "ismaster", - setVersion: 1, - ismaster: false, - secondary: false, - arbiterOnly: true, - ok: 1 - }, - wantedFields: ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", - "localTime"], - unwantedFields: ["slaveDelay", "hidden", "tags", "buildIndexes", "passive"] - }); +checkMember({ + conn: replTest.liveNodes.slaves[2], + name: "arbiter", + goodValues: { + setName: "ismaster", + setVersion: 1, + ismaster: false, + secondary: false, + arbiterOnly: true, + ok: 1 + }, + wantedFields: + ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"], + unwantedFields: ["slaveDelay", "hidden", "tags", "buildIndexes", "passive"] +}); // reconfigure and make sure the changes show up in ismaster on all members config = master.getDB("local").system.replset.findOne(); -config.version = config.version+1; -config.members[0].tags = {disk: "ssd"}; -config.members[1].tags = {disk: "ssd"}; +config.version = config.version + 1; +config.members[0].tags = { + disk: "ssd" +}; +config.members[1].tags = { + disk: "ssd" +}; config.members[1].hidden = true; config.members[2].slaveDelay = 300000; -config.members[2].tags = {disk: "hdd"}; +config.members[2].tags = { + disk: "hdd" +}; try { - result = master.getDB("admin").runCommand({replSetReconfig : config}); -} -catch(e) { + result = master.getDB("admin").runCommand({replSetReconfig: config}); +} catch (e) { print(e); } master = replTest.getPrimary(); -assert.soon( function() { return agreeOnPrimaryAndSetVersion( 2 ); }, - "Nodes did not sync in less than a minute", 60000 ); +assert.soon(function() { + return agreeOnPrimaryAndSetVersion(2); +}, "Nodes did not sync in less than a minute", 60000); // check nodes for their new settings -checkMember({ conn: master, - name: "master2", - goodValues: { - setName: "ismaster", - setVersion: 2, - ismaster: true, - secondary: false, - tags: {"disk": "ssd"}, - ok: 1 - }, - wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", - "localTime"], - unwantedFields: ["arbiterOnly", "passives", "passive", "slaveDelay", "hidden", "buildIndexes"] - }); +checkMember({ + conn: master, + name: "master2", + goodValues: { + setName: "ismaster", + setVersion: 2, + ismaster: true, + secondary: false, + tags: {"disk": "ssd"}, + ok: 1 + }, + wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"], + unwantedFields: + ["arbiterOnly", "passives", "passive", "slaveDelay", "hidden", "buildIndexes"] +}); -checkMember({ conn: replTest.liveNodes.slaves[0], - name: "first_slave", - goodValues: { - setName: "ismaster", - setVersion: 2, - ismaster: false, - secondary: true, - tags: {"disk": "ssd"}, - passive: true, - 
hidden: true, - ok: 1 - }, - wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", - "localTime"], - unwantedFields: ["arbiterOnly", "passives", "slaveDelayed", "buildIndexes"] - }); +checkMember({ + conn: replTest.liveNodes.slaves[0], + name: "first_slave", + goodValues: { + setName: "ismaster", + setVersion: 2, + ismaster: false, + secondary: true, + tags: {"disk": "ssd"}, + passive: true, + hidden: true, + ok: 1 + }, + wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"], + unwantedFields: ["arbiterOnly", "passives", "slaveDelayed", "buildIndexes"] +}); -checkMember({ conn: replTest.liveNodes.slaves[1], - name: "very_delayed_slave", - goodValues: { - setName: "ismaster", - setVersion: 2, - ismaster: false, - secondary: true, - tags: {"disk": "hdd"}, - passive: true, - slaveDelay: 300000, - buildIndexes: false, - hidden: true, - ok: 1 - }, - wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", - "localTime"], - unwantedFields: ["arbiterOnly", "passives"] - }); +checkMember({ + conn: replTest.liveNodes.slaves[1], + name: "very_delayed_slave", + goodValues: { + setName: "ismaster", + setVersion: 2, + ismaster: false, + secondary: true, + tags: {"disk": "hdd"}, + passive: true, + slaveDelay: 300000, + buildIndexes: false, + hidden: true, + ok: 1 + }, + wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"], + unwantedFields: ["arbiterOnly", "passives"] +}); -checkMember({ conn: replTest.liveNodes.slaves[2], - name: "arbiter", - goodValues: { - setName: "ismaster", - setVersion: 2, - ismaster: false, - secondary: false, - arbiterOnly: true, - ok: 1 - }, - wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", - "localTime"], - unwantedFields: ["slaveDelay", "hidden", "tags", "buildIndexes", "passive"] - }); +checkMember({ + conn: replTest.liveNodes.slaves[2], + name: "arbiter", + goodValues: { + setName: "ismaster", + setVersion: 2, + ismaster: false, + secondary: false, + arbiterOnly: true, + ok: 1 + }, + wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"], + unwantedFields: ["slaveDelay", "hidden", "tags", "buildIndexes", "passive"] +}); // force reconfig and ensure all have the same setVersion afterwards config = master.getDB("local").system.replset.findOne(); -master.getDB("admin").runCommand({replSetReconfig : config, force: true}); +master.getDB("admin").runCommand({replSetReconfig: config, force: true}); -assert.soon( function() { return agreeOnPrimaryAndSetVersion(); }, - "Nodes did not sync in less than a minute after forced reconfig", 60000 ); +assert.soon(function() { + return agreeOnPrimaryAndSetVersion(); +}, "Nodes did not sync in less than a minute after forced reconfig", 60000); replTest.stopSet(); diff --git a/jstests/replsets/last_op_visible.js b/jstests/replsets/last_op_visible.js index 486230edf73..32df53c93d5 100644 --- a/jstests/replsets/last_op_visible.js +++ b/jstests/replsets/last_op_visible.js @@ -5,64 +5,61 @@ // majority read. 
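// [Editorial aside, not part of the patch] The metadata round trip this test is built
// on, in isolation: request $replData and read lastOpVisible back. The database and
// collection names here are hypothetical.
var reply = db.getSisterDB("test").runCommandWithMetadata(
    "find", {find: "coll", readConcern: {level: "local"}}, {"$replData": 1});
assert.commandWorked(reply.commandReply);
printjson(reply.metadata["$replData"].lastOpVisible);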
(function() { -"use strict"; + "use strict"; -var name = 'lastOpVisible'; -var replTest = new ReplSetTest({name: name, - nodes: 3, - nodeOptions: {enableMajorityReadConcern: ''}}); + var name = 'lastOpVisible'; + var replTest = + new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}}); -try { - replTest.startSet(); -} catch (e) { - var conn = MongoRunner.runMongod(); - if (!conn.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) { - print("Skipping read_majority.js since storageEngine doesn't support it."); - MongoRunner.stopMongod(conn); - return; + try { + replTest.startSet(); + } catch (e) { + var conn = MongoRunner.runMongod(); + if (!conn.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) { + print("Skipping read_majority.js since storageEngine doesn't support it."); + MongoRunner.stopMongod(conn); + return; + } + throw e; } - throw e; -} -replTest.initiate(); + replTest.initiate(); -var primary = replTest.getPrimary(); + var primary = replTest.getPrimary(); -// Do an insert without writeConcern. -var res = primary.getDB(name).runCommandWithMetadata("insert", - {insert: name, documents: [{x:1}]}, - {"$replData": 1}); -assert.commandWorked(res.commandReply); -var last_op_visible = res.metadata["$replData"].lastOpVisible; + // Do an insert without writeConcern. + var res = primary.getDB(name).runCommandWithMetadata( + "insert", {insert: name, documents: [{x: 1}]}, {"$replData": 1}); + assert.commandWorked(res.commandReply); + var last_op_visible = res.metadata["$replData"].lastOpVisible; -// A find should return the same lastVisibleOp. -res = primary.getDB(name).runCommandWithMetadata("find", - {find: name, readConcern: {level: "local"}}, - {"$replData": 1}); -assert.commandWorked(res.commandReply); -assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible); + // A find should return the same lastVisibleOp. + res = primary.getDB(name).runCommandWithMetadata( + "find", {find: name, readConcern: {level: "local"}}, {"$replData": 1}); + assert.commandWorked(res.commandReply); + assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible); -// A majority readConcern with afterOpTime: lastOpVisible should also return the same lastVisibleOp. -res = primary.getDB(name).runCommandWithMetadata( + // A majority readConcern with afterOpTime: lastOpVisible should also return the same + // lastVisibleOp. + res = primary.getDB(name).runCommandWithMetadata( "find", {find: name, readConcern: {level: "majority", afterOpTime: last_op_visible}}, {"$replData": 1}); -assert.commandWorked(res.commandReply); -assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible); + assert.commandWorked(res.commandReply); + assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible); -// Do an insert without writeConcern. -res = primary.getDB(name).runCommandWithMetadata( + // Do an insert with majority writeConcern. + res = primary.getDB(name).runCommandWithMetadata( "insert", - {insert: name, documents: [{x:1}], writeConcern: {w: "majority"}}, + {insert: name, documents: [{x: 1}], writeConcern: {w: "majority"}}, {"$replData": 1}); -assert.commandWorked(res.commandReply); -last_op_visible = res.metadata["$replData"].lastOpVisible; + assert.commandWorked(res.commandReply); + last_op_visible = res.metadata["$replData"].lastOpVisible; -// A majority readConcern should return the same lastVisibleOp. 
-res = primary.getDB(name).runCommandWithMetadata("find", - {find: name, readConcern: {level: "majority"}}, - {"$replData": 1}); -assert.commandWorked(res.commandReply); -assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible); + // A majority readConcern should return the same lastVisibleOp. + res = primary.getDB(name).runCommandWithMetadata( + "find", {find: name, readConcern: {level: "majority"}}, {"$replData": 1}); + assert.commandWorked(res.commandReply); + assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible); }()); diff --git a/jstests/replsets/lastop.js b/jstests/replsets/lastop.js index f3eca2ccb3d..e1bf6c6fbd4 100644 --- a/jstests/replsets/lastop.js +++ b/jstests/replsets/lastop.js @@ -1,8 +1,8 @@ // Test that lastOp is updated properly in the face of no-op writes and for writes that generate // errors based on the preexisting data (e.g. duplicate key errors, but not parse errors). // lastOp is used as the optime to wait for when write concern waits for replication. -(function () { - var replTest = new ReplSetTest({ name: 'testSet', nodes: 1 }); +(function() { + var replTest = new ReplSetTest({name: 'testSet', nodes: 1}); replTest.startSet(); replTest.initiate(); @@ -14,79 +14,77 @@ // Do a write with m1, then a write with m2, then a no-op write with m1. m1 should have a lastOp // of m2's write. - - assert.writeOK(m1.getCollection("test.foo").insert({ m1 : 1 })); + + assert.writeOK(m1.getCollection("test.foo").insert({m1: 1})); var firstOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; - - assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 99 })); + + assert.writeOK(m2.getCollection("test.foo").insert({m2: 99})); var secondOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // No-op update - assert.writeOK(m1.getCollection("test.foo").update({ m1 : 1 }, { $set: { m1 : 1 }})); + assert.writeOK(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}})); var noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, secondOp); - assert.writeOK(m1.getCollection("test.foo").remove({ m1 : 1 })); + assert.writeOK(m1.getCollection("test.foo").remove({m1: 1})); var thirdOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; - assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 98 })); + assert.writeOK(m2.getCollection("test.foo").insert({m2: 98})); var fourthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // No-op delete - assert.writeOK(m1.getCollection("test.foo").remove({ m1 : 1 })); + assert.writeOK(m1.getCollection("test.foo").remove({m1: 1})); noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, fourthOp); - // Dummy write, for a new lastOp. 
- assert.writeOK(m1.getCollection("test.foo").insert({ m1 : 99 })); + assert.writeOK(m1.getCollection("test.foo").insert({m1: 99})); var fifthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; - assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 97 })); + assert.writeOK(m2.getCollection("test.foo").insert({m2: 97})); var sixthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // No-op find-and-modify delete - m1.getCollection("test.foo").findAndModify( { query: { m1 : 1 } , remove: 'true'} ); + m1.getCollection("test.foo").findAndModify({query: {m1: 1}, remove: 'true'}); noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, sixthOp); - assert.commandWorked(m1.getCollection("test.foo").createIndex({x:1})); + assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1})); var seventhOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; - assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 96 })); + assert.writeOK(m2.getCollection("test.foo").insert({m2: 96})); var eighthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // No-op create index. - assert.commandWorked(m1.getCollection("test.foo").createIndex({x:1})); + assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1})); noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, eighthOp); - assert.writeOK(m1.getCollection("test.foo").insert({ _id : 1, x : 1 })); + assert.writeOK(m1.getCollection("test.foo").insert({_id: 1, x: 1})); var ninthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; - assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 991 })); + assert.writeOK(m2.getCollection("test.foo").insert({m2: 991})); var tenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // update with immutable field error - assert.writeError(m1.getCollection("test.foo").update({ _id : 1, x : 1 }, - { $set: { _id : 2 }})); + assert.writeError(m1.getCollection("test.foo").update({_id: 1, x: 1}, {$set: {_id: 2}})); // "After applying the update to the document {_id: 1.0 , ...}, the (immutable) field '_id' // was found to have been altered to _id: 2.0" noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, tenthOp); - assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 992 })); + assert.writeOK(m2.getCollection("test.foo").insert({m2: 992})); var eleventhOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // find-and-modify immutable field error try { - m1.getCollection("test.foo").findAndModify( { query: { _id : 1, x : 1 }, - update: { $set: { _id : 2 } } } ); + m1.getCollection("test.foo") + .findAndModify({query: {_id: 1, x: 1}, update: {$set: {_id: 2}}}); // The findAndModify shell helper should throw. assert(false); } catch (e) { @@ -97,24 +95,24 @@ assert.eq(noOp, eleventhOp); var bigString = new Array(3000).toString(); - assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 994, m3: bigString})); + assert.writeOK(m2.getCollection("test.foo").insert({m2: 994, m3: bigString})); // createIndex with a >1024 byte field fails. 
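// [Editorial aside, not part of the patch] That failure reproduced on its own, with a
// hypothetical scratch collection: an indexed value longer than the index key size
// limit makes the foreground build fail, so lastOp is left unchanged.
var big = new Array(3000).toString();  // a few kilobytes of commas
var scratch = db.getSisterDB("scratch").keytest;
scratch.drop();
assert.writeOK(scratch.insert({m3: big}));
assert.commandFailed(scratch.createIndex({m3: 1}));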
var twelfthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; - assert.commandFailed(m1.getCollection("test.foo").createIndex({m3:1})); + assert.commandFailed(m1.getCollection("test.foo").createIndex({m3: 1})); noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, twelfthOp); // Insert a document for the duplicate-key (no-op) insert test below. - assert.writeOK(m1.getCollection("test.foo").insert({ _id : 5, x : 5 })); + assert.writeOK(m1.getCollection("test.foo").insert({_id: 5, x: 5})); var thirteenthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; - assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 991 })); + assert.writeOK(m2.getCollection("test.foo").insert({m2: 991})); var fourteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; // Hits DuplicateKey error and fails insert -- no-op - assert.writeError(m1.getCollection("test.foo").insert({ _id : 5, x : 5 })); + assert.writeError(m1.getCollection("test.foo").insert({_id: 5, x: 5})); noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, fourteenthOp); @@ -122,14 +120,14 @@ // Test update and delete failures in legacy write mode. m2.forceWriteMode('legacy'); m1.forceWriteMode('legacy'); - m2.getCollection("test.foo").insert({ m2 : 995 }); + m2.getCollection("test.foo").insert({m2: 995}); var fifteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp; - m1.getCollection("test.foo").remove({ m1 : 1 }); + m1.getCollection("test.foo").remove({m1: 1}); noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, fifteenthOp); - m1.getCollection("test.foo").update({ m1 : 1 }, {$set: {m1: 4}}); + m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 4}}); noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp; assert.eq(noOp, fifteenthOp); diff --git a/jstests/replsets/localhostAuthBypass.js b/jstests/replsets/localhostAuthBypass.js index 68300c7aeb2..2b2332d258a 100644 --- a/jstests/replsets/localhostAuthBypass.js +++ b/jstests/replsets/localhostAuthBypass.js @@ -1,7 +1,7 @@ -//SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster +// SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster // -//This test is to ensure that localhost authentication works correctly against a replica set -//whether they are hosted with "localhost" or a hostname. +// This test is to ensure that localhost authentication works correctly against a replica set +// whether they are hosted with "localhost" or a hostname. 
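// [Editorial aside, not part of the patch] The localhost exception in its smallest
// form: with keyfile auth enabled and no users created yet, only a connection from
// localhost may create the first admin user. 'port' is hypothetical.
var localConn = new Mongo("localhost:" + port);
localConn.getDB("admin").createUser(
    {user: "admin", pwd: "password", roles: jsTest.adminUserRoles});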
var replSetName = "replsets_server-6591"; var keyfile = "jstests/libs/key1"; @@ -19,55 +19,69 @@ var assertCannotRunCommands = function(mongo, isPrimary) { print("============ ensuring that commands cannot be run."); var test = mongo.getDB("test"); - assert.throws( function() { test.system.users.findOne(); }); - assert.throws( function() { test.foo.findOne({ _id: 0 }); }); + assert.throws(function() { + test.system.users.findOne(); + }); + assert.throws(function() { + test.foo.findOne({_id: 0}); + }); if (isPrimary) { - assert.writeError(test.foo.save({ _id: 0 })); - assert.writeError(test.foo.update({ _id: 0 }, { $set: { x: 20 }})); - assert.writeError(test.foo.remove({ _id: 0 })); + assert.writeError(test.foo.save({_id: 0})); + assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}})); + assert.writeError(test.foo.remove({_id: 0})); } - assert.throws(function() { + assert.throws(function() { test.foo.mapReduce( - function() { emit(1, 1); }, - function(id, count) { return Array.sum(count); }, - { out: "other" }); + function() { + emit(1, 1); + }, + function(id, count) { + return Array.sum(count); + }, + {out: "other"}); }); // DB operations var authorizeErrorCode = 13; - assert.commandFailedWithCode(mongo.getDB("test").copyDatabase("admin", "admin2"), - authorizeErrorCode, "copyDatabase"); + assert.commandFailedWithCode( + mongo.getDB("test").copyDatabase("admin", "admin2"), authorizeErrorCode, "copyDatabase"); // Create collection - assert.commandFailedWithCode(mongo.getDB("test").createCollection( - "log", { capped: true, size: 5242880, max: 5000 } ), - authorizeErrorCode, "createCollection"); + assert.commandFailedWithCode( + mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}), + authorizeErrorCode, + "createCollection"); // Set/Get system parameters - var params = [{ param: "journalCommitInterval", val: 200 }, - { param: "logLevel", val: 2 }, - { param: "logUserIds", val: 1 }, - { param: "notablescan", val: 1 }, - { param: "quiet", val: 1 }, - { param: "replApplyBatchSize", val: 10 }, - { param: "replIndexPrefetch", val: "none" }, - { param: "syncdelay", val: 30 }, - { param: "traceExceptions", val: true }, - { param: "sslMode", val: "preferSSL" }, - { param: "clusterAuthMode", val: "sendX509" }, - { param: "userCacheInvalidationIntervalSecs", val: 300 } - ]; + var params = [ + {param: "journalCommitInterval", val: 200}, + {param: "logLevel", val: 2}, + {param: "logUserIds", val: 1}, + {param: "notablescan", val: 1}, + {param: "quiet", val: 1}, + {param: "replApplyBatchSize", val: 10}, + {param: "replIndexPrefetch", val: "none"}, + {param: "syncdelay", val: 30}, + {param: "traceExceptions", val: true}, + {param: "sslMode", val: "preferSSL"}, + {param: "clusterAuthMode", val: "sendX509"}, + {param: "userCacheInvalidationIntervalSecs", val: 300} + ]; params.forEach(function(p) { - var cmd = { setParameter: 1 }; + var cmd = { + setParameter: 1 + }; cmd[p.param] = p.val; - assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd), - authorizeErrorCode, "setParameter: "+p.param); + assert.commandFailedWithCode( + mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param); }); params.forEach(function(p) { - var cmd = { getParameter: 1 }; + var cmd = { + getParameter: 1 + }; cmd[p.param] = 1; - assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd), - authorizeErrorCode, "getParameter: "+p.param); + assert.commandFailedWithCode( + mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + 
p.param); }); }; @@ -78,16 +92,18 @@ var assertCanRunCommands = function(mongo) { // will throw on failure test.system.users.findOne(); - assert.writeOK(test.foo.save({_id: 0 })); - assert.writeOK(test.foo.update({ _id: 0 }, { $set: { x: 20 }})); - assert.writeOK(test.foo.remove({ _id: 0 })); - + assert.writeOK(test.foo.save({_id: 0})); + assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}})); + assert.writeOK(test.foo.remove({_id: 0})); test.foo.mapReduce( - function() { emit(1, 1); }, - function(id, count) { return Array.sum(count); }, - { out: "other" } - ); + function() { + emit(1, 1); + }, + function(id, count) { + return Array.sum(count); + }, + {out: "other"}); assert.commandWorked(mongo.getDB("admin").runCommand({replSetGetStatus: 1})); }; @@ -98,11 +114,8 @@ var authenticate = function(mongo) { }; var start = function(useHostName) { - var rs = new ReplSetTest({name: replSetName, - nodes : 3, - keyFile : keyfile, - auth: "", - useHostName: useHostName}); + var rs = new ReplSetTest( + {name: replSetName, nodes: 3, keyFile: keyfile, auth: "", useHostName: useHostName}); rs.startSet(); rs.initiate(); @@ -111,9 +124,9 @@ var start = function(useHostName) { var shutdown = function(rs) { print("============ shutting down."); - rs.stopSet(/*signal*/false, - /*forRestart*/false, - { auth: { user: username, pwd: password}}); + rs.stopSet(/*signal*/ false, + /*forRestart*/ false, + {auth: {user: username, pwd: password}}); }; var runTest = function(useHostName) { @@ -181,7 +194,7 @@ var runTest = function(useHostName) { var runNonlocalTest = function(ipAddr) { print("=========================="); - print("starting mongod: non-local host access "+ipAddr); + print("starting mongod: non-local host access " + ipAddr); print("=========================="); var rs = start(false); @@ -190,7 +203,7 @@ var runNonlocalTest = function(ipAddr) { var secHosts = []; rs.getSecondaries().forEach(function(sec) { - secHosts.push(ipAddr + ":" + rs.getPort(sec)); + secHosts.push(ipAddr + ":" + rs.getPort(sec)); }); var mongo = new Mongo(host); @@ -207,8 +220,10 @@ var runNonlocalTest = function(ipAddr) { assertCannotRunCommands(m, false); }); - assert.throws(function() { mongo.getDB("admin").createUser - ({ user:username, pwd: password, roles: jsTest.adminUserRoles }); }); + assert.throws(function() { + mongo.getDB("admin") + .createUser({user: username, pwd: password, roles: jsTest.adminUserRoles}); + }); shutdown(rs); }; diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js index 28032383e76..7e49e07e396 100644 --- a/jstests/replsets/maintenance.js +++ b/jstests/replsets/maintenance.js @@ -1,7 +1,7 @@ -var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 2} ); -var conns = replTest.startSet({ verbose: 1 }); +var replTest = new ReplSetTest({name: 'unicomplex', nodes: 2}); +var conns = replTest.startSet({verbose: 1}); var config = replTest.getReplSetConfig(); config.members[0].priority = 2; replTest.initiate(config); @@ -11,17 +11,20 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60000); var master = replTest.getPrimary(); for (i = 0; i < 20; i++) { - master.getDB("bar").foo.insert({x:1,y:i,abc:123,str:"foo bar baz"}); + master.getDB("bar").foo.insert({x: 1, y: i, abc: 123, str: "foo bar baz"}); } for (i = 0; i < 20; i++) { - master.getDB("bar").foo.update({ y: i }, { $push: { foo: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}}); + master.getDB("bar").foo.update({y: i}, {$push: {foo: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}}); } replTest.awaitReplication(); 
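// [Editorial aside, not part of the patch] The maintenance-mode round trip the rest of
// this file exercises, assuming 'secondary' is a connection to a SECONDARY member
// (hypothetical): entering maintenance parks it in RECOVERING, leaving restores
// SECONDARY.
assert.commandWorked(secondary.adminCommand({replSetMaintenance: 1}));
assert.commandWorked(secondary.adminCommand({replSetMaintenance: 0}));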
-assert.soon(function() { return conns[1].getDB("admin").isMaster().secondary; }); +assert.soon(function() { + return conns[1].getDB("admin").isMaster().secondary; +}); -join = startParallelShell( "db.getSisterDB('bar').runCommand({compact : 'foo'});", replTest.ports[1] ); +join = + startParallelShell("db.getSisterDB('bar').runCommand({compact : 'foo'});", replTest.ports[1]); print("joining"); join(); @@ -31,7 +34,8 @@ var secondarySoon = function() { var x = 0; assert.soon(function() { var im = conns[1].getDB("admin").isMaster(); - if (x++ % 5 == 0) printjson(im); + if (x++ % 5 == 0) + printjson(im); return im.secondary; }); }; @@ -41,7 +45,7 @@ secondarySoon(); print("make sure compact works on a secondary (SERVER-3923)"); master.getDB("foo").bar.drop(); replTest.awaitReplication(); -var result = conns[1].getDB("foo").runCommand({compact : "bar"}); +var result = conns[1].getDB("foo").runCommand({compact: "bar"}); assert.eq(result.ok, 0, tojson(result)); secondarySoon(); @@ -49,7 +53,7 @@ secondarySoon(); print("use replSetMaintenance command to go in/out of maintenance mode"); print("primary cannot go into maintenance mode"); -result = master.getDB("admin").runCommand({replSetMaintenance : 1}); +result = master.getDB("admin").runCommand({replSetMaintenance: 1}); assert.eq(result.ok, 0, tojson(result)); print("check getMore works on a secondary, not on a recovering node"); @@ -59,32 +63,29 @@ for (var i = 0; i < 5; i++) { } print("secondary can"); -result = conns[1].getDB("admin").runCommand({replSetMaintenance : 1}); +result = conns[1].getDB("admin").runCommand({replSetMaintenance: 1}); assert.eq(result.ok, 1, tojson(result)); print("make sure secondary goes into recovering"); var x = 0; assert.soon(function() { var im = conns[1].getDB("admin").isMaster(); - if (x++ % 5 == 0) printjson(im); + if (x++ % 5 == 0) + printjson(im); return !im.secondary && !im.ismaster; }); print("now getmore shouldn't work"); -var ex = assert.throws( - function(){ - lastDoc = null; - while (cursor.hasNext()) { - lastDoc = cursor.next(); - } - }, - [] /*no params*/, - "getmore didn't fail"); - -assert(ex.message.match("13436"), "wrong error code -- " + ex ); - -result = conns[1].getDB("admin").runCommand({replSetMaintenance : 0}); +var ex = assert.throws(function() { + lastDoc = null; + while (cursor.hasNext()) { + lastDoc = cursor.next(); + } +}, [] /*no params*/, "getmore didn't fail"); + +assert(ex.message.match("13436"), "wrong error code -- " + ex); + +result = conns[1].getDB("admin").runCommand({replSetMaintenance: 0}); assert.eq(result.ok, 1, tojson(result)); secondarySoon(); - diff --git a/jstests/replsets/maintenance2.js b/jstests/replsets/maintenance2.js index f1bce2159d5..c5e6d9c07e6 100644 --- a/jstests/replsets/maintenance2.js +++ b/jstests/replsets/maintenance2.js @@ -5,7 +5,7 @@ // Replica set testing API // Create a new replica set test. Specify set name and the number of nodes you want.
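As that comment suggests, the usual bring-up sequence has this shape (a minimal sketch; the set name and node count here are illustrative, not taken from the test):

    var rt = new ReplSetTest({name: "example", nodes: 3});  // illustrative values
    rt.startSet();    // launches one mongod per configured node
    rt.initiate();    // runs replSetInitiate with the generated config
    var primary = rt.getPrimary();  // blocks until an election completes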
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} ); + var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); // call startSet() to start each mongod in the replica set // this returns a list of nodes @@ -34,15 +34,22 @@ slaves.forEach(function(slave) { // put slave into maintenance (recovery) mode - slave.getDB("foo").adminCommand({replSetMaintenance:1}); + slave.getDB("foo").adminCommand({replSetMaintenance: 1}); - var stats = slave.getDB("foo").adminCommand({replSetGetStatus:1}); + var stats = slave.getDB("foo").adminCommand({replSetGetStatus: 1}); assert.eq(stats.myState, 3, "Slave should be in recovering state."); print("group should fail in recovering state..."); slave.slaveOk = true; - assert.commandFailed(slave.getDB("foo").foo.runCommand( - {group: {ns: "foo", initial: {n:0}, $reduce: function(obj,out){out.n++;}}})); + assert.commandFailed(slave.getDB("foo").foo.runCommand({ + group: { + ns: "foo", + initial: {n: 0}, + $reduce: function(obj, out) { + out.n++; + } + } + })); print("count should fail in recovering state..."); slave.slaveOk = true; diff --git a/jstests/replsets/maintenance_non-blocking.js b/jstests/replsets/maintenance_non-blocking.js index 5815893e5df..4606bcc1985 100644 --- a/jstests/replsets/maintenance_non-blocking.js +++ b/jstests/replsets/maintenance_non-blocking.js @@ -1,7 +1,7 @@ // This test ensures that the replSetMaintenance command will not block, nor block-on, a db write doTest = function() { "use strict"; - var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} ); + var replTest = new ReplSetTest({name: 'testSet', nodes: 2}); var nodes = replTest.startSet(); replTest.initiate(); @@ -29,7 +29,7 @@ doTest = function() { var ismaster = assert.commandWorked(sColl.runCommand("ismaster")); assert.eq(false, ismaster.ismaster); assert.eq(false, ismaster.secondary); - + print("******* writing to primary ************* "); assert.writeOK(mColl.save({_id: -1})); printjson(sDB.currentOp()); diff --git a/jstests/replsets/maxSyncSourceLagSecs.js b/jstests/replsets/maxSyncSourceLagSecs.js index 8d44dd5ddb2..0e7fe04355b 100644 --- a/jstests/replsets/maxSyncSourceLagSecs.js +++ b/jstests/replsets/maxSyncSourceLagSecs.js @@ -5,18 +5,22 @@ (function() { "use strict"; var name = "maxSyncSourceLagSecs"; - var replTest = new ReplSetTest({name: name, - nodes: 3, - oplogSize: 5, - nodeOptions: {setParameter: "maxSyncSourceLagSecs=3"}}); + var replTest = new ReplSetTest({ + name: name, + nodes: 3, + oplogSize: 5, + nodeOptions: {setParameter: "maxSyncSourceLagSecs=3"} + }); var nodes = replTest.nodeList(); replTest.startSet(); - replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1], priority: 0 }, - { "_id": 2, "host": nodes[2], priority: 0 }], - }); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1], priority: 0}, + {"_id": 2, "host": nodes[2], priority: 0} + ], + }); var master = replTest.getPrimary(); master.getDB("foo").bar.save({a: 1}); @@ -30,24 +34,24 @@ jsTestLog("Setting sync target of slave 2 to slave 1"); assert.commandWorked(slaves[1].getDB("admin").runCommand({replSetSyncFrom: slaves[0].name})); assert.soon(function() { - var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1}); - return res.syncingTo === slaves[0].name; - }, "sync target not changed to other slave"); + var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1}); + return res.syncingTo === slaves[0].name; + 
}, "sync target not changed to other slave"); printjson(replTest.status()); jsTestLog("Lock slave 1 and add some docs. Force sync target for slave 2 to change to primary"); - assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync:1, lock: 1})); + assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1})); master.getDB("foo").bar.save({a: 2}); assert.soon(function() { - var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1}); - return res.syncingTo === master.name; - }, "sync target not changed back to primary"); + var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1}); + return res.syncingTo === master.name; + }, "sync target not changed back to primary"); printjson(replTest.status()); assert.soon(function() { - return (slaves[1].getDB("foo").bar.count() === 2); - }, "slave should have caught up after syncing to primary."); + return (slaves[1].getDB("foo").bar.count() === 2); + }, "slave should have caught up after syncing to primary."); assert.commandWorked(slaves[0].getDB("admin").fsyncUnlock()); replTest.stopSet(); diff --git a/jstests/replsets/no_chaining.js b/jstests/replsets/no_chaining.js index 88bbe7a78d0..ad086c72f9a 100644 --- a/jstests/replsets/no_chaining.js +++ b/jstests/replsets/no_chaining.js @@ -1,47 +1,39 @@ -function myprint( x ) { - print( "chaining output: " + x ); +function myprint(x) { + print("chaining output: " + x); } var replTest = new ReplSetTest({name: 'testSet', nodes: 3, useBridge: true}); var nodes = replTest.startSet(); var hostnames = replTest.nodeList(); -replTest.initiate( - { - "_id" : "testSet", - "members" : [ - {"_id" : 0, "host" : hostnames[0], priority: 2}, - {"_id" : 1, "host" : hostnames[1], priority: 0}, - {"_id" : 2, "host" : hostnames[2], priority: 0} - ], - "settings" : { - "chainingAllowed" : false - } - } -); +replTest.initiate({ + "_id": "testSet", + "members": [ + {"_id": 0, "host": hostnames[0], priority: 2}, + {"_id": 1, "host": hostnames[1], priority: 0}, + {"_id": 2, "host": hostnames[2], priority: 0} + ], + "settings": {"chainingAllowed": false} +}); var master = replTest.getPrimary(); replTest.awaitReplication(); - var breakNetwork = function() { nodes[0].disconnect(nodes[2]); master = replTest.getPrimary(); }; var checkNoChaining = function() { - master.getDB("test").foo.insert({x:1}); + master.getDB("test").foo.insert({x: 1}); - assert.soon( - function() { - return nodes[1].getDB("test").foo.findOne() != null; - } - ); + assert.soon(function() { + return nodes[1].getDB("test").foo.findOne() != null; + }); - var endTime = (new Date()).getTime()+10000; + var endTime = (new Date()).getTime() + 10000; while ((new Date()).getTime() < endTime) { - assert(nodes[2].getDB("test").foo.findOne() == null, - 'Check that 2 does not catch up'); + assert(nodes[2].getDB("test").foo.findOne() == null, 'Check that 2 does not catch up'); } }; @@ -53,13 +45,10 @@ var forceSync = function() { config = nodes[2].getDB("local").system.replset.findOne(); } var targetHost = config.members[1].host; - printjson(nodes[2].getDB("admin").runCommand({replSetSyncFrom : targetHost})); - assert.soon( - function() { - return nodes[2].getDB("test").foo.findOne() != null; - }, - 'Check for data after force sync' - ); + printjson(nodes[2].getDB("admin").runCommand({replSetSyncFrom: targetHost})); + assert.soon(function() { + return nodes[2].getDB("test").foo.findOne() != null; + }, 'Check for data after force sync'); }; // SERVER-12922 diff --git a/jstests/replsets/oplog_format.js 
b/jstests/replsets/oplog_format.js index e27374eaf6e..5dc60e33434 100644 --- a/jstests/replsets/oplog_format.js +++ b/jstests/replsets/oplog_format.js @@ -1,117 +1,117 @@ /** * These tests verify that the oplog entries are created correctly for updates - * + * * Do not add more tests here but instead add C++ unit tests in db/ops/modifier*_test files * - */ + */ "use strict"; -var replTest = new ReplSetTest( { nodes: 1, oplogSize:2, nodeOptions: {smallfiles:""}} ); +var replTest = new ReplSetTest({nodes: 1, oplogSize: 2, nodeOptions: {smallfiles: ""}}); var nodes = replTest.startSet(); replTest.initiate(); var master = replTest.getPrimary(); var coll = master.getDB("o").fake; var cdb = coll.getDB(); -var assertLastOplog = function( o, o2 , msg) { - var last = master.getDB("local").oplog.rs.find().limit(1).sort({$natural:-1}).next(); +var assertLastOplog = function(o, o2, msg) { + var last = master.getDB("local").oplog.rs.find().limit(1).sort({$natural: -1}).next(); assert.eq(last.ns, coll.getFullName(), "ns bad : " + msg); assert.docEq(last.o, o, "o bad : " + msg); - if(o2) + if (o2) assert.docEq(last.o2, o2, "o2 bad : " + msg); return last.ts; }; // set things up. -coll.save({_id:1}); -assertLastOplog({_id:1}, null, "save -- setup "); +coll.save({_id: 1}); +assertLastOplog({_id: 1}, null, "save -- setup "); /** * The first ones are from the old updatetests which tested the internal impl using a modSet */ var msg = "IncRewriteExistingField: $inc $set"; -coll.save({_id:1, a:2}); -assertLastOplog({_id:1, a:2}, {_id:1}, "save " + msg); -var res = assert.writeOK(coll.update({}, { $inc: { a: 1 }, $set: { b: 2 }})); +coll.save({_id: 1, a: 2}); +assertLastOplog({_id: 1, a: 2}, {_id: 1}, "save " + msg); +var res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a:3, b:2}, coll.findOne({}), msg); -assertLastOplog({$set:{a:3, b:2}}, {_id:1}, msg); +assert.docEq({_id: 1, a: 3, b: 2}, coll.findOne({}), msg); +assertLastOplog({$set: {a: 3, b: 2}}, {_id: 1}, msg); var msg = "IncRewriteNonExistingField: $inc $set"; -coll.save({_id:1, c:0}); -assertLastOplog({_id:1, c:0}, {_id:1}, "save " + msg); -res = assert.writeOK(coll.update({}, { $inc: { a: 1 }, $set: { b: 2 }})); +coll.save({_id: 1, c: 0}); +assertLastOplog({_id: 1, c: 0}, {_id: 1}, "save " + msg); +res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, c:0, a:1, b:2}, coll.findOne({}), msg); -assertLastOplog({$set:{a:1, b:2}}, {_id:1}, msg); +assert.docEq({_id: 1, c: 0, a: 1, b: 2}, coll.findOne({}), msg); +assertLastOplog({$set: {a: 1, b: 2}}, {_id: 1}, msg); var msg = "TwoNestedPulls: two $pull"; -coll.save({_id:1, a:{ b:[ 1, 2 ], c:[ 1, 2 ] }}); -assertLastOplog({_id:1, a:{ b:[ 1, 2 ], c:[ 1, 2 ] }}, {_id:1}, "save " + msg); -res = assert.writeOK(coll.update({}, { $pull: { 'a.b': 2, 'a.c': 2 }})); +coll.save({_id: 1, a: {b: [1, 2], c: [1, 2]}}); +assertLastOplog({_id: 1, a: {b: [1, 2], c: [1, 2]}}, {_id: 1}, "save " + msg); +res = assert.writeOK(coll.update({}, {$pull: {'a.b': 2, 'a.c': 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a:{ b:[ 1 ], c:[ 1 ] }}, coll.findOne({}), msg); -assertLastOplog({$set:{'a.b':[1], 'a.c':[1]}}, {_id:1}, msg); +assert.docEq({_id: 1, a: {b: [1], c: [1]}}, coll.findOne({}), msg); +assertLastOplog({$set: {'a.b': [1], 
'a.c': [1]}}, {_id: 1}, msg); var msg = "MultiSets: two $set"; -coll.save({_id:1, a:1, b:1}); -assertLastOplog({_id:1, a:1, b:1}, {_id:1}, "save " + msg); -res = assert.writeOK(coll.update({}, { $set: { a: 2, b: 2 }})); +coll.save({_id: 1, a: 1, b: 1}); +assertLastOplog({_id: 1, a: 1, b: 1}, {_id: 1}, "save " + msg); +res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a:2, b:2}, coll.findOne({}), msg); -assertLastOplog({$set:{a:2, b:2}}, {_id:1}, msg); +assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg); +assertLastOplog({$set: {a: 2, b: 2}}, {_id: 1}, msg); // More tests to validate the oplog format and correct execution var msg = "bad single $set"; -coll.save({_id:1, a:1}); -assertLastOplog({_id:1, a:1}, {_id:1}, "save " + msg); -res = assert.writeOK(coll.update({}, { $set: { a: 2 }})); +coll.save({_id: 1, a: 1}); +assertLastOplog({_id: 1, a: 1}, {_id: 1}, "save " + msg); +res = assert.writeOK(coll.update({}, {$set: {a: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a:2}, coll.findOne({}), msg); -assertLastOplog({$set:{a:2}}, {_id:1}, msg); +assert.docEq({_id: 1, a: 2}, coll.findOne({}), msg); +assertLastOplog({$set: {a: 2}}, {_id: 1}, msg); var msg = "bad single $inc"; -res = assert.writeOK(coll.update({}, { $inc: { a: 1 }})); +res = assert.writeOK(coll.update({}, {$inc: {a: 1}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a:3}, coll.findOne({}), msg); -assertLastOplog({$set:{a:3}}, {_id:1}, msg); +assert.docEq({_id: 1, a: 3}, coll.findOne({}), msg); +assertLastOplog({$set: {a: 3}}, {_id: 1}, msg); var msg = "bad double $set"; -res = assert.writeOK(coll.update({}, { $set: { a: 2, b: 2 }})); +res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a:2, b:2}, coll.findOne({}), msg); -assertLastOplog({$set:{a:2, b:2}}, {_id:1}, msg); +assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg); +assertLastOplog({$set: {a: 2, b: 2}}, {_id: 1}, msg); var msg = "bad save"; -assert.writeOK(coll.save({ _id: 1, a: [2] })); -assert.docEq({_id:1, a:[2]}, coll.findOne({}), msg); -assertLastOplog({_id:1, a:[2]}, {_id:1}, msg); +assert.writeOK(coll.save({_id: 1, a: [2]})); +assert.docEq({_id: 1, a: [2]}, coll.findOne({}), msg); +assertLastOplog({_id: 1, a: [2]}, {_id: 1}, msg); var msg = "bad array $inc"; -res = assert.writeOK(coll.update({}, { $inc: { "a.0": 1 }})); +res = assert.writeOK(coll.update({}, {$inc: {"a.0": 1}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a:[3]}, coll.findOne({}), msg); -var lastTS = assertLastOplog({$set:{"a.0": 3}}, {_id:1}, msg); +assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg); +var lastTS = assertLastOplog({$set: {"a.0": 3}}, {_id: 1}, msg); var msg = "bad $setOnInsert"; -res = assert.writeOK(coll.update({}, { $setOnInsert: { a: -1 }})); +res = assert.writeOK(coll.update({}, {$setOnInsert: {a: -1}})); assert.eq(res.nMatched, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a:[3]}, coll.findOne({}), msg); // No-op -var otherTS = assertLastOplog({$set:{"a.0": 3}}, {_id:1}, msg); // Nothing new -assert.eq(lastTS, otherTS, "new oplog was not expected -- " + msg); // No new oplog entry +assert.docEq({_id: 1, a: [3]},
coll.findOne({}), msg); // No-op +var otherTS = assertLastOplog({$set: {"a.0": 3}}, {_id: 1}, msg); // Nothing new +assert.eq(lastTS, otherTS, "new oplog was not expected -- " + msg); // No new oplog entry coll.remove({}); assert.eq(coll.count(), 0, "collection not empty"); var msg = "bad $setOnInsert w/upsert"; -res = assert.writeOK(coll.update({}, { $setOnInsert: { a: 200 }}, { upsert: true })); // upsert +res = assert.writeOK(coll.update({}, {$setOnInsert: {a: 200}}, {upsert: true})); // upsert assert.eq(res.nUpserted, 1, "update failed for '" + msg + "': " + res.toString()); var id = res.getUpsertedId()._id; -assert.docEq({_id: id, a: 200 }, coll.findOne({}), msg); // No-op -assertLastOplog({ _id: id, a: 200 }, null, msg); // No new oplog entry +assert.docEq({_id: id, a: 200}, coll.findOne({}), msg); // No-op +assertLastOplog({_id: id, a: 200}, null, msg); // No new oplog entry coll.remove({}); assert.eq(coll.count(), 0, "collection not empty-2"); @@ -130,54 +130,49 @@ assertLastOplog({$set:{"a": [1,2,3]}}, {_id:1}, msg); // new format */ var msg = "bad array $push 2"; -coll.save({_id:1, a:"foo"}); -res = assert.writeOK(coll.update({}, { $push: { c: 18 }})); +coll.save({_id: 1, a: "foo"}); +res = assert.writeOK(coll.update({}, {$push: {c: 18}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a:"foo", c:[18]}, coll.findOne({}), msg); -assertLastOplog({$set:{"c": [18]}}, {_id:1}, msg); +assert.docEq({_id: 1, a: "foo", c: [18]}, coll.findOne({}), msg); +assertLastOplog({$set: {"c": [18]}}, {_id: 1}, msg); var msg = "bad array $push $slice"; -coll.save({_id:1, a:{b:[18]}}); -res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, - { $push: { "a.b": { $each: [1, 2], $slice: -2 }}})); +coll.save({_id: 1, a: {b: [18]}}); +res = assert.writeOK(coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [1, 2], $slice: -2}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a: {b:[1,2]}}, coll.findOne({}), msg); -assertLastOplog({$set:{"a.b": [1,2]}}, {_id:1}, msg); +assert.docEq({_id: 1, a: {b: [1, 2]}}, coll.findOne({}), msg); +assertLastOplog({$set: {"a.b": [1, 2]}}, {_id: 1}, msg); var msg = "bad array $push $sort ($slice -100)"; -coll.save({_id:1, a:{b:[{c:2}, {c:1}]}}); -res = assert.writeOK(coll.update({}, { $push: { "a.b": { $each: [{ c: -1 }], - $sort: { c: 1 }, - $slice: -100 }}})); +coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}}); +res = assert.writeOK( + coll.update({}, {$push: {"a.b": {$each: [{c: -1}], $sort: {c: 1}, $slice: -100}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a: {b:[{c:-1}, {c:1}, {c:2}]}}, coll.findOne({}), msg); -assertLastOplog({$set:{"a.b": [{c:-1},{c:1}, {c:2}]}}, {_id:1}, msg); +assert.docEq({_id: 1, a: {b: [{c: -1}, {c: 1}, {c: 2}]}}, coll.findOne({}), msg); +assertLastOplog({$set: {"a.b": [{c: -1}, {c: 1}, {c: 2}]}}, {_id: 1}, msg); var msg = "bad array $push $slice $sort"; -coll.save({_id:1, a:[{b:2}, {b:1}]}); -res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, { $push: { a: { $each: [{ b: -1 }], - $slice:-2, - $sort: { b: 1 }}}})); +coll.save({_id: 1, a: [{b: 2}, {b: 1}]}); +res = assert.writeOK( + coll.update({_id: {$gt: 0}}, {$push: {a: {$each: [{b: -1}], $slice: -2, $sort: {b: 1}}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a: [{b:1}, {b:2}]}, coll.findOne({}), msg); -assertLastOplog({$set:{a: [{b:1},{b:2}]}}, 
{_id:1}, msg); +assert.docEq({_id: 1, a: [{b: 1}, {b: 2}]}, coll.findOne({}), msg); +assertLastOplog({$set: {a: [{b: 1}, {b: 2}]}}, {_id: 1}, msg); var msg = "bad array $push $slice $sort first two"; -coll.save({_id:1, a:{b:[{c:2}, {c:1}]}}); -res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, { $push: { "a.b": { $each: [{ c: -1 }], - $slice: -2, - $sort: { c: 1 }}}})); +coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}}); +res = assert.writeOK( + coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: 1}}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a: {b:[{c:1}, {c:2}]}}, coll.findOne({}), msg); -assertLastOplog({$set:{"a.b": [{c:1},{c:2}]}}, {_id:1}, msg); +assert.docEq({_id: 1, a: {b: [{c: 1}, {c: 2}]}}, coll.findOne({}), msg); +assertLastOplog({$set: {"a.b": [{c: 1}, {c: 2}]}}, {_id: 1}, msg); var msg = "bad array $push $slice $sort reversed first two"; -coll.save({_id:1, a:{b:[{c:1}, {c:2}]}}); -res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, { $push: { "a.b": { $each: [{ c: -1 }], - $slice: -2, - $sort: { c: -1 }}}})); +coll.save({_id: 1, a: {b: [{c: 1}, {c: 2}]}}); +res = assert.writeOK(coll.update( + {_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: -1}}}})); assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString()); -assert.docEq({_id:1, a: {b:[{c:1}, {c:-1}]}}, coll.findOne({}), msg); -assertLastOplog({$set:{"a.b": [{c:1},{c:-1}]}}, {_id:1}, msg); +assert.docEq({_id: 1, a: {b: [{c: 1}, {c: -1}]}}, coll.findOne({}), msg); +assertLastOplog({$set: {"a.b": [{c: 1}, {c: -1}]}}, {_id: 1}, msg); replTest.stopSet(); diff --git a/jstests/replsets/oplog_note_cmd.js b/jstests/replsets/oplog_note_cmd.js index ecd7e47ca38..0c92609535a 100644 --- a/jstests/replsets/oplog_note_cmd.js +++ b/jstests/replsets/oplog_note_cmd.js @@ -6,7 +6,7 @@ rs.initiate(); var primary = rs.getPrimary(); var db = primary.getDB('admin'); -db.foo.insert({a:1}); +db.foo.insert({a: 1}); // Make sure "optime" field gets updated var statusBefore = db.runCommand({replSetGetStatus: 1}); diff --git a/jstests/replsets/oplog_term.js b/jstests/replsets/oplog_term.js index 6aa6ca612ae..76ba6babfa7 100644 --- a/jstests/replsets/oplog_term.js +++ b/jstests/replsets/oplog_term.js @@ -1,52 +1,56 @@ // Term counter should be present in oplog entries under protocol version 1 but should be absent // protocol version 0. -(function () { - 'use strict'; - load('jstests/replsets/rslib.js'); +(function() { + 'use strict'; + load('jstests/replsets/rslib.js'); - var name = 'oplog_term'; - var replSet = new ReplSetTest({name: name, nodes: 1, protocolVersion: 0}); - replSet.startSet(); - replSet.initiate(); - replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000); + var name = 'oplog_term'; + var replSet = new ReplSetTest({name: name, nodes: 1, protocolVersion: 0}); + replSet.startSet(); + replSet.initiate(); + replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000); - // Protocol version 0 - 'term' field should be absent from oplog entry. - var primary = replSet.getPrimary(); - var collection = primary.getDB('test').getCollection(name); - assert.writeOK(collection.save({_id: 1})); + // Protocol version 0 - 'term' field should be absent from oplog entry. 
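For reference, the entry under test can be read straight off the oplog (a sketch; `primary` stands for a direct connection to the primary):

    var last = primary.getDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
    // Under protocol version 0 the entry has no term, e.g.
    //   {ts: Timestamp(...), h: NumberLong(...), op: "i", ns: "test.oplog_term", o: {_id: 1}}
    // Under protocol version 1 it gains a 't' (term) field alongside 'ts'.
    printjson(last);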
+ var primary = replSet.getPrimary(); + var collection = primary.getDB('test').getCollection(name); + assert.writeOK(collection.save({_id: 1})); - var oplogEntry = getLatestOp(primary); - assert(oplogEntry, 'unexpected empty oplog'); - assert.eq(collection.getFullName(), oplogEntry.ns, - 'unexpected namespace in oplog entry: ' + tojson(oplogEntry)); - assert.eq(1, oplogEntry.o._id, - 'oplog entry does not refer to most recently inserted document: ' + - tojson(oplogEntry)); - assert(!oplogEntry.hasOwnProperty('t'), - 'oplog entry must not contain term: ' + tojson(oplogEntry)); + var oplogEntry = getLatestOp(primary); + assert(oplogEntry, 'unexpected empty oplog'); + assert.eq(collection.getFullName(), + oplogEntry.ns, + 'unexpected namespace in oplog entry: ' + tojson(oplogEntry)); + assert.eq( + 1, + oplogEntry.o._id, + 'oplog entry does not refer to most recently inserted document: ' + tojson(oplogEntry)); + assert(!oplogEntry.hasOwnProperty('t'), + 'oplog entry must not contain term: ' + tojson(oplogEntry)); - // Protocol version 1 - 'term' field should be present in oplog entry. - var config = assert.commandWorked(primary.adminCommand({replSetGetConfig: 1})).config; - config.protocolVersion = 1; - config.version++; - assert.commandWorked(primary.adminCommand({replSetReconfig: config})); - replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000); + // Protocol version 1 - 'term' field should be present in oplog entry. + var config = assert.commandWorked(primary.adminCommand({replSetGetConfig: 1})).config; + config.protocolVersion = 1; + config.version++; + assert.commandWorked(primary.adminCommand({replSetReconfig: config})); + replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000); - primary = replSet.getPrimary(); - collection = primary.getDB('test').getCollection(name); - assert.writeOK(collection.save({_id: 2})); + primary = replSet.getPrimary(); + collection = primary.getDB('test').getCollection(name); + assert.writeOK(collection.save({_id: 2})); - oplogEntry = getLatestOp(primary); - assert(oplogEntry, 'unexpected empty oplog'); - assert.eq(collection.getFullName(), oplogEntry.ns, - 'unexpected namespace in oplog entry: ' + tojson(oplogEntry)); - assert.eq(2, oplogEntry.o._id, - 'oplog entry does not refer to most recently inserted document: ' + - tojson(oplogEntry)); - assert(oplogEntry.hasOwnProperty('t'), - 'oplog entry must contain term: ' + tojson(oplogEntry)); + oplogEntry = getLatestOp(primary); + assert(oplogEntry, 'unexpected empty oplog'); + assert.eq(collection.getFullName(), + oplogEntry.ns, + 'unexpected namespace in oplog entry: ' + tojson(oplogEntry)); + assert.eq( + 2, + oplogEntry.o._id, + 'oplog entry does not refer to most recently inserted document: ' + tojson(oplogEntry)); + assert(oplogEntry.hasOwnProperty('t'), 'oplog entry must contain term: ' + tojson(oplogEntry)); - var status = assert.commandWorked(primary.adminCommand({replSetGetStatus:1})); - assert.eq(status.term, oplogEntry.t, - 'term in oplog entry does not match term in status: ' + tojson(oplogEntry)); + var status = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1})); + assert.eq(status.term, + oplogEntry.t, + 'term in oplog entry does not match term in status: ' + tojson(oplogEntry)); })(); diff --git a/jstests/replsets/oplog_truncated_on_recovery.js b/jstests/replsets/oplog_truncated_on_recovery.js index 4fd4690f0c6..f4e1bf9b1ec 100644 --- a/jstests/replsets/oplog_truncated_on_recovery.js +++ b/jstests/replsets/oplog_truncated_on_recovery.js @@
-28,11 +28,7 @@ jsTest.log(tojson(arg)); } - var replTest = new ReplSetTest( - { - name : "oplog_truncated_on_recovery", - nodes : 1 - }); + var replTest = new ReplSetTest({name: "oplog_truncated_on_recovery", nodes: 1}); var nodes = replTest.startSet(); replTest.initiate(); @@ -42,22 +38,12 @@ var minvalidColl = localDB["replset.minvalid"]; // Write op - log(assert.writeOK(testDB.foo.save( - { - _id : 1, - a : 1 - }, - { - writeConcern : - { - w : 1 - } - }))); + log(assert.writeOK(testDB.foo.save({_id: 1, a: 1}, {writeConcern: {w: 1}}))); // Set minvalid to something far in the future for the current primary, to simulate recovery. // Note: This is so far in the future (5 days) that it will never become secondary. - var farFutureTS = new Timestamp(Math.floor(new Date().getTime() / 1000) - + (60 * 60 * 24 * 5 /* in five days */), 0); + var farFutureTS = new Timestamp( + Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days */), 0); var rsgs = assert.commandWorked(localDB.adminCommand("replSetGetStatus")); log(rsgs); var primaryOpTime = rsgs.members[0].optime; @@ -69,31 +55,15 @@ // We do an update in case there is a minvalid document on the primary already. // If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures // that update returns details of the write, like whether an update or insert was performed. - log(assert.writeOK(minvalidColl.update( - {}, - { - ts : farFutureTS, - t : NumberLong(-1), - begin : primaryOpTime - }, - { - upsert : true, - writeConcern : - { - w : 1 - } - }))); + log(assert.writeOK( + minvalidColl.update({}, + {ts: farFutureTS, t: NumberLong(-1), begin: primaryOpTime}, + {upsert: true, writeConcern: {w: 1}}))); // Insert a diverged oplog entry that will be truncated after restart. 
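A shell Timestamp is a (seconds, increment) pair, so the diverged optime built below is simply the next increment within the same second; schematically (values made up):

    var current = new Timestamp(1450000000, 5);              // seconds since epoch, counter
    var diverged = new Timestamp(current.t, current.i + 1);  // same second, next counter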
var divergedTS = new Timestamp(primaryOpTime.ts.t, primaryOpTime.ts.i + 1); log(assert.writeOK(localDB.oplog.rs.insert( - { - _id : 0, - ts : divergedTS, - op : "n", - h: NumberLong(0), - t : NumberLong(-1) - }))); + {_id: 0, ts: divergedTS, op: "n", h: NumberLong(0), t: NumberLong(-1)}))); log(localDB.oplog.rs.find().toArray()); log(assert.commandWorked(localDB.adminCommand("replSetGetStatus"))); log("restart primary"); @@ -104,18 +74,14 @@ var mv; try { mv = minvalidColl.findOne(); - } - catch (e) { + } catch (e) { return false; } - var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) - + " - " + tsToDate(mv.ts); + var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) + + " - " + tsToDate(mv.ts); assert.eq(farFutureTS, mv.ts, msg); - var lastTS = localDB.oplog.rs.find().sort( - { - $natural : -1 - }).limit(-1).next().ts; + var lastTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(-1).next().ts; log(localDB.oplog.rs.find().toArray()); assert.eq(primaryOpTime.ts, lastTS); return true; diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js index 5d64719fe8c..a716ca3dbca 100644 --- a/jstests/replsets/optime.js +++ b/jstests/replsets/optime.js @@ -17,9 +17,9 @@ function timestampCompare(o1, o2) { } function optimesAreEqual(replTest) { - var prevStatus = replTest.nodes[0].getDB('admin').serverStatus({oplog:true}).oplog; + var prevStatus = replTest.nodes[0].getDB('admin').serverStatus({oplog: true}).oplog; for (var i = 1; i < replTest.nodes.length; i++) { - var status = replTest.nodes[i].getDB('admin').serverStatus({oplog:true}).oplog; + var status = replTest.nodes[i].getDB('admin').serverStatus({oplog: true}).oplog; if (timestampCompare(prevStatus.latestOptime, status.latestOptime) != 0) { return false; } @@ -28,7 +28,7 @@ function optimesAreEqual(replTest) { return true; } -var replTest = new ReplSetTest( { name : "replStatus" , nodes: 3, oplogSize: 1 } ); +var replTest = new ReplSetTest({name: "replStatus", nodes: 3, oplogSize: 1}); replTest.startSet(); replTest.initiate(); @@ -38,27 +38,31 @@ replTest.awaitSecondaryNodes(); // Check initial optimes assert(optimesAreEqual(replTest)); -var initialInfo = master.getDB('admin').serverStatus({oplog:true}).oplog; +var initialInfo = master.getDB('admin').serverStatus({oplog: true}).oplog; // Do an insert to increment optime, but without rolling the oplog // latestOptime should be updated, but earliestOptime should be unchanged -var options = { writeConcern: { w: replTest.nodes.length }}; -assert.writeOK(master.getDB('test').foo.insert({ a: 1 }, options)); +var options = { + writeConcern: {w: replTest.nodes.length} +}; +assert.writeOK(master.getDB('test').foo.insert({a: 1}, options)); assert(optimesAreEqual(replTest)); -var info = master.getDB('admin').serverStatus({oplog:true}).oplog; +var info = master.getDB('admin').serverStatus({oplog: true}).oplog; assert.gt(timestampCompare(info.latestOptime, initialInfo.latestOptime), 0); assert.eq(timestampCompare(info.earliestOptime, initialInfo.earliestOptime), 0); // Insert some large documents to force the oplog to roll over -var largeString = new Array(1024*10).toString(); +var largeString = new Array(1024 * 10).toString(); for (var i = 0; i < 2000; i++) { - master.getDB('test').foo.insert({ largeString: largeString }, options); + master.getDB('test').foo.insert({largeString: largeString}, options); } -assert.soon(function() { return optimesAreEqual(replTest); } ); +assert.soon(function() { + return 
optimesAreEqual(replTest); +}); // Test that earliestOptime was updated -info = master.getDB('admin').serverStatus({oplog:true}).oplog; +info = master.getDB('admin').serverStatus({oplog: true}).oplog; assert.gt(timestampCompare(info.latestOptime, initialInfo.latestOptime), 0); assert.gt(timestampCompare(info.earliestOptime, initialInfo.earliestOptime), 0); diff --git a/jstests/replsets/pipelineout.js b/jstests/replsets/pipelineout.js index 97accba2eec..bb86f98c4e9 100644 --- a/jstests/replsets/pipelineout.js +++ b/jstests/replsets/pipelineout.js @@ -1,36 +1,32 @@ // test $out in a replicated environment var name = "pipelineout"; -var replTest = new ReplSetTest( {name: name, nodes: 2} ); +var replTest = new ReplSetTest({name: name, nodes: 2}); var nodes = replTest.nodeList(); replTest.startSet(); -replTest.initiate({"_id" : name, - "members" : [ - {"_id" : 0, "host" : nodes[0]}, - {"_id" : 1, "host" : nodes[1]} - ]}); +replTest.initiate( + {"_id": name, "members": [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1]}]}); var primary = replTest.getPrimary().getDB(name); var secondary = replTest.liveNodes.slaves[0].getDB(name); // populate the collection -for (i=0; i<5; i++) { - primary.in.insert({x:i}); +for (i = 0; i < 5; i++) { + primary.in.insert({x: i}); } replTest.awaitReplication(); // make sure $out cannot be run on a secondary assert.throws(function() { - secondary.in.aggregate({$out: "out"}).itcount; - }); + secondary.in.aggregate({$out: "out"}).itcount; +}); // even if slaveOk secondary.setSlaveOk(); assert.throws(function() { - secondary.in.aggregate({$out: "out"}).itcount; - }); + secondary.in.aggregate({$out: "out"}).itcount; +}); // run one and check for proper replication primary.in.aggregate({$out: "out"}).itcount; replTest.awaitReplication(); -assert.eq(primary.out.find().sort( { x : 1 } ).toArray(), - secondary.out.find().sort( { x : 1 } ).toArray()); +assert.eq(primary.out.find().sort({x: 1}).toArray(), secondary.out.find().sort({x: 1}).toArray()); diff --git a/jstests/replsets/plan_cache_slaveok.js b/jstests/replsets/plan_cache_slaveok.js index a63be51fae1..2de5749f086 100644 --- a/jstests/replsets/plan_cache_slaveok.js +++ b/jstests/replsets/plan_cache_slaveok.js @@ -5,76 +5,44 @@ var name = "plan_cache_slaveok"; function assertPlanCacheCommandsSucceed(db) { // .listQueryShapes() - assert.commandWorked(db.runCommand({ - planCacheListQueryShapes: name - })); + assert.commandWorked(db.runCommand({planCacheListQueryShapes: name})); // .getPlansByQuery() - assert.commandWorked(db.runCommand({ - planCacheListPlans: name, - query: {a: 1} - })); + assert.commandWorked(db.runCommand({planCacheListPlans: name, query: {a: 1}})); // .clear() - assert.commandWorked(db.runCommand({ - planCacheClear: name, - query: {a: 1} - })); + assert.commandWorked(db.runCommand({planCacheClear: name, query: {a: 1}})); // setFilter - assert.commandWorked(db.runCommand({ - planCacheSetFilter: name, - query: {a: 1}, - indexes: [{a: 1}] - })); + assert.commandWorked( + db.runCommand({planCacheSetFilter: name, query: {a: 1}, indexes: [{a: 1}]})); // listFilters - assert.commandWorked(db.runCommand({ - planCacheListFilters: name - })); + assert.commandWorked(db.runCommand({planCacheListFilters: name})); // clearFilters - assert.commandWorked(db.runCommand({ - planCacheClearFilters: name, - query: {a: 1} - })); + assert.commandWorked(db.runCommand({planCacheClearFilters: name, query: {a: 1}})); } function assertPlanCacheCommandsFail(db) { // .listQueryShapes() - assert.commandFailed(db.runCommand({ 
- planCacheListQueryShapes: name - })); + assert.commandFailed(db.runCommand({planCacheListQueryShapes: name})); // .getPlansByQuery() - assert.commandFailed(db.runCommand({ - planCacheListPlans: name, - query: {a: 1} - })); + assert.commandFailed(db.runCommand({planCacheListPlans: name, query: {a: 1}})); // .clear() - assert.commandFailed(db.runCommand({ - planCacheClear: name, - query: {a: 1} - })); + assert.commandFailed(db.runCommand({planCacheClear: name, query: {a: 1}})); // setFilter - assert.commandFailed(db.runCommand({ - planCacheSetFilter: name, - query: {a: 1}, - indexes: [{a: 1}] - })); + assert.commandFailed( + db.runCommand({planCacheSetFilter: name, query: {a: 1}, indexes: [{a: 1}]})); // listFilters - assert.commandFailed(db.runCommand({ - planCacheListFilters: name - })); + assert.commandFailed(db.runCommand({planCacheListFilters: name})); // clearFilters - assert.commandFailed(db.runCommand({ - planCacheClearFilters: name, - query: {a: 1} - })); + assert.commandFailed(db.runCommand({planCacheClearFilters: name, query: {a: 1}})); } print("Start replica set with two nodes"); diff --git a/jstests/replsets/priority_takeover_cascading_priorities.js b/jstests/replsets/priority_takeover_cascading_priorities.js index f2dc98490f4..b4559fda16f 100644 --- a/jstests/replsets/priority_takeover_cascading_priorities.js +++ b/jstests/replsets/priority_takeover_cascading_priorities.js @@ -3,18 +3,21 @@ // Start replica set. Ensure that highest priority node becomes primary eventually. // Shut down the primary and confirm that the next highest priority node becomes primary. // Repeat until 3 nodes are left standing. -(function () { +(function() { 'use strict'; load('jstests/replsets/rslib.js'); var name = 'priority_takeover_cascading_priorities'; - var replSet = new ReplSetTest({name: name, nodes: [ - {rsConfig: {priority: 5}}, - {rsConfig: {priority: 4}}, - {rsConfig: {priority: 3}}, - {rsConfig: {priority: 2}}, - {rsConfig: {priority: 1}}, - ]}); + var replSet = new ReplSetTest({ + name: name, + nodes: [ + {rsConfig: {priority: 5}}, + {rsConfig: {priority: 4}}, + {rsConfig: {priority: 3}}, + {rsConfig: {priority: 2}}, + {rsConfig: {priority: 1}}, + ] + }); replSet.startSet(); replSet.initiate(); @@ -27,8 +30,7 @@ waitForMemberState: ReplSetTest.State.PRIMARY, timeoutMillis: 60 * 1000, }), - 'node ' + i + ' ' + replSet.nodes[i].host + ' failed to become primary' - ); + 'node ' + i + ' ' + replSet.nodes[i].host + ' failed to become primary'); }; waitForPrimary(0); diff --git a/jstests/replsets/priority_takeover_one_node_higher_priority.js b/jstests/replsets/priority_takeover_one_node_higher_priority.js index aeb550966c0..e718ef131f9 100644 --- a/jstests/replsets/priority_takeover_one_node_higher_priority.js +++ b/jstests/replsets/priority_takeover_one_node_higher_priority.js @@ -3,16 +3,13 @@ // Wait for replica set to stabilize with higher priority node as primary. // Step down high priority node. Wait for the lower priority electable node to become primary. // Eventually high priority node will run a priority takeover election to become primary. 
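The stepdown exercised below severs the client's connection, which is why these tests expect a thrown exception rather than a normal reply; the basic pattern (a sketch; 60 is an arbitrary stepdown period in seconds and `primary` is an assumed connection):

    try {
        primary.adminCommand({replSetStepDown: 60});
    } catch (e) {
        // Expected here: the node drops its connections when it steps down.
        print("stepdown closed the connection: " + e);
    }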
-(function () { +(function() { 'use strict'; load('jstests/replsets/rslib.js'); var name = 'priority_takeover_one_node_higher_priority'; - var replSet = new ReplSetTest({name: name, nodes: [ - {rsConfig: {priority: 3}}, - {}, - {rsConfig: {arbiterOnly: true}}, - ]}); + var replSet = new ReplSetTest( + {name: name, nodes: [{rsConfig: {priority: 3}}, {}, {rsConfig: {arbiterOnly: true}}, ]}); replSet.startSet(); replSet.initiate(); @@ -30,12 +27,14 @@ var result = primary.adminCommand({replSetStepDown: stepDownGuardMillis / 1000}); print('replSetStepDown did not throw exception but returned: ' + tojson(result)); }); - assert.neq(-1, tojson(stepDownException).indexOf('error doing query'), + assert.neq(-1, + tojson(stepDownException).indexOf('error doing query'), 'replSetStepDown did not disconnect client'); // Step down primary and wait for node 1 to be promoted to primary. replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000); // Eventually node 0 will stand for election again because it has a higher priority. - replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, stepDownGuardMillis + 60 * 1000); + replSet.waitForState( + replSet.nodes[0], ReplSetTest.State.PRIMARY, stepDownGuardMillis + 60 * 1000); })(); diff --git a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js index 1cffd1ecef5..717e9b945e7 100644 --- a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js +++ b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js @@ -4,56 +4,51 @@ // Step down the primary and confirm that the next highest priority node becomes primary. load('jstests/replsets/rslib.js'); -(function () { -'use strict'; +(function() { + 'use strict'; -var name = 'priority_takeover_two_nodes_equal_priority'; -var replSet = new ReplSetTest({name: name, nodes: [ - {rsConfig: {priority: 3}}, - {rsConfig: {priority: 3}}, - {}, -]}); -replSet.startSet(); -replSet.initiate(); + var name = 'priority_takeover_two_nodes_equal_priority'; + var replSet = new ReplSetTest( + {name: name, nodes: [{rsConfig: {priority: 3}}, {rsConfig: {priority: 3}}, {}, ]}); + replSet.startSet(); + replSet.initiate(); -var primary; -var primaryIndex = -1; -var defaultPriorityNodeIndex = 2; -assert.soon( - function() { - primary = replSet.getPrimary(); - replSet.nodes.find(function(node, index, array) { - if (primary.host == node.host) { - primaryIndex = index; - return true; - } - return false; - }); - return primaryIndex !== defaultPriorityNodeIndex; - }, - 'neither of the priority 3 nodes was elected primary', - 60000, // timeout - 1000 // interval -); + var primary; + var primaryIndex = -1; + var defaultPriorityNodeIndex = 2; + assert.soon( + function() { + primary = replSet.getPrimary(); + replSet.nodes.find(function(node, index, array) { + if (primary.host == node.host) { + primaryIndex = index; + return true; + } + return false; + }); + return primaryIndex !== defaultPriorityNodeIndex; + }, + 'neither of the priority 3 nodes was elected primary', + 60000, // timeout + 1000 // interval + ); -try { - assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 90})); -} catch (x) { - // expected -} -var newPrimaryIndex = primaryIndex === 0 ? 1 : 0; + try { + assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 90})); + } catch (x) { + // expected + } + var newPrimaryIndex = primaryIndex === 0 ? 1 : 0; -// Refresh connections to nodes.
-replSet.status(); + // Refresh connections to nodes. + replSet.status(); -assert.commandWorked( - replSet.nodes[newPrimaryIndex].adminCommand({ + assert.commandWorked(replSet.nodes[newPrimaryIndex].adminCommand({ replSetTest: 1, waitForMemberState: ReplSetTest.State.PRIMARY, timeoutMillis: 60 * 1000, }), - 'node ' + newPrimaryIndex + ' ' + replSet.nodes[newPrimaryIndex].host + - ' failed to become primary' -); + 'node ' + newPrimaryIndex + ' ' + replSet.nodes[newPrimaryIndex].host + + ' failed to become primary'); })(); diff --git a/jstests/replsets/protocol_version_upgrade_downgrade.js b/jstests/replsets/protocol_version_upgrade_downgrade.js index 3d406baf717..58210853c91 100644 --- a/jstests/replsets/protocol_version_upgrade_downgrade.js +++ b/jstests/replsets/protocol_version_upgrade_downgrade.js @@ -2,86 +2,88 @@ load("jstests/replsets/rslib.js"); (function() { -"use strict"; -var name = "protocol_version_upgrade_downgrade"; -var rst = new ReplSetTest({name: name, nodes: 3}); + "use strict"; + var name = "protocol_version_upgrade_downgrade"; + var rst = new ReplSetTest({name: name, nodes: 3}); -rst.startSet(); -// Initiate the replset in protocol version 0. -var conf = rst.getReplSetConfig(); -conf.settings = conf.settings || { }; -conf.protocolVersion = 0; -// The first node will always be the primary. -conf.members[0].priority = 1; -conf.members[1].priority = 0; -conf.members[2].priority = 0; -rst.initiate(conf); -rst.awaitSecondaryNodes(); + rst.startSet(); + // Initiate the replset in protocol version 0. + var conf = rst.getReplSetConfig(); + conf.settings = conf.settings || {}; + conf.protocolVersion = 0; + // The first node will always be the primary. + conf.members[0].priority = 1; + conf.members[1].priority = 0; + conf.members[2].priority = 0; + rst.initiate(conf); + rst.awaitSecondaryNodes(); -var primary = rst.getPrimary(); -var primaryColl = primary.getDB("test").coll; + var primary = rst.getPrimary(); + var primaryColl = primary.getDB("test").coll; -// Set verbosity for replication on all nodes. -var verbosity = { - "setParameter" : 1, - "logComponentVerbosity" : { - "replication" : { "verbosity" : 3 }, - } -}; -primary.adminCommand(verbosity); -rst.getSecondaries().forEach(function (node) {node.adminCommand(verbosity);}); + // Set verbosity for replication on all nodes. + var verbosity = { + "setParameter": 1, + "logComponentVerbosity": { + "replication": {"verbosity": 3}, + } + }; + primary.adminCommand(verbosity); + rst.getSecondaries().forEach(function(node) { + node.adminCommand(verbosity); + }); -// Do a write, this will set up sync sources on secondaries. -print("do a write"); -assert.writeOK(primaryColl.bar.insert({x: 1}, {writeConcern: {w: 3}})); -// Check optime format in protocol version 0, which is a Timestamp. -var res = primary.adminCommand({replSetGetStatus: 1}); -assert.commandWorked(res); -// Check the optime is a Timestamp, not an OpTime { ts: Timestamp, t: int } -assert.eq(res.members[0].optime.ts, null); + // Do a write, this will set up sync sources on secondaries. + print("do a write"); + assert.writeOK(primaryColl.bar.insert({x: 1}, {writeConcern: {w: 3}})); + // Check optime format in protocol version 0, which is a Timestamp. 
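The two optime wire formats being distinguished look like this (a sketch; `primary` and the values are illustrative):

    var status = primary.adminCommand({replSetGetStatus: 1});
    var optime = status.members[0].optime;
    // Protocol version 0: a bare Timestamp, e.g. Timestamp(1450000000, 1), so optime.ts is undefined.
    // Protocol version 1: a document with a term, e.g. {ts: Timestamp(1450000000, 1), t: NumberLong(0)}.
    printjson(optime);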
+ var res = primary.adminCommand({replSetGetStatus: 1}); + assert.commandWorked(res); + // Check the optime is a Timestamp, not an OpTime { ts: Timestamp, t: int } + assert.eq(res.members[0].optime.ts, null); -// -// Upgrade protocol version -// -res = primary.adminCommand({replSetGetConfig: 1}); -assert.commandWorked(res); -conf = res.config; -assert.eq(conf.protocolVersion, undefined); -// Change protocol version -conf.protocolVersion = 1; -conf.version++; -reconfig(rst, conf); -// This write may block until all nodes finish upgrade, because replSetUpdatePosition may be -// rejected by the primary for mismatched config version before secondaries get reconfig. -// This will make secondaries wait for 0.5 seconds and retry. -assert.writeOK(primaryColl.bar.insert({x: 2}, {writeConcern: {w: 3}})); + // + // Upgrade protocol version + // + res = primary.adminCommand({replSetGetConfig: 1}); + assert.commandWorked(res); + conf = res.config; + assert.eq(conf.protocolVersion, undefined); + // Change protocol version + conf.protocolVersion = 1; + conf.version++; + reconfig(rst, conf); + // This write may block until all nodes finish upgrade, because replSetUpdatePosition may be + // rejected by the primary for mismatched config version before secondaries get reconfig. + // This will make secondaries wait for 0.5 seconds and retry. + assert.writeOK(primaryColl.bar.insert({x: 2}, {writeConcern: {w: 3}})); -// Check optime format in protocol version 1, which is an object including the term. -res = primary.adminCommand({replSetGetStatus: 1}); -assert.commandWorked(res); -assert.eq(res.members[0].optime.t, NumberLong(0)); + // Check optime format in protocol version 1, which is an object including the term. + res = primary.adminCommand({replSetGetStatus: 1}); + assert.commandWorked(res); + assert.eq(res.members[0].optime.t, NumberLong(0)); -// Check last vote. -var lastVote = primary.getDB("local")['replset.election'].findOne(); -assert.eq(lastVote.term, NumberLong(0)); -assert.eq(lastVote.candidateIndex, NumberLong(-1)); + // Check last vote. + var lastVote = primary.getDB("local")['replset.election'].findOne(); + assert.eq(lastVote.term, NumberLong(0)); + assert.eq(lastVote.candidateIndex, NumberLong(-1)); -// -// Downgrade protocol version -// -res = primary.adminCommand({replSetGetConfig: 1}); -assert.commandWorked(res); -conf = res.config; -assert.eq(conf.protocolVersion, 1); -// Change protocol version -conf.protocolVersion = 0; -conf.version++; -reconfig(rst, conf); -assert.writeOK(primaryColl.bar.insert({x: 3}, {writeConcern: {w: 3}})); + // + // Downgrade protocol version + // + res = primary.adminCommand({replSetGetConfig: 1}); + assert.commandWorked(res); + conf = res.config; + assert.eq(conf.protocolVersion, 1); + // Change protocol version + conf.protocolVersion = 0; + conf.version++; + reconfig(rst, conf); + assert.writeOK(primaryColl.bar.insert({x: 3}, {writeConcern: {w: 3}})); -// Check optime format in protocol version 0, which is a Timestamp. -res = primary.adminCommand({replSetGetStatus: 1}); -assert.commandWorked(res); -assert.eq(res.members[0].optime.ts, null); + // Check optime format in protocol version 0, which is a Timestamp. 
+ res = primary.adminCommand({replSetGetStatus: 1}); + assert.commandWorked(res); + assert.eq(res.members[0].optime.ts, null); })(); diff --git a/jstests/replsets/read_after_optime.js b/jstests/replsets/read_after_optime.js index cff0896344e..30cf7782679 100644 --- a/jstests/replsets/read_after_optime.js +++ b/jstests/replsets/read_after_optime.js @@ -1,108 +1,104 @@ // Test read after opTime functionality with maxTimeMS. (function() { -"use strict"; + "use strict"; -var replTest = new ReplSetTest({ nodes: 2 }); -replTest.startSet(); -replTest.initiate(); -var config = replTest.getReplSetConfigFromNode(); + var replTest = new ReplSetTest({nodes: 2}); + replTest.startSet(); + replTest.initiate(); + var config = replTest.getReplSetConfigFromNode(); -var runTest = function(testDB, primaryConn) { - primaryConn.getDB('test').user.insert({ x: 1 }, { writeConcern: { w: 2 }}); + var runTest = function(testDB, primaryConn) { + primaryConn.getDB('test').user.insert({x: 1}, {writeConcern: {w: 2}}); - var localDB = primaryConn.getDB('local'); + var localDB = primaryConn.getDB('local'); - var oplogTS = localDB.oplog.rs.find().sort({ $natural: -1 }).limit(1).next(); - var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0); + var oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next(); + var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0); - var term = -1; - if (config.protocolVersion === 1) { - term = oplogTS.t; - } + var term = -1; + if (config.protocolVersion === 1) { + term = oplogTS.t; + } - // Test timeout with maxTimeMS - var runTimeoutTest = function() { - var timeoutResult = assert.commandFailedWithCode( - testDB.runCommand({ + // Test timeout with maxTimeMS + var runTimeoutTest = function() { + var timeoutResult = assert.commandFailedWithCode(testDB.runCommand({ find: 'user', - filter: { x: 1 }, - readConcern: { - afterOpTime: { ts: twoSecTS, t: term } - }, + filter: {x: 1}, + readConcern: {afterOpTime: {ts: twoSecTS, t: term}}, maxTimeMS: 5000, }), - ErrorCodes.ExceededTimeLimit - ); - assert.gt(timeoutResult.waitedMS, 500); - }; - - var countLogMessages = function(msg) { - var total = 0; - var logMessages = assert.commandWorked(testDB.adminCommand({getLog: 'global'})).log; - for (var i = 0; i < logMessages.length; i++) { - if (logMessages[i].indexOf(msg) != -1) { - total++; + ErrorCodes.ExceededTimeLimit); + assert.gt(timeoutResult.waitedMS, 500); + }; + + var countLogMessages = function(msg) { + var total = 0; + var logMessages = assert.commandWorked(testDB.adminCommand({getLog: 'global'})).log; + for (var i = 0; i < logMessages.length; i++) { + if (logMessages[i].indexOf(msg) != -1) { + total++; + } } - } - return total; - }; - - var checkLog = function(msg, expectedCount) { - var count; - assert.soon(function() { - count = countLogMessages(msg); - return expectedCount == count; + return total; + }; + + var checkLog = function(msg, expectedCount) { + var count; + assert.soon( + function() { + count = countLogMessages(msg); + return expectedCount == count; + }, + 'Expected ' + expectedCount + ', but instead saw ' + count + + ' log entries containing the following message: ' + msg, + 60000, + 300); + }; + + // Run the time out test 3 times with replication debug log level increased to 2 + // for first and last run. The time out message should be logged twice. + testDB.setLogLevel(2, 'command'); + runTimeoutTest(); + testDB.setLogLevel(0, 'command'); + + var msg = 'Command on database ' + testDB.getName() + + ' timed out waiting for read concern to be satisfied. 
Command:'; + checkLog(msg, 1); + + // Read concern timed out message should not be logged. + runTimeoutTest(); + + testDB.setLogLevel(2, 'command'); + runTimeoutTest(); + testDB.setLogLevel(0, 'command'); + + checkLog(msg, 2); + + // Test read on future afterOpTime that will eventually occur. + var insertFunc = startParallelShell( + "sleep(2100); db.user.insert({ y: 1 }, { writeConcern: { w: 2 }});", primaryConn.port); + + var res = assert.commandWorked(testDB.runCommand({ + find: 'user', + filter: {x: 1}, + readConcern: { + afterOpTime: {ts: twoSecTS, t: term}, }, - 'Expected ' + expectedCount + ', but instead saw ' + count + - ' log entries containing the following message: ' + msg, - 60000, - 300); - }; + maxTimeMS: 10 * 1000, + })); - // Run the time out test 3 times with replication debug log level increased to 2 - // for first and last run. The time out message should be logged twice. - testDB.setLogLevel(2, 'command'); - runTimeoutTest(); - testDB.setLogLevel(0, 'command'); + assert.eq(null, res.code); + assert.gt(res.waitedMS, 0); - var msg = 'Command on database ' + testDB.getName() + - ' timed out waiting for read concern to be satisfied. Command:'; - checkLog(msg, 1); - - // Read concern timed out message should not be logged. - runTimeoutTest(); - - testDB.setLogLevel(2, 'command'); - runTimeoutTest(); - testDB.setLogLevel(0, 'command'); - - checkLog(msg, 2); - - // Test read on future afterOpTime that will eventually occur. - var insertFunc = startParallelShell( - "sleep(2100); db.user.insert({ y: 1 }, { writeConcern: { w: 2 }});", - primaryConn.port); - - var res = assert.commandWorked(testDB.runCommand({ - find: 'user', - filter: { x: 1 }, - readConcern: { - afterOpTime: { ts: twoSecTS, t: term }, - }, - maxTimeMS: 10 * 1000, - })); - - assert.eq(null, res.code); - assert.gt(res.waitedMS, 0); - - insertFunc(); -}; + insertFunc(); + }; -var primary = replTest.getPrimary(); -runTest(primary.getDB('test'), primary); -runTest(replTest.getSecondary().getDB('test'), primary); + var primary = replTest.getPrimary(); + runTest(primary.getDB('test'), primary); + runTest(replTest.getSecondary().getDB('test'), primary); -replTest.stopSet(); + replTest.stopSet(); })(); diff --git a/jstests/replsets/read_committed.js b/jstests/replsets/read_committed.js index 2ed51300534..02b220c33e4 100644 --- a/jstests/replsets/read_committed.js +++ b/jstests/replsets/read_committed.js @@ -11,68 +11,69 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority. (function() { -"use strict"; + "use strict"; -// Set up a set and grab things for later. -var name = "read_committed"; -var replTest = new ReplSetTest({name: name, - nodes: 3, - nodeOptions: {enableMajorityReadConcern: ''}}); + // Set up a set and grab things for later. 
+ var name = "read_committed"; + var replTest = + new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}}); -if (!startSetIfSupportsReadMajority(replTest)) { - jsTest.log("skipping test since storage engine doesn't support committed reads"); - return; -} + if (!startSetIfSupportsReadMajority(replTest)) { + jsTest.log("skipping test since storage engine doesn't support committed reads"); + return; + } -var nodes = replTest.nodeList(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0] }, - { "_id": 1, "host": nodes[1], priority: 0 }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); + var nodes = replTest.nodeList(); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1], priority: 0}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] + }); -// Get connections and collection. -var primary = replTest.getPrimary(); -var secondary = replTest.liveNodes.slaves[0]; -var secondaryId = replTest.getNodeId(secondary); -var db = primary.getDB(name); -var t = db[name]; + // Get connections and collection. + var primary = replTest.getPrimary(); + var secondary = replTest.liveNodes.slaves[0]; + var secondaryId = replTest.getNodeId(secondary); + var db = primary.getDB(name); + var t = db[name]; -function doDirtyRead() { - var res = t.runCommand('find', {"readConcern": {"level": "local"}}); - assert.commandWorked(res); - return new DBCommandCursor(db.getMongo(), res).toArray()[0].state; -} + function doDirtyRead() { + var res = t.runCommand('find', {"readConcern": {"level": "local"}}); + assert.commandWorked(res); + return new DBCommandCursor(db.getMongo(), res).toArray()[0].state; + } -function doCommittedRead() { - var res = t.runCommand('find', {"readConcern": {"level": "majority"}}); - assert.commandWorked(res); - return new DBCommandCursor(db.getMongo(), res).toArray()[0].state; -} + function doCommittedRead() { + var res = t.runCommand('find', {"readConcern": {"level": "majority"}}); + assert.commandWorked(res); + return new DBCommandCursor(db.getMongo(), res).toArray()[0].state; + } -// Do a write, wait for it to replicate, and ensure it is visible. -assert.writeOK(t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 60*1000}})); -assert.eq(doDirtyRead(), 0); -assert.eq(doCommittedRead(), 0); + // Do a write, wait for it to replicate, and ensure it is visible. + assert.writeOK( + t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 60 * 1000}})); + assert.eq(doDirtyRead(), 0); + assert.eq(doCommittedRead(), 0); -replTest.stop(secondaryId); + replTest.stop(secondaryId); -// Do a write and ensure it is only visible to dirty reads -assert.writeOK(t.save({_id: 1, state: 1})); -assert.eq(doDirtyRead(), 1); -assert.eq(doCommittedRead(), 0); + // Do a write and ensure it is only visible to dirty reads + assert.writeOK(t.save({_id: 1, state: 1})); + assert.eq(doDirtyRead(), 1); + assert.eq(doCommittedRead(), 0); -// Try the committed read again after sleeping to ensure it doesn't only work for queries -// immediately after the write. -sleep(1000); -assert.eq(doCommittedRead(), 0); - -// Restart the node and ensure the committed view is updated. -replTest.restart(secondaryId); -db.getLastError("majority", 60 * 1000); -assert.eq(doDirtyRead(), 1); -assert.eq(doCommittedRead(), 1); + // Try the committed read again after sleeping to ensure it doesn't only work for queries + // immediately after the write. 
+ sleep(1000); + assert.eq(doCommittedRead(), 0); + // Restart the node and ensure the committed view is updated. + replTest.restart(secondaryId); + db.getLastError("majority", 60 * 1000); + assert.eq(doDirtyRead(), 1); + assert.eq(doCommittedRead(), 1); }()); diff --git a/jstests/replsets/read_committed_no_snapshots.js b/jstests/replsets/read_committed_no_snapshots.js index 2abf15beb2d..25eb18a9cae 100644 --- a/jstests/replsets/read_committed_no_snapshots.js +++ b/jstests/replsets/read_committed_no_snapshots.js @@ -10,73 +10,74 @@ load("jstests/replsets/rslib.js"); // For reconfig and startSetIfSupportsReadMajority. (function() { -"use strict"; + "use strict"; -// Set up a set and grab things for later. -var name = "read_committed_no_snapshots"; -var replTest = new ReplSetTest({name: name, - nodes: 3, - nodeOptions: {enableMajorityReadConcern: ''}}); + // Set up a set and grab things for later. + var name = "read_committed_no_snapshots"; + var replTest = + new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}}); -if (!startSetIfSupportsReadMajority(replTest)) { - jsTest.log("skipping test since storage engine doesn't support committed reads"); - return; -} + if (!startSetIfSupportsReadMajority(replTest)) { + jsTest.log("skipping test since storage engine doesn't support committed reads"); + return; + } -var nodes = replTest.nodeList(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0] }, - { "_id": 1, "host": nodes[1], priority: 0 }, - { "_id": 2, "host": nodes[2], arbiterOnly: true }], - "protocolVersion": 1 - }); + var nodes = replTest.nodeList(); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1], priority: 0}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ], + "protocolVersion": 1 + }); -// Get connections and collection. -var primary = replTest.getPrimary(); -var secondary = replTest.liveNodes.slaves[0]; -var secondaryId = replTest.getNodeId(secondary); -var db = primary.getDB(name); + // Get connections and collection. + var primary = replTest.getPrimary(); + var secondary = replTest.liveNodes.slaves[0]; + var secondaryId = replTest.getNodeId(secondary); + var db = primary.getDB(name); -// Do a write, wait for it to replicate, and ensure it is visible. -var res = db.runCommandWithMetadata( - "insert", - { - insert: "foo", - documents: [{_id: 1, state: 0}], - writeConcern: {w: "majority", wtimeout: 60*1000} - }, - {"$replData": 1}); -assert.commandWorked(res.commandReply); + // Do a write, wait for it to replicate, and ensure it is visible. + var res = db.runCommandWithMetadata("insert", + { + insert: "foo", + documents: [{_id: 1, state: 0}], + writeConcern: {w: "majority", wtimeout: 60 * 1000} + }, + {"$replData": 1}); + assert.commandWorked(res.commandReply); -// We need to propagate the lastOpVisible from the primary as afterOpTime in the secondary to ensure -// we wait for the write to be in the majority committed view. -var lastOp = res.metadata["$replData"].lastOpVisible; + // We need to propagate the lastOpVisible from the primary as afterOpTime in the secondary to + // ensure + // we wait for the write to be in the majority committed view. + var lastOp = res.metadata["$replData"].lastOpVisible; -secondary.setSlaveOk(); -// Timeout is based on heartbeat timeout. 
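The $replData metadata round-trip above is what ties the two connections together: the insert's reply metadata carries lastOpVisible, and passing that optime back as readConcern.afterOpTime makes the secondary's find wait until the write is in its majority-committed view (bounded by maxTimeMS). A condensed sketch of the handshake, reusing the db/name/secondary variables from this test and an illustrative document:

// Write on the primary and ask for replication metadata back.
var res = db.runCommandWithMetadata("insert",
                                    {
                                      insert: "foo",
                                      documents: [{_id: 2, state: 0}],  // illustrative doc
                                      writeConcern: {w: "majority", wtimeout: 60 * 1000}
                                    },
                                    {"$replData": 1});
assert.commandWorked(res.commandReply);
var lastOp = res.metadata["$replData"].lastOpVisible;

// Read on the secondary at (or after) that optime.
secondary.setSlaveOk();
assert.commandWorked(secondary.getDB(name).foo.runCommand(
    'find',
    {"readConcern": {"level": "majority", "afterOpTime": lastOp}, "maxTimeMS": 10 * 1000}));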
-assert.commandWorked(secondary.getDB(name).foo.runCommand( - 'find', {"readConcern": {"level": "majority", "afterOpTime": lastOp}, - "maxTimeMS": 10 * 1000})); + secondary.setSlaveOk(); + // Timeout is based on heartbeat timeout. + assert.commandWorked(secondary.getDB(name).foo.runCommand( + 'find', + {"readConcern": {"level": "majority", "afterOpTime": lastOp}, "maxTimeMS": 10 * 1000})); -// Disable snapshotting via failpoint -secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}); + // Disable snapshotting via failpoint + secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}); -// Resync to drop any existing snapshots -secondary.adminCommand({resync: 1}); - -// Ensure maxTimeMS times out while waiting for this snapshot -assert.commandFailed(secondary.getDB(name).foo.runCommand( - 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000})); + // Resync to drop any existing snapshots + secondary.adminCommand({resync: 1}); -// Reconfig to make the secondary the primary -var config = primary.getDB("local").system.replset.findOne(); -config.members[0].priority = 0; -config.members[1].priority = 3; -config.version++; -primary = reconfig(replTest, config, true); + // Ensure maxTimeMS times out while waiting for this snapshot + assert.commandFailed(secondary.getDB(name).foo.runCommand( + 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000})); -// Ensure maxTimeMS times out while waiting for this snapshot -assert.commandFailed(primary.getSiblingDB(name).foo.runCommand( - 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000})); + // Reconfig to make the secondary the primary + var config = primary.getDB("local").system.replset.findOne(); + config.members[0].priority = 0; + config.members[1].priority = 3; + config.version++; + primary = reconfig(replTest, config, true); + + // Ensure maxTimeMS times out while waiting for this snapshot + assert.commandFailed(primary.getSiblingDB(name).foo.runCommand( + 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000})); }()); diff --git a/jstests/replsets/read_committed_on_secondary.js b/jstests/replsets/read_committed_on_secondary.js index 207b1e0373c..468007b7ee8 100644 --- a/jstests/replsets/read_committed_on_secondary.js +++ b/jstests/replsets/read_committed_on_secondary.js @@ -9,87 +9,89 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority. (function() { -"use strict"; - -// Set up a set and grab things for later. -var name = "read_committed_on_secondary"; -var replTest = new ReplSetTest({name: name, - nodes: 3, - nodeOptions: {enableMajorityReadConcern: ''}}); - -if (!startSetIfSupportsReadMajority(replTest)) { - jsTest.log("skipping test since storage engine doesn't support committed reads"); - return; -} - -var nodes = replTest.nodeList(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0] }, - { "_id": 1, "host": nodes[1], priority: 0 }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); - -// Get connections and collection. 
-var primary = replTest.getPrimary(); -var secondary = replTest.liveNodes.slaves[0]; -var secondaryId = replTest.getNodeId(secondary); - -var dbPrimary = primary.getDB(name); -var collPrimary = dbPrimary[name]; - -var dbSecondary = secondary.getDB(name); -var collSecondary = dbSecondary[name]; - -function saveDoc(state) { - var res = dbPrimary.runCommandWithMetadata( - 'update', - { - update: name, - writeConcern: {w: 2, wtimeout: 60*1000}, - updates: [{q: {_id: 1}, u: {_id: 1, state: state}, upsert: true}], - }, - {"$replData": 1}); - assert.commandWorked(res.commandReply); - assert.eq(res.commandReply.writeErrors, undefined); - return res.metadata.$replData.lastOpVisible; -} - -function doDirtyRead(lastOp) { - var res = collSecondary.runCommand('find', {"readConcern": {"level": "local", - "afterOpTime": lastOp}}); - assert.commandWorked(res); - return new DBCommandCursor(secondary, res).toArray()[0].state; -} - -function doCommittedRead(lastOp) { - var res = collSecondary.runCommand('find', {"readConcern": {"level": "majority", - "afterOpTime": lastOp}}); - assert.commandWorked(res); - return new DBCommandCursor(secondary, res).toArray()[0].state; -} - -// Do a write, wait for it to replicate, and ensure it is visible. -var op0 = saveDoc(0); -assert.eq(doDirtyRead(op0), 0); -assert.eq(doCommittedRead(op0), 0); - -// Disable snapshotting on the secondary. -secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}); - -// Do a write and ensure it is only visible to dirty reads -var op1 = saveDoc(1); -assert.eq(doDirtyRead(op1), 1); -assert.eq(doCommittedRead(op0), 0); - -// Try the committed read again after sleeping to ensure it doesn't only work for queries -// immediately after the write. -sleep(1000); -assert.eq(doCommittedRead(op0), 0); - -// Reenable snapshotting on the secondary and ensure that committed reads are able to see the new -// state. -secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}); -assert.eq(doDirtyRead(op1), 1); -assert.eq(doCommittedRead(op1), 1); + "use strict"; + + // Set up a set and grab things for later. + var name = "read_committed_on_secondary"; + var replTest = + new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}}); + + if (!startSetIfSupportsReadMajority(replTest)) { + jsTest.log("skipping test since storage engine doesn't support committed reads"); + return; + } + + var nodes = replTest.nodeList(); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1], priority: 0}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] + }); + + // Get connections and collection. 
+ var primary = replTest.getPrimary(); + var secondary = replTest.liveNodes.slaves[0]; + var secondaryId = replTest.getNodeId(secondary); + + var dbPrimary = primary.getDB(name); + var collPrimary = dbPrimary[name]; + + var dbSecondary = secondary.getDB(name); + var collSecondary = dbSecondary[name]; + + function saveDoc(state) { + var res = dbPrimary.runCommandWithMetadata( + 'update', + { + update: name, + writeConcern: {w: 2, wtimeout: 60 * 1000}, + updates: [{q: {_id: 1}, u: {_id: 1, state: state}, upsert: true}], + }, + {"$replData": 1}); + assert.commandWorked(res.commandReply); + assert.eq(res.commandReply.writeErrors, undefined); + return res.metadata.$replData.lastOpVisible; + } + + function doDirtyRead(lastOp) { + var res = collSecondary.runCommand( + 'find', {"readConcern": {"level": "local", "afterOpTime": lastOp}}); + assert.commandWorked(res); + return new DBCommandCursor(secondary, res).toArray()[0].state; + } + + function doCommittedRead(lastOp) { + var res = collSecondary.runCommand( + 'find', {"readConcern": {"level": "majority", "afterOpTime": lastOp}}); + assert.commandWorked(res); + return new DBCommandCursor(secondary, res).toArray()[0].state; + } + + // Do a write, wait for it to replicate, and ensure it is visible. + var op0 = saveDoc(0); + assert.eq(doDirtyRead(op0), 0); + assert.eq(doCommittedRead(op0), 0); + + // Disable snapshotting on the secondary. + secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}); + + // Do a write and ensure it is only visible to dirty reads + var op1 = saveDoc(1); + assert.eq(doDirtyRead(op1), 1); + assert.eq(doCommittedRead(op0), 0); + + // Try the committed read again after sleeping to ensure it doesn't only work for queries + // immediately after the write. + sleep(1000); + assert.eq(doCommittedRead(op0), 0); + + // Reenable snapshotting on the secondary and ensure that committed reads are able to see the + // new + // state. + secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}); + assert.eq(doDirtyRead(op1), 1); + assert.eq(doCommittedRead(op1), 1); }()); diff --git a/jstests/replsets/read_majority_two_arbs.js b/jstests/replsets/read_majority_two_arbs.js index 22447975e4b..1995f907dfa 100644 --- a/jstests/replsets/read_majority_two_arbs.js +++ b/jstests/replsets/read_majority_two_arbs.js @@ -8,52 +8,54 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority. (function() { -"use strict"; - -// Set up a set and grab things for later. 
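The disableSnapshotting failpoint used above is the lever that freezes a node's committed view: while it is alwaysOn the node stops taking new snapshots, so majority reads keep serving the last committed state even though the same writes are already visible at level local. A sketch of the toggle, assuming a direct connection secondary as in these tests:

// Freeze the committed view; no new snapshots are taken.
secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'});

// ... writes replicate here but stay invisible to "majority" reads ...

// Thaw it; committed reads catch up to the newest replicated state.
secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'});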
-var name = "read_majority_two_arbs"; -var replTest = new ReplSetTest({name: name, - nodes: 3, - nodeOptions: {enableMajorityReadConcern: ''}}); - -if (!startSetIfSupportsReadMajority(replTest)) { - jsTest.log("skipping test since storage engine doesn't support committed reads"); - return; -} - -var nodes = replTest.nodeList(); -replTest.initiate({"_id": name, - "members": [ - {"_id": 0, "host": nodes[0]}, - {"_id": 1, "host": nodes[1], arbiterOnly: true}, - {"_id": 2, "host": nodes[2], arbiterOnly: true}] - }); - -var primary = replTest.getPrimary(); -var db = primary.getDB(name); -var t = db[name]; - -function doRead(readConcern) { - var res = assert.commandWorked(t.runCommand('find', readConcern)); - var docs = (new DBCommandCursor(db.getMongo(), res)).toArray(); - assert.gt(docs.length, 0, "no docs returned!"); - return docs[0].state; -} - -function doDirtyRead() { - return doRead({"readConcern": {"level": "local"}}); -} - -function doCommittedRead() { - return doRead({"readConcern": {"level": "majority"}}); -} - -jsTest.log("doing write"); -assert.writeOK(t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10*1000}})); -jsTest.log("doing read"); -assert.eq(doDirtyRead(), 0); -jsTest.log("doing committed read"); -assert.eq(doCommittedRead(), 0); -jsTest.log("stopping replTest; test completed successfully"); -replTest.stopSet(); + "use strict"; + + // Set up a set and grab things for later. + var name = "read_majority_two_arbs"; + var replTest = + new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}}); + + if (!startSetIfSupportsReadMajority(replTest)) { + jsTest.log("skipping test since storage engine doesn't support committed reads"); + return; + } + + var nodes = replTest.nodeList(); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1], arbiterOnly: true}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] + }); + + var primary = replTest.getPrimary(); + var db = primary.getDB(name); + var t = db[name]; + + function doRead(readConcern) { + var res = assert.commandWorked(t.runCommand('find', readConcern)); + var docs = (new DBCommandCursor(db.getMongo(), res)).toArray(); + assert.gt(docs.length, 0, "no docs returned!"); + return docs[0].state; + } + + function doDirtyRead() { + return doRead({"readConcern": {"level": "local"}}); + } + + function doCommittedRead() { + return doRead({"readConcern": {"level": "majority"}}); + } + + jsTest.log("doing write"); + assert.writeOK( + t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}})); + jsTest.log("doing read"); + assert.eq(doDirtyRead(), 0); + jsTest.log("doing committed read"); + assert.eq(doCommittedRead(), 0); + jsTest.log("stopping replTest; test completed successfully"); + replTest.stopSet(); }()); diff --git a/jstests/replsets/reconfig.js b/jstests/replsets/reconfig.js index c9dac4f6a9e..1351aa1d54b 100644 --- a/jstests/replsets/reconfig.js +++ b/jstests/replsets/reconfig.js @@ -2,10 +2,10 @@ * Simple test to ensure that an invalid reconfig fails, a valid one succeeds, and a reconfig won't * succeed without force if force is needed. 
*/ -(function () { +(function() { "use strict"; var numNodes = 5; - var replTest = new ReplSetTest({ name: 'testSet', nodes: numNodes }); + var replTest = new ReplSetTest({name: 'testSet', nodes: numNodes}); var nodes = replTest.startSet(); replTest.initiate(); @@ -23,10 +23,14 @@ jsTestLog("Invalid reconfig"); config.version++; - var badMember = {_id: numNodes, host: "localhost:12345", priority: "High"}; + var badMember = { + _id: numNodes, + host: "localhost:12345", + priority: "High" + }; config.members.push(badMember); var invalidConfigCode = 93; - assert.commandFailedWithCode(primary.adminCommand({replSetReconfig : config}), + assert.commandFailedWithCode(primary.adminCommand({replSetReconfig: config}), invalidConfigCode); jsTestLog("No force when needed."); @@ -35,8 +39,7 @@ config.members[nodes.indexOf(secondary)].priority = 5; var admin = secondary.getDB("admin"); var forceRequiredCode = 10107; - assert.commandFailedWithCode(admin.runCommand({replSetReconfig: config}), - forceRequiredCode); + assert.commandFailedWithCode(admin.runCommand({replSetReconfig: config}), forceRequiredCode); jsTestLog("Force when appropriate"); assert.commandWorked(admin.runCommand({replSetReconfig: config, force: true})); diff --git a/jstests/replsets/reconfig_prohibits_w0.js b/jstests/replsets/reconfig_prohibits_w0.js index 9e96fc632ed..4b785d76e90 100644 --- a/jstests/replsets/reconfig_prohibits_w0.js +++ b/jstests/replsets/reconfig_prohibits_w0.js @@ -10,9 +10,7 @@ var nodes = replTest.nodeList(); var conns = replTest.startSet(); var admin = conns[0].getDB("admin"); -replTest.initiate({ - _id: 'prohibit_w0', - members: [{_id: 0, host: nodes[0]}]}); +replTest.initiate({_id: 'prohibit_w0', members: [{_id: 0, host: nodes[0]}]}); function testReconfig(gleDefaults) { var conf = admin.runCommand({replSetGetConfig: 1}).config; @@ -28,13 +26,11 @@ function testReconfig(gleDefaults) { /* * Try to reconfig with w: 0 in getLastErrorDefaults. */ -testReconfig({ - getLastErrorDefaults: {w: 0}}); +testReconfig({getLastErrorDefaults: {w: 0}}); /* * Try to reconfig with w: 0 and other options in getLastErrorDefaults. 
*/ -testReconfig({ - getLastErrorDefaults: {w: 0, j: false, wtimeout: 100, fsync: true}}); +testReconfig({getLastErrorDefaults: {w: 0, j: false, wtimeout: 100, fsync: true}}); replTest.stopSet(); diff --git a/jstests/replsets/reconfig_tags.js b/jstests/replsets/reconfig_tags.js index 3fa4cb71041..3c4d0e2616d 100644 --- a/jstests/replsets/reconfig_tags.js +++ b/jstests/replsets/reconfig_tags.js @@ -1,7 +1,7 @@ // test that reconfigging only tag changes is properly reflected in isMaster -var replTest = new ReplSetTest({ nodes: 2 }); -replTest.startSet({ oplogSize: 10 }); +var replTest = new ReplSetTest({nodes: 2}); +replTest.startSet({oplogSize: 10}); replTest.initiate(); replTest.awaitSecondaryNodes(); @@ -12,9 +12,15 @@ var rsConfig = primary.getDB("local").system.replset.findOne(); jsTest.log('got rsconf ' + tojson(rsConfig)); rsConfig.members.forEach(function(member) { if (member.host == primary.host) { - member.tags = { dc: 'ny', tag: 'one' }; + member.tags = { + dc: 'ny', + tag: 'one' + }; } else { - member.tags = { dc: 'ny', tag: 'two' }; + member.tags = { + dc: 'ny', + tag: 'two' + }; } }); @@ -23,10 +29,9 @@ rsConfig.version++; jsTest.log('new rsconf ' + tojson(rsConfig)); try { - var res = primary.adminCommand({ replSetReconfig: rsConfig }); - jsTest.log('reconfig res: ' + tojson(res)); // Should not see this -} -catch(e) { + var res = primary.adminCommand({replSetReconfig: rsConfig}); + jsTest.log('reconfig res: ' + tojson(res)); // Should not see this +} catch (e) { jsTest.log('replSetReconfig error: ' + e); } @@ -35,7 +40,7 @@ replTest.awaitSecondaryNodes(); var testDB = primary.getDB('test'); var newConn = new Mongo(primary.host); -var isMaster = newConn.adminCommand({ isMaster: 1 }); +var isMaster = newConn.adminCommand({isMaster: 1}); assert(isMaster.tags != null, 'isMaster: ' + tojson(isMaster)); print('success: ' + tojson(isMaster)); diff --git a/jstests/replsets/reconfig_without_increased_queues.js b/jstests/replsets/reconfig_without_increased_queues.js index 2ae45988fa4..d54ab6c253b 100644 --- a/jstests/replsets/reconfig_without_increased_queues.js +++ b/jstests/replsets/reconfig_without_increased_queues.js @@ -2,11 +2,11 @@ * Test which configures various configs (hidden/priorities/no-chaining) that replExec queues * stay at reasonable/stable levels after repeated reconfigs/stepdowns */ -(function () { +(function() { "use strict"; var numNodes = 5; var maxQueueSizeExpected = 11; - var replTest = new ReplSetTest({ name: 'testSet', nodes: numNodes }); + var replTest = new ReplSetTest({name: 'testSet', nodes: numNodes}); var nodes = replTest.startSet(); replTest.initiate(); @@ -53,15 +53,14 @@ return false; } return true; - }, "queues too high", 13 * 1000 /*13 secs*/); // what we are looking for has a 10s timeout. + }, "queues too high", 13 * 1000 /*13 secs*/); // what we are looking for has a 10s timeout. 
}; var reconfig = function(newConfig) { newConfig.version += 1; try { assert.commandWorked(replTest.getPrimary().adminCommand({replSetReconfig: newConfig})); - } - catch (e) { + } catch (e) { if (tojson(e).indexOf("error doing query: failed") < 0) { throw e; } @@ -77,7 +76,7 @@ c.members[2].priority = 0; reconfig(c); - for(var i=0;i<50;i++) { + for (var i = 0; i < 50; i++) { reconfig(c); testQueues(); } @@ -89,7 +88,7 @@ c.members[4].priority = 1000; reconfig(c); - for(var i=0;i<50;i++) { + for (var i = 0; i < 50; i++) { reconfig(c); testQueues(); } diff --git a/jstests/replsets/reindex_secondary.js b/jstests/replsets/reindex_secondary.js index 42c2149126c..8c812a068ad 100644 --- a/jstests/replsets/reindex_secondary.js +++ b/jstests/replsets/reindex_secondary.js @@ -1,4 +1,4 @@ -var replTest = new ReplSetTest( {name: 'reindexTest', nodes: 2} ); +var replTest = new ReplSetTest({name: 'reindexTest', nodes: 2}); var nodes = replTest.startSet(); @@ -8,23 +8,22 @@ var master = replTest.getPrimary(); replTest.awaitSecondaryNodes(); var slaves = replTest.liveNodes.slaves; -assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length ); +assert(slaves.length == 1, "Expected 1 slave but length was " + slaves.length); slave = slaves[0]; db = master.getDB("reindexTest"); slaveDb = slave.getDB("reindexTest"); // Setup index -db.foo.insert({a:1000}); +db.foo.insert({a: 1000}); -db.foo.ensureIndex({a:1}); +db.foo.ensureIndex({a: 1}); replTest.awaitReplication(); assert.eq(2, db.foo.getIndexes().length, "Master didn't have proper indexes before reindex"); assert.eq(2, slaveDb.foo.getIndexes().length, "Slave didn't have proper indexes before reindex"); - // Try to reindex secondary slaveDb.foo.reIndex(); diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js index eab87207508..cde4974677a 100644 --- a/jstests/replsets/remove1.js +++ b/jstests/replsets/remove1.js @@ -13,14 +13,14 @@ var name = "removeNodes"; var host = getHostName(); print("Start set with two nodes"); -var replTest = new ReplSetTest( {name: name, nodes: 2} ); +var replTest = new ReplSetTest({name: name, nodes: 2}); var nodes = replTest.startSet(); replTest.initiate(); var master = replTest.getPrimary(); var secondary = replTest.getSecondary(); print("Initial sync"); -master.getDB("foo").bar.baz.insert({x:1}); +master.getDB("foo").bar.baz.insert({x: 1}); replTest.awaitReplication(); @@ -34,53 +34,53 @@ for (var i = 0; i < config.members.length; i++) { } config.version = 2; -assert.eq(secondary.getDB("admin").runCommand({ping:1}).ok, +assert.eq(secondary.getDB("admin").runCommand({ping: 1}).ok, 1, "we should be connected to the secondary"); try { - master.getDB("admin").runCommand({replSetReconfig:config}); -} -catch(e) { + master.getDB("admin").runCommand({replSetReconfig: config}); +} catch (e) { print(e); } // This tests that the secondary disconnects us when it picks up the new config. -assert.soon( - function() { - try { - secondary.getDB("admin").runCommand({ping:1}); - } catch (e) { - return true; - } - return false; +assert.soon(function() { + try { + secondary.getDB("admin").runCommand({ping: 1}); + } catch (e) { + return true; } -); + return false; +}); // Now we should successfully reconnect to the secondary. 
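The reconfig helper above also shows the idiom the rest of these files repeat: fetch the live config, mutate it, bump version, and resubmit, retrying while the primary still sees members as down (or passing force: true where a clean reconfig cannot succeed). A minimal sketch of the retrying form, assuming a running set replTest; the tag values are made up:

var primary = replTest.getPrimary();
var config = primary.getDB("local").system.replset.findOne();

// Example mutation: retag the first member.
config.members[0].tags = {dc: 'ny', tag: 'one'};

// An accepted reconfig must carry a strictly higher version.
config.version++;

assert.soon(function() {
    try {
        assert.commandWorked(primary.adminCommand({replSetReconfig: config}));
        return true;
    } catch (e) {
        return false;  // e.g. transient "member down" rejection; retry
    }
});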
-assert.eq(secondary.getDB("admin").runCommand({ping:1}).ok, 1, +assert.eq(secondary.getDB("admin").runCommand({ping: 1}).ok, + 1, "we aren't connected to the secondary"); reconnect(master); assert.soon(function() { - var c = master.getDB("local").system.replset.findOne(); - return c.version == 2; + var c = master.getDB("local").system.replset.findOne(); + return c.version == 2; }); print("Add it back as a secondary"); -config.members.push({_id:2, host : secondary.host}); +config.members.push({_id: 2, host: secondary.host}); config.version = 3; // Need to keep retrying reconfig here, as it will not work at first due to the primary's // perception that the secondary is still "down". -assert.soon(function() { try { - reconfig(replTest, config); - return true; -} catch (e) { - return false; -} }); +assert.soon(function() { + try { + reconfig(replTest, config); + return true; + } catch (e) { + return false; + } +}); master = replTest.getPrimary(); -printjson(master.getDB("admin").runCommand({replSetGetStatus:1})); +printjson(master.getDB("admin").runCommand({replSetGetStatus: 1})); var newConfig = master.getDB("local").system.replset.findOne(); print("newConfig: " + tojson(newConfig)); assert.eq(newConfig.version, 3); @@ -90,26 +90,24 @@ replTest.stop(secondary); assert.soon(function() { try { - return master.getDB("admin").runCommand({isMaster : 1}).secondary; - } - catch(e) { - print("trying to get master: "+e); + return master.getDB("admin").runCommand({isMaster: 1}).secondary; + } catch (e) { + print("trying to get master: " + e); } -},"waiting for primary to step down",(60*1000),1000); +}, "waiting for primary to step down", (60 * 1000), 1000); config.version = 4; config.members.pop(); try { - master.getDB("admin").runCommand({replSetReconfig : config, force : true}); -} -catch(e) { + master.getDB("admin").runCommand({replSetReconfig: config, force: true}); +} catch (e) { print(e); } reconnect(master); assert.soon(function() { - return master.getDB("admin").runCommand({isMaster : 1}).ismaster; -},"waiting for old primary to accept reconfig and step up",(60*1000),1000); + return master.getDB("admin").runCommand({isMaster: 1}).ismaster; +}, "waiting for old primary to accept reconfig and step up", (60 * 1000), 1000); config = master.getDB("local").system.replset.findOne(); printjson(config); diff --git a/jstests/replsets/repl_options.js b/jstests/replsets/repl_options.js index 1d7a858a473..66a07787889 100644 --- a/jstests/replsets/repl_options.js +++ b/jstests/replsets/repl_options.js @@ -4,36 +4,29 @@ load('jstests/libs/command_line/test_parsed_options.js'); jsTest.log("Testing \"replSet\" command line option"); var expectedResult = { - "parsed" : { - "replication" : { - "replSet" : "mycmdlinename" - } - } + "parsed": {"replication": {"replSet": "mycmdlinename"}} }; -testGetCmdLineOptsMongod({ replSet : "mycmdlinename" }, expectedResult); +testGetCmdLineOptsMongod({replSet: "mycmdlinename"}, expectedResult); jsTest.log("Testing \"replication.replSetName\" config file option"); expectedResult = { - "parsed" : { - "config" : "jstests/libs/config_files/set_replsetname.json", - "replication" : { - "replSetName" : "myconfigname" - } + "parsed": { + "config": "jstests/libs/config_files/set_replsetname.json", + "replication": {"replSetName": "myconfigname"} } }; -testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/set_replsetname.json" }, +testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_replsetname.json"}, expectedResult); jsTest.log("Testing override of 
\"replication.replSetName\" config file option with \"replSet\""); expectedResult = { - "parsed" : { - "config" : "jstests/libs/config_files/set_replsetname.json", - "replication" : { - "replSet" : "mycmdlinename" - } + "parsed": { + "config": "jstests/libs/config_files/set_replsetname.json", + "replication": {"replSet": "mycmdlinename"} } }; -testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/set_replsetname.json", - replSet : "mycmdlinename" }, expectedResult); +testGetCmdLineOptsMongod( + {config: "jstests/libs/config_files/set_replsetname.json", replSet: "mycmdlinename"}, + expectedResult); print(baseName + " succeeded."); diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js index 49bd10fd2d0..ed254f99758 100644 --- a/jstests/replsets/replset1.js +++ b/jstests/replsets/replset1.js @@ -2,20 +2,20 @@ var ssl_options1; var ssl_options2; var ssl_name; load("jstests/replsets/rslib.js"); -var doTest = function( signal ) { +var doTest = function(signal) { // Test basic replica set functionality. // -- Replication // -- Failover - - // Choose a name that is unique to the options specified. // This is important because we are depending on a fresh replicaSetMonitor for each run; - // each differently-named replica set gets its own monitor. + // each differently-named replica set gets its own monitor. // n0 and n1 get the same SSL config since there are 3 nodes but only 2 different configs - var replTest = new ReplSetTest( {name: 'testSet' + ssl_name, nodes: - {n0: ssl_options1, n1: ssl_options1, n2: ssl_options2}}); + var replTest = new ReplSetTest({ + name: 'testSet' + ssl_name, + nodes: {n0: ssl_options1, n1: ssl_options1, n2: ssl_options2} + }); // call startSet() to start each mongod in the replica set // this returns a list of nodes @@ -50,45 +50,42 @@ var doTest = function( signal ) { // and slaves in the set and wait until the change has replicated. replTest.awaitReplication(); - - var cppconn = new Mongo( replTest.getURL() ).getDB( "foo" ); - assert.eq( 1000 , cppconn.foo.findOne().a , "cppconn 1" ); + var cppconn = new Mongo(replTest.getURL()).getDB("foo"); + assert.eq(1000, cppconn.foo.findOne().a, "cppconn 1"); { // check c++ finding other servers var temp = replTest.getURL(); - temp = temp.substring( 0 , temp.lastIndexOf( "," ) ); - temp = new Mongo( temp ).getDB( "foo" ); - assert.eq( 1000 , temp.foo.findOne().a , "cppconn 1" ); + temp = temp.substring(0, temp.lastIndexOf(",")); + temp = new Mongo(temp).getDB("foo"); + assert.eq(1000, temp.foo.findOne().a, "cppconn 1"); } - // Here's how to stop the master node - var master_id = replTest.getNodeId( master ); - replTest.stop( master_id ); + var master_id = replTest.getNodeId(master); + replTest.stop(master_id); // Now let's see who the new master is: var new_master = replTest.getPrimary(); // Is the new master the same as the old master? - var new_master_id = replTest.getNodeId( new_master ); + var new_master_id = replTest.getNodeId(new_master); - assert( master_id != new_master_id, "Old master shouldn't be equal to new master." 
); + assert(master_id != new_master_id, "Old master shouldn't be equal to new master."); reconnect(cppconn); - assert.eq( 1000 , cppconn.foo.findOne().a , "cppconn 2" ); + assert.eq(1000, cppconn.foo.findOne().a, "cppconn 2"); // Now let's write some documents to the new master var bulk = new_master.getDB("bar").bar.initializeUnorderedBulkOp(); - for(var i=0; i<1000; i++) { - bulk.insert({ a: i }); + for (var i = 0; i < 1000; i++) { + bulk.insert({a: i}); } bulk.execute(); // Here's how to restart the old master node: var slave = replTest.restart(master_id); - // Now, let's make sure that the old master comes up as a slave assert.soon(function() { var res = slave.getDB("admin").runCommand({ismaster: 1}); @@ -99,23 +96,23 @@ var doTest = function( signal ) { // And we need to make sure that the replset comes back up assert.soon(function() { var res = new_master.getDB("admin").runCommand({replSetGetStatus: 1}); - printjson( res ); + printjson(res); return res.myState == 1; }); // And that both slave nodes have all the updates new_master = replTest.getPrimary(); - assert.eq( 1000 , new_master.getDB( "bar" ).runCommand( { count:"bar"} ).n , "assumption 2"); + assert.eq(1000, new_master.getDB("bar").runCommand({count: "bar"}).n, "assumption 2"); replTest.awaitSecondaryNodes(); replTest.awaitReplication(); var slaves = replTest.liveNodes.slaves; - assert( slaves.length == 2, "Expected 2 slaves but length was " + slaves.length ); + assert(slaves.length == 2, "Expected 2 slaves but length was " + slaves.length); slaves.forEach(function(slave) { slave.setSlaveOk(); var count = slave.getDB("bar").runCommand({count: "bar"}); - printjson( count ); - assert.eq( 1000 , count.n , "slave count wrong: " + slave ); + printjson(count); + assert.eq(1000, count.n, "slave count wrong: " + slave); }); // last error @@ -126,33 +123,39 @@ var doTest = function( signal ) { var db = master.getDB("foo"); var t = db.foo; - var ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } ); + var ts = slaves.map(function(z) { + z.setSlaveOk(); + return z.getDB("foo").foo; + }); t.save({a: 1000}); - t.ensureIndex( { a : 1 } ); + t.ensureIndex({a: 1}); - var result = db.runCommand({getLastError : 1, w: 3 , wtimeout :30000 }); + var result = db.runCommand({getLastError: 1, w: 3, wtimeout: 30000}); printjson(result); var lastOp = result.lastOp; - var lastOplogOp = master.getDB("local").oplog.rs.find().sort({$natural : -1}).limit(1).next(); + var lastOplogOp = master.getDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next(); if (replTest.getReplSetConfigFromNode().protocolVersion != 1) { assert.eq(lastOplogOp['ts'], lastOp); - } - else { + } else { assert.eq(lastOplogOp['ts'], lastOp['ts']); assert.eq(lastOplogOp['t'], lastOp['t']); } - ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } ); + ts.forEach(function(z) { + assert.eq(2, z.getIndexKeys().length, "A " + z.getMongo()); + }); t.reIndex(); - db.getLastError( 3 , 30000 ); - ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } ); + db.getLastError(3, 30000); + ts.forEach(function(z) { + assert.eq(2, z.getIndexKeys().length, "A " + z.getMongo()); + }); // Shut down the set and finish the test. 
- replTest.stopSet( signal ); + replTest.stopSet(signal); }; -doTest( 15 ); +doTest(15); print("replset1.js SUCCESS"); diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js index 45e37b8551e..b50a939242d 100644 --- a/jstests/replsets/replset2.js +++ b/jstests/replsets/replset2.js @@ -1,12 +1,12 @@ load("jstests/replsets/rslib.js"); -doTest = function (signal) { +doTest = function(signal) { // Test replication with write concern. // Replica set testing API // Create a new replica set test. Specify set name and the number of nodes you want. - var replTest = new ReplSetTest({ name: 'testSet', nodes: 3, oplogSize: 5 }); + var replTest = new ReplSetTest({name: 'testSet', nodes: 3, oplogSize: 5}); // call startSet() to start each mongod in the replica set // this returns a list of nodes @@ -23,13 +23,15 @@ doTest = function (signal) { var master = replTest.getPrimary(); // Wait for replication to a single node - master.getDB(testDB).bar.insert({ n: 1 }); + master.getDB(testDB).bar.insert({n: 1}); // Wait for states to become PRI,SEC,SEC waitForAllMembers(master.getDB(testDB)); var slaves = replTest.liveNodes.slaves; - slaves.forEach(function (slave) { slave.setSlaveOk(); }); + slaves.forEach(function(slave) { + slave.setSlaveOk(); + }); // Test write concern with multiple inserts. print("\n\nreplset2.js **** Try inserting a multiple records -- first insert ****"); @@ -37,87 +39,88 @@ doTest = function (signal) { printjson(master.getDB("admin").runCommand("replSetGetStatus")); var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp(); - bulk.insert({ n: 1 }); - bulk.insert({ n: 2 }); - bulk.insert({ n: 3 }); + bulk.insert({n: 1}); + bulk.insert({n: 2}); + bulk.insert({n: 3}); print("\nreplset2.js **** TEMP 1 ****"); printjson(master.getDB("admin").runCommand("replSetGetStatus")); - assert.writeOK(bulk.execute({ w: 3, wtimeout: 25000 })); + assert.writeOK(bulk.execute({w: 3, wtimeout: 25000})); print("replset2.js **** TEMP 1a ****"); - m1 = master.getDB(testDB).foo.findOne({ n: 1 }); + m1 = master.getDB(testDB).foo.findOne({n: 1}); printjson(m1); assert(m1['n'] == 1, "replset2.js Failed to save to master on multiple inserts"); print("replset2.js **** TEMP 1b ****"); - var s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 }); + var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1}); assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0 on multiple inserts"); - var s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 }); + var s1 = slaves[1].getDB(testDB).foo.findOne({n: 1}); assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1 on multiple inserts"); // Test write concern with a simple insert print("replset2.js **** Try inserting a single record ****"); master.getDB(testDB).dropDatabase(); - var options = { writeConcern: { w: 3, wtimeout: 10000 }}; - assert.writeOK(master.getDB(testDB).foo.insert({ n: 1 }, options)); + var options = { + writeConcern: {w: 3, wtimeout: 10000} + }; + assert.writeOK(master.getDB(testDB).foo.insert({n: 1}, options)); - m1 = master.getDB(testDB).foo.findOne({ n: 1 }); + m1 = master.getDB(testDB).foo.findOne({n: 1}); printjson(m1); assert(m1['n'] == 1, "replset2.js Failed to save to master"); - s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 }); + s0 = slaves[0].getDB(testDB).foo.findOne({n: 1}); assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0"); - s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 }); + s1 = slaves[1].getDB(testDB).foo.findOne({n: 1}); assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1"); 
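The write-concern plumbing being exercised here is per-operation: w is how many nodes must acknowledge, wtimeout bounds the wait, and both single writes and bulk batches accept it. A sketch against a three-node set, with master/testDB as in this test:

// Single insert acknowledged by all three nodes, or an error after 10s.
assert.writeOK(master.getDB(testDB).foo.insert(
    {n: 1}, {writeConcern: {w: 3, wtimeout: 10000}}));

// The same concern applied to an entire bulk batch at execute() time.
var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp();
bulk.insert({n: 2});
bulk.insert({n: 3});
assert.writeOK(bulk.execute({w: 3, wtimeout: 25000}));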
print("replset2.js **** Try inserting many records ****"); try { - var bigData = new Array(2000).toString(); - bulk = master.getDB(testDB).baz.initializeUnorderedBulkOp(); - for (var n = 0; n < 1000; n++) { - bulk.insert({ n: n, data: bigData }); - } - assert.writeOK(bulk.execute({ w: 3, wtimeout: 60000 })); - - print("replset2.js **** V1 "); - - var verifyReplication = function (nodeName, collection) { - data = collection.findOne({ n: 1 }); - assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName); - data = collection.findOne({ n: 999 }); - assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName); - }; - - print("replset2.js **** V2 "); - - verifyReplication("master", master.getDB(testDB).baz); - verifyReplication("slave 0", slaves[0].getDB(testDB).baz); - verifyReplication("slave 1", slaves[1].getDB(testDB).baz); - } - catch(e) { - var errstr = "ERROR: " + e; - errstr += "\nMaster oplog findOne:\n"; - errstr += tojson( - master.getDB("local").oplog.rs.find().sort({"$natural":-1}).limit(1).next()); - errstr += "\nSlave 0 oplog findOne:\n"; - errstr += tojson( - slaves[0].getDB("local").oplog.rs.find().sort({"$natural":-1}).limit(1).next()); - errstr += "\nSlave 1 oplog findOne:\n"; - errstr += tojson( - slaves[1].getDB("local").oplog.rs.find().sort({"$natural":-1}).limit(1).next()); - assert(false, errstr); + var bigData = new Array(2000).toString(); + bulk = master.getDB(testDB).baz.initializeUnorderedBulkOp(); + for (var n = 0; n < 1000; n++) { + bulk.insert({n: n, data: bigData}); + } + assert.writeOK(bulk.execute({w: 3, wtimeout: 60000})); + + print("replset2.js **** V1 "); + + var verifyReplication = function(nodeName, collection) { + data = collection.findOne({n: 1}); + assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName); + data = collection.findOne({n: 999}); + assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName); + }; + + print("replset2.js **** V2 "); + + verifyReplication("master", master.getDB(testDB).baz); + verifyReplication("slave 0", slaves[0].getDB(testDB).baz); + verifyReplication("slave 1", slaves[1].getDB(testDB).baz); + } catch (e) { + var errstr = "ERROR: " + e; + errstr += "\nMaster oplog findOne:\n"; + errstr += + tojson(master.getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next()); + errstr += "\nSlave 0 oplog findOne:\n"; + errstr += + tojson(slaves[0].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next()); + errstr += "\nSlave 1 oplog findOne:\n"; + errstr += + tojson(slaves[1].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next()); + assert(false, errstr); } replTest.stopSet(signal); }; -doTest( 15 ); +doTest(15); print("\nreplset2.js SUCCESS\n"); diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js index 6bb29a196ec..2121e395866 100644 --- a/jstests/replsets/replset3.js +++ b/jstests/replsets/replset3.js @@ -1,10 +1,10 @@ -var doTest = function (signal) { +var doTest = function(signal) { "use strict"; // Test replica set step down // Replica set testing API // Create a new replica set test. Specify set name and the number of nodes you want. - var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 }); + var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); // call startSet() to start each mongod in the replica set // this returns a list of nodes @@ -19,7 +19,7 @@ var doTest = function (signal) { // Write some data to master // NOTE: this test fails unless we write some data. 
- master.getDB("foo").foo.insert({ a: 1 }, { writeConcern: { w: 3, wtimeout: 20000 }}); + master.getDB("foo").foo.insert({a: 1}, {writeConcern: {w: 3, wtimeout: 20000}}); var phase = 1; @@ -27,7 +27,7 @@ var doTest = function (signal) { // Step down master. Note: this may close our connection! try { - master.getDB("admin").runCommand({ replSetStepDown: true, force: 1 }); + master.getDB("admin").runCommand({replSetStepDown: true, force: 1}); } catch (err) { print("caught: " + err + " on stepdown"); } @@ -36,9 +36,8 @@ var doTest = function (signal) { try { var new_master = replTest.getPrimary(); - } - catch (err) { - throw ("Could not elect new master before timeout."); + } catch (err) { + throw("Could not elect new master before timeout."); } print(phase++); @@ -48,25 +47,27 @@ var doTest = function (signal) { print(phase++); // Make sure that slaves are still up - var result = new_master.getDB("admin").runCommand({ replSetGetStatus: 1 }); + var result = new_master.getDB("admin").runCommand({replSetGetStatus: 1}); assert(result['ok'] == 1, "Could not verify that slaves were still up:" + result); print(phase++); var slaves = replTest.liveNodes.slaves; - assert.soon(function () { + assert.soon(function() { try { - var res = slaves[0].getDB("admin").runCommand({ replSetGetStatus: 1 }); - } catch (err) { } + var res = slaves[0].getDB("admin").runCommand({replSetGetStatus: 1}); + } catch (err) { + } return res.myState == 2; }, "Slave 0 state not ready."); print(phase++); - assert.soon(function () { + assert.soon(function() { try { - var res = slaves[1].getDB("admin").runCommand({ replSetGetStatus: 1 }); - } catch (err) { } + var res = slaves[1].getDB("admin").runCommand({replSetGetStatus: 1}); + } catch (err) { + } return res.myState == 2; }, "Slave 1 state not ready."); @@ -75,4 +76,4 @@ var doTest = function (signal) { replTest.stopSet(15); }; -doTest( 15 ); +doTest(15); diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js index 8aecb715130..e6df067d1ea 100644 --- a/jstests/replsets/replset4.js +++ b/jstests/replsets/replset4.js @@ -1,7 +1,7 @@ -doTest = function (signal) { +doTest = function(signal) { // Test orphaned master steps down - var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 }); + var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); replTest.startSet(); replTest.initiate(); @@ -17,23 +17,20 @@ doTest = function (signal) { print("replset4.js 1"); - assert.soon( - function () { - try { - var result = master.getDB("admin").runCommand({ ismaster: 1 }); - return (result['ok'] == 1 && result['ismaster'] == false); - } catch (e) { - print("replset4.js caught " + e); - return false; - } - }, - "Master fails to step down when orphaned." 
- ); + assert.soon(function() { + try { + var result = master.getDB("admin").runCommand({ismaster: 1}); + return (result['ok'] == 1 && result['ismaster'] == false); + } catch (e) { + print("replset4.js caught " + e); + return false; + } + }, "Master fails to step down when orphaned."); print("replset4.js worked, stopping"); replTest.stopSet(signal); }; print("replset4.js"); -doTest( 15 ); +doTest(15); print("replset4.js SUCCESS"); diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js index 717a0c8153b..c0aee6e1154 100644 --- a/jstests/replsets/replset5.js +++ b/jstests/replsets/replset5.js @@ -1,17 +1,20 @@ // rs test getlasterrordefaults load("jstests/replsets/rslib.js"); -(function () { +(function() { "use strict"; // Test write concern defaults - var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 }); + var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); var nodes = replTest.startSet(); // Initiate set with default for write concern var config = replTest.getReplSetConfig(); config.settings = {}; - config.settings.getLastErrorDefaults = { 'w': 3, 'wtimeout': 20000 }; + config.settings.getLastErrorDefaults = { + 'w': 3, + 'wtimeout': 20000 + }; config.settings.heartbeatTimeoutSecs = 15; // Prevent node 2 from becoming primary, as we will attempt to set it to hidden later. config.members[2].priority = 0; @@ -24,18 +27,18 @@ load("jstests/replsets/rslib.js"); var testDB = "foo"; // Initial replication - master.getDB("barDB").bar.save({ a: 1 }); + master.getDB("barDB").bar.save({a: 1}); replTest.awaitReplication(); // These writes should be replicated immediately var docNum = 5000; var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp(); for (var n = 0; n < docNum; n++) { - bulk.insert({ n: n }); + bulk.insert({n: n}); } // should use the configured last error defaults from above, that's what we're testing. - // + // // If you want to test failure, just add values for w and wtimeout (e.g. w=1) // to the following command. This will override the default set above and // prevent replication from happening in time for the count tests below. 
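replset5.js above leans on getLastErrorDefaults rather than per-write concerns: a default stored in the set's settings applies to any write that does not name its own concern. A sketch of wiring that default in before initiation, assuming the same three-node shape (set name illustrative):

var replTest = new ReplSetTest({name: 'gleDefaultsExample', nodes: 3});
replTest.startSet();

// Store the default write concern in the replica set config itself.
var config = replTest.getReplSetConfig();
config.settings = {getLastErrorDefaults: {'w': 3, 'wtimeout': 20000}};
replTest.initiate(config);

// This write names no concern, so the configured default applies;
// passing explicit w/wtimeout to getLastError would override it.
var master = replTest.getPrimary();
master.getDB("foo").bar.insert({n: 1});
printjson(master.getDB("foo").runCommand({getLastError: 1}));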
diff --git a/jstests/replsets/replset6.js b/jstests/replsets/replset6.js index a55c44aaea7..29adfa0ae3c 100644 --- a/jstests/replsets/replset6.js +++ b/jstests/replsets/replset6.js @@ -3,7 +3,7 @@ baseName = "jstests_replsets_replset6"; -var rt = new ReplSetTest({ name : "replset6tests" , nodes: 2 }); +var rt = new ReplSetTest({name: "replset6tests", nodes: 2}); var nodes = rt.startSet(); rt.initiate(); var m = rt.getPrimary(); @@ -11,42 +11,51 @@ rt.awaitSecondaryNodes(); var slaves = rt.liveNodes.slaves; s = slaves[0]; s.setSlaveOk(); -admin = m.getDB( "admin" ); +admin = m.getDB("admin"); -debug = function( foo ) {}; // print( foo ); } +debug = function(foo) {}; // print( foo ); } // rename within db -m.getDB( baseName ).one.save( { a: 1 } ); -assert.soon( function() { v = s.getDB( baseName ).one.findOne(); return v && 1 == v.a; } ); - -assert.commandWorked( admin.runCommand( {renameCollection:"jstests_replsets_replset6.one", to:"jstests_replsets_replset6.two"} ) ); -assert.soon( function() { - if ( -1 == s.getDB( baseName ).getCollectionNames().indexOf( "two" ) ) { - debug( "no two coll" ); - debug( tojson( s.getDB( baseName ).getCollectionNames() ) ); - return false; - } - if ( !s.getDB( baseName ).two.findOne() ) { - debug( "no two object" ); - return false; - } - return 1 == s.getDB( baseName ).two.findOne().a; }); -assert.eq( -1, s.getDB( baseName ).getCollectionNames().indexOf( "one" ) ); +m.getDB(baseName).one.save({a: 1}); +assert.soon(function() { + v = s.getDB(baseName).one.findOne(); + return v && 1 == v.a; +}); + +assert.commandWorked(admin.runCommand( + {renameCollection: "jstests_replsets_replset6.one", to: "jstests_replsets_replset6.two"})); +assert.soon(function() { + if (-1 == s.getDB(baseName).getCollectionNames().indexOf("two")) { + debug("no two coll"); + debug(tojson(s.getDB(baseName).getCollectionNames())); + return false; + } + if (!s.getDB(baseName).two.findOne()) { + debug("no two object"); + return false; + } + return 1 == s.getDB(baseName).two.findOne().a; +}); +assert.eq(-1, s.getDB(baseName).getCollectionNames().indexOf("one")); // rename to new db first = baseName + "_first"; second = baseName + "_second"; -m.getDB( first ).one.save( { a: 1 } ); -assert.soon( function() { return s.getDB( first ).one.findOne() && 1 == s.getDB( first ).one.findOne().a; } ); - -assert.commandWorked( admin.runCommand( {renameCollection:"jstests_replsets_replset6_first.one", to:"jstests_replsets_replset6_second.two"} ) ); -assert.soon( function() { - return -1 != s.getDBNames().indexOf( second ) && - -1 != s.getDB( second ).getCollectionNames().indexOf( "two" ) && - s.getDB( second ).two.findOne() && - 1 == s.getDB( second ).two.findOne().a; } ); -assert.eq( -1, s.getDB( first ).getCollectionNames().indexOf( "one" ) ); - +m.getDB(first).one.save({a: 1}); +assert.soon(function() { + return s.getDB(first).one.findOne() && 1 == s.getDB(first).one.findOne().a; +}); + +assert.commandWorked(admin.runCommand({ + renameCollection: "jstests_replsets_replset6_first.one", + to: "jstests_replsets_replset6_second.two" +})); +assert.soon(function() { + return -1 != s.getDBNames().indexOf(second) && + -1 != s.getDB(second).getCollectionNames().indexOf("two") && + s.getDB(second).two.findOne() && 1 == s.getDB(second).two.findOne().a; +}); +assert.eq(-1, s.getDB(first).getCollectionNames().indexOf("one")); diff --git a/jstests/replsets/replset7.js b/jstests/replsets/replset7.js index 1c63fd8f35f..8b13f2ed7e2 100644 --- a/jstests/replsets/replset7.js +++ b/jstests/replsets/replset7.js @@ -1,43 
+1,42 @@ // test for SERVER-5040 - if documents move forward during an initial sync. -var rt = new ReplSetTest( { name : "replset7tests" , nodes: 1 } ); +var rt = new ReplSetTest({name: "replset7tests", nodes: 1}); var nodes = rt.startSet(); rt.initiate(); var master = rt.getPrimary(); -var md = master.getDB( 'd' ); -var mdc = md[ 'c' ]; +var md = master.getDB('d'); +var mdc = md['c']; // prep the data var doccount = 5000; var bulk = mdc.initializeUnorderedBulkOp(); -for( i = 0; i < doccount; ++i ) { - bulk.insert( { _id:i, x:i } ); +for (i = 0; i < doccount; ++i) { + bulk.insert({_id: i, x: i}); } assert.writeOK(bulk.execute()); -assert.commandWorked(mdc.ensureIndex( { x : 1 }, { unique: true } )); +assert.commandWorked(mdc.ensureIndex({x: 1}, {unique: true})); // add a secondary var slave = rt.add(); rt.reInitiate(); -print ("initiation complete!"); -var sc = slave.getDB( 'd' )[ 'c' ]; +print("initiation complete!"); +var sc = slave.getDB('d')['c']; slave.setSlaveOk(); // Wait for slave to start cloning. -//assert.soon( function() { c = sc.find( { _id:1, x:1 } ); print( c ); return c > 0; } ); - +// assert.soon( function() { c = sc.find( { _id:1, x:1 } ); print( c ); return c > 0; } ); // Move all documents to the end by growing it bulk = mdc.initializeUnorderedBulkOp(); var bigStr = "ayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayay" + - "ayayayayayayayayayayayay"; + "ayayayayayayayayayayayay"; for (i = 0; i < doccount; ++i) { - bulk.find({ _id: i, x: i }).remove(); - bulk.insert({ _id: doccount + i, x: i, bigstring: bigStr }); + bulk.find({_id: i, x: i}).remove(); + bulk.insert({_id: doccount + i, x: i, bigstring: bigStr}); } assert.writeOK(bulk.execute()); @@ -45,9 +44,8 @@ assert.writeOK(bulk.execute()); rt.awaitSecondaryNodes(); // Do we have an index? -assert.eq(1, slave.getDB( 'd' )['c'].getIndexes().filter(function (doc) { - return (doc.v === 1 - && JSON.stringify(doc.key) === JSON.stringify({x: 1}) - && doc.ns === 'd.c' - && doc.name === 'x_1'); -}).length); +assert.eq(1, + slave.getDB('d')['c'].getIndexes().filter(function(doc) { + return (doc.v === 1 && JSON.stringify(doc.key) === JSON.stringify({x: 1}) && + doc.ns === 'd.c' && doc.name === 'x_1'); + }).length); diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js index ead9c50f066..69a16daa3a6 100644 --- a/jstests/replsets/replset8.js +++ b/jstests/replsets/replset8.js @@ -1,64 +1,64 @@ // test for SERVER-6303 - if documents move backward during an initial sync. -var rt = new ReplSetTest( { name : "replset8tests" , nodes: 1 } ); +var rt = new ReplSetTest({name: "replset8tests", nodes: 1}); var nodes = rt.startSet(); rt.initiate(); var master = rt.getPrimary(); var bigstring = "a"; -var md = master.getDB( 'd' ); -var mdc = md[ 'c' ]; +var md = master.getDB('d'); +var mdc = md['c']; // prep the data // idea: create x documents of increasing size, then create x documents of size n. -// delete first x documents. start initial sync (cloner). update all remaining +// delete first x documents. start initial sync (cloner). update all remaining // documents to be increasing size. // this should result in the updates moving the docs backwards. 
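replset7.js and replset8.js stage their races the same way: load up the lone primary first, then attach the secondary with add()/reInitiate() so the initial-sync cloner runs while the primary keeps mutating (and moving) documents. A skeleton of that sequence, assuming a single-node set rt that already holds the prepared data:

// Attach a new secondary; reInitiate() reconfigures the set so the
// new node begins an initial sync (clone) of the existing data.
var slave = rt.add();
rt.reInitiate();

var sc = slave.getDB('d')['c'];
slave.setSlaveOk();

// Keep mutating documents on the primary while the clone is in
// flight, then let the secondary catch up before asserting anything.
// ... grow / move / remove documents here ...
rt.awaitSecondaryNodes();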
var doccount = 5000; // Avoid empty extent issues -mdc.insert( { _id:-1, x:"dummy" } ); +mdc.insert({_id: -1, x: "dummy"}); -print ("inserting bigstrings"); +print("inserting bigstrings"); var bulk = mdc.initializeUnorderedBulkOp(); -for( i = 0; i < doccount; ++i ) { - bulk.insert( { _id:i, x:bigstring } ); +for (i = 0; i < doccount; ++i) { + bulk.insert({_id: i, x: bigstring}); bigstring += "a"; } assert.writeOK(bulk.execute()); -print ("inserting x"); +print("inserting x"); bulk = mdc.initializeUnorderedBulkOp(); -for( i = doccount; i < doccount*2; ++i ) { - bulk.insert( { _id:i, x:i } ); +for (i = doccount; i < doccount * 2; ++i) { + bulk.insert({_id: i, x: i}); } assert.writeOK(bulk.execute()); -print ("deleting bigstrings"); +print("deleting bigstrings"); bulk = mdc.initializeUnorderedBulkOp(); -for( i = 0; i < doccount; ++i ) { - bulk.find({ _id: i }).remove(); +for (i = 0; i < doccount; ++i) { + bulk.find({_id: i}).remove(); } assert.writeOK(bulk.execute()); // add a secondary var slave = rt.add(); rt.reInitiate(); -print ("initiation complete!"); -var sc = slave.getDB( 'd' )[ 'c' ]; +print("initiation complete!"); +var sc = slave.getDB('d')['c']; slave.setSlaveOk(); sleep(25000); -print ("updating documents backwards"); +print("updating documents backwards"); // Move all documents to the beginning by growing them to sizes that should // fit the holes we made in phase 1 bulk = mdc.initializeUnorderedBulkOp(); -for (i = doccount*2; i > doccount; --i) { - mdc.update( { _id:i, x:i }, { _id:i, x:bigstring } ); - bigstring = bigstring.slice(0, -1); // remove last char +for (i = doccount * 2; i > doccount; --i) { + mdc.update({_id: i, x: i}, {_id: i, x: bigstring}); + bigstring = bigstring.slice(0, -1); // remove last char } -print ("finished"); +print("finished"); // Wait for replication to catch up. rt.awaitSecondaryNodes(); -assert.eq(doccount+1, slave.getDB( 'd' )['c'].count()); +assert.eq(doccount + 1, slave.getDB('d')['c'].count()); diff --git a/jstests/replsets/replset9.js b/jstests/replsets/replset9.js index 8ae46863087..c1493908f12 100644 --- a/jstests/replsets/replset9.js +++ b/jstests/replsets/replset9.js @@ -1,47 +1,47 @@ -var rt = new ReplSetTest( { name : "replset9tests" , nodes: 1, oplogSize: 300 } ); +var rt = new ReplSetTest({name: "replset9tests", nodes: 1, oplogSize: 300}); var nodes = rt.startSet(); rt.initiate(); var master = rt.getPrimary(); var bigstring = Array(5000).toString(); -var md = master.getDB( 'd' ); -var mdc = md[ 'c' ]; +var md = master.getDB('d'); +var mdc = md['c']; // idea: while cloner is running, update some docs and then immediately remove them. // oplog will have ops referencing docs that no longer exist. var doccount = 20000; // Avoid empty extent issues -mdc.insert( { _id:-1, x:"dummy" } ); +mdc.insert({_id: -1, x: "dummy"}); // Make this db big so that cloner takes a while. 
-print ("inserting bigstrings"); +print("inserting bigstrings"); var bulk = mdc.initializeUnorderedBulkOp(); -for( i = 0; i < doccount; ++i ) { - mdc.insert({ _id: i, x: bigstring }); +for (i = 0; i < doccount; ++i) { + mdc.insert({_id: i, x: bigstring}); } assert.writeOK(bulk.execute()); // Insert some docs to update and remove -print ("inserting x"); +print("inserting x"); bulk = mdc.initializeUnorderedBulkOp(); -for( i = doccount; i < doccount*2; ++i ) { - bulk.insert({ _id: i, bs: bigstring, x: i }); +for (i = doccount; i < doccount * 2; ++i) { + bulk.insert({_id: i, bs: bigstring, x: i}); } assert.writeOK(bulk.execute()); // add a secondary; start cloning var slave = rt.add(); (function reinitiate() { - var master = rt.nodes[0]; + var master = rt.nodes[0]; var c = master.getDB("local")['system.replset'].findOne(); - var config = rt.getReplSetConfig(); + var config = rt.getReplSetConfig(); config.version = c.version + 1; - var admin = master.getDB("admin"); - var cmd = {}; - var cmdKey = 'replSetReconfig'; + var admin = master.getDB("admin"); + var cmd = {}; + var cmdKey = 'replSetReconfig'; var timeout = timeout || 30000; cmd[cmdKey] = config; printjson(cmd); @@ -53,21 +53,20 @@ var slave = rt.add(); }, "reinitiate replica set", timeout); })(); - -print ("initiation complete!"); -var sc = slave.getDB( 'd' )[ 'c' ]; +print("initiation complete!"); +var sc = slave.getDB('d')['c']; slave.setSlaveOk(); master = rt.getPrimary(); -print ("updating and deleting documents"); +print("updating and deleting documents"); bulk = master.getDB('d')['c'].initializeUnorderedBulkOp(); -for (i = doccount*4; i > doccount; --i) { - bulk.find({ _id: i }).update({ $inc: { x: 1 }}); - bulk.find({ _id: i }).remove(); - bulk.insert({ bs: bigstring }); +for (i = doccount * 4; i > doccount; --i) { + bulk.find({_id: i}).update({$inc: {x: 1}}); + bulk.find({_id: i}).remove(); + bulk.insert({bs: bigstring}); } assert.writeOK(bulk.execute()); -print ("finished"); +print("finished"); // Wait for replication to catch up. rt.awaitReplication(640000); diff --git a/jstests/replsets/replsetadd_profile.js b/jstests/replsets/replsetadd_profile.js index 810b6f5f144..641e7ca7cfd 100644 --- a/jstests/replsets/replsetadd_profile.js +++ b/jstests/replsets/replsetadd_profile.js @@ -8,8 +8,7 @@ // the only node is running at a profiling level of 2. 
 var collectionName = 'jstests_replsetadd_profile';
-var replTest = new ReplSetTest({name: 'ReplSetAddProfileTestSet',
-                                nodes: [{profile: 2}]});
+var replTest = new ReplSetTest({name: 'ReplSetAddProfileTestSet', nodes: [{profile: 2}]});
 replTest.startSet();
 replTest.initiate();
 var master = replTest.getPrimary();
@@ -25,8 +24,9 @@ replTest.waitForState(replTest.nodes[1], ReplSetTest.State.SECONDARY, 60 * 1000)
 replTest.awaitReplication();

 var newNodeCollection = newNode.getDB('test').getCollection(collectionName);
-assert.eq(1, newNodeCollection.find({a: 1}).itcount(),
+assert.eq(1,
+          newNodeCollection.find({a: 1}).itcount(),
           'expect documents to be present in slave after replication');

 var signal = 15;
-replTest.stopSet( signal );
+replTest.stopSet(signal);
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
index 16388c8b92b..8e1712749e4 100644
--- a/jstests/replsets/replsetarb2.js
+++ b/jstests/replsets/replsetarb2.js
@@ -3,16 +3,18 @@
 (function() {
     "use strict";

-    var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+    var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3});
     var nodes = replTest.nodeList();
     var conns = replTest.startSet();
-    var r = replTest.initiate({"_id" : "unicomplex",
-                               "members" : [
-                                   {"_id" : 0, "host" : nodes[0]},
-                                   {"_id" : 1, "host" : nodes[1], "arbiterOnly" : true, "votes": 1},
-                                   {"_id" : 2, "host" : nodes[2]}
-                               ]});
+    var r = replTest.initiate({
+        "_id": "unicomplex",
+        "members": [
+            {"_id": 0, "host": nodes[0]},
+            {"_id": 1, "host": nodes[1], "arbiterOnly": true, "votes": 1},
+            {"_id": 2, "host": nodes[2]}
+        ]
+    });

     // Make sure we have a master
     var master = replTest.getPrimary();
@@ -24,7 +26,7 @@
         return res.myState === 7;
     }, "Arbiter failed to initialize.");

-    var result = conns[1].getDB("admin").runCommand({isMaster : 1});
+    var result = conns[1].getDB("admin").runCommand({isMaster: 1});
     assert(result.arbiterOnly);
     assert(!result.passive);
diff --git a/jstests/replsets/replsetfreeze.js b/jstests/replsets/replsetfreeze.js
index 3467f4bd98f..2629a78fe15 100644
--- a/jstests/replsets/replsetfreeze.js
+++ b/jstests/replsets/replsetfreeze.js
@@ -10,13 +10,12 @@
 * 9: check we get a new master within 30 seconds
 */

-
 var w = 0;
 var wait = function(f) {
     w++;
     var n = 0;
     while (!f()) {
-        if( n % 4 == 0 )
+        if (n % 4 == 0)
             print("replsetfreeze.js waiting " + w);
         if (++n == 4) {
             print("" + f);
@@ -27,26 +26,29 @@ var wait = function(f) {
 };

 var reconnect = function(a) {
-    wait(function() {
-        try {
-            a.getDB("foo").bar.stats();
-            return true;
-        } catch(e) {
-            print(e);
-            return false;
-        }
+    wait(function() {
+        try {
+            a.getDB("foo").bar.stats();
+            return true;
+        } catch (e) {
+            print(e);
+            return false;
+        }
     });
 };

-
 print("1: initialize set");
-var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3});
 var nodes = replTest.nodeList();
 var conns = replTest.startSet();
-var config = {"_id" : "unicomplex", "members" : [
-    {"_id" : 0, "host" : nodes[0] },
-    {"_id" : 1, "host" : nodes[1] },
-    {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]};
+var config = {
+    "_id": "unicomplex",
+    "members": [
+        {"_id": 0, "host": nodes[0]},
+        {"_id": 1, "host": nodes[1]},
+        {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+    ]
+};
 var r = replTest.initiate(config);
 var master = replTest.getPrimary();
 var secondary = replTest.getSecondary();
@@ -55,55 +57,46 @@ replTest.awaitSecondaryNodes();

 print("2: step down m1");
 try {
-    master.getDB("admin").runCommand({replSetStepDown : 1, force : 1});
-}
-catch(e) {
-    print(e);
+    master.getDB("admin").runCommand({replSetStepDown: 1, force: 1});
+} catch (e) {
+    print(e);
 }
 reconnect(master);
-printjson( master.getDB("admin").runCommand({replSetGetStatus: 1}) );
+printjson(master.getDB("admin").runCommand({replSetGetStatus: 1}));

 print("3: freeze set for 30 seconds");
 var start = (new Date()).getTime();
-assert.commandWorked(secondary.getDB("admin").runCommand({replSetFreeze : 30}));
-assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze : 30}));
-
+assert.commandWorked(secondary.getDB("admin").runCommand({replSetFreeze: 30}));
+assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze: 30}));

 print("4: check no one is master for 30 seconds");
-while ((new Date()).getTime() - start < (28 * 1000) ) { // we need less 30 since it takes some time to return... hacky
-    var result = master.getDB("admin").runCommand({isMaster:1});
-    assert.eq(result.ismaster, false);
-    assert.eq(result.primary, undefined);
-    sleep(1000);
+while ((new Date()).getTime() - start <
+       (28 * 1000)) {  // we need less than 30 since it takes some time to return... hacky
+    var result = master.getDB("admin").runCommand({isMaster: 1});
+    assert.eq(result.ismaster, false);
+    assert.eq(result.primary, undefined);
+    sleep(1000);
 }
-
 print("5: check for new master");
 master = replTest.getPrimary();
-
 print("6: step down new master");
 try {
-    master.getDB("admin").runCommand({replSetStepDown : 1, force : 1});
-}
-catch(e) {
-    print(e);
+    master.getDB("admin").runCommand({replSetStepDown: 1, force: 1});
+} catch (e) {
+    print(e);
 }
 reconnect(master);
-
 print("7: freeze for 30 seconds");
-master.getDB("admin").runCommand({replSetFreeze : 30});
+master.getDB("admin").runCommand({replSetFreeze: 30});
 sleep(1000);
-
 print("8: unfreeze");
-master.getDB("admin").runCommand({replSetFreeze : 0});
-
+master.getDB("admin").runCommand({replSetFreeze: 0});

 print("9: check we get a new master within 30 seconds");
 master = replTest.getPrimary();
-
-replTest.stopSet( 15 );
-
+replTest.stopSet(15);
diff --git a/jstests/replsets/replsethostnametrim.js b/jstests/replsets/replsethostnametrim.js
index c303ecdea0d..51edf551f96 100644
--- a/jstests/replsets/replsethostnametrim.js
+++ b/jstests/replsets/replsethostnametrim.js
@@ -1,6 +1,6 @@
 // try reconfiguring with space at the end of the host:port

-var replTest = new ReplSetTest({ name: 'testSet', nodes: 1 });
+var replTest = new ReplSetTest({name: 'testSet', nodes: 1});
 var nodes = replTest.startSet();
 replTest.initiate();

@@ -9,13 +9,13 @@ var config = master.getDB("local").system.replset.findOne();
 config.version++;
 var origHost = config.members[0].host;
 config.members[0].host = origHost + " ";
-var result = master.adminCommand({replSetReconfig : config});
+var result = master.adminCommand({replSetReconfig: config});
 assert.eq(result.ok, 1, tojson(result));
-//print("current (bad) config:"); printjson(config);
+// print("current (bad) config:"); printjson(config);

-//check new config to make sure it doesn't have a space in the hostname
+// check new config to make sure it doesn't have a space in the hostname
 config = master.getDB("local").system.replset.findOne();
 assert.eq(origHost, config.members[0].host);
-//print("current (good) config:"); printjson(config);
+// print("current (good) config:"); printjson(config);

 replTest.stopSet();
\ No newline at end of file diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js index 37da3c6474d..d71c10383ea 100644 --- a/jstests/replsets/replsetprio1.js +++ b/jstests/replsets/replsetprio1.js @@ -2,15 +2,18 @@ (function() { "use strict"; - var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} ); + var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); var nodenames = replTest.nodeList(); var nodes = replTest.startSet(); - replTest.initiate({"_id" : "testSet", - "members" : [ - {"_id" : 0, "host" : nodenames[0], "priority" : 1}, - {"_id" : 1, "host" : nodenames[1], "priority" : 2}, - {"_id" : 2, "host" : nodenames[2], "priority" : 3}]}); + replTest.initiate({ + "_id": "testSet", + "members": [ + {"_id": 0, "host": nodenames[0], "priority": 1}, + {"_id": 1, "host": nodenames[1], "priority": 2}, + {"_id": 2, "host": nodenames[2], "priority": 3} + ] + }); // 2 should be master (give this a while to happen, as 0 will be elected, then demoted) replTest.waitForState(nodes[2], ReplSetTest.State.PRIMARY, 120000); @@ -20,15 +23,15 @@ // 1 should eventually be master replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY, 60000); - + // do some writes on 1 var master = replTest.getPrimary(); - for (var i=0; i<1000; i++) { - master.getDB("foo").bar.insert({i:i}); + for (var i = 0; i < 1000; i++) { + master.getDB("foo").bar.insert({i: i}); } - for (i=0; i<1000; i++) { - master.getDB("bar").baz.insert({i:i}); + for (i = 0; i < 1000; i++) { + master.getDB("bar").baz.insert({i: i}); } // bring 2 back up, 2 should wait until caught up and then become master @@ -37,8 +40,8 @@ // make sure nothing was rolled back master = replTest.getPrimary(); - for (i=0; i<1000; i++) { - assert(master.getDB("foo").bar.findOne({i:i}) != null, 'checking '+i); - assert(master.getDB("bar").baz.findOne({i:i}) != null, 'checking '+i); + for (i = 0; i < 1000; i++) { + assert(master.getDB("foo").bar.findOne({i: i}) != null, 'checking ' + i); + assert(master.getDB("bar").baz.findOne({i: i}) != null, 'checking ' + i); } }()); diff --git a/jstests/replsets/replsetrestart1.js b/jstests/replsets/replsetrestart1.js index cc3281f0af8..870cdcfe233 100644 --- a/jstests/replsets/replsetrestart1.js +++ b/jstests/replsets/replsetrestart1.js @@ -16,14 +16,14 @@ assert.eq(c1._id, c2._id, '_id same'); for (var i in c1.members) { - assert(c2.members[i] !== undefined, 'field '+i+' exists in both configs'); + assert(c2.members[i] !== undefined, 'field ' + i + ' exists in both configs'); assert.eq(c1.members[i]._id, c2.members[i]._id, 'id is equal in both configs'); assert.eq(c1.members[i].host, c2.members[i].host, 'host is equal in both configs'); } }; // Create a new replica set test. Specify set name and the number of nodes you want. 
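// For reference, the ReplSetTest lifecycle these tests rely on is always the same:
// construct, startSet(), initiate(), use the set, stopSet(). A minimal sketch (the
// set name and node count are illustrative, not taken from this test):
var demoSet = new ReplSetTest({name: 'demoSet', nodes: 3});
demoSet.startSet();  // spawns one mongod per member
demoSet.initiate();  // runs replSetInitiate and waits for a primary to be elected
assert.writeOK(demoSet.getPrimary().getDB('test').c.insert({x: 1}));
demoSet.stopSet();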
-    var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+    var replTest = new ReplSetTest({name: 'testSet', nodes: 3});

     // call startSet() to start each mongod in the replica set
     // this returns a list of nodes
@@ -43,23 +43,23 @@
     var config1 = master.getDB("local").system.replset.findOne();

     // Now we're going to shut down all nodes
-    var mId = replTest.getNodeId( master );
+    var mId = replTest.getNodeId(master);
     var s1 = replTest.liveNodes.slaves[0];
     var s1Id = replTest.getNodeId(s1);
     var s2 = replTest.liveNodes.slaves[1];
     var s2Id = replTest.getNodeId(s2);

-    replTest.stop( s1Id );
-    replTest.stop( s2Id );
+    replTest.stop(s1Id);
+    replTest.stop(s2Id);

     replTest.waitForState(s1, ReplSetTest.State.DOWN);
     replTest.waitForState(s2, ReplSetTest.State.DOWN);

-    replTest.stop( mId );
+    replTest.stop(mId);

     // Now let's restart these nodes
-    replTest.restart( mId );
-    replTest.restart( s1Id );
-    replTest.restart( s2Id );
+    replTest.restart(mId);
+    replTest.restart(s1Id);
+    replTest.restart(s2Id);

     // Make sure that a new master comes up
     master = replTest.getPrimary();
diff --git a/jstests/replsets/restore_term.js b/jstests/replsets/restore_term.js
index 0a0af27c08f..9e679fb67c7 100644
--- a/jstests/replsets/restore_term.js
+++ b/jstests/replsets/restore_term.js
@@ -10,59 +10,59 @@ load("jstests/replsets/rslib.js");
 // storage engines.
 // @tags: [requires_persistence]
 (function() {
-"use strict";
+    "use strict";

-function getCurrentTerm(primary) {
-    var res = primary.adminCommand({replSetGetStatus: 1});
-    assert.commandWorked(res);
-    return res.term;
-}
+    function getCurrentTerm(primary) {
+        var res = primary.adminCommand({replSetGetStatus: 1});
+        assert.commandWorked(res);
+        return res.term;
+    }

-var name = "restore_term";
-var rst = new ReplSetTest({name: name, nodes: 2});
+    var name = "restore_term";
+    var rst = new ReplSetTest({name: name, nodes: 2});

-rst.startSet();
-// Initiate the replset in protocol version 1.
-var conf = rst.getReplSetConfig();
-conf.settings = conf.settings || { };
-conf.settings.electionTimeoutMillis = 2000;
-conf.protocolVersion = 1;
-rst.initiate(conf);
-rst.awaitSecondaryNodes();
+    rst.startSet();
+    // Initiate the replset in protocol version 1.
+    var conf = rst.getReplSetConfig();
+    conf.settings = conf.settings || {};
+    conf.settings.electionTimeoutMillis = 2000;
+    conf.protocolVersion = 1;
+    rst.initiate(conf);
+    rst.awaitSecondaryNodes();

-var primary = rst.getPrimary();
-var primaryColl = primary.getDB("test").coll;
+    var primary = rst.getPrimary();
+    var primaryColl = primary.getDB("test").coll;

-// Current term may be greater than 1 if election race happens.
-var firstSuccessfulTerm = getCurrentTerm(primary);
-assert.gte(firstSuccessfulTerm, 1);
-assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
-assert.eq(getCurrentTerm(primary), firstSuccessfulTerm);
+    // The current term may be greater than 1 if an election race happens.
+    var firstSuccessfulTerm = getCurrentTerm(primary);
+    assert.gte(firstSuccessfulTerm, 1);
+    assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
+    assert.eq(getCurrentTerm(primary), firstSuccessfulTerm);

-// Check that the insert op has the initial term.
-var latestOp = getLatestOp(primary);
-assert.eq(latestOp.op, "i");
-assert.eq(latestOp.t, firstSuccessfulTerm);
+    // Check that the insert op has the initial term.
+    var latestOp = getLatestOp(primary);
+    assert.eq(latestOp.op, "i");
+    assert.eq(latestOp.t, firstSuccessfulTerm);

-// Step down to increase the term.
-try { - var res = primary.adminCommand({replSetStepDown: 0}); -} catch (err) { - print("caught: " + err + " on stepdown"); -} -rst.awaitSecondaryNodes(); -// The secondary became the new primary now with a higher term. -// Since there's only one secondary who may run for election, the new term is higher by 1. -assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1); + // Step down to increase the term. + try { + var res = primary.adminCommand({replSetStepDown: 0}); + } catch (err) { + print("caught: " + err + " on stepdown"); + } + rst.awaitSecondaryNodes(); + // The secondary became the new primary now with a higher term. + // Since there's only one secondary who may run for election, the new term is higher by 1. + assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1); -// Restart the replset and verify the term is the same. -rst.stopSet(null /* signal */, true /* forRestart */); -rst.startSet({restart: true}); -rst.awaitSecondaryNodes(); -primary = rst.getPrimary(); + // Restart the replset and verify the term is the same. + rst.stopSet(null /* signal */, true /* forRestart */); + rst.startSet({restart: true}); + rst.awaitSecondaryNodes(); + primary = rst.getPrimary(); -assert.eq(primary.getDB("test").coll.find().itcount(), 1); -// After restart, the new primary stands up with the newer term. -assert.gte(getCurrentTerm(primary), firstSuccessfulTerm + 1); + assert.eq(primary.getDB("test").coll.find().itcount(), 1); + // After restart, the new primary stands up with the newer term. + assert.gte(getCurrentTerm(primary), firstSuccessfulTerm + 1); })(); diff --git a/jstests/replsets/resync.js b/jstests/replsets/resync.js index 032789649ba..ffcd0325951 100755..100644 --- a/jstests/replsets/resync.js +++ b/jstests/replsets/resync.js @@ -12,12 +12,14 @@ var nodes = replTest.nodeList(); var conns = replTest.startSet(); - var r = replTest.initiate({ "_id": "resync", - "members": [ - {"_id": 0, "host": nodes[0], priority:1}, - {"_id": 1, "host": nodes[1], priority:0}, - {"_id": 2, "host": nodes[2], arbiterOnly:true}] - }); + var r = replTest.initiate({ + "_id": "resync", + "members": [ + {"_id": 0, "host": nodes[0], priority: 1}, + {"_id": 1, "host": nodes[1], priority: 0}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] + }); var a_conn = conns[0]; // Make sure we have a master, and it is conns[0] @@ -31,14 +33,14 @@ var BID = replTest.getNodeId(b_conn); // create an oplog entry with an insert - assert.writeOK( A.foo.insert({ x: 1 }, { writeConcern: { w: 2, wtimeout: 60000 }})); + assert.writeOK(A.foo.insert({x: 1}, {writeConcern: {w: 2, wtimeout: 60000}})); assert.eq(B.foo.findOne().x, 1); - + // run resync and wait for it to happen - assert.commandWorked(b_conn.getDB("admin").runCommand({resync:1})); + assert.commandWorked(b_conn.getDB("admin").runCommand({resync: 1})); replTest.awaitReplication(); replTest.awaitSecondaryNodes(); - + assert.eq(B.foo.findOne().x, 1); replTest.stop(BID); @@ -47,9 +49,8 @@ try { // Collection scan to determine if the oplog entry from the first insert has been // deleted yet. - return oplog.find( { "o.x" : 1 } ).sort( { $natural : 1 } ).limit(10).itcount() == 0; - } - catch (except) { + return oplog.find({"o.x": 1}).sort({$natural: 1}).limit(10).itcount() == 0; + } catch (except) { // An error is expected in the case that capped deletions blow away the position of the // collection scan during a yield. In this case, we just try again. 
 var errorRegex = /CappedPositionLost/;
@@ -58,40 +59,39 @@
         }
     }

-    // Make sure the oplog has rolled over on the primary and secondary that is up, 
+    // Make sure the oplog has rolled over on the primary and secondary that is up,
     // so when we bring up the other replica it is "too stale"
-    for ( var cycleNumber = 0; cycleNumber < 10; cycleNumber++ ) {
+    for (var cycleNumber = 0; cycleNumber < 10; cycleNumber++) {
         // insert enough to cycle oplog
         var bulk = A.foo.initializeUnorderedBulkOp();
-        for (var i=2; i < 10000; i++) {
-            bulk.insert({x:i});
+        for (var i = 2; i < 10000; i++) {
+            bulk.insert({x: i});
         }

         // wait for secondary to also have its oplog cycle
-        assert.writeOK(bulk.execute({ w: 1, wtimeout : 60000 }));
+        assert.writeOK(bulk.execute({w: 1, wtimeout: 60000}));

-        if ( hasCycled() )
+        if (hasCycled())
             break;
     }

-    assert( hasCycled() );
+    assert(hasCycled());

     // bring node B and it will enter recovery mode because its newest oplog entry is too old
     replTest.restart(BID);
-    
+
     // check that it is in recovery mode
     assert.soon(function() {
         try {
             var result = b_conn.getDB("admin").runCommand({replSetGetStatus: 1});
             return (result.members[1].stateStr === "RECOVERING");
-        }
-        catch ( e ) {
-            print( e );
+        } catch (e) {
+            print(e);
         }
     }, "node didn't enter RECOVERING state");

     // run resync and wait for it to happen
-    assert.commandWorked(b_conn.getDB("admin").runCommand({resync:1}));
+    assert.commandWorked(b_conn.getDB("admin").runCommand({resync: 1}));
     replTest.awaitReplication();
     replTest.awaitSecondaryNodes();

     assert.eq(B.foo.findOne().x, 1);
diff --git a/jstests/replsets/resync_with_write_load.js b/jstests/replsets/resync_with_write_load.js
index 392c7254bc5..07e0dc34084 100644
--- a/jstests/replsets/resync_with_write_load.js
+++ b/jstests/replsets/resync_with_write_load.js
@@ -1,8 +1,8 @@
 /**
- * This test creates a 2 node replica set and then puts load on the primary with writes during 
+ * This test creates a 2 node replica set and then puts load on the primary with writes during
  * the resync in order to verify that all phases of the initial sync work correctly.
- * 
- * We cannot test each phase of the initial sync directly but by providing constant writes we can 
+ *
+ * We cannot test each phase of the initial sync directly but by providing constant writes we can
  * assume that each individual phase will have data to work with, and therefore be tested.
 */
 var testName = "resync_with_write_load";
@@ -10,12 +10,14 @@ var replTest = new ReplSetTest({name: testName, nodes: 3, oplogSize: 100});
 var nodes = replTest.nodeList();
 var conns = replTest.startSet();
-var config = { "_id": testName,
-               "members": [
-                   {"_id": 0, "host": nodes[0], priority:4},
-                   {"_id": 1, "host": nodes[1]},
-                   {"_id": 2, "host": nodes[2]}]
-             };
+var config = {
+    "_id": testName,
+    "members": [
+        {"_id": 0, "host": nodes[0], priority: 4},
+        {"_id": 1, "host": nodes[1]},
+        {"_id": 2, "host": nodes[2]}
+    ]
+};
 var r = replTest.initiate(config);
 replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
 // Make sure we have a master
@@ -33,44 +35,43 @@ assert(master == conns[0], "conns[0] assumed to be master");
 assert(a_conn.host == master.host);

 // create an oplog entry with an insert
-assert.writeOK( A.foo.insert({ x: 1 }, { writeConcern: { w: 1, wtimeout: 60000 }}));
+assert.writeOK(A.foo.insert({x: 1}, {writeConcern: {w: 1, wtimeout: 60000}}));
 replTest.stop(BID);

 print("******************** starting load for 30 secs *********************");
 var work = function() {
-    print("starting loadgen");
-    var start=new Date().getTime();
-    
-    assert.writeOK(db.timeToStartTrigger.insert({_id:1}));
+    print("starting loadgen");
+    var start = new Date().getTime();

-    while (true) {
-        for (x=0; x < 100; x++) {
-            db["a" + x].insert({a:x});
-        }
-        
-        var runTime = (new Date().getTime() - start);
-        if (runTime > 30000)
-            break;
-        else if (runTime < 5000) // back-off more during first 2 seconds
-            sleep(50);
-        else
-            sleep(1);
-        
-    }
-    print("finshing loadgen");
-    };
-//insert enough that resync node has to go through oplog replay in each step
+    assert.writeOK(db.timeToStartTrigger.insert({_id: 1}));

+    while (true) {
+        for (x = 0; x < 100; x++) {
+            db["a" + x].insert({a: x});
+        }
+
+        var runTime = (new Date().getTime() - start);
+        if (runTime > 30000)
+            break;
+        else if (runTime < 5000)  // back off more during the first 2 seconds
+            sleep(50);
+        else
+            sleep(1);
+    }
+    print("finishing loadgen");
+};
+// insert enough that the resync node has to go through oplog replay in each step
 var loadGen = startParallelShell(work, replTest.ports[0]);

 // wait for document to appear to continue
 assert.soon(function() {
     try {
         return 1 == master.getDB("test")["timeToStartTrigger"].count();
-    } catch ( e ) {
-        print( e );
+    } catch (e) {
+        print(e);
         return false;
     }
-}, "waited too long for start trigger", 90 * 1000 /* 90 secs */ );
+}, "waited too long for start trigger", 90 * 1000 /* 90 secs */);

 print("*************** STARTING node without data ***************");
 replTest.start(BID);
@@ -79,8 +80,8 @@ assert.soon(function() {
     try {
         var result = b_conn.getDB("admin").runCommand({replSetGetStatus: 1});
         return true;
-    } catch ( e ) {
-        print( e );
+    } catch (e) {
+        print(e);
         return false;
     }
 }, "node didn't come up");
@@ -97,8 +98,8 @@ try {
 } catch (e) {
     var aDBHash = A.runCommand("dbhash");
     var bDBHash = B.runCommand("dbhash");
-    assert.eq(aDBHash.md5, bDBHash.md5,
-              "hashes differ: " + tojson(aDBHash) + " to " + tojson(bDBHash));
+    assert.eq(
+        aDBHash.md5, bDBHash.md5, "hashes differ: " + tojson(aDBHash) + " to " + tojson(bDBHash));
 }
 replTest.stopSet();
diff --git a/jstests/replsets/rollback.js b/jstests/replsets/rollback.js
index 6f8f0154481..d0c162ea98d 100644
--- a/jstests/replsets/rollback.js
+++ b/jstests/replsets/rollback.js
@@ -19,7 +19,7 @@
 */
 load("jstests/replsets/rslib.js");
-(function () {
+(function() {
     "use strict";
     // helper function for verifying contents at the end of the test
    var
checkFinalResults = function(db) { @@ -32,15 +32,17 @@ load("jstests/replsets/rslib.js"); assert.eq(8, x[4].q); }; - var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3, oplogSize: 1, useBridge: true }); + var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3, oplogSize: 1, useBridge: true}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); - var r = replTest.initiate({ "_id": "unicomplex", + var r = replTest.initiate({ + "_id": "unicomplex", "members": [ - { "_id": 0, "host": nodes[0], "priority": 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] + {"_id": 0, "host": nodes[0], "priority": 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] }); // Make sure we have a master @@ -63,18 +65,18 @@ load("jstests/replsets/rslib.js"); if (new Date() % 2 == 0) { jsTest.log("ROLLING OPLOG AS PART OF TEST (we only do this sometimes)"); var pass = 1; - var first = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0]; - a.roll.insert({ x: 1 }); + var first = a.getSisterDB("local").oplog.rs.find().sort({$natural: 1}).limit(1)[0]; + a.roll.insert({x: 1}); while (1) { var bulk = a.roll.initializeUnorderedBulkOp(); for (var i = 0; i < 1000; i++) { - bulk.find({}).update({ $inc: { x: 1 }}); + bulk.find({}).update({$inc: {x: 1}}); } - // unlikely secondary isn't keeping up, but let's avoid possible intermittent + // unlikely secondary isn't keeping up, but let's avoid possible intermittent // issues with that. - assert.writeOK(bulk.execute({ w: 2 })); + assert.writeOK(bulk.execute({w: 2})); - var op = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0]; + var op = a.getSisterDB("local").oplog.rs.find().sort({$natural: 1}).limit(1)[0]; if (tojson(op.h) != tojson(first.h)) { printjson(op); printjson(first); @@ -83,14 +85,13 @@ load("jstests/replsets/rslib.js"); pass++; } jsTest.log("PASSES FOR OPLOG ROLL: " + pass); - } - else { + } else { jsTest.log("NO ROLL"); } - assert.writeOK(a.bar.insert({ q: 1, a: "foo" })); - assert.writeOK(a.bar.insert({ q: 2, a: "foo", x: 1 })); - assert.writeOK(a.bar.insert({ q: 3, bb: 9, a: "foo" }, { writeConcern: { w: 2 } })); + assert.writeOK(a.bar.insert({q: 1, a: "foo"})); + assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1})); + assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}, {writeConcern: {w: 2}})); assert.eq(a.bar.count(), 3, "a.count"); assert.eq(b.bar.count(), 3, "b.count"); @@ -99,11 +100,17 @@ load("jstests/replsets/rslib.js"); conns[0].disconnect(conns[2]); // Wait for election and drain mode to finish on node 1. - assert.soon(function () { try { return B.isMaster().ismaster; } catch(e) { return false; } }); + assert.soon(function() { + try { + return B.isMaster().ismaster; + } catch (e) { + return false; + } + }); // These 97 documents will be rolled back eventually. 
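    // (97 because the loop below runs i = 4 through i = 100 inclusive,
    // i.e. 100 - 4 + 1 = 97 inserts that exist only on B.)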
    for (var i = 4; i <= 100; i++) {
-        assert.writeOK(b.bar.insert({ q: i }));
+        assert.writeOK(b.bar.insert({q: i}));
     }
     assert.eq(100, b.bar.count(), "u.count");

@@ -113,13 +120,25 @@ load("jstests/replsets/rslib.js");
     conns[0].reconnect(conns[2]);

     jsTest.log("*************** B ****************");
-    assert.soon(function () { try { return !B.isMaster().ismaster; } catch(e) { return false; } });
+    assert.soon(function() {
+        try {
+            return !B.isMaster().ismaster;
+        } catch (e) {
+            return false;
+        }
+    });

     jsTest.log("*************** A ****************");
-    assert.soon(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } });
+    assert.soon(function() {
+        try {
+            return A.isMaster().ismaster;
+        } catch (e) {
+            return false;
+        }
+    });

     assert(a.bar.count() == 3, "t is 3");
-    assert.writeOK(a.bar.insert({ q: 7 }));
-    assert.writeOK(a.bar.insert({ q: 8 }));
+    assert.writeOK(a.bar.insert({q: 7}));
+    assert.writeOK(a.bar.insert({q: 8}));

     // A is 1 2 3 7 8
     // B is 1 2 3 4 5 6 ... 100

@@ -138,8 +157,10 @@ load("jstests/replsets/rslib.js");
     var connectionsCreatedOnPrimaryAfterRollback = a.serverStatus().connections.totalCreated;
     var connectionsCreatedOnPrimaryDuringRollback =
         connectionsCreatedOnPrimaryAfterRollback - connectionsCreatedOnPrimaryBeforeRollback;
-    jsTest.log('connections created during rollback = ' + connectionsCreatedOnPrimaryDuringRollback);
-    assert.lt(connectionsCreatedOnPrimaryDuringRollback, 50,
+    jsTest.log('connections created during rollback = ' +
+               connectionsCreatedOnPrimaryDuringRollback);
+    assert.lt(connectionsCreatedOnPrimaryDuringRollback,
+              50,
               'excessive number of connections made by secondary to primary during rollback');

     replTest.stopSet(15);
diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js
index 3334eada6ce..911a9d3e128 100644
--- a/jstests/replsets/rollback2.js
+++ b/jstests/replsets/rollback2.js
@@ -2,7 +2,7 @@
 * Basic test of a successful replica set rollback for CRUD operations.
 *
 * This test sets up a 3 node set, data-bearing nodes A and B and an arbiter.
- * 
+ *
 * 1. A is elected PRIMARY and receives several writes, which are propagated to B.
 * 2. A is isolated from the rest of the set and B is elected PRIMARY.
 * 3. B receives several operations, which will later be undone during rollback.
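 *    (Concretely: when B rejoins and sees that A's oplog has diverged, it finds the
 *    last common oplog point, undoes its own operations past that point, saving the
 *    undone documents to rollback files on disk, and then resumes syncing from A.)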
@@ -13,7 +13,7 @@ */ load("jstests/replsets/rslib.js"); -(function () { +(function() { "use strict"; // helper function for verifying contents at the end of the test var checkFinalResults = function(db) { @@ -28,16 +28,18 @@ load("jstests/replsets/rslib.js"); }; var name = "rollback2js"; - var replTest = new ReplSetTest({ name: name, nodes: 3, useBridge: true }); + var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); - replTest.initiate({ "_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true} - ]}); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] + }); // Make sure we have a master and that that master is node A replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); @@ -56,17 +58,17 @@ load("jstests/replsets/rslib.js"); var b = b_conn.getDB("foo"); // initial data for both nodes - assert.writeOK(a.bar.insert({ q:0})); - assert.writeOK(a.bar.insert({ q: 1, a: "foo" })); - assert.writeOK(a.bar.insert({ q: 2, a: "foo", x: 1 })); - assert.writeOK(a.bar.insert({ q: 3, bb: 9, a: "foo" })); - assert.writeOK(a.bar.insert({ q: 40, a: 1 })); - assert.writeOK(a.bar.insert({ q: 40, a: 2 })); - assert.writeOK(a.bar.insert({ q: 70, txt: 'willremove' })); - a.createCollection("kap", { capped: true, size: 5000 }); - assert.writeOK(a.kap.insert({ foo: 1 })); + assert.writeOK(a.bar.insert({q: 0})); + assert.writeOK(a.bar.insert({q: 1, a: "foo"})); + assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1})); + assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"})); + assert.writeOK(a.bar.insert({q: 40, a: 1})); + assert.writeOK(a.bar.insert({q: 40, a: 2})); + assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'})); + a.createCollection("kap", {capped: true, size: 5000}); + assert.writeOK(a.kap.insert({foo: 1})); // going back to empty on capped is a special case and must be tested - a.createCollection("kap2", { capped: true, size: 5501 }); + a.createCollection("kap2", {capped: true, size: 5501}); replTest.awaitReplication(); var timeout; @@ -78,40 +80,52 @@ load("jstests/replsets/rslib.js"); // isolate A and wait for B to become master conns[0].disconnect(conns[1]); conns[0].disconnect(conns[2]); - assert.soon(function () { + assert.soon(function() { try { return B.isMaster().ismaster; - } catch(e) { + } catch (e) { return false; } }, "node B did not become master as expected", timeout); // do operations on B and B alone, these will be rolled back - assert.writeOK(b.bar.insert({ q: 4 })); - assert.writeOK(b.bar.update({ q: 3 }, { q: 3, rb: true })); - assert.writeOK(b.bar.remove({ q: 40 })); // multi remove test - assert.writeOK(b.bar.update({ q: 2 }, { q: 39, rb: true })); + assert.writeOK(b.bar.insert({q: 4})); + assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true})); + assert.writeOK(b.bar.remove({q: 40})); // multi remove test + assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true})); // rolling back a delete will involve reinserting the item(s) - assert.writeOK(b.bar.remove({ q: 1 })); - assert.writeOK(b.bar.update({ q: 0 }, { $inc: { y: 1} })); - assert.writeOK(b.kap.insert({ foo: 2 })); - assert.writeOK(b.kap2.insert({ foo: 2 })); + assert.writeOK(b.bar.remove({q: 1})); + assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}})); + assert.writeOK(b.kap.insert({foo: 
2}));
+    assert.writeOK(b.kap2.insert({foo: 2}));

     // create a collection (need to roll back the whole thing)
-    assert.writeOK(b.newcoll.insert({ a: true }));
+    assert.writeOK(b.newcoll.insert({a: true}));
     // create a new empty collection (need to roll back the whole thing)
     b.createCollection("abc");

     // isolate B, bring A back into contact with the arbiter, then wait for A to become master
     // insert new data into A so that B will need to rollback when it reconnects to A
     conns[1].disconnect(conns[2]);
-    assert.soon(function () { try { return !B.isMaster().ismaster; } catch(e) { return false; } });
+    assert.soon(function() {
+        try {
+            return !B.isMaster().ismaster;
+        } catch (e) {
+            return false;
+        }
+    });

     conns[0].reconnect(conns[2]);
-    assert.soon(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } });
+    assert.soon(function() {
+        try {
+            return A.isMaster().ismaster;
+        } catch (e) {
+            return false;
+        }
+    });

     assert(a.bar.count() >= 1, "count check");
-    assert.writeOK(a.bar.insert({ txt: 'foo' }));
-    assert.writeOK(a.bar.remove({ q: 70 }));
-    assert.writeOK(a.bar.update({ q: 0 }, { $inc: { y: 33} }));
+    assert.writeOK(a.bar.insert({txt: 'foo'}));
+    assert.writeOK(a.bar.remove({q: 70}));
+    assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));

     // A is 1 2 3 7 8
     // B is 1 2 3 4 5 6
diff --git a/jstests/replsets/rollback3.js b/jstests/replsets/rollback3.js
index 9abac9c3639..740f9caa383 100755..100644
--- a/jstests/replsets/rollback3.js
+++ b/jstests/replsets/rollback3.js
@@ -2,7 +2,7 @@
 * Basic test of a successful replica set rollback for DDL operations.
 *
 * This test sets up a 3 node set, data-bearing nodes A and B and an arbiter.
- * 
+ *
 * 1. A is elected PRIMARY and receives several writes, which are propagated to B.
 * 2. A is isolated from the rest of the set and B is elected PRIMARY.
 * 3. B receives several operations, which will later be undone during rollback.
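 *    (Note that not every DDL operation can be rolled back: dropDatabase, for one,
 *    cannot, and a node forced to roll one back fasserts and requires a full resync,
 *    as exercised by rollback_dropdb.js further down.)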
@@ -13,7 +13,7 @@ */ load("jstests/replsets/rslib.js"); -(function () { +(function() { "use strict"; // helper function for verifying contents at the end of the test var checkFinalResults = function(db) { @@ -33,16 +33,18 @@ load("jstests/replsets/rslib.js"); }; var name = "rollback2js"; - var replTest = new ReplSetTest({ name: name, nodes: 3, useBridge: true }); + var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); - replTest.initiate({ "_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true} - ]}); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] + }); // Make sure we have a master and that that master is node A replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); @@ -61,42 +63,48 @@ load("jstests/replsets/rslib.js"); var b = b_conn.getDB("foo"); // initial data for both nodes - assert.writeOK(a.b.insert({ x: 1 })); - a.b.ensureIndex({ x: 1 }); - assert.writeOK(a.oldname.insert({ y: 1 })); - assert.writeOK(a.oldname.insert({ y: 2 })); - a.oldname.ensureIndex({ y: 1 },true); - assert.writeOK(a.bar.insert({ q:0})); - assert.writeOK(a.bar.insert({ q: 1, a: "foo" })); - assert.writeOK(a.bar.insert({ q: 2, a: "foo", x: 1 })); - assert.writeOK(a.bar.insert({ q: 3, bb: 9, a: "foo" })); - assert.writeOK(a.bar.insert({ q: 40333333, a: 1 })); + assert.writeOK(a.b.insert({x: 1})); + a.b.ensureIndex({x: 1}); + assert.writeOK(a.oldname.insert({y: 1})); + assert.writeOK(a.oldname.insert({y: 2})); + a.oldname.ensureIndex({y: 1}, true); + assert.writeOK(a.bar.insert({q: 0})); + assert.writeOK(a.bar.insert({q: 1, a: "foo"})); + assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1})); + assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"})); + assert.writeOK(a.bar.insert({q: 40333333, a: 1})); for (var i = 0; i < 200; i++) { - assert.writeOK(a.bar.insert({ i: i })); + assert.writeOK(a.bar.insert({i: i})); } - assert.writeOK(a.bar.insert({ q: 40, a: 2 })); - assert.writeOK(a.bar.insert({ q: 70, txt: 'willremove' })); - a.createCollection("kap", { capped: true, size: 5000 }); - assert.writeOK(a.kap.insert({ foo: 1 })); + assert.writeOK(a.bar.insert({q: 40, a: 2})); + assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'})); + a.createCollection("kap", {capped: true, size: 5000}); + assert.writeOK(a.kap.insert({foo: 1})); replTest.awaitReplication(); // isolate A and wait for B to become master conns[0].disconnect(conns[1]); conns[0].disconnect(conns[2]); - assert.soon(function () { try { return B.isMaster().ismaster; } catch(e) { return false; } }); - + assert.soon(function() { + try { + return B.isMaster().ismaster; + } catch (e) { + return false; + } + }); + // do operations on B and B alone, these will be rolled back - assert.writeOK(b.bar.insert({ q: 4 })); - assert.writeOK(b.bar.update({ q: 3 }, { q: 3, rb: true })); - assert.writeOK(b.bar.remove({ q: 40 })); // multi remove test - assert.writeOK(b.bar.update({ q: 2 }, { q: 39, rb: true })); + assert.writeOK(b.bar.insert({q: 4})); + assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true})); + assert.writeOK(b.bar.remove({q: 40})); // multi remove test + assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true})); // rolling back a delete will involve reinserting the item(s) - assert.writeOK(b.bar.remove({ q: 1 })); - 
assert.writeOK(b.bar.update({ q: 0 }, { $inc: { y: 1} })); - assert.writeOK(b.kap.insert({ foo: 2 })); - assert.writeOK(b.kap2.insert({ foo: 2 })); + assert.writeOK(b.bar.remove({q: 1})); + assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}})); + assert.writeOK(b.kap.insert({foo: 2})); + assert.writeOK(b.kap2.insert({foo: 2})); // create a collection (need to roll back the whole thing) - assert.writeOK(b.newcoll.insert({ a: true })); + assert.writeOK(b.newcoll.insert({a: true})); // create a new empty collection (need to roll back the whole thing) b.createCollection("abc"); // drop a collection - we'll need all its data back! @@ -107,22 +115,34 @@ load("jstests/replsets/rslib.js"); b.oldname.renameCollection("newname"); b.newname.renameCollection("fooname"); assert(b.fooname.count() > 0, "count rename"); - // test roll back (drop) a whole database + // test roll back (drop) a whole database var abc = b.getSisterDB("abc"); - assert.writeOK(abc.foo.insert({ x: 1 })); - assert.writeOK(abc.bar.insert({ y: 999 })); + assert.writeOK(abc.foo.insert({x: 1})); + assert.writeOK(abc.bar.insert({y: 999})); // isolate B, bring A back into contact with the arbiter, then wait for A to become master // insert new data into A so that B will need to rollback when it reconnects to A conns[1].disconnect(conns[2]); - assert.soon(function () { try { return !B.isMaster().ismaster; } catch(e) { return false; } }); + assert.soon(function() { + try { + return !B.isMaster().ismaster; + } catch (e) { + return false; + } + }); conns[0].reconnect(conns[2]); - assert.soon(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } }); + assert.soon(function() { + try { + return A.isMaster().ismaster; + } catch (e) { + return false; + } + }); assert(a.bar.count() >= 1, "count check"); - assert.writeOK(a.bar.insert({ txt: 'foo' })); - assert.writeOK(a.bar.remove({ q: 70 })); - assert.writeOK(a.bar.update({ q: 0 }, { $inc: { y: 33} })); + assert.writeOK(a.bar.insert({txt: 'foo'})); + assert.writeOK(a.bar.remove({q: 70})); + assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}})); // A is 1 2 3 7 8 // B is 1 2 3 4 5 6 diff --git a/jstests/replsets/rollback5.js b/jstests/replsets/rollback5.js index 07f02419a59..e63b7ab34ea 100644 --- a/jstests/replsets/rollback5.js +++ b/jstests/replsets/rollback5.js @@ -9,16 +9,18 @@ // run on ephemeral storage engines. 
// @tags: [requires_persistence] -var replTest = new ReplSetTest({ name: 'rollback5', nodes: 3 }); +var replTest = new ReplSetTest({name: 'rollback5', nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -var r = replTest.initiate({ "_id": "rollback5", - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +var r = replTest.initiate({ + "_id": "rollback5", + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); // Make sure we have a master replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); @@ -37,26 +39,35 @@ assert(master == conns[0], "conns[0] assumed to be master"); assert(a_conn.host == master.host); // Make sure we have an arbiter -assert.soon(function () { - res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 }); +assert.soon(function() { + res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1}); return res.myState == 7; }, "Arbiter failed to initialize."); -var options = { writeConcern: { w: 2, wtimeout: 60000 }, upsert: true }; -assert.writeOK(A.foo.update({ key: 'value1' }, { $set: { req: 'req' }}, options)); +var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; +assert.writeOK(A.foo.update({key: 'value1'}, {$set: {req: 'req'}}, options)); replTest.stop(AID); master = replTest.getPrimary(); assert(b_conn.host == master.host); -options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true }; -assert.writeOK(B.foo.update({key:'value1'}, {$set: {res: 'res'}}, options)); +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; +assert.writeOK(B.foo.update({key: 'value1'}, {$set: {res: 'res'}}, options)); replTest.stop(BID); replTest.restart(AID); master = replTest.getPrimary(); assert(a_conn.host == master.host); -options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true }; -assert.writeOK(A.foo.update({ key: 'value2' }, { $set: { req: 'req' }}, options)); -replTest.restart(BID); // should rollback +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; +assert.writeOK(A.foo.update({key: 'value2'}, {$set: {req: 'req'}}, options)); +replTest.restart(BID); // should rollback reconnect(B); print("BEFORE------------------"); @@ -69,12 +80,12 @@ print("AFTER------------------"); printjson(A.foo.find().toArray()); assert.eq(2, A.foo.count()); -assert.eq('req', A.foo.findOne({key:'value1'}).req); -assert.eq(null, A.foo.findOne({key:'value1'}).res); +assert.eq('req', A.foo.findOne({key: 'value1'}).req); +assert.eq(null, A.foo.findOne({key: 'value1'}).res); reconnect(B); assert.eq(2, B.foo.count()); -assert.eq('req', B.foo.findOne({key:'value1'}).req); -assert.eq(null, B.foo.findOne({key:'value1'}).res); +assert.eq('req', B.foo.findOne({key: 'value1'}).req); +assert.eq(null, B.foo.findOne({key: 'value1'}).res); // check here for rollback files var rollbackDir = Bpath + "rollback/"; @@ -83,7 +94,6 @@ assert(pathExists(rollbackDir), "rollback directory was not created!"); print("rollback5.js SUCCESS"); replTest.stopSet(15); - function wait(f) { var n = 0; while (!f()) { @@ -98,14 +108,13 @@ function wait(f) { } function reconnect(a) { - wait(function() { - try { - a.bar.stats(); - return true; - } catch(e) { - print(e); - return false; - } + wait(function() { + try { + a.bar.stats(); + return true; + } catch (e) { + print(e); + return false; + } }); } - diff --git 
a/jstests/replsets/rollback_auth.js b/jstests/replsets/rollback_auth.js index 77684550d3e..0c0b35b91ed 100644 --- a/jstests/replsets/rollback_auth.js +++ b/jstests/replsets/rollback_auth.js @@ -10,7 +10,7 @@ // run on ephemeral storage engines. // @tags: [requires_persistence] -(function () { +(function() { "use strict"; // helper function for verifying contents at the end of the test var checkFinalResults = function(db) { @@ -26,17 +26,17 @@ jsTestLog("Setting up replica set"); var name = "rollbackAuth"; - var replTest = new ReplSetTest({name: name, - nodes: 3, - keyFile: 'jstests/libs/key1' }); + var replTest = new ReplSetTest({name: name, nodes: 3, keyFile: 'jstests/libs/key1'}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); - replTest.initiate({ "_id": "rollbackAuth", - "members": [ - { "_id": 0, "host": nodes[0], "priority": 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true} - ]}); + replTest.initiate({ + "_id": "rollbackAuth", + "members": [ + {"_id": 0, "host": nodes[0], "priority": 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] + }); // Make sure we have a master replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); @@ -53,45 +53,49 @@ assert.eq(a_conn, master); // Make sure we have an arbiter - assert.soon(function () { - var res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 }); - return res.myState == 7; - }, "Arbiter failed to initialize."); - + assert.soon(function() { + var res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1}); + return res.myState == 7; + }, "Arbiter failed to initialize."); jsTestLog("Creating initial data"); // Create collections that will be used in test A.createUser({user: 'admin', pwd: 'pwd', roles: ['root']}); A.auth('admin', 'pwd'); - a.foo.insert({a:1}); - a.bar.insert({a:1}); - a.baz.insert({a:1}); - a.foobar.insert({a:1}); + a.foo.insert({a: 1}); + a.bar.insert({a: 1}); + a.baz.insert({a: 1}); + a.foobar.insert({a: 1}); // Set up user admin user A.createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']}); - A.auth('userAdmin', 'pwd'); // Logs out of admin@admin user + A.auth('userAdmin', 'pwd'); // Logs out of admin@admin user B.auth('userAdmin', 'pwd'); // Create a basic user and role - A.createRole({role: 'replStatusRole', // To make awaitReplication() work - roles: [], - privileges: [{resource: {cluster: true}, actions: ['replSetGetStatus']}, - {resource: {db: 'local', collection: ''}, actions: ['find']}, - {resource: {db: 'local', collection: 'system.replset'}, - actions: ['find']}]}); - a.createRole({role: 'myRole', roles: [], privileges: [{resource: {db: 'test', collection: ''}, - actions: ['dbStats']}]}); - a.createUser({user: 'spencer', - pwd: 'pwd', - roles: ['myRole', {role: 'replStatusRole', db: 'admin'}]}); + A.createRole({ + role: 'replStatusRole', // To make awaitReplication() work + roles: [], + privileges: [ + {resource: {cluster: true}, actions: ['replSetGetStatus']}, + {resource: {db: 'local', collection: ''}, actions: ['find']}, + {resource: {db: 'local', collection: 'system.replset'}, actions: ['find']} + ] + }); + a.createRole({ + role: 'myRole', + roles: [], + privileges: [{resource: {db: 'test', collection: ''}, actions: ['dbStats']}] + }); + a.createUser( + {user: 'spencer', pwd: 'pwd', roles: ['myRole', {role: 'replStatusRole', db: 'admin'}]}); assert(a.auth('spencer', 'pwd')); // wait for secondary to get this data assert.soon(function() { - return 
b.auth('spencer', 'pwd'); - }); + return b.auth('spencer', 'pwd'); + }); assert.commandWorked(a.runCommand({dbStats: 1})); assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode); @@ -105,30 +109,34 @@ assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode); assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode); - jsTestLog("Doing writes that will eventually be rolled back"); // down A and wait for B to become master replTest.stop(0); - assert.soon(function () { try { return B.isMaster().ismaster; } catch(e) { return false; } }, - "B didn't become master", - 60000, - 1000); + assert.soon(function() { + try { + return B.isMaster().ismaster; + } catch (e) { + return false; + } + }, "B didn't become master", 60000, 1000); printjson(b.adminCommand('replSetGetStatus')); - // Modify the the user and role in a way that will be rolled back. - b.grantPrivilegesToRole('myRole', - [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}], - {}); // Default write concern will wait for majority, which will time out. - b.createRole({role: 'temporaryRole', - roles: [], - privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}]}, - {}); // Default write concern will wait for majority, which will time out. + b.grantPrivilegesToRole( + 'myRole', + [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}], + {}); // Default write concern will wait for majority, which will time out. + b.createRole( + { + role: 'temporaryRole', + roles: [], + privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}] + }, + {}); // Default write concern will wait for majority, which will time out. b.grantRolesToUser('spencer', ['temporaryRole'], - {}); // Default write concern will wait for majority, which will time out. - + {}); // Default write concern will wait for majority, which will time out. 
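// A recurring pattern above: the shell's user-management helpers (createRole,
// grantRolesToUser, grantPrivilegesToRole) take an optional write concern as their
// final argument and by default wait for majority, which can never be satisfied
// while node A is down; passing {} (or an explicit low write concern) avoids the
// timeout. A minimal sketch of the same idea ('demoRole' is illustrative, not part
// of this test):
b.createRole({role: 'demoRole', roles: [], privileges: []}, {w: 1});
b.grantRolesToUser('spencer', ['demoRole'], {w: 1});  // w: 1 instead of majority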
assert.commandWorked(b.runCommand({dbStats: 1})); assert.commandWorked(b.runCommand({collStats: 'foo'})); @@ -141,10 +149,13 @@ replTest.stop(1); replTest.restart(0); - assert.soon(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } }, - "A didn't become master", - 60000, - 1000); + assert.soon(function() { + try { + return A.isMaster().ismaster; + } catch (e) { + return false; + } + }, "A didn't become master", 60000, 1000); // A should not have the new data as it was down assert.commandWorked(a.runCommand({dbStats: 1})); @@ -158,18 +169,17 @@ A.auth('userAdmin', 'pwd'); // Default write concern will wait for majority, which would time out // so we override it with an empty write concern - a.grantPrivilegesToRole('myRole', - [{resource: {db: 'test', collection: 'baz'}, actions: ['collStats']}], - {}); - - a.createRole({role: 'persistentRole', - roles: [], - privileges: [{resource: {db: 'test', collection: 'foobar'}, - actions: ['collStats']}]}, - {}); - a.grantRolesToUser('spencer', - ['persistentRole'], - {}); + a.grantPrivilegesToRole( + 'myRole', [{resource: {db: 'test', collection: 'baz'}, actions: ['collStats']}], {}); + + a.createRole( + { + role: 'persistentRole', + roles: [], + privileges: [{resource: {db: 'test', collection: 'foobar'}, actions: ['collStats']}] + }, + {}); + a.grantRolesToUser('spencer', ['persistentRole'], {}); A.logout(); a.auth('spencer', 'pwd'); @@ -183,10 +193,12 @@ replTest.restart(1); authutil.asCluster(replTest.nodes, 'jstests/libs/key1', - function() { replTest.awaitReplication(); }); + function() { + replTest.awaitReplication(); + }); assert.soon(function() { - return b.auth('spencer', 'pwd'); - }); + return b.auth('spencer', 'pwd'); + }); // Now both A and B should agree checkFinalResults(a); checkFinalResults(b); diff --git a/jstests/replsets/rollback_cmd_unrollbackable.js b/jstests/replsets/rollback_cmd_unrollbackable.js index a4c3a35b658..801d4c285a7 100644 --- a/jstests/replsets/rollback_cmd_unrollbackable.js +++ b/jstests/replsets/rollback_cmd_unrollbackable.js @@ -12,12 +12,14 @@ var name = "rollback_cmd_unrollbackable"; var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); var a_conn = conns[0]; var b_conn = conns[1]; var AID = replTest.getNodeId(a_conn); @@ -28,7 +30,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; +var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); // shut down master @@ -37,13 +42,18 @@ replTest.stop(AID); // insert a fake oplog entry with a non-rollbackworthy command master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; // 
another insert to set minvalid ahead assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0]; oplog_entry["ts"] = Timestamp(oplog_entry["ts"].t, oplog_entry["ts"].i + 1); oplog_entry["op"] = "c"; -oplog_entry["o"] = {"replSetSyncFrom": 1}; +oplog_entry["o"] = { + "replSetSyncFrom": 1 +}; assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master @@ -53,7 +63,10 @@ master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); // restart B, which should attempt to rollback but then fassert. @@ -64,4 +77,4 @@ assert.soon(function() { return rawMongoProgramOutput().match(msg); }, "Did not see a log entry about skipping the nonrollbackable command during rollback"); -replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] }); +replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]}); diff --git a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js index bf7799895f9..deab19b2f09 100644 --- a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js +++ b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js @@ -7,78 +7,88 @@ // run on ephemeral storage engines. // @tags: [requires_persistence] (function() { -"use strict"; + "use strict"; -function getOptions(conn) { - return conn.getDB(name).foo.exists().options; -} + function getOptions(conn) { + return conn.getDB(name).foo.exists().options; + } -// Set up a set and grab things for later. -var name = "rollback_collMod_PowerOf2Sizes"; -var replTest = new ReplSetTest({name: name, nodes: 3}); -var nodes = replTest.nodeList(); -var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0] }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); -// Get master and do an initial write. -var master = replTest.getPrimary(); -var a_conn = master; -var slaves = replTest.liveNodes.slaves; -var b_conn = slaves[0]; -var AID = replTest.getNodeId(a_conn); -var BID = replTest.getNodeId(b_conn); + // Set up a set and grab things for later. + var name = "rollback_collMod_PowerOf2Sizes"; + var replTest = new ReplSetTest({name: name, nodes: 3}); + var nodes = replTest.nodeList(); + var conns = replTest.startSet(); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] + }); + // Get master and do an initial write. + var master = replTest.getPrimary(); + var a_conn = master; + var slaves = replTest.liveNodes.slaves; + var b_conn = slaves[0]; + var AID = replTest.getNodeId(a_conn); + var BID = replTest.getNodeId(b_conn); -// Create collection with custom options. -var originalCollectionOptions = {flags: 0, - validator: {x: {$exists: 1}}, - validationLevel: "moderate", - validationAction: "warn"}; -assert.commandWorked(a_conn.getDB(name).createCollection('foo', originalCollectionOptions)); + // Create collection with custom options. 
+ var originalCollectionOptions = { + flags: 0, + validator: {x: {$exists: 1}}, + validationLevel: "moderate", + validationAction: "warn" + }; + assert.commandWorked(a_conn.getDB(name).createCollection('foo', originalCollectionOptions)); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; -assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); + var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true + }; + assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); -assert.eq(getOptions(a_conn), originalCollectionOptions); -assert.eq(getOptions(b_conn), originalCollectionOptions); + assert.eq(getOptions(a_conn), originalCollectionOptions); + assert.eq(getOptions(b_conn), originalCollectionOptions); -// Stop the slave so it never sees the collMod. -replTest.stop(BID); + // Stop the slave so it never sees the collMod. + replTest.stop(BID); -// Run the collMod only on A. -assert.commandWorked(a_conn.getDB(name).runCommand({collMod: "foo", - usePowerOf2Sizes: false, - noPadding: true, - validator: {a: 1}, - validationLevel: "moderate", - validationAction: "warn"})); -assert.eq(getOptions(a_conn), {flags: 2, - validator: {a: 1}, - validationLevel: "moderate", - validationAction: "warn"}); + // Run the collMod only on A. + assert.commandWorked(a_conn.getDB(name).runCommand({ + collMod: "foo", + usePowerOf2Sizes: false, + noPadding: true, + validator: {a: 1}, + validationLevel: "moderate", + validationAction: "warn" + })); + assert.eq( + getOptions(a_conn), + {flags: 2, validator: {a: 1}, validationLevel: "moderate", validationAction: "warn"}); -// Shut down A and fail over to B. -replTest.stop(AID); -replTest.restart(BID); -master = replTest.getPrimary(); -assert.eq(b_conn.host, master.host, "b_conn assumed to be master"); -b_conn = master; + // Shut down A and fail over to B. + replTest.stop(AID); + replTest.restart(BID); + master = replTest.getPrimary(); + assert.eq(b_conn.host, master.host, "b_conn assumed to be master"); + b_conn = master; -// Do a write on B so that A will have to roll back. -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; -assert.writeOK(b_conn.getDB(name).foo.insert({x: 2}, options)); + // Do a write on B so that A will have to roll back. + options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true + }; + assert.writeOK(b_conn.getDB(name).foo.insert({x: 2}, options)); -// Restart A, which should rollback the collMod before becoming primary. -replTest.restart(AID); -try { - b_conn.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}); -} -catch (e) { - // Ignore network disconnect. -} -replTest.waitForState(a_conn, ReplSetTest.State.PRIMARY); -assert.eq(getOptions(a_conn), originalCollectionOptions); + // Restart A, which should rollback the collMod before becoming primary. + replTest.restart(AID); + try { + b_conn.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}); + } catch (e) { + // Ignore network disconnect. 
+ } + replTest.waitForState(a_conn, ReplSetTest.State.PRIMARY); + assert.eq(getOptions(a_conn), originalCollectionOptions); }()); diff --git a/jstests/replsets/rollback_collMod_fatal.js b/jstests/replsets/rollback_collMod_fatal.js index 770165cf88c..c907213f05d 100644 --- a/jstests/replsets/rollback_collMod_fatal.js +++ b/jstests/replsets/rollback_collMod_fatal.js @@ -12,12 +12,14 @@ var name = "rollback_collMod_fatal"; var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); var a_conn = conns[0]; var b_conn = conns[1]; var AID = replTest.getNodeId(a_conn); @@ -29,7 +31,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; +var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; a_conn.getDB(name).foo.ensureIndex({x: 1}, {expireAfterSeconds: 3600}); assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); @@ -39,9 +44,8 @@ replTest.stop(AID); // do a collMod altering TTL which should cause FATAL when rolled back master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); -assert.commandWorked(b_conn.getDB(name).runCommand({collMod: "foo", - index: {keyPattern: {x:1}, - expireAfterSeconds: 10}})); +assert.commandWorked(b_conn.getDB(name).runCommand( + {collMod: "foo", index: {keyPattern: {x: 1}, expireAfterSeconds: 10}})); // shut down B and bring back the original master replTest.stop(BID); @@ -50,7 +54,10 @@ master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); // restart B, which should attempt rollback but then fassert @@ -60,4 +67,4 @@ assert.soon(function() { return rawMongoProgramOutput().match("cannot rollback a collMod command"); }, "B failed to fassert"); -replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] }); +replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]}); diff --git a/jstests/replsets/rollback_different_h.js b/jstests/replsets/rollback_different_h.js index 948823aa894..4b9aede1bbc 100644 --- a/jstests/replsets/rollback_different_h.js +++ b/jstests/replsets/rollback_different_h.js @@ -23,12 +23,14 @@ var name = "rollback_different_h"; var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": 
nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); var a_conn = conns[0]; var b_conn = conns[1]; var AID = replTest.getNodeId(a_conn); @@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; +var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); // shut down master @@ -49,12 +54,15 @@ replTest.stop(AID); // change the h value of the most recent entry on B master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0]; oplog_entry["ts"].t++; oplog_entry["h"] = NumberLong(1); res = b_conn.getDB("local").oplog.rs.insert(oplog_entry); -assert( res.nInserted > 0, tojson( res ) ); +assert(res.nInserted > 0, tojson(res)); // another insert to set minvalid ahead assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); @@ -66,7 +74,10 @@ master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); // restart B, which should rollback and get to the same state as A @@ -81,8 +92,7 @@ assert.soon(function() { } } return true; - } - catch (e) { + } catch (e) { return false; } }, "collection on A and B did not match after rollback"); diff --git a/jstests/replsets/rollback_dropdb.js b/jstests/replsets/rollback_dropdb.js index 5c47e6ab34b..c11b14ab06e 100644 --- a/jstests/replsets/rollback_dropdb.js +++ b/jstests/replsets/rollback_dropdb.js @@ -12,12 +12,14 @@ var name = "rollback_dropdb"; var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); var a_conn = conns[0]; var b_conn = conns[1]; var AID = replTest.getNodeId(a_conn); @@ -29,7 +31,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; +var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); // shut down master @@ -48,14 +53,18 @@ master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + 
writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); // restart B, which should attempt rollback but then fassert clearRawMongoProgramOutput(); replTest.restart(BID); assert.soon(function() { - return rawMongoProgramOutput().match("rollback : can't rollback drop database full resync will be required"); + return rawMongoProgramOutput().match( + "rollback : can't rollback drop database full resync will be required"); }, "B failed to fassert"); -replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] }); +replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]}); diff --git a/jstests/replsets/rollback_empty_ns.js b/jstests/replsets/rollback_empty_ns.js index 77116668971..f6a07319eb4 100644 --- a/jstests/replsets/rollback_empty_ns.js +++ b/jstests/replsets/rollback_empty_ns.js @@ -23,12 +23,14 @@ var name = "rollback_empty_ns"; var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); var a_conn = conns[0]; var b_conn = conns[1]; var AID = replTest.getNodeId(a_conn); @@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; +var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); // shut down master @@ -49,7 +54,10 @@ replTest.stop(AID); // insert a fake oplog entry with an empty ns master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; // another insert to set minvalid ahead assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0]; @@ -64,7 +72,10 @@ master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); // restart B, which should rollback and log a message about not rolling back empty ns'd oplog entry @@ -74,8 +85,7 @@ assert.soon(function() { try { var log = b_conn.getDB("admin").adminCommand({getLog: "global"}).log; return doesEntryMatch(log, msg); - } - catch (e) { + } catch (e) { return false; } }, "Did not see a log entry about skipping the empty ns'd oplog entry during rollback"); diff --git a/jstests/replsets/rollback_empty_o.js b/jstests/replsets/rollback_empty_o.js index dfc94519cb3..f3468fcde5e 100644 --- a/jstests/replsets/rollback_empty_o.js +++ b/jstests/replsets/rollback_empty_o.js @@ -23,12 +23,14 @@ var name = 
"rollback_empty_o"; var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); var a_conn = conns[0]; var b_conn = conns[1]; var AID = replTest.getNodeId(a_conn); @@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; +var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); // shut down master @@ -49,7 +54,10 @@ replTest.stop(AID); // insert a fake oplog entry with an empty o master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; // another insert to set minvalid ahead assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0]; @@ -64,7 +72,10 @@ master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); // restart B, which should rollback and log a message about not rolling back empty o'd oplog entry @@ -74,8 +85,7 @@ assert.soon(function() { try { var log = b_conn.getDB("admin").adminCommand({getLog: "global"}).log; return doesEntryMatch(log, msg); - } - catch (e) { + } catch (e) { return false; } }, "Did not see a log entry about skipping the empty o'd oplog entry during rollback"); diff --git a/jstests/replsets/rollback_empty_o2.js b/jstests/replsets/rollback_empty_o2.js index e1e5add816f..56eb8512575 100644 --- a/jstests/replsets/rollback_empty_o2.js +++ b/jstests/replsets/rollback_empty_o2.js @@ -23,12 +23,14 @@ var name = "rollback_empty_o2"; var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); var a_conn = conns[0]; var b_conn = conns[1]; var AID = replTest.getNodeId(a_conn); @@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; +var options = 
{ + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); // shut down master @@ -49,7 +54,10 @@ replTest.stop(AID); // insert a fake oplog entry with an empty o2 master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; // another insert to set minvalid ahead assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0]; @@ -65,7 +73,10 @@ master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); // restart B, which should rollback and log a message about not rolling back empty o2'd oplog entry @@ -75,8 +86,7 @@ assert.soon(function() { try { var log = b_conn.getDB("admin").adminCommand({getLog: "global"}).log; return doesEntryMatch(log, msg); - } - catch (e) { + } catch (e) { return false; } }, "Did not see a log entry about skipping the empty o2'd oplog entry during rollback"); diff --git a/jstests/replsets/rollback_fake_cmd.js b/jstests/replsets/rollback_fake_cmd.js index b624a8ea80e..175359121f8 100644 --- a/jstests/replsets/rollback_fake_cmd.js +++ b/jstests/replsets/rollback_fake_cmd.js @@ -23,12 +23,14 @@ var name = "rollback_fake_cmd"; var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); var a_conn = conns[0]; var b_conn = conns[1]; var AID = replTest.getNodeId(a_conn); @@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; +var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); // shut down master @@ -49,13 +54,18 @@ replTest.stop(AID); // insert a fake oplog entry with a nonexistent command master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; // another insert to set minvalid ahead assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0]; oplog_entry.ts = Timestamp(oplog_entry.ts.t, oplog_entry.ts.i + 1); oplog_entry.op = "c"; -oplog_entry.o = {fake_command_name: 1}; +oplog_entry.o = { + fake_command_name: 1 +}; assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); jsTestLog('inserted oplog entry with 
invalid command: ' + tojson(oplog_entry)); @@ -66,7 +76,10 @@ master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); // restart B, which should rollback and log a message about not rolling back the nonexistent cmd @@ -77,4 +90,4 @@ assert.soon(function() { return rawMongoProgramOutput().match(msg); }, "Did not see a log entry about skipping the nonexistent command during rollback"); -replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] }); +replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]}); diff --git a/jstests/replsets/rollback_index.js b/jstests/replsets/rollback_index.js index 1a3197f9ed0..6fb3044b740 100644 --- a/jstests/replsets/rollback_index.js +++ b/jstests/replsets/rollback_index.js @@ -25,12 +25,14 @@ var name = "rollback_index"; var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); -replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1] }, - { "_id": 2, "host": nodes[2], arbiterOnly: true}] - }); +replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], arbiterOnly: true} + ] +}); var a_conn = conns[0]; var b_conn = conns[1]; var AID = replTest.getNodeId(a_conn); @@ -42,7 +44,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; +var options = { + writeConcern: {w: 2, wtimeout: 60000}, + upsert: true +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); // shut down master @@ -52,7 +57,10 @@ replTest.stop(AID); // cause errors when applying operations from the primary. master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); -options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; +options = { + writeConcern: {w: 1, wtimeout: 60000}, + upsert: true +}; // another insert to set minvalid ahead assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); assert.commandWorked(b_conn.getDB(name).foo.ensureIndex({x: 1}, {unique: true})); @@ -66,7 +74,9 @@ assert(a_conn.host === master.host, "a_conn assumed to be master"); // Insert a document with the same value for 'x' that should be // propagated successfully to B if the unique index was dropped successfully. -options = {writeConcern: {w: 1, wtimeout: 60000}}; +options = { + writeConcern: {w: 1, wtimeout: 60000} +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); assert.eq(2, a_conn.getDB(name).foo.count(), 'invalid number of documents on A'); @@ -78,14 +88,18 @@ replTest.awaitReplication(); replTest.awaitSecondaryNodes(); // Perform a write that should succeed if there's no unique index on B. 
-options = {writeConcern: {w: 'majority', wtimeout: 60000}}; +options = { + writeConcern: {w: 'majority', wtimeout: 60000} +}; assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); // Check collections and indexes. -assert.eq(3, b_conn.getDB(name).foo.count(), +assert.eq(3, + b_conn.getDB(name).foo.count(), 'Collection on B does not have the same number of documents as A'); -assert.eq(a_conn.getDB(name).foo.getIndexes().length, b_conn.getDB(name).foo.getIndexes().length, +assert.eq(a_conn.getDB(name).foo.getIndexes().length, + b_conn.getDB(name).foo.getIndexes().length, 'Unique index not dropped during rollback: ' + - tojson(b_conn.getDB(name).foo.getIndexes())); + tojson(b_conn.getDB(name).foo.getIndexes())); replTest.stopSet(); diff --git a/jstests/replsets/rollback_too_new.js b/jstests/replsets/rollback_too_new.js index 8f2d43bc8d7..e0a88e12f31 100644 --- a/jstests/replsets/rollback_too_new.js +++ b/jstests/replsets/rollback_too_new.js @@ -14,27 +14,31 @@ var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); var conns = replTest.startSet(); - replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0] }, - { "_id": 1, "host": nodes[1], arbiterOnly: true }, - { "_id": 2, "host": nodes[2], priority: 0 }], - "settings": { - "chainingAllowed": false - } - }); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1], arbiterOnly: true}, + {"_id": 2, "host": nodes[2], priority: 0} + ], + "settings": {"chainingAllowed": false} + }); var c_conn = conns[2]; var CID = replTest.getNodeId(c_conn); // get master and do an initial write var master = replTest.getPrimary(); - var options = {writeConcern: {w: 2, wtimeout: 60000}}; + var options = { + writeConcern: {w: 2, wtimeout: 60000} + }; assert.writeOK(master.getDB(name).foo.insert({x: 1}, options)); // add an oplog entry from the distant future as the most recent entry on node C var future_oplog_entry = conns[2].getDB("local").oplog.rs.find().sort({$natural: -1})[0]; future_oplog_entry["ts"] = new Timestamp(future_oplog_entry["ts"].getTime() + 200000, 1); - options = {writeConcern: {w: 1, wtimeout: 60000}}; + options = { + writeConcern: {w: 1, wtimeout: 60000} + }; assert.writeOK(conns[2].getDB("local").oplog.rs.insert(future_oplog_entry, options)); replTest.stop(CID); @@ -56,6 +60,6 @@ } }, "node C failed to fassert", 60 * 1000); - replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] }); + replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]}); }()); diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js index a9c3024b51e..b992464682d 100644 --- a/jstests/replsets/rslib.js +++ b/jstests/replsets/rslib.js @@ -8,207 +8,217 @@ var awaitOpTime; var startSetIfSupportsReadMajority; var waitUntilAllNodesCaughtUp; -(function () { -"use strict"; -var count = 0; -var w = 0; - -wait = function(f,msg) { - w++; - var n = 0; - while (!f()) { - if( n % 4 == 0 ) - print("waiting " + w); - if (++n == 4) { - print("" + f); - } - if (n >= 200) { - throw new Error('tried 200 times, giving up on ' + msg); - } - sleep(1000); - } -}; - -/** - * Use this to do something once every 4 iterations. 
- * - * <pre> - * for (i=0; i<1000; i++) { - * occasionally(function() { print("4 more iterations"); }); - * } - * </pre> - */ -occasionally = function(f, n) { - var interval = n || 4; - if (count % interval == 0) { - f(); - } - count++; -}; - -reconnect = function(a) { - wait(function() { - var db; - try { - // make this work with either dbs or connections - if (typeof(a.getDB) == "function") { - db = a.getDB('foo'); - } - else { - db = a; - } - db.bar.stats(); - if (jsTest.options().keyFile) { // SERVER-4241: Shell connections don't re-authenticate on reconnect - return jsTest.authenticate(db.getMongo()); +(function() { + "use strict"; + var count = 0; + var w = 0; + + wait = function(f, msg) { + w++; + var n = 0; + while (!f()) { + if (n % 4 == 0) + print("waiting " + w); + if (++n == 4) { + print("" + f); + } + if (n >= 200) { + throw new Error('tried 200 times, giving up on ' + msg); + } + sleep(1000); } - return true; - } catch(e) { - print(e); - return false; - } - }); -}; - - -getLatestOp = function(server) { - server.getDB("admin").getMongo().setSlaveOk(); - var log = server.getDB("local")['oplog.rs']; - var cursor = log.find({}).sort({'$natural': -1}).limit(1); - if (cursor.hasNext()) { - return cursor.next(); - } - return null; -}; - - -waitForAllMembers = function(master, timeout) { - var failCount = 0; - - assert.soon( function() { - var state = null; - try { - state = master.getSisterDB("admin").runCommand({replSetGetStatus:1}); - failCount = 0; - } catch ( e ) { - // Connection can get reset on replica set failover causing a socket exception - print( "Calling replSetGetStatus failed" ); - print( e ); - return false; + }; + + /** + * Use this to do something once every 4 iterations. + * + * <pre> + * for (i=0; i<1000; i++) { + * occasionally(function() { print("4 more iterations"); }); + * } + * </pre> + */ + occasionally = function(f, n) { + var interval = n || 4; + if (count % interval == 0) { + f(); } - occasionally(function() { printjson(state); }, 10); - - for (var m in state.members) { - if (state.members[m].state != 1 && // PRIMARY - state.members[m].state != 2 && // SECONDARY - state.members[m].state != 7) { // ARBITER + count++; + }; + + reconnect = function(a) { + wait(function() { + var db; + try { + // make this work with either dbs or connections + if (typeof(a.getDB) == "function") { + db = a.getDB('foo'); + } else { + db = a; + } + db.bar.stats(); + if (jsTest.options().keyFile) { // SERVER-4241: Shell connections don't + // re-authenticate on reconnect + return jsTest.authenticate(db.getMongo()); + } + return true; + } catch (e) { + print(e); return false; } + }); + }; + + getLatestOp = function(server) { + server.getDB("admin").getMongo().setSlaveOk(); + var log = server.getDB("local")['oplog.rs']; + var cursor = log.find({}).sort({'$natural': -1}).limit(1); + if (cursor.hasNext()) { + return cursor.next(); } - printjson( state ); - return true; - }, "not all members ready", timeout || 60000); - - print( "All members are now in state PRIMARY, SECONDARY, or ARBITER" ); -}; - -reconfig = function(rs, config, force) { - "use strict"; - var admin = rs.getPrimary().getDB("admin"); - var e; - var master; - try { - assert.commandWorked(admin.runCommand({replSetReconfig: config, force: force})); - } - catch (e) { - if (tojson(e).indexOf( "error doing query: failed" ) < 0) { - throw e; - } - } - - var master = rs.getPrimary().getDB("admin"); - waitForAllMembers(master); - - return master; -}; - -awaitOpTime = function (node, opTime) { - var ts, ex; - 
assert.soon(function () { + return null; + }; + + waitForAllMembers = function(master, timeout) { + var failCount = 0; + + assert.soon(function() { + var state = null; + try { + state = master.getSisterDB("admin").runCommand({replSetGetStatus: 1}); + failCount = 0; + } catch (e) { + // Connection can get reset on replica set failover causing a socket exception + print("Calling replSetGetStatus failed"); + print(e); + return false; + } + occasionally(function() { + printjson(state); + }, 10); + + for (var m in state.members) { + if (state.members[m].state != 1 && // PRIMARY + state.members[m].state != 2 && // SECONDARY + state.members[m].state != 7) { // ARBITER + return false; + } + } + printjson(state); + return true; + }, "not all members ready", timeout || 60000); + + print("All members are now in state PRIMARY, SECONDARY, or ARBITER"); + }; + + reconfig = function(rs, config, force) { + "use strict"; + var admin = rs.getPrimary().getDB("admin"); + var e; + var master; try { - // The following statement extracts the timestamp field from the most recent element of - // the oplog, and stores it in "ts". - ts = node.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next().ts; - if ((ts.t == opTime.t) && (ts.i == opTime.i)) { - return true; + assert.commandWorked(admin.runCommand({replSetReconfig: config, force: force})); + } catch (e) { + if (tojson(e).indexOf("error doing query: failed") < 0) { + throw e; } - ex = null; - return false; } - catch (ex) { - return false; - } - }, function () { - var message = "Node " + node + " only reached optime " + tojson(ts) + " not " + - tojson(opTime); - if (ex) { - message += "; last attempt failed with exception " + tojson(ex); - } - return message; - }); -}; - -/** - * Uses the results of running replSetGetStatus against an arbitrary replset node to wait until - * all nodes in the set are replicated through the same optime. - * 'rs' is an array of connections to replica set nodes. This function is useful when you - * don't have a ReplSetTest object to use, otherwise ReplSetTest.awaitReplication is preferred. - */ -waitUntilAllNodesCaughtUp = function(rs, timeout) { - var rsStatus; - var firstConflictingIndex; - var ot; - var otherOt; - assert.soon(function () { - rsStatus = rs[0].adminCommand('replSetGetStatus'); - if (rsStatus.ok != 1) { - return false; - } - assert.eq(rs.length, rsStatus.members.length, tojson(rsStatus)); - ot = rsStatus.members[0].optime; - for (var i = 1; i < rsStatus.members.length; ++i) { - otherOt = rsStatus.members[i].optime; - if (bsonWoCompare({ts: otherOt.ts}, {ts: ot.ts}) || - bsonWoCompare({t: otherOt.t}, {t: ot.t})) { - firstConflictingIndex = i; + + var master = rs.getPrimary().getDB("admin"); + waitForAllMembers(master); + + return master; + }; + + awaitOpTime = function(node, opTime) { + var ts, ex; + assert.soon( + function() { + try { + // The following statement extracts the timestamp field from the most recent + // element of + // the oplog, and stores it in "ts". 
+ ts = node.getDB("local")['oplog.rs'] + .find({}) + .sort({'$natural': -1}) + .limit(1) + .next() + .ts; + if ((ts.t == opTime.t) && (ts.i == opTime.i)) { + return true; + } + ex = null; + return false; + } catch (ex) { + return false; + } + }, + function() { + var message = "Node " + node + " only reached optime " + tojson(ts) + " not " + + tojson(opTime); + if (ex) { + message += "; last attempt failed with exception " + tojson(ex); + } + return message; + }); + }; + + /** + * Uses the results of running replSetGetStatus against an arbitrary replset node to wait until + * all nodes in the set are replicated through the same optime. + * 'rs' is an array of connections to replica set nodes. This function is useful when you + * don't have a ReplSetTest object to use, otherwise ReplSetTest.awaitReplication is preferred. + */ + waitUntilAllNodesCaughtUp = function(rs, timeout) { + var rsStatus; + var firstConflictingIndex; + var ot; + var otherOt; + assert.soon( + function() { + rsStatus = rs[0].adminCommand('replSetGetStatus'); + if (rsStatus.ok != 1) { + return false; + } + assert.eq(rs.length, rsStatus.members.length, tojson(rsStatus)); + ot = rsStatus.members[0].optime; + for (var i = 1; i < rsStatus.members.length; ++i) { + otherOt = rsStatus.members[i].optime; + if (bsonWoCompare({ts: otherOt.ts}, {ts: ot.ts}) || + bsonWoCompare({t: otherOt.t}, {t: ot.t})) { + firstConflictingIndex = i; + return false; + } + } + return true; + }, + function() { + return "Optimes of members 0 (" + tojson(ot) + ") and " + firstConflictingIndex + + " (" + tojson(otherOt) + ") are different in " + tojson(rsStatus); + }, + timeout); + }; + + /** + * Starts each node in the given replica set if the storage engine supports readConcern + *'majority'. + * Returns true if the replica set was started successfully and false otherwise. + * + * @param replSetTest - The instance of {@link ReplSetTest} to start + * @param options - The options passed to {@link ReplSetTest.startSet} + */ + startSetIfSupportsReadMajority = function(replSetTest, options) { + try { + replSetTest.startSet(options); + } catch (e) { + var conn = MongoRunner.runMongod(); + if (!conn.getDB("admin").serverStatus().storageEngine.supportsCommittedReads) { + MongoRunner.stopMongod(conn); return false; } + throw e; } return true; - }, function () { - return "Optimes of members 0 (" + tojson(ot) + ") and " + firstConflictingIndex + " (" + - tojson(otherOt) + ") are different in " + tojson(rsStatus); - }, timeout); -}; - -/** - * Starts each node in the given replica set if the storage engine supports readConcern 'majority'. - * Returns true if the replica set was started successfully and false otherwise. 
- * - * @param replSetTest - The instance of {@link ReplSetTest} to start - * @param options - The options passed to {@link ReplSetTest.startSet} - */ -startSetIfSupportsReadMajority = function (replSetTest, options) { - try { - replSetTest.startSet(options); - } catch (e) { - var conn = MongoRunner.runMongod(); - if (!conn.getDB("admin").serverStatus().storageEngine.supportsCommittedReads) { - MongoRunner.stopMongod(conn); - return false; - } - throw e; - } - return true; -}; + }; }()); diff --git a/jstests/replsets/server8070.js b/jstests/replsets/server8070.js index 2b29a7592d0..e91e95e99a4 100644 --- a/jstests/replsets/server8070.js +++ b/jstests/replsets/server8070.js @@ -5,37 +5,27 @@ // helper to ensure two nodes are at the same place in the oplog var waitForSameOplogPosition = function(db1, db2, errmsg) { - assert.soon( - function() { - var last1 = db1.getSisterDB("local").oplog.rs.find().sort({$natural:-1}).limit(1) - .next(); - var last2 = db2.getSisterDB("local").oplog.rs.find().sort({$natural:-1}).limit(1) - .next(); - jsTest.log("primary: " + tojson(last1) + " secondary: " + tojson(last2)); - - return ((last1.ts.t === last2.ts.t) && (last1.ts.i === last2.ts.i)); - }, - errmsg - ); + assert.soon(function() { + var last1 = db1.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next(); + var last2 = db2.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next(); + jsTest.log("primary: " + tojson(last1) + " secondary: " + tojson(last2)); + + return ((last1.ts.t === last2.ts.t) && (last1.ts.i === last2.ts.i)); + }, errmsg); }; // start set var replSet = new ReplSetTest({name: 'testSet', nodes: 3}); replSet.startSet(); -replSet.initiate( - { - _id:'testSet', - members: - [ - {_id: 0, host: getHostName()+":"+replSet.ports[0]}, - {_id: 1, host: getHostName()+":"+replSet.ports[1], priority: 0}, - {_id: 2, host: getHostName()+":"+replSet.ports[2], priority: 0} - ], - settings: { - chainingAllowed: false - } - } -); +replSet.initiate({ + _id: 'testSet', + members: [ + {_id: 0, host: getHostName() + ":" + replSet.ports[0]}, + {_id: 1, host: getHostName() + ":" + replSet.ports[1], priority: 0}, + {_id: 2, host: getHostName() + ":" + replSet.ports[2], priority: 0} + ], + settings: {chainingAllowed: false} +}); // set up common points of access var master = replSet.getPrimary(); @@ -46,12 +36,12 @@ var member2 = replSet.nodes[1].getDB("admin"); var member3 = replSet.nodes[2].getDB("admin"); // Do an initial write -master.getDB("foo").bar.insert({x:1}); +master.getDB("foo").bar.insert({x: 1}); replSet.awaitReplication(); jsTest.log("Make sure 2 & 3 are syncing from the primary"); -member2.adminCommand({replSetSyncFrom : getHostName()+":"+replSet.ports[0]}); -member3.adminCommand({replSetSyncFrom : getHostName()+":"+replSet.ports[0]}); +member2.adminCommand({replSetSyncFrom: getHostName() + ":" + replSet.ports[0]}); +member3.adminCommand({replSetSyncFrom: getHostName() + ":" + replSet.ports[0]}); jsTest.log("Stop 2's replication"); member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}); @@ -69,7 +59,7 @@ jsTest.log("Stop 3's replication"); member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}); // logLevel 3 will allow us to see each op the secondary pulls from the primary so that we can // determine whether or not all ops are actually being pulled -member3.runCommand({setParameter: 1, logLevel:3}); +member3.runCommand({setParameter: 1, logLevel: 3}); jsTest.log("Start 2's replication"); 
member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}); @@ -92,7 +82,7 @@ for (var i = 50; i < 75; i++) { } var primaryCollectionSize = primary.bar.find().itcount(); jsTest.log("primary collection size: " + primaryCollectionSize); -var last = primary.getSisterDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next(); +var last = primary.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next(); jsTest.log("waiting a bit for the secondaries to get the write"); sleep(10000); @@ -105,43 +95,40 @@ replSet.stop(0); // which would check for 30 seconds that node 3 didn't try to sync from 2 sleep(30 * 1000); jsTest.log("3 should not attempt to sync from 2, as it cannot clear its buffer"); -var syncingTo = member3.adminCommand({replSetGetStatus:1}).syncingTo; -assert(syncingTo !== getHostName()+":"+replSet.ports[1], "node 3 is syncing from node 2 :("); +var syncingTo = member3.adminCommand({replSetGetStatus: 1}).syncingTo; +assert(syncingTo !== getHostName() + ":" + replSet.ports[1], "node 3 is syncing from node 2 :("); jsTest.log("Pause 3's bgsync thread"); var rsBgSyncProduceResult3 = - member3.runCommand({configureFailPoint: 'rsBgSyncProduce', mode: 'alwaysOn'}); + member3.runCommand({configureFailPoint: 'rsBgSyncProduce', mode: 'alwaysOn'}); assert.eq(1, rsBgSyncProduceResult3.ok, "member 3 rsBgSyncProduce admin command failed"); // count documents in member 3 -assert.eq(26, member3.getSisterDB("foo").bar.find().itcount(), +assert.eq(26, + member3.getSisterDB("foo").bar.find().itcount(), "collection size incorrect on node 3 before applying ops 25-75"); jsTest.log("Allow 3 to apply ops 25-75"); var rsSyncApplyStopResult3 = - member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}); + member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}); assert.eq(1, rsSyncApplyStopResult3.ok, "member 3 rsSyncApplyStop admin command failed"); -assert.soon( - function() { - var last3 = member3.getSisterDB("local").oplog.rs.find().sort({$natural:-1}).limit(1) - .next(); - jsTest.log("primary: " + tojson(last, '', true) + " secondary: " + tojson(last3, '', true)); - jsTest.log("member 3 collection size: " + member3.getSisterDB("foo").bar.find().itcount()); - jsTest.log("curop: "); - printjson(member3.getSisterDB("foo").currentOp(true)); - return ((last.ts.t === last3.ts.t) && (last.ts.i === last3.ts.i)); - }, - "Replication member 3 did not apply ops 25-75" -); +assert.soon(function() { + var last3 = member3.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next(); + jsTest.log("primary: " + tojson(last, '', true) + " secondary: " + tojson(last3, '', true)); + jsTest.log("member 3 collection size: " + member3.getSisterDB("foo").bar.find().itcount()); + jsTest.log("curop: "); + printjson(member3.getSisterDB("foo").currentOp(true)); + return ((last.ts.t === last3.ts.t) && (last.ts.i === last3.ts.i)); +}, "Replication member 3 did not apply ops 25-75"); jsTest.log("Start 3's bgsync thread"); member3.runCommand({configureFailPoint: 'rsBgSyncProduce', mode: 'off'}); jsTest.log("Node 3 shouldn't hit rollback"); -var end = (new Date()).getTime()+10000; +var end = (new Date()).getTime() + 10000; while ((new Date()).getTime() < end) { - assert('ROLLBACK' !== member3.runCommand({replSetGetStatus:1}).members[2].stateStr); + assert('ROLLBACK' !== member3.runCommand({replSetGetStatus: 1}).members[2].stateStr); sleep(30); } diff --git a/jstests/replsets/server_status_metrics.js b/jstests/replsets/server_status_metrics.js index 
46add3f0cd2..553077cdc99 100644 --- a/jstests/replsets/server_status_metrics.js +++ b/jstests/replsets/server_status_metrics.js @@ -21,7 +21,7 @@ function testSecondaryMetrics(secondary, opCount, offset) { assert(ss.metrics.repl.buffer.maxSizeBytes >= 0, "maxSize (bytes) missing"); assert(ss.metrics.repl.preload.docs.num >= 0, "preload.docs num missing"); - assert(ss.metrics.repl.preload.docs.totalMillis >= 0, "preload.docs time missing"); + assert(ss.metrics.repl.preload.docs.totalMillis >= 0, "preload.docs time missing"); assert(ss.metrics.repl.preload.docs.num >= 0, "preload.indexes num missing"); assert(ss.metrics.repl.preload.indexes.totalMillis >= 0, "preload.indexes time missing"); @@ -30,7 +30,7 @@ function testSecondaryMetrics(secondary, opCount, offset) { assert.eq(ss.metrics.repl.apply.ops, opCount + offset, "wrong number of applied ops"); } -var rt = new ReplSetTest( { name : "server_status_metrics" , nodes: 2, oplogSize: 100 } ); +var rt = new ReplSetTest({name: "server_status_metrics", nodes: 2, oplogSize: 100}); rt.startSet(); rt.initiate(); @@ -41,24 +41,28 @@ var primary = rt.getPrimary(); var testDB = primary.getDB("test"); assert.commandWorked(testDB.createCollection('a')); -assert.writeOK(testDB.b.insert({}, { writeConcern: { w: 2 }})); +assert.writeOK(testDB.b.insert({}, {writeConcern: {w: 2}})); var ss = secondary.getDB("test").serverStatus(); var secondaryBaseOplogInserts = ss.metrics.repl.apply.ops; -//add test docs +// add test docs var bulk = testDB.a.initializeUnorderedBulkOp(); -for(x = 0; x < 1000; x++) { +for (x = 0; x < 1000; x++) { bulk.insert({}); } -assert.writeOK(bulk.execute({ w: 2 })); +assert.writeOK(bulk.execute({w: 2})); -testSecondaryMetrics(secondary, 1000, secondaryBaseOplogInserts ); +testSecondaryMetrics(secondary, 1000, secondaryBaseOplogInserts); -var options = { writeConcern: { w: 2 }, multi: true, upsert: true }; -assert.writeOK(testDB.a.update({}, { $set: { d: new Date() }}, options)); +var options = { + writeConcern: {w: 2}, + multi: true, + upsert: true +}; +assert.writeOK(testDB.a.update({}, {$set: {d: new Date()}}, options)); -testSecondaryMetrics(secondary, 2000, secondaryBaseOplogInserts ); +testSecondaryMetrics(secondary, 2000, secondaryBaseOplogInserts); // Test getLastError.wtime and that it only records stats for w > 1, see SERVER-9005 var startMillis = testDB.serverStatus().metrics.getLastError.wtime.totalMillis; @@ -66,20 +70,20 @@ var startNum = testDB.serverStatus().metrics.getLastError.wtime.num; printjson(primary.getDB("test").serverStatus().metrics); -assert.writeOK(testDB.a.insert({ x: 1 }, { writeConcern: { w: 1, wtimeout: 5000 }})); +assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: 1, wtimeout: 5000}})); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum); -assert.writeOK(testDB.a.insert({ x: 1 }, { writeConcern: { w: -11, wtimeout: 5000 }})); +assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: -11, wtimeout: 5000}})); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum); -assert.writeOK(testDB.a.insert({ x: 1 }, { writeConcern: { w: 2, wtimeout: 5000 }})); +assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: 2, wtimeout: 5000}})); assert(testDB.serverStatus().metrics.getLastError.wtime.totalMillis >= startMillis); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum + 
1); // Write will fail because there are only 2 nodes -assert.writeError(testDB.a.insert({ x: 1 }, { writeConcern: { w: 3, wtimeout: 50 }})); +assert.writeError(testDB.a.insert({x: 1}, {writeConcern: {w: 3, wtimeout: 50}})); assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum + 2); printjson(primary.getDB("test").serverStatus().metrics); diff --git a/jstests/replsets/server_status_repl.js b/jstests/replsets/server_status_repl.js index 787cd2356c0..c00fcc8818a 100644 --- a/jstests/replsets/server_status_repl.js +++ b/jstests/replsets/server_status_repl.js @@ -1,4 +1,4 @@ -var rt = new ReplSetTest( { name : "server_status_repl" , nodes: 2} ); +var rt = new ReplSetTest({name: "server_status_repl", nodes: 2}); rt.startSet(); rt.initiate(); @@ -9,9 +9,9 @@ var primary = rt.getPrimary(); var testDB = primary.getDB("test"); assert.commandWorked(testDB.createCollection('a')); -assert.writeOK(testDB.b.insert({}, { writeConcern: { w: 2 }})); +assert.writeOK(testDB.b.insert({}, {writeConcern: {w: 2}})); -var ss = primary.getDB("test").serverStatus({repl:1}); +var ss = primary.getDB("test").serverStatus({repl: 1}); assert.neq(ss.repl.replicationProgress, null, tojson(ss.repl)); rt.stopSet();
\ No newline at end of file diff --git a/jstests/replsets/single_server_majority.js b/jstests/replsets/single_server_majority.js index 3e3a6dc7bbf..c36f021c989 100644 --- a/jstests/replsets/single_server_majority.js +++ b/jstests/replsets/single_server_majority.js @@ -9,4 +9,4 @@ col = db.getCollection("single_server_majority"); col.drop(); // see if we can get a majority write on this single server -assert.writeOK(col.save({ a: "test" }, { writeConcern: { w: 'majority' }}));
\ No newline at end of file +assert.writeOK(col.save({a: "test"}, {writeConcern: {w: 'majority'}}));
\ No newline at end of file diff --git a/jstests/replsets/sized_zero_capped.js b/jstests/replsets/sized_zero_capped.js index 149cbaaf1af..41debd6d17c 100644 --- a/jstests/replsets/sized_zero_capped.js +++ b/jstests/replsets/sized_zero_capped.js @@ -8,12 +8,14 @@ var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.nodeList(); replTest.startSet(); - replTest.initiate({"_id": name, - "members": [ - { "_id": 0, "host": nodes[0], priority: 3 }, - { "_id": 1, "host": nodes[1], priority: 0 }, - { "_id": 2, "host": nodes[2], priority: 0 }] - }); + replTest.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], priority: 3}, + {"_id": 1, "host": nodes[1], priority: 0}, + {"_id": 2, "host": nodes[2], priority: 0} + ] + }); var testDB = replTest.getPrimary().getDB(name); testDB.createCollection(name, {capped: true, size: 0}); @@ -21,7 +23,7 @@ // ensure secondary is still up and responsive var secondary = replTest.getSecondary(); - assert.commandWorked(secondary.getDB(name).runCommand({ping:1 })); + assert.commandWorked(secondary.getDB(name).runCommand({ping: 1})); replTest.stopSet(); }()); diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js index ae97d3373bc..040c47d3e78 100644 --- a/jstests/replsets/slavedelay1.js +++ b/jstests/replsets/slavedelay1.js @@ -1,120 +1,121 @@ load("jstests/replsets/rslib.js"); -doTest = function( signal ) { +doTest = function(signal) { - var name = "slaveDelay"; - var host = getHostName(); + var name = "slaveDelay"; + var host = getHostName(); - var replTest = new ReplSetTest( {name: name, nodes: 3} ); + var replTest = new ReplSetTest({name: name, nodes: 3}); - var nodes = replTest.startSet(); + var nodes = replTest.startSet(); - /* set slaveDelay to 30 seconds */ - var config = replTest.getReplSetConfig(); - config.members[2].priority = 0; - config.members[2].slaveDelay = 30; + /* set slaveDelay to 30 seconds */ + var config = replTest.getReplSetConfig(); + config.members[2].priority = 0; + config.members[2].slaveDelay = 30; - replTest.initiate(config); + replTest.initiate(config); - var master = replTest.getPrimary().getDB(name); - var slaveConns = replTest.liveNodes.slaves; - var slaves = []; - for (var i in slaveConns) { - var d = slaveConns[i].getDB(name); - slaves.push(d); - } + var master = replTest.getPrimary().getDB(name); + var slaveConns = replTest.liveNodes.slaves; + var slaves = []; + for (var i in slaveConns) { + var d = slaveConns[i].getDB(name); + slaves.push(d); + } - waitForAllMembers(master); + waitForAllMembers(master); - // insert a record - assert.writeOK(master.foo.insert({ x: 1 }, { writeConcern: { w: 2 }})); + // insert a record + assert.writeOK(master.foo.insert({x: 1}, {writeConcern: {w: 2}})); - var doc = master.foo.findOne(); - assert.eq(doc.x, 1); + var doc = master.foo.findOne(); + assert.eq(doc.x, 1); - // make sure slave has it - var doc = slaves[0].foo.findOne(); - assert.eq(doc.x, 1); + // make sure slave has it + var doc = slaves[0].foo.findOne(); + assert.eq(doc.x, 1); - // make sure delayed slave doesn't have it - for (var i=0; i<8; i++) { - assert.eq(slaves[1].foo.findOne(), null); - sleep(1000); - } + // make sure delayed slave doesn't have it + for (var i = 0; i < 8; i++) { + assert.eq(slaves[1].foo.findOne(), null); + sleep(1000); + } - // within 30 seconds delayed slave should have it - assert.soon(function() { - var z = slaves[1].foo.findOne(); - return z && z.x == 1; - }); + // within 30 seconds delayed slave should have it + assert.soon(function() { + var z 
= slaves[1].foo.findOne(); + return z && z.x == 1; + }); + /************* Part 2 *******************/ - /************* Part 2 *******************/ + // how about if we add a new server? will it sync correctly? + conn = replTest.add(); - // how about if we add a new server? will it sync correctly? - conn = replTest.add(); + config = master.getSisterDB("local").system.replset.findOne(); + printjson(config); + config.version++; + config.members.push({ + _id: 3, + host: host + ":" + replTest.ports[replTest.ports.length - 1], + priority: 0, + slaveDelay: 30 + }); - config = master.getSisterDB("local").system.replset.findOne(); - printjson(config); - config.version++; - config.members.push({_id: 3, - host: host+":"+replTest.ports[replTest.ports.length-1], - priority:0, - slaveDelay:30}); + master = reconfig(replTest, config); + master = master.getSisterDB(name); - master = reconfig(replTest, config); - master = master.getSisterDB(name); + // wait for the node to catch up + replTest.awaitReplication(90 * 1000); - // wait for the node to catch up - replTest.awaitReplication(90*1000); + assert.writeOK(master.foo.insert({_id: 123, x: 'foo'}, {writeConcern: {w: 2}})); - assert.writeOK(master.foo.insert({ _id: 123, x: 'foo' }, { writeConcern: { w: 2 }})); + for (var i = 0; i < 8; i++) { + assert.eq(conn.getDB(name).foo.findOne({_id: 123}), null); + sleep(1000); + } - for (var i=0; i<8; i++) { - assert.eq(conn.getDB(name).foo.findOne({_id:123}), null); - sleep(1000); - } + assert.soon(function() { + var z = conn.getDB(name).foo.findOne({_id: 123}); + return z != null && z.x == "foo"; + }); - assert.soon(function() { - var z = conn.getDB(name).foo.findOne({_id:123}); - return z != null && z.x == "foo"; - }); + /************* Part 3 ******************/ - /************* Part 3 ******************/ + print("reconfigure slavedelay"); - print("reconfigure slavedelay"); + config.version++; + config.members[3].slaveDelay = 15; - config.version++; - config.members[3].slaveDelay = 15; + reconfig(replTest, config); + master = replTest.getPrimary().getDB(name); + assert.soon(function() { + return conn.getDB("local").system.replset.findOne().version == config.version; + }); - reconfig(replTest, config); - master = replTest.getPrimary().getDB(name); - assert.soon(function() { - return conn.getDB("local").system.replset.findOne().version == config.version; - }); + // wait for node to become secondary + assert.soon(function() { + var result = conn.getDB("admin").isMaster(); + printjson(result); + return result.secondary; + }); - // wait for node to become secondary - assert.soon(function() { - var result = conn.getDB("admin").isMaster(); - printjson(result); - return result.secondary; - }); + print("testing insert"); + master.foo.insert({_id: 124, "x": "foo"}); + assert(master.foo.findOne({_id: 124}) != null); - print("testing insert"); - master.foo.insert({_id : 124, "x" : "foo"}); - assert(master.foo.findOne({_id:124}) != null); - - for (var i=0; i<10; i++) { - assert.eq(conn.getDB(name).foo.findOne({_id:124}), null); - sleep(1000); - } - - // the node should have the document in 15 seconds (20 for some safety against races) - assert.soon(function() { - return conn.getDB(name).foo.findOne({_id:124}) != null; - }, 10*1000); - - replTest.stopSet(); + for (var i = 0; i < 10; i++) { + assert.eq(conn.getDB(name).foo.findOne({_id: 124}), null); + sleep(1000); + } + + // the node should have the document in 15 seconds (20 for some safety against races) + assert.soon(function() { + return conn.getDB(name).foo.findOne({_id: 
124}) != null; + }, 10 * 1000); + + replTest.stopSet(); }; doTest(15); diff --git a/jstests/replsets/slavedelay3.js b/jstests/replsets/slavedelay3.js index 5a19027a4ad..2ce6e9b2a80 100644 --- a/jstests/replsets/slavedelay3.js +++ b/jstests/replsets/slavedelay3.js @@ -1,7 +1,7 @@ load("jstests/replsets/rslib.js"); var name = 'slavedelay3'; -var replTest = new ReplSetTest({ name: name, nodes: 3, useBridge: true }); +var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true}); var nodes = replTest.startSet(); var config = replTest.getReplSetConfig(); // ensure member 0 is primary @@ -24,21 +24,21 @@ for (var i in slaveConns) { waitForAllMembers(master); - - replTest.awaitReplication(); nodes[0].disconnect(nodes[2]); -master.foo.insert({x:1}); +master.foo.insert({x: 1}); assert.commandWorked(nodes[1].getDB("admin").runCommand({"replSetSyncFrom": nodes[0].host})); var res; assert.soon(function() { res = nodes[1].getDB("admin").runCommand({"replSetGetStatus": 1}); return res.syncingTo === nodes[0].host; -}, "node 4 failed to start chaining: "+ tojson(res)); +}, "node 4 failed to start chaining: " + tojson(res)); // make sure the record still appears in the remote slave -assert.soon( function() { return slave[1].foo.findOne() != null; } ); +assert.soon(function() { + return slave[1].foo.findOne() != null; +}); replTest.stopSet(); diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js index cd574f725e6..4b736b3b93a 100644 --- a/jstests/replsets/stepdown.js +++ b/jstests/replsets/stepdown.js @@ -14,24 +14,9 @@ var errorWasDueToConnectionFailure = function(error) { }; var replTest = new ReplSetTest({ - name : 'testSet', - nodes : { - "n0" : { - rsConfig : { - priority : 2 - } - }, - "n1" : {}, - "n2" : { - rsConfig : { - votes : 1, - priority : 0 - } - } - }, - nodeOptions : { - verbose : 1 - } + name: 'testSet', + nodes: {"n0": {rsConfig: {priority: 2}}, "n1": {}, "n2": {rsConfig: {votes: 1, priority: 0}}}, + nodeOptions: {verbose: 1} }); var nodes = replTest.startSet(); replTest.initiate(); @@ -40,13 +25,13 @@ var master = replTest.getPrimary(); // do a write print("\ndo a write"); -assert.writeOK(master.getDB("foo").bar.insert({x:1})); +assert.writeOK(master.getDB("foo").bar.insert({x: 1})); replTest.awaitReplication(); // lock secondaries print("\nlock secondaries"); replTest.liveNodes.slaves.forEach(function(slave) { - printjson(assert.commandWorked(slave.getDB("admin").runCommand({fsync : 1, lock : 1}))); + printjson(assert.commandWorked(slave.getDB("admin").runCommand({fsync: 1, lock: 1}))); }); print("\nwaiting several seconds before stepdown"); @@ -55,7 +40,7 @@ sleep(2000); for (var i = 0; i < 11; i++) { // do another write - assert.writeOK(master.getDB("foo").bar.insert({x:i})); + assert.writeOK(master.getDB("foo").bar.insert({x: i})); sleep(1000); } @@ -66,10 +51,10 @@ printjson(assert.commandFailed(master.getDB("admin").runCommand({replSetStepDown print("\n do stepdown that should work"); assert.throws(function() { - assert.commandFailed(master.getDB("admin").runCommand({replSetStepDown:50, force:true})); + assert.commandFailed(master.getDB("admin").runCommand({replSetStepDown: 50, force: true})); }); -var r2 = assert.commandWorked(master.getDB("admin").runCommand({ismaster : 1})); +var r2 = assert.commandWorked(master.getDB("admin").runCommand({ismaster: 1})); assert.eq(r2.ismaster, false); assert.eq(r2.secondary, true); @@ -79,7 +64,7 @@ replTest.liveNodes.slaves.forEach(function(slave) { }); print("\nreset stepped down time"); 
-assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze:0})); +assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze: 0})); master = replTest.getPrimary(); print("\nawait"); @@ -99,12 +84,11 @@ assert.soon(function() { master = replTest.getPrimary(); var firstMaster = master; -print("\nmaster is now "+firstMaster); +print("\nmaster is now " + firstMaster); try { - assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown : 100, force : true})); -} -catch (e) { + assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown: 100, force: true})); +} catch (e) { // ignore errors due to connection failures as we expect the master to close connections // on stepdown if (!errorWasDueToConnectionFailure(e)) { @@ -128,17 +112,15 @@ master = replTest.liveNodes.master; var slave = replTest.liveNodes.slaves[0]; try { - slave.adminCommand({shutdown :1}); -} -catch (e) { + slave.adminCommand({shutdown: 1}); +} catch (e) { print(e); } - master = replTest.getPrimary(); assert.soon(function() { try { - var result = master.getDB("admin").runCommand({replSetGetStatus:1}); + var result = master.getDB("admin").runCommand({replSetGetStatus: 1}); for (var i in result.members) { if (result.members[i].self) { continue; @@ -146,21 +128,19 @@ assert.soon(function() { return result.members[i].health == 0; } - } - catch (e) { + } catch (e) { print("error getting status from master: " + e); master = replTest.getPrimary(); return false; } }, 'make sure master knows that slave is down before proceeding'); +print("\nrunning shutdown without force on master: " + master); -print("\nrunning shutdown without force on master: "+master); - -// this should fail because the master can't reach an up-to-date secondary (because the only +// this should fail because the master can't reach an up-to-date secondary (because the only // secondary is down) var now = new Date(); -assert.commandFailed(master.getDB("admin").runCommand({shutdown : 1, timeoutSecs : 3})); +assert.commandFailed(master.getDB("admin").runCommand({shutdown: 1, timeoutSecs: 3})); // on windows, javascript and the server perceive time differently, to compensate here we use 2750ms assert.gte((new Date()) - now, 2750); @@ -168,20 +148,18 @@ print("\nsend shutdown command"); var currentMaster = replTest.getPrimary(); try { - printjson(currentMaster.getDB("admin").runCommand({shutdown : 1, force : true})); -} -catch (e) { + printjson(currentMaster.getDB("admin").runCommand({shutdown: 1, force: true})); +} catch (e) { if (!errorWasDueToConnectionFailure(e)) { throw e; } } -print("checking "+currentMaster+" is actually shutting down"); +print("checking " + currentMaster + " is actually shutting down"); assert.soon(function() { try { currentMaster.findOne(); - } - catch(e) { + } catch (e) { return true; } return false; diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js index 9bfda0ae82b..d0da019f7a1 100644 --- a/jstests/replsets/stepdown3.js +++ b/jstests/replsets/stepdown3.js @@ -3,58 +3,59 @@ // This test requires the fsync command to force a secondary to be stale. 
// @tags: [requires_fsync] (function() { -'use strict'; - -var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 }); -var nodes = replTest.startSet(); -replTest.initiate(); -var master = replTest.getPrimary(); - -// do a write to allow stepping down of the primary; -// otherwise, the primary will refuse to step down -print("\ndo a write"); -master.getDB("test").foo.insert({x:1}); -replTest.awaitReplication(); - -// do another write, because the first one might be longer than 10 seconds ago -// on the secondary (due to starting up), and we need to be within 10 seconds -// to step down. -var options = { writeConcern: { w: 2, wtimeout: 30000 }}; -assert.writeOK(master.getDB("test").foo.insert({ x: 2 }, options)); -// lock secondary, to pause replication -print("\nlock secondary"); -var locked = replTest.liveNodes.slaves[0]; -printjson( locked.getDB("admin").runCommand({fsync : 1, lock : 1}) ); - -// do a write -print("\ndo a write"); -master.getDB("test").foo.insert({x:3}); - -// step down the primary asyncronously -print("stepdown"); -var command = "sleep(4000); tojson(db.adminCommand( { replSetStepDown : 60, force : 1 } ));"; -var awaitShell = startParallelShell(command, master.port); - -print("getlasterror; should assert or return an error, depending on timing"); -var gleFunction = function() { - var result = master.getDB("test").runCommand({getLastError : 1, w: 2 , wtimeout :30000 }); - if (result.errmsg === "not master" || - result.code == ErrorCodes.NotMaster || - result.code == ErrorCodes.InterruptedDueToReplStateChange) { - throw new Error("satisfy assert.throws()"); - } - print("failed to throw exception; GLE returned: "); + 'use strict'; + + var replTest = new ReplSetTest({name: 'testSet', nodes: 2}); + var nodes = replTest.startSet(); + replTest.initiate(); + var master = replTest.getPrimary(); + + // do a write to allow stepping down of the primary; + // otherwise, the primary will refuse to step down + print("\ndo a write"); + master.getDB("test").foo.insert({x: 1}); + replTest.awaitReplication(); + + // do another write, because the first one might be longer than 10 seconds ago + // on the secondary (due to starting up), and we need to be within 10 seconds + // to step down. 
+    var options = {
+        writeConcern: {w: 2, wtimeout: 30000}
+    };
+    assert.writeOK(master.getDB("test").foo.insert({x: 2}, options));
+    // lock secondary, to pause replication
+    print("\nlock secondary");
+    var locked = replTest.liveNodes.slaves[0];
+    printjson(locked.getDB("admin").runCommand({fsync: 1, lock: 1}));
+
+    // do a write
+    print("\ndo a write");
+    master.getDB("test").foo.insert({x: 3});
+
+    // step down the primary asynchronously
+    print("stepdown");
+    var command = "sleep(4000); tojson(db.adminCommand( { replSetStepDown : 60, force : 1 } ));";
+    var awaitShell = startParallelShell(command, master.port);
+
+    print("getlasterror; should assert or return an error, depending on timing");
+    var gleFunction = function() {
+        var result = master.getDB("test").runCommand({getLastError: 1, w: 2, wtimeout: 30000});
+        if (result.errmsg === "not master" || result.code == ErrorCodes.NotMaster ||
+            result.code == ErrorCodes.InterruptedDueToReplStateChange) {
+            throw new Error("satisfy assert.throws()");
+        }
+        print("failed to throw exception; GLE returned: ");
+        printjson(result);
+    };
+    var result = assert.throws(gleFunction);
+    print("result of gle:");
     printjson(result);
-};
-var result = assert.throws(gleFunction);
-print("result of gle:");
-printjson(result);
-var exitCode = awaitShell({checkExitSuccess: false});
-assert.neq(0, exitCode, "expected replSetStepDown to close the shell's connection");
+    var exitCode = awaitShell({checkExitSuccess: false});
+    assert.neq(0, exitCode, "expected replSetStepDown to close the shell's connection");
 
-// unlock and shut down
-printjson(locked.getDB("admin").fsyncUnlock());
-replTest.stopSet();
+    // unlock and shut down
+    printjson(locked.getDB("admin").fsyncUnlock());
+    replTest.stopSet();
 })();
diff --git a/jstests/replsets/stepdown_catch_up_opt.js b/jstests/replsets/stepdown_catch_up_opt.js
index a5ccb456762..304927a7838 100644
--- a/jstests/replsets/stepdown_catch_up_opt.js
+++ b/jstests/replsets/stepdown_catch_up_opt.js
@@ -26,41 +26,32 @@
     var stringNotIntCode = 14;
 
     // Expect a failure with a string argument.
-    assert.commandFailedWithCode(
-        primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
-        stringNotIntCode,
-        'Expected string argument to secondaryCatchupPeriodSecs to fail.'
-    );
+    assert.commandFailedWithCode(primary.getDB('admin').runCommand(
+                                     {replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
+                                 stringNotIntCode,
+                                 'Expected string argument to secondaryCatchupPeriodSecs to fail.');
 
     // Expect a failure with a longer secondaryCatchupPeriodSecs than the stepdown period.
     assert.commandFailedWithCode(
         primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 20}),
         stepDownPeriodTooShortCode,
         ('Expected replSetStepDown to fail given a stepdown time shorter than' +
-         ' secondaryCatchUpPeriodSecs')
-    );
+         ' secondaryCatchUpPeriodSecs'));
 
     jsTestLog('Stop secondary syncing.');
-    assert.commandWorked(
-        secondary.getDB('admin').runCommand(
-            {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}
-        ),
-        'Failed to configure rsSyncApplyStop failpoint.'
-    );
+    assert.commandWorked(secondary.getDB('admin').runCommand(
+                             {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
+                         'Failed to configure rsSyncApplyStop failpoint.');
 
     function disableFailPoint() {
-        assert.commandWorked(
-            secondary.getDB('admin').runCommand(
-                {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}
-            ),
-            'Failed to disable rsSyncApplyStop failpoint.'
- ); + assert.commandWorked(secondary.getDB('admin') + .runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}), + 'Failed to disable rsSyncApplyStop failpoint.'); } // If any of these assertions fail, we need to disable the fail point in order for the mongod to // shut down. try { - jsTestLog('Write to primary to make secondary out of sync.'); assert.writeOK(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.'); sleep(1000); @@ -69,10 +60,10 @@ jsTestLog('Try to step down.'); var startTime = new Date(); assert.commandFailedWithCode( - primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}), + primary.getDB('admin') + .runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}), noCaughtUpSecondariesCode, - 'Expected replSetStepDown to fail, since no secondaries should be caught up.' - ); + 'Expected replSetStepDown to fail, since no secondaries should be caught up.'); var endTime = new Date(); // Ensure it took at least 1 second to time out. Adjust the timeout a little bit @@ -80,8 +71,7 @@ assert.lte(0.95, (endTime - startTime) / 1000, 'Expected replSetStepDown command to fail after 1 second.'); - } - catch (err) { + } catch (err) { disableFailPoint(); throw err; } diff --git a/jstests/replsets/stepdown_kill_other_ops.js b/jstests/replsets/stepdown_kill_other_ops.js index 6770c9246c9..e55fce17be0 100644 --- a/jstests/replsets/stepdown_kill_other_ops.js +++ b/jstests/replsets/stepdown_kill_other_ops.js @@ -1,68 +1,72 @@ // SERVER-15310 Ensure that stepDown kills all other running operations -(function () { - "use strict"; - var name = "stepdownKillOps"; - var replSet = new ReplSetTest({name: name, nodes: 3}); - var nodes = replSet.nodeList(); - replSet.startSet(); - replSet.initiate({"_id" : name, - "members" : [ - {"_id" : 0, "host" : nodes[0], "priority" : 3}, - {"_id" : 1, "host" : nodes[1]}, - {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); +(function() { + "use strict"; + var name = "stepdownKillOps"; + var replSet = new ReplSetTest({name: name, nodes: 3}); + var nodes = replSet.nodeList(); + replSet.startSet(); + replSet.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], "priority": 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], "arbiterOnly": true} + ] + }); - replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); + replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); - var primary = replSet.getPrimary(); - assert.eq(primary.host, nodes[0], "primary assumed to be node 0"); - assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout:10000})); - replSet.awaitReplication(); + var primary = replSet.getPrimary(); + assert.eq(primary.host, nodes[0], "primary assumed to be node 0"); + assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000})); + replSet.awaitReplication(); - jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable"); - sleep(30000); + jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable"); + sleep(30000); - // Run eval() in a separate thread to take the global write lock which would prevent stepdown - // from completing if it failed to kill all running operations. 
- jsTestLog("Running eval() to grab global write lock"); - var evalCmd = function() { - db.eval(function() { - for (var i = 0; i < 60; i++) { - // Sleep in 1 second intervals so the javascript engine will notice when - // it's killed - sleep(1000); - } }); - }; - var evalRunner = startParallelShell(evalCmd, primary.port); + // Run eval() in a separate thread to take the global write lock which would prevent stepdown + // from completing if it failed to kill all running operations. + jsTestLog("Running eval() to grab global write lock"); + var evalCmd = function() { + db.eval(function() { + for (var i = 0; i < 60; i++) { + // Sleep in 1 second intervals so the javascript engine will notice when + // it's killed + sleep(1000); + } + }); + }; + var evalRunner = startParallelShell(evalCmd, primary.port); - jsTestLog("Confirming that eval() is running and has the global lock"); - assert.soon(function() { - var res = primary.getDB('admin').currentOp(); - for (var index in res.inprog) { - var entry = res.inprog[index]; - if (entry["query"] && entry["query"]["$eval"]) { - assert.eq("W", entry["locks"]["Global"]); - return true; - } - } - printjson(res); - return false; - }, "$eval never ran and grabbed the global write lock"); + jsTestLog("Confirming that eval() is running and has the global lock"); + assert.soon(function() { + var res = primary.getDB('admin').currentOp(); + for (var index in res.inprog) { + var entry = res.inprog[index]; + if (entry["query"] && entry["query"]["$eval"]) { + assert.eq("W", entry["locks"]["Global"]); + return true; + } + } + printjson(res); + return false; + }, "$eval never ran and grabbed the global write lock"); - jsTestLog("Stepping down"); - try { - assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 30})); - } catch (x) { - // expected - } + jsTestLog("Stepping down"); + try { + assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 30})); + } catch (x) { + // expected + } - jsTestLog("Waiting for former PRIMARY to become SECONDARY"); - replSet.waitForState(primary, ReplSetTest.State.SECONDARY, 30000); + jsTestLog("Waiting for former PRIMARY to become SECONDARY"); + replSet.waitForState(primary, ReplSetTest.State.SECONDARY, 30000); - var newPrimary = replSet.getPrimary(); - assert.neq(primary, newPrimary, "SECONDARY did not become PRIMARY"); + var newPrimary = replSet.getPrimary(); + assert.neq(primary, newPrimary, "SECONDARY did not become PRIMARY"); - var exitCode = evalRunner({checkExitSuccess: false}); - assert.neq(0, exitCode, - "expected shell to exit abnormally due to JS execution being terminated"); - })(); + var exitCode = evalRunner({checkExitSuccess: false}); + assert.neq( + 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated"); +})(); diff --git a/jstests/replsets/stepdown_killop.js b/jstests/replsets/stepdown_killop.js index a14193112d1..c5fc593239b 100644 --- a/jstests/replsets/stepdown_killop.js +++ b/jstests/replsets/stepdown_killop.js @@ -8,98 +8,96 @@ // 5. Once a write is blocked, kill the stepDown operation // 6. 
Writes should become unblocked and the primary should stay primary -(function () { - "use strict"; - var name = "interruptStepDown"; - var replSet = new ReplSetTest({name: name, nodes: 3}); - var nodes = replSet.nodeList(); - replSet.startSet(); - replSet.initiate({"_id" : name, - "members" : [ - {"_id" : 0, "host" : nodes[0], "priority" : 3}, - {"_id" : 1, "host" : nodes[1]}, - {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); +(function() { + "use strict"; + var name = "interruptStepDown"; + var replSet = new ReplSetTest({name: name, nodes: 3}); + var nodes = replSet.nodeList(); + replSet.startSet(); + replSet.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], "priority": 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], "arbiterOnly": true} + ] + }); - replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); + replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); - var secondary = replSet.getSecondary(); - jsTestLog('Disable replication on the SECONDARY ' + secondary.host); - assert.commandWorked( - secondary.getDB('admin').runCommand( - {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'} - ), - 'Failed to configure rsSyncApplyStop failpoint.' - ); + var secondary = replSet.getSecondary(); + jsTestLog('Disable replication on the SECONDARY ' + secondary.host); + assert.commandWorked(secondary.getDB('admin').runCommand( + {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}), + 'Failed to configure rsSyncApplyStop failpoint.'); - replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); + replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); - var primary = replSet.getPrimary(); - assert.eq(primary.host, nodes[0], "primary assumed to be node 0"); + var primary = replSet.getPrimary(); + assert.eq(primary.host, nodes[0], "primary assumed to be node 0"); - // do a write then ask the PRIMARY to stepdown - jsTestLog("Initiating stepdown"); - assert.writeOK(primary.getDB(name).foo.insert({myDoc: true, x: 1}, - {writeConcern: {w: 1, wtimeout: 60000}})); - var stepDownCmd = function() { - var res = db.getSiblingDB('admin').runCommand({replSetStepDown: 60, - secondaryCatchUpPeriodSecs: 60}); - assert.commandFailedWithCode(res, 11601 /*interrupted*/); - }; - var stepDowner = startParallelShell(stepDownCmd, primary.port); - var stepDownOpID = -1; + // do a write then ask the PRIMARY to stepdown + jsTestLog("Initiating stepdown"); + assert.writeOK(primary.getDB(name) + .foo.insert({myDoc: true, x: 1}, {writeConcern: {w: 1, wtimeout: 60000}})); + var stepDownCmd = function() { + var res = db.getSiblingDB('admin') + .runCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}); + assert.commandFailedWithCode(res, 11601 /*interrupted*/); + }; + var stepDowner = startParallelShell(stepDownCmd, primary.port); + var stepDownOpID = -1; - jsTestLog("Looking for stepdown in currentOp() output"); - assert.soon(function() { - var res = primary.getDB('admin').currentOp(true); - for (var index in res.inprog) { - var entry = res.inprog[index]; - if (entry["query"] && entry["query"]["replSetStepDown"] === 60){ - stepDownOpID = entry.opid; - return true; - } - } - printjson(res); - return false; - }, "global shared lock not acquired"); + jsTestLog("Looking for stepdown in currentOp() output"); + assert.soon(function() { + var res = primary.getDB('admin').currentOp(true); + for (var index in res.inprog) { + var entry = res.inprog[index]; + if (entry["query"] && 
entry["query"]["replSetStepDown"] === 60) { + stepDownOpID = entry.opid; + return true; + } + } + printjson(res); + return false; + }, "global shared lock not acquired"); - jsTestLog("Ensuring writes block on the stepdown"); - // Start repeatedly doing an update until one blocks waiting for the lock. - // If the test is successful this thread will be terminated when we remove the document - // being updated. - var updateCmd = function() { - while(true) { - var res = db.getSiblingDB("interruptStepDown").foo.update({myDoc: true}, - {$inc: {x: 1}}); - assert.writeOK(res); - if (res.nModified == 0) { - quit(0); - } - else { - printjson(res); - } + jsTestLog("Ensuring writes block on the stepdown"); + // Start repeatedly doing an update until one blocks waiting for the lock. + // If the test is successful this thread will be terminated when we remove the document + // being updated. + var updateCmd = function() { + while (true) { + var res = + db.getSiblingDB("interruptStepDown").foo.update({myDoc: true}, {$inc: {x: 1}}); + assert.writeOK(res); + if (res.nModified == 0) { + quit(0); + } else { + printjson(res); + } + } + }; + var writer = startParallelShell(updateCmd, primary.port); + assert.soon(function() { + var res = primary.getDB(name).currentOp(); + for (var entry in res.inprog) { + if (res.inprog[entry]["waitingForLock"]) { + return true; + } + } + printjson(res); + return false; + }, "write never blocked on the global shared lock"); - } - }; - var writer = startParallelShell(updateCmd, primary.port); - assert.soon(function() { - var res = primary.getDB(name).currentOp(); - for (var entry in res.inprog) { - if (res.inprog[entry]["waitingForLock"]) { - return true; - } - } - printjson(res); - return false; - }, "write never blocked on the global shared lock"); + // kill the stepDown and ensure that that unblocks writes to the db + jsTestLog("Killing stepdown"); + primary.getDB('admin').killOp(stepDownOpID); - // kill the stepDown and ensure that that unblocks writes to the db - jsTestLog("Killing stepdown"); - primary.getDB('admin').killOp(stepDownOpID); + var exitCode = stepDowner(); + assert.eq(0, exitCode); - var exitCode = stepDowner(); - assert.eq(0, exitCode); - - assert.writeOK(primary.getDB(name).foo.remove({})); - exitCode = writer(); - assert.eq(0, exitCode); - })(); + assert.writeOK(primary.getDB(name).foo.remove({})); + exitCode = writer(); + assert.eq(0, exitCode); +})(); diff --git a/jstests/replsets/stepdown_long_wait_time.js b/jstests/replsets/stepdown_long_wait_time.js index db1821cf988..60e0fdb4247 100644 --- a/jstests/replsets/stepdown_long_wait_time.js +++ b/jstests/replsets/stepdown_long_wait_time.js @@ -7,45 +7,46 @@ // 5. Once a write is blocked, restart replication on the SECONDARY. // 6. Wait for PRIMARY to StepDown. 
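
// A minimal sketch of the pattern the numbered steps above describe, assuming a
// ReplSetTest named `replSet` and test commands enabled (the failpoint is a
// test-only hook); the variable names here are illustrative:
var sec = replSet.getSecondary();
assert.commandWorked(
    sec.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
try {
    // With replication paused this waits up to secondaryCatchUpPeriodSecs for a
    // caught-up secondary; success closes the connection (hence the try/catch),
    // while timing out returns an error response instead.
    replSet.getPrimary().adminCommand(
        {replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
} catch (e) {
    print('stepdown closed the connection: ' + e);
}
assert.commandWorked(
    sec.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));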
-(function () { +(function() { "use strict"; var name = "stepDownWithLongWait"; var replSet = new ReplSetTest({name: name, nodes: 3}); var nodes = replSet.nodeList(); replSet.startSet(); - replSet.initiate({"_id" : name, - "members" : [ - {"_id" : 0, "host" : nodes[0], "priority" : 3}, - {"_id" : 1, "host" : nodes[1]}, - {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); + replSet.initiate({ + "_id": name, + "members": [ + {"_id": 0, "host": nodes[0], "priority": 3}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], "arbiterOnly": true} + ] + }); replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); var primary = replSet.getPrimary(); var secondary = replSet.getSecondary(); jsTestLog('Disable replication on the SECONDARY ' + secondary.host); - assert.commandWorked( - secondary.getDB('admin').runCommand( - {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'} - ), - 'Failed to configure rsSyncApplyStop failpoint.' - ); + assert.commandWorked(secondary.getDB('admin').runCommand( + {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}), + 'Failed to configure rsSyncApplyStop failpoint.'); jsTestLog("do a write then ask the PRIMARY to stepdown"); - var options = {writeConcern: {w: 1, wtimeout: 60000}}; + var options = { + writeConcern: {w: 1, wtimeout: 60000} + }; assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options)); var stepDownSecs = 60; var secondaryCatchUpPeriodSecs = 60; - var stepDownCmd = "db.getSiblingDB('admin').runCommand({" + - "replSetStepDown: " + stepDownSecs + ", " + - "secondaryCatchUpPeriodSecs: " + secondaryCatchUpPeriodSecs + - "});"; + var stepDownCmd = "db.getSiblingDB('admin').runCommand({" + "replSetStepDown: " + stepDownSecs + + ", " + "secondaryCatchUpPeriodSecs: " + secondaryCatchUpPeriodSecs + "});"; var stepDowner = startParallelShell(stepDownCmd, primary.port); assert.soon(function() { var res = primary.getDB('admin').currentOp(true); for (var entry in res.inprog) { - if (res.inprog[entry]["query"] && res.inprog[entry]["query"]["replSetStepDown"] === 60){ + if (res.inprog[entry]["query"] && + res.inprog[entry]["query"]["replSetStepDown"] === 60) { return true; } } @@ -60,8 +61,7 @@ var res = db.getSiblingDB("stepDownWithLongWait").foo.update({}, {$inc: {x: 1}}); jsTestLog('Unexpected successful update operation on the primary during step down: ' + tojson(res)); - } - catch (e) { + } catch (e) { // Not important what error we get back. The client will probably be disconnected by // the primary with a "error doing query: failed" message. jsTestLog('Update operation returned with result: ' + tojson(e)); @@ -81,11 +81,8 @@ jsTestLog('Enable replication on the SECONDARY ' + secondary.host); assert.commandWorked( - secondary.getDB('admin').runCommand( - {configureFailPoint: 'rsSyncApplyStop', mode: 'off'} - ), - 'Failed to disable rsSyncApplyStop failpoint.' 
- ); + secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}), + 'Failed to disable rsSyncApplyStop failpoint.'); jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down."); replSet.waitForState(primary, ReplSetTest.State.SECONDARY, secondaryCatchUpPeriodSecs * 1000); diff --git a/jstests/replsets/stepdown_wrt_electable.js b/jstests/replsets/stepdown_wrt_electable.js index e6917ec7c3b..c929f2a2c56 100644 --- a/jstests/replsets/stepdown_wrt_electable.js +++ b/jstests/replsets/stepdown_wrt_electable.js @@ -1,11 +1,10 @@ // Test that replSetStepDown filters out non-electable nodes -var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 }); +var replTest = new ReplSetTest({name: 'testSet', nodes: 2}); var nodes = replTest.startSet(); - // setup config var c = replTest.getReplSetConfig(); -c.members[1].priority = 0; // not electable +c.members[1].priority = 0; // not electable replTest.initiate(c); var master = replTest.getPrimary(); @@ -14,11 +13,11 @@ var firstPrimary = testDB.isMaster().primary; // do a write to allow stepping down of the primary; // otherwise, the primary will refuse to step down -testDB.foo.insert({x:1}); +testDB.foo.insert({x: 1}); replTest.awaitReplication(); // stepdown should fail since there is no-one to elect within 10 secs -testDB.adminCommand({replSetStepDown:5}); +testDB.adminCommand({replSetStepDown: 5}); assert(master.getDB("a").isMaster().ismaster, "not master"); // step down the primary asyncronously so it doesn't kill this test @@ -27,7 +26,7 @@ var exitCode = wait({checkExitSuccess: false}); assert.neq(0, exitCode, "expected replSetStepDown to close the shell's connection"); // check that the old primary is no longer master -assert.soon( function() { +assert.soon(function() { try { var isMaster = master.getDB("a").isMaster(); printjson(isMaster); @@ -35,7 +34,7 @@ assert.soon( function() { } catch (e) { return false; } - }, "they shouldn't be master, but are"); +}, "they shouldn't be master, but are"); // stop replTest.stopSet(); diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js index d847127cae3..481f59a13d9 100644 --- a/jstests/replsets/sync2.js +++ b/jstests/replsets/sync2.js @@ -1,20 +1,22 @@ var replTest = new ReplSetTest({name: 'sync2', nodes: 5, useBridge: true}); var nodes = replTest.nodeList(); var conns = replTest.startSet({oplogSize: "2"}); -replTest.initiate({"_id": "sync2", - "members": [ - {"_id": 0, host: nodes[0], priority: 2}, - {"_id": 1, host: nodes[1]}, - {"_id": 2, host: nodes[2]}, - {"_id": 3, host: nodes[3]}, - {"_id": 4, host: nodes[4]}] - }); +replTest.initiate({ + "_id": "sync2", + "members": [ + {"_id": 0, host: nodes[0], priority: 2}, + {"_id": 1, host: nodes[1]}, + {"_id": 2, host: nodes[2]}, + {"_id": 3, host: nodes[3]}, + {"_id": 4, host: nodes[4]} + ] +}); var master = replTest.getPrimary(); jsTestLog("Replica set test initialized"); // initial sync -master.getDB("foo").bar.insert({x:1}); +master.getDB("foo").bar.insert({x: 1}); replTest.awaitReplication(); conns[0].disconnect(conns[4]); @@ -33,18 +35,22 @@ assert.soon(function() { replTest.awaitReplication(); jsTestLog("Checking that ops still replicate correctly"); -var option = { writeConcern: { w: 5, wtimeout: 30000 }}; +var option = { + writeConcern: {w: 5, wtimeout: 30000} +}; // In PV0, this write can fail as a result of a bad spanning tree. If 2 was syncing from 4 prior to // bridging, it will not change sync sources and receive the write in time. 
This was not a problem // in 3.0 because the old version of mongobridge caused all the nodes to restart during // partitioning, forcing the set to rebuild the spanning tree. -assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, option)); +assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option)); // 4 is connected to 3 conns[4].disconnect(conns[2]); conns[4].reconnect(conns[3]); -option = { writeConcern: { w: 5, wtimeout: 30000 }}; -assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, option)); +option = { + writeConcern: {w: 5, wtimeout: 30000} +}; +assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option)); replTest.stopSet(); diff --git a/jstests/replsets/sync_passive.js b/jstests/replsets/sync_passive.js index 76db6a4f838..4899385563f 100644 --- a/jstests/replsets/sync_passive.js +++ b/jstests/replsets/sync_passive.js @@ -18,8 +18,8 @@ load("jstests/replsets/rslib.js"); var name = "sync_passive"; var host = getHostName(); - -var replTest = new ReplSetTest( {name: name, nodes: 3} ); + +var replTest = new ReplSetTest({name: name, nodes: 3}); var nodes = replTest.startSet(); @@ -27,7 +27,7 @@ var nodes = replTest.startSet(); var config = replTest.getReplSetConfig(); config.members[0].priority = 2; config.members[2].priority = 0; - + replTest.initiate(config); replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); @@ -36,56 +36,46 @@ var server0 = master; var server1 = replTest.liveNodes.slaves[0]; print("Initial sync"); -for (var i=0;i<100;i++) { - master.foo.insert({x:i}); +for (var i = 0; i < 100; i++) { + master.foo.insert({x: i}); } replTest.awaitReplication(); - print("stop #1"); replTest.stop(1); - print("add some data"); -for (var i=0;i<1000;i++) { - master.bar.insert({x:i}); +for (var i = 0; i < 1000; i++) { + master.bar.insert({x: i}); } replTest.awaitReplication(); - print("stop #0"); replTest.stop(0); - print("restart #1"); replTest.restart(1); - print("check sync"); replTest.awaitReplication(60 * 1000); - print("add data"); reconnect(server1); master = replTest.getPrimary().getDB("test"); -for (var i=0;i<1000;i++) { - master.bar.insert({x:i}); +for (var i = 0; i < 1000; i++) { + master.bar.insert({x: i}); } replTest.awaitReplication(); - print("kill #1"); replTest.stop(1); - print("restart #0"); replTest.restart(0); reconnect(server0); - print("wait for sync"); replTest.awaitReplication(); - print("bring #1 back up, make sure everything's okay"); replTest.restart(1); diff --git a/jstests/replsets/system_profile.js b/jstests/replsets/system_profile.js index 592accb43b8..5a40e594866 100644 --- a/jstests/replsets/system_profile.js +++ b/jstests/replsets/system_profile.js @@ -36,8 +36,8 @@ // emptycapped the collection assert.commandWorked(primaryDB.runCommand({emptycapped: "system.profile"})); - assert.eq(op, getLatestOp(), - "oplog entry created when system.profile was emptied via emptycapped"); + assert.eq( + op, getLatestOp(), "oplog entry created when system.profile was emptied via emptycapped"); assert(primaryDB.system.profile.drop()); // convertToCapped diff --git a/jstests/replsets/tags.js b/jstests/replsets/tags.js index 87bc0075109..55a0c4e2927 100644 --- a/jstests/replsets/tags.js +++ b/jstests/replsets/tags.js @@ -11,63 +11,62 @@ var port = replTest.ports; replTest.initiate({ _id: name, - members : [ + members: [ { - _id: 0, - host: nodes[0], - tags: { - server: '0', - dc: 'ny', - ny: '1', - rack: 'ny.rk1', - }, + _id: 0, + host: nodes[0], + tags: { + server: '0', + dc: 'ny', + ny: '1', + rack: 'ny.rk1', + }, }, { - _id: 1, - 
host: nodes[1], - priority: 2, - tags: { - server: '1', - dc: 'ny', - ny: '2', - rack: 'ny.rk1', - }, + _id: 1, + host: nodes[1], + priority: 2, + tags: { + server: '1', + dc: 'ny', + ny: '2', + rack: 'ny.rk1', + }, }, { - _id: 2, - host: nodes[2], - priority: 3, - tags: { - server: '2', - dc: 'ny', - ny: '3', - rack: 'ny.rk2', - 2: 'this', - }, + _id: 2, + host: nodes[2], + priority: 3, + tags: { + server: '2', + dc: 'ny', + ny: '3', + rack: 'ny.rk2', 2: 'this', + }, }, { - _id: 3, - host: nodes[3], - tags: { - server: '3', - dc: 'sf', - sf: '1', - rack: 'sf.rk1', - }, + _id: 3, + host: nodes[3], + tags: { + server: '3', + dc: 'sf', + sf: '1', + rack: 'sf.rk1', + }, }, { - _id: 4, - host: nodes[4], - tags: { - server: '4', - dc: 'sf', - sf: '2', - rack: 'sf.rk2', - }, + _id: 4, + host: nodes[4], + tags: { + server: '4', + dc: 'sf', + sf: '2', + rack: 'sf.rk2', + }, }, ], - settings : { - getLastErrorModes : { + settings: { + getLastErrorModes: { '2 dc and 3 server': { dc: 2, server: 3, @@ -99,7 +98,9 @@ jsTestLog('Node ' + nodeId + ' (' + replTest.nodes[nodeId].host + ') should be primary.'); replTest.waitForState(replTest.nodes[nodeId], ReplSetTest.State.PRIMARY, 60 * 1000); primary = replTest.getPrimary(); - var writeConcern = {writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}}; + var writeConcern = { + writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000} + }; assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern)); return primary; }; @@ -124,7 +125,9 @@ jsTestLog('partitions: nodes with each set of brackets [N1, N2, N3] form a complete network.'); jsTestLog('partitions: [0-1-2] [3] [4] (only nodes 0 and 1 can replicate from primary node 2'); - var doc = {x: 1}; + var doc = { + x: 1 + }; // This timeout should be shorter in duration than the server parameter maxSyncSourceLagSecs. // Some writes are expected to block for this 'timeout' duration before failing. 
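
// A minimal sketch of how the modes defined in getLastErrorModes above are
// consumed, assuming `primary` and `timeout` from the surrounding test: the
// mode name becomes the `w` value, and {dc: 2, server: 3} requires
// acknowledgement from nodes covering 2 distinct `dc` tag values and 3
// distinct `server` tag values.
var res = primary.getDB('foo').bar.insert(
    {x: 1}, {writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}});
assert.writeOK(res);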
@@ -137,15 +140,20 @@ primary = ensurePrimary(2, 3); jsTestLog('Non-existent write concern should be rejected.'); - options = {writeConcern: {w: 'blahblah', wtimeout: timeout}}; + options = { + writeConcern: {w: 'blahblah', wtimeout: timeout} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); var result = assert.writeError(primary.getDB('foo').bar.insert(doc, options)); assert.neq(null, result.getWriteConcernError()); - assert.eq(ErrorCodes.UnknownReplWriteConcern, result.getWriteConcernError().code, + assert.eq(ErrorCodes.UnknownReplWriteConcern, + result.getWriteConcernError().code, tojson(result.getWriteConcernError())); jsTestLog('Write concern "3 or 4" should fail - 3 and 4 are not connected to the primary.'); - var options = {writeConcern: {w: '3 or 4', wtimeout: timeout}}; + var options = { + writeConcern: {w: '3 or 4', wtimeout: timeout} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); result = primary.getDB('foo').bar.insert(doc, options); assert.neq(null, result.getWriteConcernError()); @@ -158,12 +166,16 @@ jsTestLog('Write concern "3 or 4" should work - 4 is now connected to the primary ' + primary.host + ' via node 1 ' + replTest.nodes[1].host); - options = {writeConcern: {w: '3 or 4', wtimeout: timeout}}; + options = { + writeConcern: {w: '3 or 4', wtimeout: timeout} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "3 and 4" should fail - 3 is not connected to the primary.'); - options = {writeConcern: {w: '3 and 4', wtimeout: timeout}}; + options = { + writeConcern: {w: '3 and 4', wtimeout: timeout} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); result = assert.writeError(primary.getDB('foo').bar.insert(doc, options)); assert.neq(null, result.getWriteConcernError()); @@ -178,23 +190,31 @@ jsTestLog('31003 should sync from 31004 (31024)'); jsTestLog('Write concern "3 and 4" should work - ' + 'nodes 3 and 4 are connected to primary via node 1.'); - options = {writeConcern: {w: '3 and 4', wtimeout: timeout}}; + options = { + writeConcern: {w: '3 and 4', wtimeout: timeout} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "2" - writes to primary only.'); - options = {writeConcern: {w: '2', wtimeout: 0}}; + options = { + writeConcern: {w: '2', wtimeout: 0} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "1 and 2"'); - options = {writeConcern: {w: '1 and 2', wtimeout: 0}}; + options = { + writeConcern: {w: '1 and 2', wtimeout: 0} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "2 dc and 3 server"'); primary = ensurePrimary(2, 5); - options = {writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}}; + options = { + writeConcern: {w: '2 dc and 3 server', wtimeout: timeout} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); @@ -211,7 +231,7 @@ // Is this necessary when we partition node 2 off from the rest of the nodes? replTest.stop(2); jsTestLog('partitions: [0-1] [2] [1-3-4] ' + - '(all secondaries except down node 2 can replicate from new primary node 1)'); + '(all secondaries except down node 2 can replicate from new primary node 1)'); // Node 1 with slightly higher priority will take over. 
jsTestLog('1 must become primary here because otherwise the other members will take too ' + @@ -219,13 +239,17 @@ primary = ensurePrimary(1, 4); jsTestLog('Write concern "3 and 4" should still work with new primary node 1 ' + primary.host); - options = {writeConcern: {w: '3 and 4', wtimeout: timeout}}; + options = { + writeConcern: {w: '3 and 4', wtimeout: timeout} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); assert.writeOK(primary.getDB('foo').bar.insert(doc, options)); jsTestLog('Write concern "2" should fail because node 2 ' + replTest.nodes[2].host + ' is down.'); - options = {writeConcern: {w: '2', wtimeout: timeout}}; + options = { + writeConcern: {w: '2', wtimeout: timeout} + }; assert.writeOK(primary.getDB('foo').bar.insert(doc)); result = assert.writeError(primary.getDB('foo').bar.insert(doc, options)); assert.neq(null, result.getWriteConcernError()); diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js index e4d4ccd50e8..ff0e81fda97 100644 --- a/jstests/replsets/tags2.js +++ b/jstests/replsets/tags2.js @@ -1,49 +1,53 @@ // Change a write concern mode from 2 to 3 servers var host = getHostName(); -var replTest = new ReplSetTest({ name: "rstag", nodes: 4 }); +var replTest = new ReplSetTest({name: "rstag", nodes: 4}); var nodes = replTest.startSet(); var ports = replTest.ports; -var conf = {_id : "rstag", version: 1, members : [ - {_id : 0, host : host+":"+ports[0], tags : {"backup" : "A"}}, - {_id : 1, host : host+":"+ports[1], tags : {"backup" : "B"}}, - {_id : 2, host : host+":"+ports[2], tags : {"backup" : "C"}}, - {_id : 3, host : host+":"+ports[3], tags : {"backup" : "D"}, arbiterOnly : true}], - settings : {getLastErrorModes : { - backedUp : {backup : 2} }} }; +var conf = { + _id: "rstag", + version: 1, + members: [ + {_id: 0, host: host + ":" + ports[0], tags: {"backup": "A"}}, + {_id: 1, host: host + ":" + ports[1], tags: {"backup": "B"}}, + {_id: 2, host: host + ":" + ports[2], tags: {"backup": "C"}}, + {_id: 3, host: host + ":" + ports[3], tags: {"backup": "D"}, arbiterOnly: true} + ], + settings: {getLastErrorModes: {backedUp: {backup: 2}}} +}; print("arbiters can't have tags"); -var result = nodes[0].getDB("admin").runCommand({replSetInitiate : conf}); +var result = nodes[0].getDB("admin").runCommand({replSetInitiate: conf}); printjson(result); assert.eq(result.ok, 0); conf.members.pop(); replTest.stop(3); replTest.remove(3); -replTest.initiate( conf ); +replTest.initiate(conf); replTest.awaitReplication(); master = replTest.getPrimary(); var db = master.getDB("test"); -assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }})); +assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: 20000}})); conf.version = 2; conf.settings.getLastErrorModes.backedUp.backup = 3; -master.getDB("admin").runCommand( {replSetReconfig: conf} ); +master.getDB("admin").runCommand({replSetReconfig: conf}); replTest.awaitReplication(); master = replTest.getPrimary(); var db = master.getDB("test"); -assert.writeOK(db.foo.insert({ x: 2 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }})); +assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: 20000}})); conf.version = 3; conf.members[0].priorty = 3; conf.members[2].priorty = 0; -master.getDB("admin").runCommand( {replSetReconfig: conf} ); +master.getDB("admin").runCommand({replSetReconfig: conf}); master = replTest.getPrimary(); var db = master.getDB("test"); -assert.writeOK(db.foo.insert({ x: 3 }, { writeConcern: { w: 'backedUp', 
wtimeout: 20000 }})); +assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: 20000}})); replTest.stopSet(); diff --git a/jstests/replsets/tags_with_reconfig.js b/jstests/replsets/tags_with_reconfig.js index 8f1e01ce176..512a55fe771 100644 --- a/jstests/replsets/tags_with_reconfig.js +++ b/jstests/replsets/tags_with_reconfig.js @@ -3,35 +3,40 @@ // time. This would cause us to update stale items in the cache when secondaries // reported their progress to a primary. - // Start a replica set with 3 nodes var host = getHostName(); -var replTest = new ReplSetTest({ name: "tags_with_reconfig", nodes: 3 }); +var replTest = new ReplSetTest({name: "tags_with_reconfig", nodes: 3}); var nodes = replTest.startSet(); var ports = replTest.ports; // Set tags and getLastErrorModes -var conf = {_id : "tags_with_reconfig", version: 1, members : [ - {_id : 0, host : host+":"+ports[0], tags : {"dc" : "bbb"}}, - {_id : 1, host : host+":"+ports[1], tags : {"dc" : "bbb"}}, - {_id : 2, host : host+":"+ports[2], tags : {"dc" : "ccc"}}], - settings : {getLastErrorModes : { - anydc : {dc : 1}, - alldc : {dc : 2}, }} }; - - -replTest.initiate( conf ); +var conf = { + _id: "tags_with_reconfig", + version: 1, + members: [ + {_id: 0, host: host + ":" + ports[0], tags: {"dc": "bbb"}}, + {_id: 1, host: host + ":" + ports[1], tags: {"dc": "bbb"}}, + {_id: 2, host: host + ":" + ports[2], tags: {"dc": "ccc"}} + ], + settings: { + getLastErrorModes: { + anydc: {dc: 1}, + alldc: {dc: 2}, + } + } +}; + +replTest.initiate(conf); replTest.awaitReplication(); - master = replTest.getPrimary(); var db = master.getDB("test"); // Insert a document with write concern : anydc -assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'anydc', wtimeout: 20000 }})); +assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: 20000}})); // Insert a document with write concern : alldc -assert.writeOK(db.foo.insert({ x: 2 }, { writeConcern: { w: 'alldc', wtimeout: 20000 }})); +assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: 20000}})); // Add a new tag to the replica set var config = master.getDB("local").system.replset.findOne(); @@ -41,9 +46,8 @@ config.version++; config.members[0].tags.newtag = "newtag"; try { - master.getDB("admin").runCommand({replSetReconfig : config}); -} -catch(e) { + master.getDB("admin").runCommand({replSetReconfig: config}); +} catch (e) { print(e); } @@ -53,14 +57,13 @@ replTest.awaitReplication(); var config = master.getDB("local").system.replset.findOne(); printjson(config); - master = replTest.getPrimary(); var db = master.getDB("test"); // Insert a document with write concern : anydc -assert.writeOK(db.foo.insert({ x: 3 }, { writeConcern: { w: 'anydc', wtimeout: 20000 }})); +assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: 20000}})); // Insert a document with write concern : alldc -assert.writeOK(db.foo.insert({ x: 4 }, { writeConcern: { w: 'alldc', wtimeout: 20000 }})); +assert.writeOK(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: 20000}})); replTest.stopSet(); diff --git a/jstests/replsets/temp_namespace.js b/jstests/replsets/temp_namespace.js index 07e5291da0e..4efd3e0b7b3 100644 --- a/jstests/replsets/temp_namespace.js +++ b/jstests/replsets/temp_namespace.js @@ -1,17 +1,20 @@ // SERVER-10927 // This is to make sure that temp collections get cleaned up on promotion to primary -var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 }); +var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); 
var nodes = replTest.nodeList(); printjson(nodes); // We need an arbiter to ensure that the primary doesn't step down when we restart the secondary replTest.startSet(); -replTest.initiate({"_id" : "testSet", - "members" : [ - {"_id" : 0, "host" : nodes[0]}, - {"_id" : 1, "host" : nodes[1]}, - {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); +replTest.initiate({ + "_id": "testSet", + "members": [ + {"_id": 0, "host": nodes[0]}, + {"_id": 1, "host": nodes[1]}, + {"_id": 2, "host": nodes[2], "arbiterOnly": true} + ] +}); var master = replTest.getPrimary(); var second = replTest.getSecondary(); @@ -24,63 +27,62 @@ var secondDB = second.getDB('test'); // set up collections masterDB.runCommand({create: 'temp1', temp: true}); -masterDB.temp1.ensureIndex({x:1}); +masterDB.temp1.ensureIndex({x: 1}); masterDB.runCommand({create: 'temp2', temp: 1}); -masterDB.temp2.ensureIndex({x:1}); +masterDB.temp2.ensureIndex({x: 1}); masterDB.runCommand({create: 'keep1', temp: false}); masterDB.runCommand({create: 'keep2', temp: 0}); masterDB.runCommand({create: 'keep3'}); -assert.writeOK(masterDB.keep4.insert({}, { writeConcern: { w: 2 }})); +assert.writeOK(masterDB.keep4.insert({}, {writeConcern: {w: 2}})); // make sure they exist on primary and secondary -function countCollection( mydb, nameFilter ) { - var result = mydb.runCommand( "listCollections", { filter : { name : nameFilter } } ); - assert.commandWorked( result ); - return new DBCommandCursor( mydb.getMongo(), result ).itcount(); +function countCollection(mydb, nameFilter) { + var result = mydb.runCommand("listCollections", {filter: {name: nameFilter}}); + assert.commandWorked(result); + return new DBCommandCursor(mydb.getMongo(), result).itcount(); } -function countIndexesFor( mydb, nameFilter ) { - var result = mydb.runCommand( "listCollections", { filter : { name : nameFilter } } ); - assert.commandWorked( result ); - var arr = new DBCommandCursor( mydb.getMongo(), result ).toArray(); +function countIndexesFor(mydb, nameFilter) { + var result = mydb.runCommand("listCollections", {filter: {name: nameFilter}}); + assert.commandWorked(result); + var arr = new DBCommandCursor(mydb.getMongo(), result).toArray(); var total = 0; - for ( var i = 0; i < arr.length; i++ ) { + for (var i = 0; i < arr.length; i++) { var coll = arr[i]; - total += mydb.getCollection( coll.name ).getIndexes().length; + total += mydb.getCollection(coll.name).getIndexes().length; } return total; } -assert.eq(countCollection(masterDB,/temp\d$/), 2); // collections -assert.eq(countIndexesFor(masterDB,/temp\d$/), 4); // indexes (2 _id + 2 x) -assert.eq(countCollection(masterDB,/keep\d$/), 4); +assert.eq(countCollection(masterDB, /temp\d$/), 2); // collections +assert.eq(countIndexesFor(masterDB, /temp\d$/), 4); // indexes (2 _id + 2 x) +assert.eq(countCollection(masterDB, /keep\d$/), 4); -assert.eq(countCollection(secondDB,/temp\d$/), 2); // collections -assert.eq(countIndexesFor(secondDB,/temp\d$/), 4); // indexes (2 _id + 2 x) -assert.eq(countCollection(secondDB,/keep\d$/), 4); +assert.eq(countCollection(secondDB, /temp\d$/), 2); // collections +assert.eq(countIndexesFor(secondDB, /temp\d$/), 4); // indexes (2 _id + 2 x) +assert.eq(countCollection(secondDB, /keep\d$/), 4); // restart secondary and reconnect -replTest.restart(secondId, {}, /*wait=*/true); +replTest.restart(secondId, {}, /*wait=*/true); // wait for the secondary to achieve secondary status -assert.soon(function () { - try { - res = second.getDB("admin").runCommand({ replSetGetStatus: 1 }); - return res.myState 
== 2; - } - catch (e) { - return false; - } - }, "took more than a minute for the secondary to become secondary again", 60*1000); +assert.soon(function() { + try { + res = second.getDB("admin").runCommand({replSetGetStatus: 1}); + return res.myState == 2; + } catch (e) { + return false; + } +}, "took more than a minute for the secondary to become secondary again", 60 * 1000); // make sure restarting secondary didn't drop collections -assert.eq(countCollection(secondDB,/temp\d$/), 2); // collections -assert.eq(countIndexesFor(secondDB,/temp\d$/), 4); // indexes (2 _id + 2 x) -assert.eq(countCollection(secondDB,/keep\d$/), 4); +assert.eq(countCollection(secondDB, /temp\d$/), 2); // collections +assert.eq(countIndexesFor(secondDB, /temp\d$/), 4); // indexes (2 _id + 2 x) +assert.eq(countCollection(secondDB, /keep\d$/), 4); // step down primary and make sure former secondary (now primary) drops collections try { - master.adminCommand({replSetStepDown: 50, force : true}); + master.adminCommand({replSetStepDown: 50, force: true}); } catch (e) { // ignoring socket errors since they sometimes, but not always, fire after running that command. } @@ -89,16 +91,16 @@ assert.soon(function() { printjson(secondDB.adminCommand("replSetGetStatus")); printjson(secondDB.isMaster()); return secondDB.isMaster().ismaster; -}, '', 75*1000); // must wait for secondary to be willing to promote self +}, '', 75 * 1000); // must wait for secondary to be willing to promote self -assert.eq(countCollection(secondDB,/temp\d$/), 0); // collections -assert.eq(countIndexesFor(secondDB,/temp\d$/), 0); // indexes (2 _id + 2 x) -assert.eq(countCollection(secondDB,/keep\d$/), 4); +assert.eq(countCollection(secondDB, /temp\d$/), 0); // collections +assert.eq(countIndexesFor(secondDB, /temp\d$/), 0); // indexes (2 _id + 2 x) +assert.eq(countCollection(secondDB, /keep\d$/), 4); // check that former primary dropped collections replTest.awaitReplication(); -assert.eq(countCollection(masterDB,/temp\d$/), 0); // collections -assert.eq(countIndexesFor(masterDB,/temp\d$/), 0); // indexes (2 _id + 2 x) -assert.eq(countCollection(masterDB,/keep\d$/), 4); +assert.eq(countCollection(masterDB, /temp\d$/), 0); // collections +assert.eq(countIndexesFor(masterDB, /temp\d$/), 0); // indexes (2 _id + 2 x) +assert.eq(countCollection(masterDB, /keep\d$/), 4); replTest.stopSet(); diff --git a/jstests/replsets/test_command.js b/jstests/replsets/test_command.js index d8d5eb42984..abaf10bc56d 100644 --- a/jstests/replsets/test_command.js +++ b/jstests/replsets/test_command.js @@ -2,7 +2,7 @@ // waitForMemberState - waits for node's state to become 'expectedState'. // waitForDrainFinish - waits for primary to finish draining its applier queue. -(function () { +(function() { 'use strict'; var name = 'test_command'; var replSet = new ReplSetTest({name: name, nodes: 3}); @@ -19,14 +19,12 @@ // Stabilize replica set with node 0 as primary. - assert.commandWorked( - replSet.nodes[0].adminCommand({ - replSetTest: 1, - waitForMemberState: ReplSetTest.State.PRIMARY, - timeoutMillis: 60 * 1000, - }), - 'node 0' + replSet.nodes[0].host + ' failed to become primary' - ); + assert.commandWorked(replSet.nodes[0].adminCommand({ + replSetTest: 1, + waitForMemberState: ReplSetTest.State.PRIMARY, + timeoutMillis: 60 * 1000, + }), + 'node 0' + replSet.nodes[0].host + ' failed to become primary'); // We need the try/catch to handle that the node may have hung up the connection due // to a state change. 
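
// A minimal sketch of that try/catch pattern, assuming a connection `conn` to
// the node being waited on (the name is illustrative):
try {
    assert.commandWorked(conn.adminCommand({
        replSetTest: 1,
        waitForMemberState: ReplSetTest.State.SECONDARY,
        timeoutMillis: 60 * 1000,
    }));
} catch (e) {
    // The server may close the connection while changing state; the wait can
    // simply be retried on a fresh connection.
    print('connection dropped during state change: ' + e);
}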
@@ -42,7 +40,8 @@ replSetTest: 1, waitForMemberState: ReplSetTest.State.SECONDARY, timeoutMillis: 60 * 1000, - }), 'node 1' + replSet.nodes[1].host + ' failed to become secondary'); + }), + 'node 1' + replSet.nodes[1].host + ' failed to become secondary'); } var primary = replSet.getPrimary(); @@ -50,20 +49,16 @@ // Check replication mode. - assert.commandFailedWithCode( - primary.getDB(name).runCommand({ - replSetTest: 1, - }), - ErrorCodes.Unauthorized, - 'replSetTest should fail against non-admin database' - ); + assert.commandFailedWithCode(primary.getDB(name).runCommand({ + replSetTest: 1, + }), + ErrorCodes.Unauthorized, + 'replSetTest should fail against non-admin database'); - assert.commandWorked( - primary.adminCommand({ - replSetTest: 1, - }), - 'failed to check replication mode' - ); + assert.commandWorked(primary.adminCommand({ + replSetTest: 1, + }), + 'failed to check replication mode'); // waitForMemberState tests. @@ -74,8 +69,7 @@ timeoutMillis: 1000, }), ErrorCodes.TypeMismatch, - 'replSetTest waitForMemberState should fail on non-numerical state' - ); + 'replSetTest waitForMemberState should fail on non-numerical state'); assert.commandFailedWithCode( primary.adminCommand({ @@ -84,28 +78,23 @@ timeoutMillis: "what timeout", }), ErrorCodes.TypeMismatch, - 'replSetTest waitForMemberState should fail on non-numerical timeout' - ); - - assert.commandFailedWithCode( - primary.adminCommand({ - replSetTest: 1, - waitForMemberState: 9999, - timeoutMillis: 1000, - }), - ErrorCodes.BadValue, - 'replSetTest waitForMemberState should fail on invalid state' - ); - - assert.commandFailedWithCode( - primary.adminCommand({ - replSetTest: 1, - waitForMemberState: ReplSetTest.State.PRIMARY, - timeoutMillis: -1000, - }), - ErrorCodes.BadValue, - 'replSetTest waitForMemberState should fail on negative timeout' - ); + 'replSetTest waitForMemberState should fail on non-numerical timeout'); + + assert.commandFailedWithCode(primary.adminCommand({ + replSetTest: 1, + waitForMemberState: 9999, + timeoutMillis: 1000, + }), + ErrorCodes.BadValue, + 'replSetTest waitForMemberState should fail on invalid state'); + + assert.commandFailedWithCode(primary.adminCommand({ + replSetTest: 1, + waitForMemberState: ReplSetTest.State.PRIMARY, + timeoutMillis: -1000, + }), + ErrorCodes.BadValue, + 'replSetTest waitForMemberState should fail on negative timeout'); assert.commandFailedWithCode( primary.adminCommand({ @@ -114,9 +103,7 @@ timeoutMillis: 1000, }), ErrorCodes.ExceededTimeLimit, - 'replSetTest waitForMemberState(SECONDARY) should time out on node 0 ' + - primary.host - ); + 'replSetTest waitForMemberState(SECONDARY) should time out on node 0 ' + primary.host); assert.commandWorked( secondary.adminCommand({ @@ -124,9 +111,7 @@ waitForMemberState: ReplSetTest.State.SECONDARY, timeoutMillis: 1000, }), - 'replSetTest waitForMemberState(SECONDARY) failed on node 1 ' + - secondary.host - ); + 'replSetTest waitForMemberState(SECONDARY) failed on node 1 ' + secondary.host); // waitForDrainFinish tests. 
@@ -136,31 +121,24 @@ waitForDrainFinish: 'what state', }), ErrorCodes.TypeMismatch, - 'replSetTest waitForDrainFinish should fail on non-numerical timeout' - ); - - assert.commandFailedWithCode( - primary.adminCommand({ - replSetTest: 1, - waitForDrainFinish: -1000, - }), - ErrorCodes.BadValue, - 'replSetTest waitForDrainFinish should fail on negative timeout' - ); - - assert.commandWorked( - primary.adminCommand({ - replSetTest: 1, - waitForDrainFinish: 1000, - }), - 'node 0' + primary.host + ' failed to wait for drain to finish' - ); - - assert.commandWorked( - secondary.adminCommand({ - replSetTest: 1, - waitForDrainFinish: 0, - }), - 'node 1' + primary.host + ' failed to wait for drain to finish' - ); - })(); + 'replSetTest waitForDrainFinish should fail on non-numerical timeout'); + + assert.commandFailedWithCode(primary.adminCommand({ + replSetTest: 1, + waitForDrainFinish: -1000, + }), + ErrorCodes.BadValue, + 'replSetTest waitForDrainFinish should fail on negative timeout'); + + assert.commandWorked(primary.adminCommand({ + replSetTest: 1, + waitForDrainFinish: 1000, + }), + 'node 0' + primary.host + ' failed to wait for drain to finish'); + + assert.commandWorked(secondary.adminCommand({ + replSetTest: 1, + waitForDrainFinish: 0, + }), + 'node 1' + primary.host + ' failed to wait for drain to finish'); +})(); diff --git a/jstests/replsets/toostale.js b/jstests/replsets/toostale.js index 32c75e953b8..4f9e10ea94e 100644 --- a/jstests/replsets/toostale.js +++ b/jstests/replsets/toostale.js @@ -19,13 +19,12 @@ * 8: check s2.state == 3 */ - var w = 0; var wait = function(f) { w++; var n = 0; while (!f()) { - if( n % 4 == 0 ) + if (n % 4 == 0) print("toostale.js waiting " + w); if (++n == 4) { print("" + f); @@ -36,43 +35,45 @@ var wait = function(f) { }; var reconnect = function(a) { - wait(function() { - try { - a.bar.stats(); - return true; - } catch(e) { - print(e); - return false; - } + wait(function() { + try { + a.bar.stats(); + return true; + } catch (e) { + print(e); + return false; + } }); }; - var name = "toostale"; -var replTest = new ReplSetTest({ name: name, nodes: 3, oplogSize: 5 }); +var replTest = new ReplSetTest({name: name, nodes: 3, oplogSize: 5}); var host = getHostName(); var nodes = replTest.startSet(); -replTest.initiate({_id : name, members : [ - {_id : 0, host : host+":"+replTest.ports[0], priority: 2}, - {_id : 1, host : host+":"+replTest.ports[1], arbiterOnly : true}, - {_id : 2, host : host+":"+replTest.ports[2], priority: 0} -]}); +replTest.initiate({ + _id: name, + members: [ + {_id: 0, host: host + ":" + replTest.ports[0], priority: 2}, + {_id: 1, host: host + ":" + replTest.ports[1], arbiterOnly: true}, + {_id: 2, host: host + ":" + replTest.ports[2], priority: 0} + ] +}); var master = replTest.getPrimary(); var mdb = master.getDB("foo"); - print("1: initial insert"); mdb.foo.save({a: 1000}); - print("2: initial sync"); replTest.awaitReplication(); print("3: stop s2"); replTest.stop(2); print("waiting until the master knows the slave is blind"); -assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health == 0; }); +assert.soon(function() { + return master.getDB("admin").runCommand({replSetGetStatus: 1}).members[2].health == 0; +}); print("okay"); print("4: overflow oplog"); @@ -80,49 +81,46 @@ reconnect(master.getDB("local")); var count = master.getDB("local").oplog.rs.count(); var prevCount = -1; while (count > prevCount) { - print("inserting 1000"); - var bulk = mdb.bar.initializeUnorderedBulkOp(); - for 
(var i = 0; i < 1000; i++) { - bulk.insert({ x: i, date: new Date(), str: "safkaldmfaksndfkjansfdjanfjkafa" }); - } - assert.writeOK(bulk.execute()); + print("inserting 1000"); + var bulk = mdb.bar.initializeUnorderedBulkOp(); + for (var i = 0; i < 1000; i++) { + bulk.insert({x: i, date: new Date(), str: "safkaldmfaksndfkjansfdjanfjkafa"}); + } + assert.writeOK(bulk.execute()); - prevCount = count; - replTest.awaitReplication(); - count = master.getDB("local").oplog.rs.count(); - print("count: "+count+" prev: "+prevCount); + prevCount = count; + replTest.awaitReplication(); + count = master.getDB("local").oplog.rs.count(); + print("count: " + count + " prev: " + prevCount); } - print("5: restart s2"); replTest.restart(2); print("waiting until the master knows the slave is not blind"); -assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health != 0; }); +assert.soon(function() { + return master.getDB("admin").runCommand({replSetGetStatus: 1}).members[2].health != 0; +}); print("okay"); - print("6: check s2.state == 3"); var goStale = function() { - wait(function() { - var status = master.getDB("admin").runCommand({replSetGetStatus:1}); - printjson(status); - return status.members[2].state == 3; + wait(function() { + var status = master.getDB("admin").runCommand({replSetGetStatus: 1}); + printjson(status); + return status.members[2].state == 3; }); }; goStale(); - print("7: restart s2"); replTest.stop(2); replTest.restart(2); - print("8: check s2.state == 3"); assert.soon(function() { - var status = master.getDB("admin").runCommand({replSetGetStatus:1}); + var status = master.getDB("admin").runCommand({replSetGetStatus: 1}); printjson(status); return status.members && status.members[2].state == 3; }); replTest.stop(0); - diff --git a/jstests/replsets/two_initsync.js b/jstests/replsets/two_initsync.js index bdb0c96bc5a..1f2b526d61e 100755..100644 --- a/jstests/replsets/two_initsync.js +++ b/jstests/replsets/two_initsync.js @@ -1,7 +1,7 @@ // test initial sync failing // try running as : -// +// // mongo --nodb two_initsync.js | tee out | grep -v ^m31 // @@ -15,10 +15,10 @@ function pause(s) { } } -function deb(obj) { - if( debugging ) { +function deb(obj) { + if (debugging) { print("\n\n\n" + obj + "\n\n"); - } + } } w = 0; @@ -27,7 +27,7 @@ function wait(f) { w++; var n = 0; while (!f()) { - if( n % 4 == 0 ) + if (n % 4 == 0) print("twoinitsync waiting " + w); if (++n == 4) { print("" + f); @@ -37,26 +37,29 @@ function wait(f) { } } -doTest = function (signal) { - var replTest = new ReplSetTest({ name: 'testSet', nodes: 0 }); +doTest = function(signal) { + var replTest = new ReplSetTest({name: 'testSet', nodes: 0}); var first = replTest.add(); // Initiate replica set - assert.soon(function () { - var res = first.getDB("admin").runCommand({ replSetInitiate: null }); + assert.soon(function() { + var res = first.getDB("admin").runCommand({replSetInitiate: null}); return res['ok'] == 1; }); // Get status - assert.soon(function () { - var result = first.getDB("admin").runCommand({ replSetGetStatus: true }); + assert.soon(function() { + var result = first.getDB("admin").runCommand({replSetGetStatus: true}); return result['ok'] == 1; }); var a = replTest.getPrimary().getDB("two"); for (var i = 0; i < 20000; i++) - a.coll.insert({ i: i, s: "a b" }); + a.coll.insert({ + i: i, + s: "a b" + }); // Start a second node var second = replTest.add(); @@ -68,11 +71,13 @@ doTest = function (signal) { var b = second.getDB("admin"); // attempt to interfere with the 
initial sync - b._adminCommand({ replSetTest: 1, forceInitialSyncFailure: 1 }); + b._adminCommand({replSetTest: 1, forceInitialSyncFailure: 1}); // wait(function () { return a._adminCommand("replSetGetStatus").members.length == 2; }); - wait(function () { return b.isMaster().secondary || b.isMaster().ismaster; }); + wait(function() { + return b.isMaster().secondary || b.isMaster().ismaster; + }); print("b.isMaster:"); printjson(b.isMaster()); @@ -82,13 +87,16 @@ doTest = function (signal) { print("b.isMaster:"); printjson(b.isMaster()); - wait(function () { var c = b.getSisterDB("two").coll.count(); print(c); return c == 20000; }); + wait(function() { + var c = b.getSisterDB("two").coll.count(); + print(c); + return c == 20000; + }); print("two_initsync.js SUCCESS"); replTest.stopSet(signal); }; - print("two_initsync.js"); -doTest( 15 ); +doTest(15); diff --git a/jstests/replsets/two_nodes_priority_take_over.js b/jstests/replsets/two_nodes_priority_take_over.js index 403c9ba8464..f6e62fe681d 100644 --- a/jstests/replsets/two_nodes_priority_take_over.js +++ b/jstests/replsets/two_nodes_priority_take_over.js @@ -5,60 +5,57 @@ // TODO: We have to disable this test until SERVER-21456 is fixed, due to the // race of tagging and closing connections on stepdown. if (false) { - -load("jstests/replsets/rslib.js"); - -(function() { - -"use strict"; -var name = "two_nodes_priority_take_over"; -var rst = new ReplSetTest({name: name, nodes: 2}); - -rst.startSet(); -var conf = rst.getReplSetConfig(); -conf.members[0].priority = 2; -conf.members[1].priority = 1; -rst.initiate(conf); -rst.awaitSecondaryNodes(); -// Set verbosity for replication on all nodes. -var verbosity = { - "setParameter" : 1, - "logComponentVerbosity" : { - "verbosity": 4, - "storage" : { "verbosity" : 1 } - } -}; -rst.nodes.forEach(function (node) {node.adminCommand(verbosity);}); - -// The first node will be the primary at the beginning. -rst.waitForState(rst.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); - -// Get the term when replset is stable. -var res = rst.getPrimary().adminCommand("replSetGetStatus"); -assert.commandWorked(res); -var stableTerm = res.term; - -// Reconfig to change priorities. The current primary remains the same until -// the higher priority node takes over. -var conf = rst.getReplSetConfig(); -conf.members[0].priority = 1; -conf.members[1].priority = 2; -conf.version = 2; -reconfig(rst, conf); - -// The second node will take over the primary. -rst.waitForState(rst.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000); - -res = rst.getPrimary().adminCommand("replSetGetStatus"); -assert.commandWorked(res); -var newTerm = res.term; - -// Priority takeover should happen smoothly without failed election as there is -// no current candidate. If vote requests failed (wrongly) for some reason, -// nodes have to start new elections, which increase the term unnecessarily. -if (rst.getReplSetConfigFromNode().protocolVersion == 1) { - assert.eq(newTerm, stableTerm + 1); -} -})(); - + load("jstests/replsets/rslib.js"); + + (function() { + + "use strict"; + var name = "two_nodes_priority_take_over"; + var rst = new ReplSetTest({name: name, nodes: 2}); + + rst.startSet(); + var conf = rst.getReplSetConfig(); + conf.members[0].priority = 2; + conf.members[1].priority = 1; + rst.initiate(conf); + rst.awaitSecondaryNodes(); + // Set verbosity for replication on all nodes. 
+ var verbosity = { + "setParameter": 1, + "logComponentVerbosity": {"verbosity": 4, "storage": {"verbosity": 1}} + }; + rst.nodes.forEach(function(node) { + node.adminCommand(verbosity); + }); + + // The first node will be the primary at the beginning. + rst.waitForState(rst.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000); + + // Get the term when replset is stable. + var res = rst.getPrimary().adminCommand("replSetGetStatus"); + assert.commandWorked(res); + var stableTerm = res.term; + + // Reconfig to change priorities. The current primary remains the same until + // the higher priority node takes over. + var conf = rst.getReplSetConfig(); + conf.members[0].priority = 1; + conf.members[1].priority = 2; + conf.version = 2; + reconfig(rst, conf); + + // The second node will take over the primary. + rst.waitForState(rst.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000); + + res = rst.getPrimary().adminCommand("replSetGetStatus"); + assert.commandWorked(res); + var newTerm = res.term; + + // Priority takeover should happen smoothly without failed election as there is + // no current candidate. If vote requests failed (wrongly) for some reason, + // nodes have to start new elections, which increase the term unnecessarily. + if (rst.getReplSetConfigFromNode().protocolVersion == 1) { + assert.eq(newTerm, stableTerm + 1); + } + })(); } diff --git a/jstests/replsets/zero_vote_arbiter.js b/jstests/replsets/zero_vote_arbiter.js index cba292d6fb0..bc7552ef47b 100644 --- a/jstests/replsets/zero_vote_arbiter.js +++ b/jstests/replsets/zero_vote_arbiter.js @@ -17,12 +17,7 @@ var InvalidReplicaSetConfig = 93; var arbiterConn = replTest.add(); var admin = replTest.getPrimary().getDB("admin"); var conf = admin.runCommand({replSetGetConfig: 1}).config; - conf.members.push({ - _id: 3, - host: arbiterConn.host, - arbiterOnly: true, - votes: 0 - }); + conf.members.push({_id: 3, host: arbiterConn.host, arbiterOnly: true, votes: 0}); conf.version++; jsTestLog('Add arbiter with zero votes:'); @@ -60,7 +55,6 @@ var InvalidReplicaSetConfig = 93; replTest.stopSet(); })(); - /* * replSetInitiate with a 0-vote arbiter. */ @@ -96,12 +90,7 @@ var InvalidReplicaSetConfig = 93; var arbiterConn = replTest.add(); var admin = replTest.getPrimary().getDB("admin"); var conf = admin.runCommand({replSetGetConfig: 1}).config; - conf.members.push({ - _id: 7, - host: arbiterConn.host, - arbiterOnly: true, - votes: 0 - }); + conf.members.push({_id: 7, host: arbiterConn.host, arbiterOnly: true, votes: 0}); conf.version++; jsTestLog('Add arbiter with zero votes:'); |
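
// A minimal sketch of the check this test exercises, assuming the `admin`
// handle and the `conf` document built above: arbiters must carry exactly one
// vote, so the reconfig is expected to be rejected.
var res = admin.runCommand({replSetReconfig: conf});
assert.commandFailedWithCode(res, InvalidReplicaSetConfig);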