diff options
author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2015-11-25 11:20:43 -0500 |
---|---|---|
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2015-12-08 13:05:00 -0500 |
commit | 3ed6635a5fb26c354046d275a1217c4526b2fe02 (patch) | |
tree | f40aa20b5e62996843ce3df0f47b82042dd683a7 | |
parent | 4f24dc58f48cb087db8a4832421d298e9e2633a0 (diff) | |
download | mongo-3ed6635a5fb26c354046d275a1217c4526b2fe02.tar.gz |
SERVER-21050 Cleanup ReplSetTest
This is just cleanup work to hide some of the private state of
ReplSetTest so it is easier to encapsulate and add new logic. Also enables
strict mode.
137 files changed, 1815 insertions, 1696 deletions
diff --git a/jstests/auth/copyauth.js b/jstests/auth/copyauth.js index 2d3ee534666..a0e0b92c51e 100644 --- a/jstests/auth/copyauth.js +++ b/jstests/auth/copyauth.js @@ -60,7 +60,7 @@ function ClusterSpawnHelper(clusterType, startWithAuth) { else { replSetTest.awaitReplication(); } - this.conn = replSetTest.getMaster(); + this.conn = replSetTest.getPrimary(); this.connString = replSetTest.getURL(); } else { diff --git a/jstests/auth/user_defined_roles_on_secondaries.js b/jstests/auth/user_defined_roles_on_secondaries.js index 474ee7ad5d0..4ca2d14f651 100644 --- a/jstests/auth/user_defined_roles_on_secondaries.js +++ b/jstests/auth/user_defined_roles_on_secondaries.js @@ -95,7 +95,7 @@ m0.getDB("db1").createRole({ rstest.add(); rstest.reInitiate(); -rstest.getMaster().getDB("db1").createRole({ +rstest.getPrimary().getDB("db1").createRole({ role: "r3", roles: [ "r1", "r2" ], privileges: [ @@ -116,8 +116,8 @@ rstest.nodes.forEach(function (node) { }); // Verify that updating roles propagates. -rstest.getMaster().getDB("db1").revokeRolesFromRole("r1", [ "read" ], { w: 2 }); -rstest.getMaster().getDB("db1").grantRolesToRole("r1", [ "dbAdmin" ], { w: 2 }); +rstest.getPrimary().getDB("db1").revokeRolesFromRole("r1", [ "read" ], { w: 2 }); +rstest.getPrimary().getDB("db1").grantRolesToRole("r1", [ "dbAdmin" ], { w: 2 }); rstest.nodes.forEach(function (node) { var role = node.getDB("db1").getRole("r1"); assert.eq(1, role.roles.length, node); @@ -125,7 +125,7 @@ rstest.nodes.forEach(function (node) { }); // Verify that dropping roles propagates. -rstest.getMaster().getDB("db1").dropRole("r2", { w: 2}); +rstest.getPrimary().getDB("db1").dropRole("r2", { w: 2}); rstest.nodes.forEach(function (node) { assert.eq(null, node.getDB("db1").getRole("r2")); var role = node.getDB("db1").getRole("r3"); @@ -137,8 +137,8 @@ rstest.nodes.forEach(function (node) { }); // Verify that dropping the admin database propagates. 
-assert.commandWorked(rstest.getMaster().getDB("admin").dropDatabase()); -assert.commandWorked(rstest.getMaster().getDB("admin").getLastErrorObj(2)); +assert.commandWorked(rstest.getPrimary().getDB("admin").dropDatabase()); +assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2)); rstest.nodes.forEach(function (node) { var roles = node.getDB("db1").getRoles(); assert.eq(0, roles.length, node); @@ -146,7 +146,7 @@ rstest.nodes.forEach(function (node) { // Verify that applyOps commands propagate. // NOTE: This section of the test depends on the oplog and roles schemas. -assert.commandWorked(rstest.getMaster().getDB("admin").runCommand({ applyOps: [ +assert.commandWorked(rstest.getPrimary().getDB("admin").runCommand({ applyOps: [ { op: "c", ns: "admin.$cmd", @@ -214,7 +214,7 @@ assert.commandWorked(rstest.getMaster().getDB("admin").runCommand({ applyOps: [ } ] })); -assert.commandWorked(rstest.getMaster().getDB("admin").getLastErrorObj(2)); +assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2)); rstest.nodes.forEach(function (node) { var role = node.getDB("db1").getRole("t1"); assert.eq(1, role.roles.length, node); diff --git a/jstests/gle/get_last_error.js b/jstests/gle/get_last_error.js index 8d0b3d940f3..3b5d6368c61 100644 --- a/jstests/gle/get_last_error.js +++ b/jstests/gle/get_last_error.js @@ -5,7 +5,7 @@ var replTest = new ReplSetTest({name: name, oplogSize: 1, nodes: 3, settings: {chainingAllowed: false}}); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var mdb = master.getDB("test"); // synchronize replication @@ -49,7 +49,7 @@ assert.eq(gle.wtimeout, null); // take a node down and GLE for more nodes than are up replTest.stop(2); -master = replTest.getMaster(); +master = replTest.getPrimary(); mdb = master.getDB("test"); // do w:2 write so secondary is caught up before calling {gle w:3}. 
assert.writeOK(mdb.foo.insert({_id: "3"}, {writeConcern: {w: 2, wtimeout:30000}})); diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js index bd948327310..cb5fb085a94 100644 --- a/jstests/libs/parallelTester.js +++ b/jstests/libs/parallelTester.js @@ -1,22 +1,18 @@ /** * The ParallelTester class is used to test more than one test concurrently */ - - -if ( typeof _threadInject != "undefined" ){ - //print( "fork() available!" ); - +if (typeof _threadInject != "undefined") { Thread = function(){ this.init.apply( this, arguments ); } _threadInject( Thread.prototype ); - + ScopedThread = function() { this.init.apply( this, arguments ); } ScopedThread.prototype = new Thread( function() {} ); _scopedThreadInject( ScopedThread.prototype ); - + fork = function() { var t = new Thread( function() {} ); Thread.apply( t, arguments ); @@ -29,7 +25,7 @@ if ( typeof _threadInject != "undefined" ){ if (host == undefined) host = db.getMongo().host; this.events = new Array( me, collectionName, host ); } - + EventGenerator.prototype._add = function( action ) { this.events.push( [ Random.genExp( this.mean ), action ] ); } diff --git a/jstests/noPassthrough/minvalid.js b/jstests/noPassthrough/minvalid.js index 056b36f3f5a..cbaf26e1b71 100644 --- a/jstests/noPassthrough/minvalid.js +++ b/jstests/noPassthrough/minvalid.js @@ -9,7 +9,7 @@ var host = getHostName(); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var mdb = master.getDB("foo"); print("1: initial insert"); diff --git a/jstests/noPassthrough/minvalid2.js b/jstests/noPassthrough/minvalid2.js index d34c93120d4..72a8c9a4d80 100644 --- a/jstests/noPassthrough/minvalid2.js +++ b/jstests/noPassthrough/minvalid2.js @@ -30,7 +30,7 @@ replTest.initiate({_id : name, members : [ {_id : 2, host : host+":"+replTest.ports[2], arbiterOnly : true} ]}); var slaves = replTest.liveNodes.slaves; -var master = replTest.getMaster(); +var master = 
replTest.getPrimary(); var masterId = replTest.getNodeId(master); var slave = slaves[0]; var slaveId = replTest.getNodeId(slave); @@ -62,7 +62,7 @@ print("6: start up slave"); replTest.restart(slaveId); print("7: writes on former slave") -master = replTest.getMaster(); +master = replTest.getPrimary(); mdb1 = master.getDB("foo"); mdb1.foo.save({a:1002}); diff --git a/jstests/noPassthrough/wt_nojournal_repl.js b/jstests/noPassthrough/wt_nojournal_repl.js index 01bd23b10da..71cf78c5afc 100644 --- a/jstests/noPassthrough/wt_nojournal_repl.js +++ b/jstests/noPassthrough/wt_nojournal_repl.js @@ -40,7 +40,7 @@ else { config.members[0].priority = 1; replTest.initiate(config); - var masterDB = replTest.getMaster().getDB("test"); + var masterDB = replTest.getPrimary().getDB("test"); var secondary1 = replTest.liveNodes.slaves[0]; jsTestLog("add some data to collection foo"); diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js index 59dc0cf181a..d929665df66 100644 --- a/jstests/noPassthroughWithMongod/indexbg_drop.js +++ b/jstests/noPassthroughWithMongod/indexbg_drop.js @@ -27,7 +27,7 @@ replTest.initiate({"_id" : "bgIndex", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var masterId = replTest.getNodeId(master); diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js index 7289de6f25b..24a04775746 100644 --- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js +++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js @@ -44,7 +44,7 @@ replTest.initiate({"_id" : "bgIndex", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var masterDB = 
master.getDB(dbname); diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js index bb71ef6aa2f..80379b64844 100644 --- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js +++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js @@ -27,7 +27,7 @@ replTest.initiate({"_id" : "bgIndex", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var secondId = replTest.getNodeId(second); diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js index f971fc1ba73..ee25b5874b5 100644 --- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js +++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js @@ -53,7 +53,7 @@ {"_id" : 1, "host" : nodenames[1]}, {"_id" : 2, "host" : nodenames[2], arbiterOnly: true}]}); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var second = replTest.getSecondary(); var secondId = replTest.getNodeId(second); diff --git a/jstests/noPassthroughWithMongod/moveprimary-replset.js b/jstests/noPassthroughWithMongod/moveprimary-replset.js index 964dd1afd26..50fe756463b 100755 --- a/jstests/noPassthroughWithMongod/moveprimary-replset.js +++ b/jstests/noPassthroughWithMongod/moveprimary-replset.js @@ -26,7 +26,7 @@ var replSet1 = shardingTest.rs0; var replSet2 = shardingTest.rs1; jsTest.log("Adding data to our first replica set"); -var repset1DB = replSet1.getMaster().getDB(testDBName); +var repset1DB = replSet1.getPrimary().getDB(testDBName); for (var i = 1; i <= numDocs; i++) { repset1DB[testCollName].insert({ x : i }); } @@ -48,13 +48,13 @@ jsTest.log("Adding replSet2 as second shard"); mongosConn.adminCommand({ addshard : 
replSet2.getURL() }); mongosConn.getDB('admin').printShardingStatus(); -printjson(replSet2.getMaster().getDBs()); +printjson(replSet2.getPrimary().getDBs()); jsTest.log("Moving test db from replSet1 to replSet2"); assert.commandWorked(mongosConn.getDB('admin').runCommand({ moveprimary: testDBName, to: replSet2.getURL() })); mongosConn.getDB('admin').printShardingStatus(); -printjson(replSet2.getMaster().getDBs()); +printjson(replSet2.getPrimary().getDBs()); assert.eq(testDB.getSiblingDB("config").databases.findOne({ "_id" : testDBName }).primary, replSet2.name, "Failed to change primary shard for unsharded database."); diff --git a/jstests/noPassthroughWithMongod/replica_set_shard_version.js b/jstests/noPassthroughWithMongod/replica_set_shard_version.js index 23328bf2378..400c49a3a4c 100644 --- a/jstests/noPassthroughWithMongod/replica_set_shard_version.js +++ b/jstests/noPassthroughWithMongod/replica_set_shard_version.js @@ -21,7 +21,7 @@ coll.findOne() var sadmin = shard.getDB( "admin" ) assert.throws(function() { sadmin.runCommand({ replSetStepDown : 3000, force : true }); }); -st.rs0.getMaster(); +st.rs0.getPrimary(); mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : true }) diff --git a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js index 4c36ff4f05d..30570c261c5 100644 --- a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js +++ b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js @@ -12,7 +12,7 @@ replTest.initiate({_id : name, members : replTest.awaitReplication(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var db = master.getDB( "test" ); printjson( rs.status() ); diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js index f56134f5008..431154cb033 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl.js +++ b/jstests/noPassthroughWithMongod/ttl_repl.js @@ -14,7 +14,7 @@ var rt = new ReplSetTest( { 
name : "ttl_repl" , nodes: 2 } ); // setup set var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); rt.awaitSecondaryNodes(); var slave1 = rt.liveNodes.slaves[0]; diff --git a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js index f1c33f448f7..bf9317aad95 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js +++ b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js @@ -6,7 +6,7 @@ var rt = new ReplSetTest( { name : "ttl_repl" , nodes: 2 } ); // setup set var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); rt.awaitSecondaryNodes(); var slave1 = rt.getSecondary(); diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js index 7786be9cc4f..8abbb693fe0 100644 --- a/jstests/replsets/apply_batch_only_goes_forward.js +++ b/jstests/replsets/apply_batch_only_goes_forward.js @@ -25,7 +25,7 @@ var nodes = replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var mTest = master.getDB("test"); var mLocal = master.getDB("local"); var mMinvalid = mLocal["replset.minvalid"]; @@ -66,7 +66,7 @@ replTest.waitForState(master, replTest.RECOVERING, 90000); // Slave is now master... so do a write to get a minvalid entry on the secondary. 
- assert.writeOK(replTest.getMaster().getDB("test").foo.save({}, {writeConcern: {w: 3}})); + assert.writeOK(replTest.getPrimary().getDB("test").foo.save({}, {writeConcern: {w: 3}})); assert.soon(function() { var mv; diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js index 939b1d90e22..fc0eb463fbf 100644 --- a/jstests/replsets/auth1.js +++ b/jstests/replsets/auth1.js @@ -59,7 +59,7 @@ result = m.getDB("admin").runCommand({replSetInitiate : rs.getReplSetConfig()}); assert.eq(result.ok, 1, "couldn't initiate: "+tojson(result)); m.getDB('admin').logout(); // In case this node doesn't become primary, make sure its not auth'd -var master = rs.getMaster(); +var master = rs.getPrimary(); rs.awaitSecondaryNodes(); var mId = rs.getNodeId(master); var slave = rs.liveNodes.slaves[0]; @@ -107,7 +107,7 @@ assert.writeOK(bulk.execute({ w: 3, wtimeout: 60000 })); print("fail over"); rs.stop(mId); -master = rs.getMaster(); +master = rs.getPrimary(); print("add some more data 1"); master.getDB("test").auth("bar", "baz"); @@ -119,7 +119,7 @@ assert.writeOK(bulk.execute({ w: 2 })); print("resync"); rs.restart(mId, {"keyFile" : key1_600}); -master = rs.getMaster(); +master = rs.getPrimary(); print("add some more data 2"); bulk = master.getDB("test").foo.initializeUnorderedBulkOp(); @@ -146,7 +146,7 @@ try { catch (e) { print("error: "+e); } -master = rs.getMaster(); +master = rs.getPrimary(); master.getDB("admin").auth("foo", "bar"); diff --git a/jstests/replsets/auth2.js b/jstests/replsets/auth2.js index 8899162d04e..62179c1c7af 100644 --- a/jstests/replsets/auth2.js +++ b/jstests/replsets/auth2.js @@ -39,7 +39,7 @@ rs.initiate({ "_id" : name, {"_id" : 2, "host" : hostnames[2], priority: 0} ]}); -var master = rs.getMaster(); +var master = rs.getPrimary(); print("add an admin user"); master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}, diff --git a/jstests/replsets/auth3.js b/jstests/replsets/auth3.js index d940c5e3e37..504bfeffe9c 
100644 --- a/jstests/replsets/auth3.js +++ b/jstests/replsets/auth3.js @@ -19,13 +19,13 @@ rs.startSet(); rs.initiate(); - master = rs.getMaster(); + master = rs.getPrimary(); jsTest.log("adding user"); master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}, {w: 2, wtimeout: 30000}); var safeInsert = function() { - master = rs.getMaster(); + master = rs.getPrimary(); master.getDB("admin").auth("foo", "bar"); assert.writeOK(master.getDB("foo").bar.insert({ x: 1 })); }; @@ -44,7 +44,7 @@ jsTest.log("write stuff to 0&2"); rs.stop(1); - master = rs.getMaster(); + master = rs.getPrimary(); master.getDB("admin").auth("foo", "bar"); master.getDB("foo").bar.drop(); jsTest.log("last op: " + diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js index 71c976586d7..59eefa52f52 100644 --- a/jstests/replsets/auth_no_pri.js +++ b/jstests/replsets/auth_no_pri.js @@ -6,7 +6,7 @@ var nodes = rs.startSet(); rs.initiate(); // Add user -var master = rs.getMaster(); +var master = rs.getPrimary(); master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT}); // Can authenticate replset connection when whole set is up. 
diff --git a/jstests/replsets/buildindexes.js b/jstests/replsets/buildindexes.js index 5ec65765e27..a114011c3a0 100644 --- a/jstests/replsets/buildindexes.js +++ b/jstests/replsets/buildindexes.js @@ -15,7 +15,7 @@ replTest.initiate(config); - var master = replTest.getMaster().getDB(name); + var master = replTest.getPrimary().getDB(name); var slaveConns = replTest.liveNodes.slaves; var slave = []; for (var i in slaveConns) { diff --git a/jstests/replsets/capped_id.js b/jstests/replsets/capped_id.js index cd866fb0234..83942f6405f 100644 --- a/jstests/replsets/capped_id.js +++ b/jstests/replsets/capped_id.js @@ -18,9 +18,9 @@ var nodes = replTest.startSet(); // This will wait for initiation replTest.initiate(); -// Call getMaster to return a reference to the node that's been +// Call getPrimary to return a reference to the node that's been // elected master -var master = replTest.getMaster(); +var master = replTest.getPrimary(); // wait for secondaries to be up, since we'll be reading from them replTest.awaitSecondaryNodes(); @@ -28,7 +28,7 @@ replTest.awaitSecondaryNodes(); var slave1 = replTest.liveNodes.slaves[0]; var slave2 = replTest.liveNodes.slaves[1]; -// Calling getMaster made available the liveNodes structure, +// Calling getPrimary made available the liveNodes structure, // which looks like this: // liveNodes = {master: masterNode, slaves: [slave1, slave2] } printjson( replTest.liveNodes ); diff --git a/jstests/replsets/capped_insert_order.js b/jstests/replsets/capped_insert_order.js index b55972afc16..9b39021732c 100644 --- a/jstests/replsets/capped_insert_order.js +++ b/jstests/replsets/capped_insert_order.js @@ -8,7 +8,7 @@ replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var slave = replTest.liveNodes.slaves[0]; var dbName = "db"; diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js index 54b50d9c63f..ca53d370f4a 100644 --- a/jstests/replsets/cloneDb.js +++ 
b/jstests/replsets/cloneDb.js @@ -21,7 +21,7 @@ if (jsTest.options().keyFile) { var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var secondary = replTest.liveNodes.slaves[0]; var masterDB = master.getDB(replsetDBName); masterDB.dropDatabase(); diff --git a/jstests/replsets/config_server_checks.js b/jstests/replsets/config_server_checks.js index b7627923d8d..41579aceb89 100644 --- a/jstests/replsets/config_server_checks.js +++ b/jstests/replsets/config_server_checks.js @@ -51,7 +51,7 @@ var conf = rst.getReplSetConfig(); conf.configsvr = true; assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); rst.stopSet(); })(); @@ -71,10 +71,10 @@ var conf = rst.getReplSetConfig(); conf.configsvr = true; assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); -var conf = rst.getMaster().getDB('local').system.replset.findOne(); +var conf = rst.getPrimary().getDB('local').system.replset.findOne(); assert(conf.configsvr, tojson(conf)); rst.stopSet(); @@ -93,7 +93,7 @@ var rst = new ReplSetTest({name: "configrs6", rst.startSet(); assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: 1})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); rst.stopSet(); })(); @@ -111,7 +111,7 @@ rst.startSet(); var conf = rst.getReplSetConfig(); assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); assert.throws(function() { rst.restart(0, {configsvr: ""}); @@ -135,7 +135,7 @@ var conf = rst.getReplSetConfig(); conf.configsvr = true; assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); var node = 
rst.nodes[0]; diff --git a/jstests/replsets/copydb.js b/jstests/replsets/copydb.js index d3df7de08c2..59730f70084 100644 --- a/jstests/replsets/copydb.js +++ b/jstests/replsets/copydb.js @@ -8,7 +8,7 @@ replTest.startSet(); replTest.initiate(); - var primary = replTest.getMaster(); + var primary = replTest.getPrimary(); var secondary = replTest.liveNodes.slaves[0]; var sourceDBName = 'copydb-repl-test-source'; diff --git a/jstests/replsets/drop_oplog.js b/jstests/replsets/drop_oplog.js index 90c920e1b27..8a84bb2050e 100644 --- a/jstests/replsets/drop_oplog.js +++ b/jstests/replsets/drop_oplog.js @@ -6,7 +6,7 @@ var nodes = rt.startSet(); rt.initiate(); - var master = rt.getMaster(); + var master = rt.getPrimary(); var ml = master.getDB( 'local' ); var threw = false; diff --git a/jstests/replsets/election_not_blocked.js b/jstests/replsets/election_not_blocked.js index 20c2ff7cc59..c3523200b0b 100644 --- a/jstests/replsets/election_not_blocked.js +++ b/jstests/replsets/election_not_blocked.js @@ -24,7 +24,7 @@ // so it cannot vote while fsync locked in PV1. Use PV0 explicitly here. protocolVersion: 0}); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // do a write assert.writeOK(master.getDB("foo").bar.insert({x:1}, {writeConcern: {w: 3}})); diff --git a/jstests/replsets/explain_slaveok.js b/jstests/replsets/explain_slaveok.js index 0714c5074d1..93069e6ac01 100644 --- a/jstests/replsets/explain_slaveok.js +++ b/jstests/replsets/explain_slaveok.js @@ -12,7 +12,7 @@ print("Start replica set with two nodes"); var replTest = new ReplSetTest({name: name, nodes: 2}); var nodes = replTest.startSet(); replTest.initiate(); -var primary = replTest.getMaster(); +var primary = replTest.getPrimary(); // Insert a document and let it sync to the secondary. 
print("Initial sync"); diff --git a/jstests/replsets/fsync_lock_read_secondaries.js b/jstests/replsets/fsync_lock_read_secondaries.js index 3f55bc8ef8b..81f6a0be8ae 100644 --- a/jstests/replsets/fsync_lock_read_secondaries.js +++ b/jstests/replsets/fsync_lock_read_secondaries.js @@ -31,7 +31,7 @@ var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5}); var nodes = replTest.startSet(); // This will wait for initiation replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var ret = master.getDB("admin").fsyncLock(); if (!ret.ok) { @@ -48,7 +48,7 @@ for(var i=0; i<docNum; i++) { waitForAllMembers(master.getDB("foo")); replTest.awaitReplication(); -// Calling getMaster also makes available the liveNodes structure, which looks like this: +// Calling getPrimary also makes available the liveNodes structure, which looks like this: // liveNodes = {master: masterNode, slaves: [slave1, slave2] } var slaves = replTest.liveNodes.slaves; slaves[0].setSlaveOk(); diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js index a60ce82f5f8..1aff25a18eb 100644 --- a/jstests/replsets/groupAndMapReduce.js +++ b/jstests/replsets/groupAndMapReduce.js @@ -18,9 +18,9 @@ doTest = function( signal ) { // This will wait for initiation replTest.initiate(); - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. 
- var master = replTest.getMaster(); + var master = replTest.getPrimary(); // save some records var len = 100 diff --git a/jstests/replsets/index_delete.js b/jstests/replsets/index_delete.js index c2e871fe6f1..8747eacc412 100644 --- a/jstests/replsets/index_delete.js +++ b/jstests/replsets/index_delete.js @@ -39,7 +39,7 @@ replTest.initiate({"_id" : "fgIndex", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var masterDB = master.getDB('fgIndexSec'); var secondDB = second.getDB('fgIndexSec'); diff --git a/jstests/replsets/index_restart_secondary.js b/jstests/replsets/index_restart_secondary.js index d792839c580..7308de83271 100644 --- a/jstests/replsets/index_restart_secondary.js +++ b/jstests/replsets/index_restart_secondary.js @@ -24,7 +24,7 @@ if (conns[0].getDB('test').serverBuildInfo().bits !== 32) { {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var second = replTest.getSecondary(); var secondId = replTest.getNodeId(second); diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js index a3ecaf5db68..8b673117fd1 100644 --- a/jstests/replsets/initial_sync1.js +++ b/jstests/replsets/initial_sync1.js @@ -26,7 +26,7 @@ var replTest = new ReplSetTest({name: basename, var conns = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var foo = master.getDB("foo"); var admin = master.getDB("admin"); @@ -98,7 +98,7 @@ reconnect(slave1); replTest.waitForState(slave1, [replTest.PRIMARY, replTest.SECONDARY], 60 * 1000); print("10. 
Insert some stuff"); -master = replTest.getMaster(); +master = replTest.getPrimary(); bulk = foo.bar.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" }); diff --git a/jstests/replsets/initial_sync2.js b/jstests/replsets/initial_sync2.js index 7888f7d3354..9a913aeafc5 100644 --- a/jstests/replsets/initial_sync2.js +++ b/jstests/replsets/initial_sync2.js @@ -25,7 +25,7 @@ var replTest = new ReplSetTest( {name: basename, nodes: 2} ); var conns = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var origMaster = master; var foo = master.getDB("foo"); var admin = master.getDB("admin"); diff --git a/jstests/replsets/initial_sync3.js b/jstests/replsets/initial_sync3.js index b7446813004..4456cfbd498 100644 --- a/jstests/replsets/initial_sync3.js +++ b/jstests/replsets/initial_sync3.js @@ -27,7 +27,7 @@ replTest.initiate({ ] }); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); print("Initial sync"); master.getDB("foo").bar.baz.insert({x:1}); @@ -48,7 +48,7 @@ assert(!result.secondary, tojson(result)); print("bring 0 back up"); replTest.restart(0); print("0 should become primary"); -master = replTest.getMaster(); +master = replTest.getPrimary(); print("now 1 should be able to initial sync"); assert.soon(function() { diff --git a/jstests/replsets/initial_sync4.js b/jstests/replsets/initial_sync4.js index da49839ef0c..c7c23e65497 100644 --- a/jstests/replsets/initial_sync4.js +++ b/jstests/replsets/initial_sync4.js @@ -8,7 +8,7 @@ replTest = new ReplSetTest( {name: basename, nodes: 1} ); replTest.startSet(); replTest.initiate(); -m = replTest.getMaster(); +m = replTest.getPrimary(); md = m.getDB("d"); mc = m.getDB("d")["c"]; diff --git a/jstests/replsets/ismaster1.js b/jstests/replsets/ismaster1.js index 76252a069cb..1904145c882 100644 --- a/jstests/replsets/ismaster1.js +++ b/jstests/replsets/ismaster1.js 
@@ -116,7 +116,7 @@ var agreeOnPrimaryAndSetVersion = function( setVersion ) { return true; } -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert.soon( function() { return agreeOnPrimaryAndSetVersion( 1 ); }, "Nodes did not initiate in less than a minute", 60000 ); @@ -199,7 +199,7 @@ catch(e) { print(e); } -master = replTest.getMaster(); +master = replTest.getPrimary(); assert.soon( function() { return agreeOnPrimaryAndSetVersion( 2 ); }, "Nodes did not sync in less than a minute", 60000 ); diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js index 34c0e83993b..5ecc15456dc 100644 --- a/jstests/replsets/maintenance.js +++ b/jstests/replsets/maintenance.js @@ -8,7 +8,7 @@ replTest.initiate(config); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60000); // Make sure we have a master -var master = replTest.getMaster(); +var master = replTest.getPrimary(); for (i = 0; i < 20; i++) { master.getDB("bar").foo.insert({x:1,y:i,abc:123,str:"foo bar baz"}); diff --git a/jstests/replsets/maintenance2.js b/jstests/replsets/maintenance2.js index 9b2793a3bd9..401bfeb8701 100644 --- a/jstests/replsets/maintenance2.js +++ b/jstests/replsets/maintenance2.js @@ -15,9 +15,9 @@ // This will wait for initiation replTest.initiate(); - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. 
- var master = replTest.getMaster(); + var master = replTest.getPrimary(); // save some records var len = 100 diff --git a/jstests/replsets/maxSyncSourceLagSecs.js b/jstests/replsets/maxSyncSourceLagSecs.js index 087db2edaab..8d44dd5ddb2 100644 --- a/jstests/replsets/maxSyncSourceLagSecs.js +++ b/jstests/replsets/maxSyncSourceLagSecs.js @@ -18,7 +18,7 @@ { "_id": 2, "host": nodes[2], priority: 0 }], }); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); master.getDB("foo").bar.save({a: 1}); replTest.awaitReplication(); var slaves = replTest.liveNodes.slaves; diff --git a/jstests/replsets/no_chaining.js b/jstests/replsets/no_chaining.js index ebedae9fa5d..97acc61875a 100644 --- a/jstests/replsets/no_chaining.js +++ b/jstests/replsets/no_chaining.js @@ -20,13 +20,13 @@ replTest.initiate( } ); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); replTest.awaitReplication(); var breakNetwork = function() { nodes[0].disconnect(nodes[2]); - master = replTest.getMaster(); + master = replTest.getPrimary(); }; var checkNoChaining = function() { diff --git a/jstests/replsets/oplog_format.js b/jstests/replsets/oplog_format.js index c07d4f66cef..c7cc18a5908 100644 --- a/jstests/replsets/oplog_format.js +++ b/jstests/replsets/oplog_format.js @@ -9,7 +9,7 @@ var replTest = new ReplSetTest( { nodes: 1, oplogSize:2, nodeOptions: {smallfiles:""}} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var coll = master.getDB("o").fake; var cdb = coll.getDB(); diff --git a/jstests/replsets/oplog_truncated_on_recovery.js b/jstests/replsets/oplog_truncated_on_recovery.js index 8142b8df7ee..c9e2fcaeae2 100644 --- a/jstests/replsets/oplog_truncated_on_recovery.js +++ b/jstests/replsets/oplog_truncated_on_recovery.js @@ -36,7 +36,7 @@ var nodes = replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var testDB 
= master.getDB("test"); var localDB = master.getDB("local"); var minvalidColl = localDB["replset.minvalid"]; diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js index 647864245dc..5d64719fe8c 100644 --- a/jstests/replsets/optime.js +++ b/jstests/replsets/optime.js @@ -32,7 +32,7 @@ var replTest = new ReplSetTest( { name : "replStatus" , nodes: 3, oplogSize: 1 } replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); replTest.awaitReplication(); replTest.awaitSecondaryNodes(); diff --git a/jstests/replsets/pipelineout.js b/jstests/replsets/pipelineout.js index 5e0c1d7f45c..97accba2eec 100644 --- a/jstests/replsets/pipelineout.js +++ b/jstests/replsets/pipelineout.js @@ -10,7 +10,7 @@ replTest.initiate({"_id" : name, {"_id" : 1, "host" : nodes[1]} ]}); -var primary = replTest.getMaster().getDB(name); +var primary = replTest.getPrimary().getDB(name); var secondary = replTest.liveNodes.slaves[0].getDB(name); // populate the collection diff --git a/jstests/replsets/plan_cache_slaveok.js b/jstests/replsets/plan_cache_slaveok.js index 311efef86bb..a63be51fae1 100644 --- a/jstests/replsets/plan_cache_slaveok.js +++ b/jstests/replsets/plan_cache_slaveok.js @@ -81,7 +81,7 @@ print("Start replica set with two nodes"); var replTest = new ReplSetTest({name: name, nodes: 2}); var nodes = replTest.startSet(); replTest.initiate(); -var primary = replTest.getMaster(); +var primary = replTest.getPrimary(); // Insert a document and let it sync to the secondary. 
print("Initial sync"); diff --git a/jstests/replsets/protocol_version_upgrade_downgrade.js b/jstests/replsets/protocol_version_upgrade_downgrade.js index a5906d2e88d..ebb0a740d7c 100644 --- a/jstests/replsets/protocol_version_upgrade_downgrade.js +++ b/jstests/replsets/protocol_version_upgrade_downgrade.js @@ -18,7 +18,7 @@ conf.members[2].priority = 0; rst.initiate(conf); rst.awaitSecondaryNodes(); -var primary = rst.getMaster(); +var primary = rst.getPrimary(); var primaryColl = primary.getDB("test").coll; // Set verbosity for replication on all nodes. diff --git a/jstests/replsets/reindex_secondary.js b/jstests/replsets/reindex_secondary.js index af08dd4cec8..d3c0991a5d3 100644 --- a/jstests/replsets/reindex_secondary.js +++ b/jstests/replsets/reindex_secondary.js @@ -4,7 +4,7 @@ var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); replTest.awaitSecondaryNodes() var slaves = replTest.liveNodes.slaves; diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js index 794b9dd9000..e15b793434b 100644 --- a/jstests/replsets/remove1.js +++ b/jstests/replsets/remove1.js @@ -16,7 +16,7 @@ print("Start set with two nodes"); var replTest = new ReplSetTest( {name: name, nodes: 2} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var secondary = replTest.getSecondary(); print("Initial sync"); @@ -79,7 +79,7 @@ assert.soon(function() { try { } catch (e) { return false; } }); -master = replTest.getMaster(); +master = replTest.getPrimary(); printjson(master.getDB("admin").runCommand({replSetGetStatus:1})); var newConfig = master.getDB("local").system.replset.findOne(); print("newConfig: " + tojson(newConfig)); diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js index de0fb30df4f..36bc2de74d1 100644 --- a/jstests/replsets/replset1.js +++ b/jstests/replsets/replset1.js @@ -25,9 +25,9 @@ var 
doTest = function( signal ) { // This will wait for initiation replTest.initiate(); - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var isPV1 = (replTest.getConfigFromPrimary().protocolVersion == 1); if (isPV1) { @@ -36,7 +36,7 @@ var doTest = function( signal ) { assert.eq("new primary", oplog_entry["o"]["msg"]); assert.eq("n", oplog_entry["op"]); } - // Calling getMaster also makes available the liveNodes structure, + // Calling getPrimary also makes available the liveNodes structure, // which looks like this: // liveNodes = {master: masterNode, // slaves: [slave1, slave2] @@ -68,7 +68,7 @@ var doTest = function( signal ) { replTest.stop( master_id ); // Now let's see who the new master is: - var new_master = replTest.getMaster(); + var new_master = replTest.getPrimary(); // Is the new master the same as the old master? 
var new_master_id = replTest.getNodeId( new_master ); @@ -104,7 +104,7 @@ var doTest = function( signal ) { }); // And that both slave nodes have all the updates - new_master = replTest.getMaster(); + new_master = replTest.getPrimary(); assert.eq( 1000 , new_master.getDB( "bar" ).runCommand( { count:"bar"} ).n , "assumption 2"); replTest.awaitSecondaryNodes(); replTest.awaitReplication(); @@ -119,7 +119,7 @@ var doTest = function( signal ) { }); // last error - master = replTest.getMaster(); + master = replTest.getPrimary(); slaves = replTest.liveNodes.slaves; printjson(replTest.liveNodes); diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js index 20364381dd0..656b727ba81 100644 --- a/jstests/replsets/replset2.js +++ b/jstests/replsets/replset2.js @@ -18,9 +18,9 @@ doTest = function (signal) { var testDB = "repl-test"; - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // Wait for replication to a single node master.getDB(testDB).bar.insert({ n: 1 }); diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js index 5731e269ce9..6bb29a196ec 100644 --- a/jstests/replsets/replset3.js +++ b/jstests/replsets/replset3.js @@ -15,7 +15,7 @@ var doTest = function (signal) { replTest.initiate(); // Get master node - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // Write some data to master // NOTE: this test fails unless we write some data. 
@@ -35,7 +35,7 @@ var doTest = function (signal) { print(phase++); try { - var new_master = replTest.getMaster(); + var new_master = replTest.getPrimary(); } catch (err) { throw ("Could not elect new master before timeout."); diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js index 95fa3dbd543..605284e01da 100644 --- a/jstests/replsets/replset4.js +++ b/jstests/replsets/replset4.js @@ -6,7 +6,7 @@ doTest = function (signal) { replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // Kill both slaves, simulating a network partition var slaves = replTest.liveNodes.slaves; diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js index 3f239499e1b..717a0c8153b 100644 --- a/jstests/replsets/replset5.js +++ b/jstests/replsets/replset5.js @@ -19,7 +19,7 @@ load("jstests/replsets/rslib.js"); replTest.initiate(config); // - var master = replTest.getMaster(); + var master = replTest.getPrimary(); replTest.awaitSecondaryNodes(); var testDB = "foo"; diff --git a/jstests/replsets/replset6.js b/jstests/replsets/replset6.js index f9111e28dbd..1c772cc7c28 100644 --- a/jstests/replsets/replset6.js +++ b/jstests/replsets/replset6.js @@ -6,7 +6,7 @@ baseName = "jstests_replsets_replset6"; var rt = new ReplSetTest({ name : "replset6tests" , nodes: 2 }); var nodes = rt.startSet(); rt.initiate(); -var m = rt.getMaster(); +var m = rt.getPrimary(); rt.awaitSecondaryNodes(); var slaves = rt.liveNodes.slaves; s = slaves[0]; diff --git a/jstests/replsets/replset7.js b/jstests/replsets/replset7.js index 0714a23e962..1c63fd8f35f 100644 --- a/jstests/replsets/replset7.js +++ b/jstests/replsets/replset7.js @@ -5,7 +5,7 @@ var rt = new ReplSetTest( { name : "replset7tests" , nodes: 1 } ); var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); var md = master.getDB( 'd' ); var mdc = md[ 'c' ]; diff --git a/jstests/replsets/replset8.js 
b/jstests/replsets/replset8.js index 51cae86670a..ead9c50f066 100644 --- a/jstests/replsets/replset8.js +++ b/jstests/replsets/replset8.js @@ -5,7 +5,7 @@ var rt = new ReplSetTest( { name : "replset8tests" , nodes: 1 } ); var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); var bigstring = "a"; var md = master.getDB( 'd' ); var mdc = md[ 'c' ]; diff --git a/jstests/replsets/replset9.js b/jstests/replsets/replset9.js index 382ddd3c3e6..8ae46863087 100644 --- a/jstests/replsets/replset9.js +++ b/jstests/replsets/replset9.js @@ -4,7 +4,7 @@ var rt = new ReplSetTest( { name : "replset9tests" , nodes: 1, oplogSize: 300 } var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); var bigstring = Array(5000).toString(); var md = master.getDB( 'd' ); var mdc = md[ 'c' ]; @@ -57,7 +57,7 @@ var slave = rt.add(); print ("initiation complete!"); var sc = slave.getDB( 'd' )[ 'c' ]; slave.setSlaveOk(); -master = rt.getMaster(); +master = rt.getPrimary(); print ("updating and deleting documents"); bulk = master.getDB('d')['c'].initializeUnorderedBulkOp(); diff --git a/jstests/replsets/replsetadd_profile.js b/jstests/replsets/replsetadd_profile.js index 45267f9ed4f..cc36f4c1a57 100644 --- a/jstests/replsets/replsetadd_profile.js +++ b/jstests/replsets/replsetadd_profile.js @@ -12,7 +12,7 @@ var replTest = new ReplSetTest({name: 'ReplSetAddProfileTestSet', nodes: [{profile: 2}]}); replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var masterCollection = master.getDB('test').getCollection(collectionName); masterCollection.save({a: 1}); diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js index 75c53008c54..16388c8b92b 100644 --- a/jstests/replsets/replsetarb2.js +++ b/jstests/replsets/replsetarb2.js @@ -15,7 +15,7 @@ ]}); // Make sure we have a master - var master = replTest.getMaster(); + var master = 
replTest.getPrimary(); // Make sure we have an arbiter assert.soon(function() { @@ -37,7 +37,7 @@ replTest.stop(mId); // And make sure that the slave is promoted - var new_master = replTest.getMaster(); + var new_master = replTest.getPrimary(); var newMasterId = replTest.getNodeId(new_master); assert.neq(newMasterId, mId, "Secondary wasn't promoted to new primary"); diff --git a/jstests/replsets/replsetfreeze.js b/jstests/replsets/replsetfreeze.js index 03e9f724f8a..67c08740e98 100644 --- a/jstests/replsets/replsetfreeze.js +++ b/jstests/replsets/replsetfreeze.js @@ -48,7 +48,7 @@ var config = {"_id" : "unicomplex", "members" : [ {"_id" : 1, "host" : nodes[1] }, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}; var r = replTest.initiate(config); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var secondary = replTest.getSecondary(); replTest.awaitSecondaryNodes(); @@ -79,7 +79,7 @@ while ((new Date()).getTime() - start < (28 * 1000) ) { // we need less 30 since print("5: check for new master"); -master = replTest.getMaster(); +master = replTest.getPrimary(); print("6: step down new master"); @@ -102,7 +102,7 @@ master.getDB("admin").runCommand({replSetFreeze : 0}); print("9: check we get a new master within 30 seconds"); -master = replTest.getMaster(); +master = replTest.getPrimary(); replTest.stopSet( 15 ); diff --git a/jstests/replsets/replsethostnametrim.js b/jstests/replsets/replsethostnametrim.js index 44f4c7b2320..c303ecdea0d 100644 --- a/jstests/replsets/replsethostnametrim.js +++ b/jstests/replsets/replsethostnametrim.js @@ -4,7 +4,7 @@ var replTest = new ReplSetTest({ name: 'testSet', nodes: 1 }); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var config = master.getDB("local").system.replset.findOne(); config.version++; var origHost = config.members[0].host; diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js index 
04a3fb1f237..c0d6cb48a01 100644 --- a/jstests/replsets/replsetprio1.js +++ b/jstests/replsets/replsetprio1.js @@ -22,7 +22,7 @@ replTest.waitForState(nodes[1], replTest.PRIMARY, 60000); // do some writes on 1 - var master = replTest.getMaster(); + var master = replTest.getPrimary(); for (var i=0; i<1000; i++) { master.getDB("foo").bar.insert({i:i}); } @@ -36,7 +36,7 @@ replTest.waitForState(nodes[2], replTest.PRIMARY, 60000); // make sure nothing was rolled back - master = replTest.getMaster(); + master = replTest.getPrimary(); for (i=0; i<1000; i++) { assert(master.getDB("foo").bar.findOne({i:i}) != null, 'checking '+i); assert(master.getDB("bar").baz.findOne({i:i}) != null, 'checking '+i); diff --git a/jstests/replsets/replsetrestart1.js b/jstests/replsets/replsetrestart1.js index 0224d0f47c7..9ff3773a24f 100644 --- a/jstests/replsets/replsetrestart1.js +++ b/jstests/replsets/replsetrestart1.js @@ -35,9 +35,9 @@ // DOWN, later. replTest.awaitSecondaryNodes(); - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. 
- var master = replTest.getMaster(); + var master = replTest.getPrimary(); var config1 = master.getDB("local").system.replset.findOne(); // Now we're going to shut down all nodes @@ -60,7 +60,7 @@ replTest.restart( s2Id ); // Make sure that a new master comes up - master = replTest.getMaster(); + master = replTest.getPrimary(); replTest.awaitSecondaryNodes(); var config2 = master.getDB("local").system.replset.findOne(); compare_configs(config1, config2); diff --git a/jstests/replsets/restore_term.js b/jstests/replsets/restore_term.js index 00fa68ed0d0..0a0af27c08f 100644 --- a/jstests/replsets/restore_term.js +++ b/jstests/replsets/restore_term.js @@ -30,7 +30,7 @@ conf.protocolVersion = 1; rst.initiate(conf); rst.awaitSecondaryNodes(); -var primary = rst.getMaster(); +var primary = rst.getPrimary(); var primaryColl = primary.getDB("test").coll; // Current term may be greater than 1 if election race happens. @@ -53,13 +53,13 @@ try { rst.awaitSecondaryNodes(); // The secondary became the new primary now with a higher term. // Since there's only one secondary who may run for election, the new term is higher by 1. -assert.eq(getCurrentTerm(rst.getMaster()), firstSuccessfulTerm + 1); +assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1); // Restart the replset and verify the term is the same. rst.stopSet(null /* signal */, true /* forRestart */); rst.startSet({restart: true}); rst.awaitSecondaryNodes(); -primary = rst.getMaster(); +primary = rst.getPrimary(); assert.eq(primary.getDB("test").coll.find().itcount(), 1); // After restart, the new primary stands up with the newer term. 
diff --git a/jstests/replsets/resync_with_write_load.js b/jstests/replsets/resync_with_write_load.js index c67f1f67d59..f96c9a8e152 100644 --- a/jstests/replsets/resync_with_write_load.js +++ b/jstests/replsets/resync_with_write_load.js @@ -19,7 +19,7 @@ var config = { "_id": testName, var r = replTest.initiate(config); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // Make sure we have a master -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var a_conn = conns[0]; var b_conn = conns[1]; a_conn.setSlaveOk(); diff --git a/jstests/replsets/rollback.js b/jstests/replsets/rollback.js index 4e886b487a4..e66357b1fc5 100644 --- a/jstests/replsets/rollback.js +++ b/jstests/replsets/rollback.js @@ -45,7 +45,7 @@ load("jstests/replsets/rslib.js"); // Make sure we have a master replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var a_conn = conns[0]; var A = a_conn.getDB("admin"); var b_conn = conns[1]; diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js index 1b91c8803f3..000346b17d3 100644 --- a/jstests/replsets/rollback2.js +++ b/jstests/replsets/rollback2.js @@ -41,7 +41,7 @@ load("jstests/replsets/rslib.js"); // Make sure we have a master and that that master is node A replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var a_conn = conns[0]; a_conn.setSlaveOk(); var A = a_conn.getDB("admin"); diff --git a/jstests/replsets/rollback3.js b/jstests/replsets/rollback3.js index 1ba4d14d255..b5bfcd655c5 100755 --- a/jstests/replsets/rollback3.js +++ b/jstests/replsets/rollback3.js @@ -46,7 +46,7 @@ load("jstests/replsets/rslib.js"); // Make sure we have a master and that that master is node A replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = 
replTest.getPrimary(); var a_conn = conns[0]; a_conn.setSlaveOk(); var A = a_conn.getDB("admin"); diff --git a/jstests/replsets/rollback5.js b/jstests/replsets/rollback5.js index 7159edead5c..c85e86bbe7c 100644 --- a/jstests/replsets/rollback5.js +++ b/jstests/replsets/rollback5.js @@ -22,7 +22,7 @@ var r = replTest.initiate({ "_id": "rollback5", // Make sure we have a master replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var a_conn = conns[0]; var b_conn = conns[1]; a_conn.setSlaveOk(); @@ -46,13 +46,13 @@ var options = { writeConcern: { w: 2, wtimeout: 60000 }, upsert: true }; assert.writeOK(A.foo.update({ key: 'value1' }, { $set: { req: 'req' }}, options)); replTest.stop(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host == master.host); options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true }; assert.writeOK(B.foo.update({key:'value1'}, {$set: {res: 'res'}}, options)); replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host == master.host); options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true }; assert.writeOK(A.foo.update({ key: 'value2' }, { $set: { req: 'req' }}, options)); diff --git a/jstests/replsets/rollback_auth.js b/jstests/replsets/rollback_auth.js index 589f755aaed..4e32e51b49a 100644 --- a/jstests/replsets/rollback_auth.js +++ b/jstests/replsets/rollback_auth.js @@ -40,7 +40,7 @@ // Make sure we have a master replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var a_conn = conns[0]; var b_conn = conns[1]; a_conn.setSlaveOk(); diff --git a/jstests/replsets/rollback_cmd_unrollbackable.js b/jstests/replsets/rollback_cmd_unrollbackable.js index 8cfe12e6523..13cac4e8459 100644 --- a/jstests/replsets/rollback_cmd_unrollbackable.js +++ 
b/jstests/replsets/rollback_cmd_unrollbackable.js @@ -25,7 +25,7 @@ var BID = replTest.getNodeId(b_conn); // get master and do an initial write replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -35,7 +35,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with a non-rollbackworthy command -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -49,7 +49,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js index 2032f9a584b..0500e5f28e3 100644 --- a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js +++ b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js @@ -25,7 +25,7 @@ replTest.initiate({"_id": name, { "_id": 2, "host": nodes[2], arbiterOnly: true}] }); // Get master and do an initial write. -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var a_conn = master; var slaves = replTest.liveNodes.slaves; var b_conn = slaves[0]; @@ -63,7 +63,7 @@ assert.eq(getOptions(a_conn), {flags: 2, // Shut down A and fail over to B. 
replTest.stop(AID); replTest.restart(BID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert.eq(b_conn.host, master.host, "b_conn assumed to be master"); b_conn = master; diff --git a/jstests/replsets/rollback_collMod_fatal.js b/jstests/replsets/rollback_collMod_fatal.js index 62cb22ac20a..03cde3e904c 100644 --- a/jstests/replsets/rollback_collMod_fatal.js +++ b/jstests/replsets/rollback_collMod_fatal.js @@ -26,7 +26,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -37,7 +37,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // do a collMod altering TTL which should cause FATAL when rolled back -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); assert.commandWorked(b_conn.getDB(name).runCommand({collMod: "foo", index: {keyPattern: {x:1}, @@ -46,7 +46,7 @@ assert.commandWorked(b_conn.getDB(name).runCommand({collMod: "foo", // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_different_h.js b/jstests/replsets/rollback_different_h.js index a6350a325da..a9ca7d1aac3 100644 --- a/jstests/replsets/rollback_different_h.js +++ b/jstests/replsets/rollback_different_h.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master 
and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // change the h value of the most recent entry on B -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0]; @@ -62,7 +62,7 @@ assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_dropdb.js b/jstests/replsets/rollback_dropdb.js index 368dbb6ee95..aba4264c561 100644 --- a/jstests/replsets/rollback_dropdb.js +++ b/jstests/replsets/rollback_dropdb.js @@ -26,7 +26,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -36,7 +36,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // drop database which should cause FATAL when rolled back -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn 
assumed to be master"); b_conn.getDB(name).dropDatabase(); assert.eq(0, b_conn.getDB(name).foo.count(), "dropping database failed"); @@ -44,7 +44,7 @@ assert.eq(0, b_conn.getDB(name).foo.count(), "dropping database failed"); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_empty_ns.js b/jstests/replsets/rollback_empty_ns.js index a5c6a92c1be..fea10564b43 100644 --- a/jstests/replsets/rollback_empty_ns.js +++ b/jstests/replsets/rollback_empty_ns.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with an empty ns -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -60,7 +60,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_empty_o.js b/jstests/replsets/rollback_empty_o.js index 
81a84db6eac..73a887ee2d9 100644 --- a/jstests/replsets/rollback_empty_o.js +++ b/jstests/replsets/rollback_empty_o.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with an empty o -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -60,7 +60,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_empty_o2.js b/jstests/replsets/rollback_empty_o2.js index 7e4a22be06d..d705aa999b6 100644 --- a/jstests/replsets/rollback_empty_o2.js +++ b/jstests/replsets/rollback_empty_o2.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ 
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with an empty o2 -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -61,7 +61,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_fake_cmd.js b/jstests/replsets/rollback_fake_cmd.js index 03ae1345790..f4d54a1f146 100644 --- a/jstests/replsets/rollback_fake_cmd.js +++ b/jstests/replsets/rollback_fake_cmd.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with a nonexistent command -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -61,7 +61,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = 
replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_index.js b/jstests/replsets/rollback_index.js index 83e8df5b9a6..67360624a03 100644 --- a/jstests/replsets/rollback_index.js +++ b/jstests/replsets/rollback_index.js @@ -39,7 +39,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -50,7 +50,7 @@ replTest.stop(AID); // Create a unique index that, if not dropped during rollback, would // cause errors when applying operations from the primary. -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -61,7 +61,7 @@ assert.writeError(b_conn.getDB(name).foo.insert({x: 123})); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // Insert a document with the same value for 'x' that should be diff --git a/jstests/replsets/rollback_too_new.js b/jstests/replsets/rollback_too_new.js index c3ff41368ca..6365a3eceeb 100644 --- a/jstests/replsets/rollback_too_new.js +++ b/jstests/replsets/rollback_too_new.js @@ -27,7 +27,7 @@ var CID = replTest.getNodeId(c_conn); // get master and do an initial write - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var options = {writeConcern: {w: 2, wtimeout: 60000}}; 
assert.writeOK(master.getDB(name).foo.insert({x: 1}, options)); diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js index 7111063f38e..d4cea8a51eb 100644 --- a/jstests/replsets/rslib.js +++ b/jstests/replsets/rslib.js @@ -102,7 +102,7 @@ waitForAllMembers = function(master, timeout) { reconfig = function(rs, config, force) { "use strict"; - var admin = rs.getMaster().getDB("admin"); + var admin = rs.getPrimary().getDB("admin"); var e; var master; try { @@ -114,7 +114,7 @@ reconfig = function(rs, config, force) { } } - var master = rs.getMaster().getDB("admin"); + var master = rs.getPrimary().getDB("admin"); waitForAllMembers(master); return master; diff --git a/jstests/replsets/server8070.js b/jstests/replsets/server8070.js index 8d0be219ab3..74f4d43cafa 100644 --- a/jstests/replsets/server8070.js +++ b/jstests/replsets/server8070.js @@ -38,7 +38,7 @@ replSet.initiate( ); // set up common points of access -var master = replSet.getMaster(); +var master = replSet.getPrimary(); var primary = master.getDB("foo"); replSet.nodes[1].setSlaveOk(); replSet.nodes[2].setSlaveOk(); diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js index f8b6dd9510b..c2b26bb09a3 100644 --- a/jstests/replsets/slavedelay1.js +++ b/jstests/replsets/slavedelay1.js @@ -16,7 +16,7 @@ doTest = function( signal ) { replTest.initiate(config); - var master = replTest.getMaster().getDB(name); + var master = replTest.getPrimary().getDB(name); var slaveConns = replTest.liveNodes.slaves; var slaves = []; for (var i in slaveConns) { @@ -88,7 +88,7 @@ doTest = function( signal ) { config.members[3].slaveDelay = 15; reconfig(replTest, config); - master = replTest.getMaster().getDB(name); + master = replTest.getPrimary().getDB(name); assert.soon(function() { return conn.getDB("local").system.replset.findOne().version == config.version; }); diff --git a/jstests/replsets/slavedelay3.js b/jstests/replsets/slavedelay3.js index 920fad2a354..5a19027a4ad 100644 --- 
a/jstests/replsets/slavedelay3.js +++ b/jstests/replsets/slavedelay3.js @@ -11,7 +11,7 @@ config.members[1].slaveDelay = 5; config.members[2].priority = 0; replTest.initiate(config); -var master = replTest.getMaster().getDB(name); +var master = replTest.getPrimary().getDB(name); replTest.awaitReplication(); var slaveConns = replTest.liveNodes.slaves; diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js index 77dd76462df..502f0cf8c4c 100644 --- a/jstests/replsets/stepdown.js +++ b/jstests/replsets/stepdown.js @@ -36,7 +36,7 @@ var replTest = new ReplSetTest({ var nodes = replTest.startSet(); replTest.initiate(); replTest.waitForState(nodes[0], replTest.PRIMARY, 60 * 1000); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); // do a write print("\ndo a write"); @@ -80,7 +80,7 @@ replTest.liveNodes.slaves.forEach(function(slave) { print("\nreset stepped down time"); assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze:0})); -master = replTest.getMaster(); +master = replTest.getPrimary(); print("\nawait"); replTest.awaitSecondaryNodes(90000); @@ -97,7 +97,7 @@ assert.soon(function() { } }, "wait for n0 to be primary", 60000); -master = replTest.getMaster(); +master = replTest.getPrimary(); var firstMaster = master; print("\nmaster is now "+firstMaster); @@ -113,10 +113,10 @@ catch (e) { } print("\nget a master"); -replTest.getMaster(); +replTest.getPrimary(); assert.soon(function() { - var secondMaster = replTest.getMaster(); + var secondMaster = replTest.getPrimary(); return firstMaster.host !== secondMaster.host; }, "making sure " + firstMaster.host + " isn't still master", 60000); @@ -135,7 +135,7 @@ catch (e) { } -master = replTest.getMaster(); +master = replTest.getPrimary(); assert.soon(function() { try { var result = master.getDB("admin").runCommand({replSetGetStatus:1}); @@ -149,7 +149,7 @@ assert.soon(function() { } catch (e) { print("error getting status from master: " + e); - master = 
replTest.getMaster(); + master = replTest.getPrimary(); return false; } }, 'make sure master knows that slave is down before proceeding'); @@ -166,7 +166,7 @@ assert.gte((new Date()) - now, 2750); print("\nsend shutdown command"); -var currentMaster = replTest.getMaster(); +var currentMaster = replTest.getPrimary(); try { printjson(currentMaster.getDB("admin").runCommand({shutdown : 1, force : true})); } diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js index b4464f12c22..9c3cf3d9544 100644 --- a/jstests/replsets/stepdown3.js +++ b/jstests/replsets/stepdown3.js @@ -6,7 +6,7 @@ var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 }); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); // do a write to allow stepping down of the primary; // otherwise, the primary will refuse to step down diff --git a/jstests/replsets/stepdown_wrt_electable.js b/jstests/replsets/stepdown_wrt_electable.js index cb0d2446fe3..365c35f6643 100644 --- a/jstests/replsets/stepdown_wrt_electable.js +++ b/jstests/replsets/stepdown_wrt_electable.js @@ -8,7 +8,7 @@ var c = replTest.getReplSetConfig(); c.members[1].priority = 0; // not electable replTest.initiate(c); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var testDB = master.getDB('test'); var firstPrimary = testDB.isMaster().primary diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js index 878600c9a94..d847127cae3 100644 --- a/jstests/replsets/sync2.js +++ b/jstests/replsets/sync2.js @@ -10,7 +10,7 @@ replTest.initiate({"_id": "sync2", {"_id": 4, host: nodes[4]}] }); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); jsTestLog("Replica set test initialized"); // initial sync @@ -27,7 +27,7 @@ conns[4].disconnect(conns[1]); conns[4].disconnect(conns[3]); assert.soon(function() { - master = replTest.getMaster(); + master = replTest.getPrimary(); return master === conns[0]; 
}, 60 * 1000, "node 0 did not become primary quickly enough"); diff --git a/jstests/replsets/sync_passive.js b/jstests/replsets/sync_passive.js index f73dad062c9..fef16253896 100644 --- a/jstests/replsets/sync_passive.js +++ b/jstests/replsets/sync_passive.js @@ -31,7 +31,7 @@ config.members[2].priority = 0; replTest.initiate(config); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); -var master = replTest.getMaster().getDB("test"); +var master = replTest.getPrimary().getDB("test"); var server0 = master; var server1 = replTest.liveNodes.slaves[0]; @@ -67,7 +67,7 @@ replTest.awaitReplication(60 * 1000); print("add data"); reconnect(server1); -master = replTest.getMaster().getDB("test"); +master = replTest.getPrimary().getDB("test"); for (var i=0;i<1000;i++) { master.bar.insert({x:i}); } diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js index 62ca15e82d8..e4d4ccd50e8 100644 --- a/jstests/replsets/tags2.js +++ b/jstests/replsets/tags2.js @@ -24,7 +24,7 @@ replTest.initiate( conf ); replTest.awaitReplication(); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }})); @@ -33,7 +33,7 @@ conf.settings.getLastErrorModes.backedUp.backup = 3; master.getDB("admin").runCommand( {replSetReconfig: conf} ); replTest.awaitReplication(); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); assert.writeOK(db.foo.insert({ x: 2 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }})); @@ -42,7 +42,7 @@ conf.members[0].priorty = 3; conf.members[2].priorty = 0; master.getDB("admin").runCommand( {replSetReconfig: conf} ); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); assert.writeOK(db.foo.insert({ x: 3 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }})); diff --git a/jstests/replsets/tags_with_reconfig.js 
b/jstests/replsets/tags_with_reconfig.js index 22b2404e009..8f1e01ce176 100644 --- a/jstests/replsets/tags_with_reconfig.js +++ b/jstests/replsets/tags_with_reconfig.js @@ -24,7 +24,7 @@ replTest.initiate( conf ); replTest.awaitReplication(); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); // Insert a document with write concern : anydc @@ -54,7 +54,7 @@ var config = master.getDB("local").system.replset.findOne(); printjson(config); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); // Insert a document with write concern : anydc diff --git a/jstests/replsets/temp_namespace.js b/jstests/replsets/temp_namespace.js index 88a256db87f..3e75ff400e3 100644 --- a/jstests/replsets/temp_namespace.js +++ b/jstests/replsets/temp_namespace.js @@ -13,7 +13,7 @@ replTest.initiate({"_id" : "testSet", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var masterId = replTest.getNodeId(master); diff --git a/jstests/replsets/toostale.js b/jstests/replsets/toostale.js index 9225921daf2..82e6b062d5d 100644 --- a/jstests/replsets/toostale.js +++ b/jstests/replsets/toostale.js @@ -58,7 +58,7 @@ replTest.initiate({_id : name, members : [ {_id : 1, host : host+":"+replTest.ports[1], arbiterOnly : true}, {_id : 2, host : host+":"+replTest.ports[2], priority: 0} ]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var mdb = master.getDB("foo"); diff --git a/jstests/replsets/two_initsync.js b/jstests/replsets/two_initsync.js index 08e440406cb..603625df975 100755 --- a/jstests/replsets/two_initsync.js +++ b/jstests/replsets/two_initsync.js @@ -54,7 +54,7 @@ doTest = function (signal) { return result['ok'] == 1; }); - var a = replTest.getMaster().getDB("two"); + var a = replTest.getPrimary().getDB("two"); for (var i = 0; i < 20000; 
i++) a.coll.insert({ i: i, s: "a b" }); diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js index 2ec634cae9a..2bd57cf1da4 100644 --- a/jstests/sharding/addshard2.js +++ b/jstests/sharding/addshard2.js @@ -13,12 +13,12 @@ var conn2 = MongoRunner.runMongod({useHostname: true}); var rs1 = new ReplSetTest( { "name" : "add_shard2_rs1", nodes : 3 } ); rs1.startSet(); rs1.initiate(); -var master1 = rs1.getMaster(); +var master1 = rs1.getPrimary(); var rs2 = new ReplSetTest( { "name" : "add_shard2_rs2", nodes : 3 } ); rs2.startSet(); rs2.initiate(); -var master2 = rs2.getMaster(); +var master2 = rs2.getPrimary(); // replica set with set name = 'config' var rs3 = new ReplSetTest({ 'name': 'config', nodes: 3 }); @@ -95,7 +95,7 @@ assert.commandFailed(s.admin.runCommand({ addshard: 'dummy:12345' })); // // SERVER-17231 Adding replica set w/ set name = 'config' // -var configReplURI = 'config/' + getHostName() + ':' + rs3.getMaster().port; +var configReplURI = 'config/' + getHostName() + ':' + rs3.getPrimary().port; assert(!s.admin.runCommand({ 'addshard': configReplURI }).ok, 'accepted replica set shard with set name "config"'); @@ -109,7 +109,7 @@ assert(shard, 'shard with name "not_config" not found'); // // SERVER-17232 Try inserting into shard with name 'admin' // -assert(s.admin.runCommand({ 'addshard': 'admin/' + getHostName() + ':' + rs4.getMaster().port}).ok, +assert(s.admin.runCommand({ 'addshard': 'admin/' + getHostName() + ':' + rs4.getPrimary().port}).ok, 'adding replica set with name "admin" should work'); var wRes = s.getDB('test').foo.insert({ x: 1 }); assert(!wRes.hasWriteError() && wRes.nInserted === 1, diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js index aa4ccbfed19..2a66cbc74fe 100644 --- a/jstests/sharding/addshard4.js +++ b/jstests/sharding/addshard4.js @@ -17,7 +17,7 @@ r.initiate(config); //to pre-allocate files on slow systems r.awaitReplication(); -var master = r.getMaster(); +var master = 
r.getPrimary(); var members = config.members.map(function(elem) { return elem.host; }); var shardName = "addshard4/"+members.join(","); @@ -46,7 +46,7 @@ r.initiate(config); // to pre-allocate files on slow systems r.awaitReplication(); -master = r.getMaster(); +master = r.getPrimary(); print("adding shard addshard42"); diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js index e16cd8cf5aa..494c37ec601 100644 --- a/jstests/sharding/auth.js +++ b/jstests/sharding/auth.js @@ -38,7 +38,7 @@ function logout(userObj, thingToUse) { } function getShardName(rsTest) { - var master = rsTest.getMaster(); + var master = rsTest.getPrimary(); var config = master.getDB("local").system.replset.findOne(); var members = config.members.map(function(elem) { return elem.host; }); return config._id+"/"+members.join(","); @@ -118,7 +118,7 @@ d1.stopSet(); d1.startSet({keyFile : "jstests/libs/key1" }); d1.initiate(); -var master = d1.getMaster(); +var master = d1.getPrimary(); print("adding shard w/auth " + shardName); @@ -257,11 +257,11 @@ authutil.asCluster(d1.nodes, "jstests/libs/key1", function() { d1.awaitReplicati authutil.asCluster(d2.nodes, "jstests/libs/key1", function() { d2.awaitReplication(120000); }); // add admin on shard itself, hack to prevent localhost auth bypass -d1.getMaster().getDB(adminUser.db).createUser({user: adminUser.username, +d1.getPrimary().getDB(adminUser.db).createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles}, {w: 3, wtimeout: 60000}); -d2.getMaster().getDB(adminUser.db).createUser({user: adminUser.username, +d2.getPrimary().getDB(adminUser.db).createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles}, {w: 3, wtimeout: 60000}); diff --git a/jstests/sharding/copydb_from_mongos.js b/jstests/sharding/copydb_from_mongos.js index 4ec392f5789..aa6ac16b465 100644 --- a/jstests/sharding/copydb_from_mongos.js +++ b/jstests/sharding/copydb_from_mongos.js @@ -1,3 +1,5 @@ 
+(function() { + var st = new ShardingTest({ shards: 1 }); var testDB = st.s.getDB('test'); @@ -20,3 +22,5 @@ assert.commandFailed(testDB.adminCommand({ copydb: 1, todb: 'test/copy' })); st.stop(); + +})(); diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js index 470174890c4..410e2e4a4f3 100644 --- a/jstests/sharding/count_slaveok.js +++ b/jstests/sharding/count_slaveok.js @@ -1,21 +1,22 @@ // Tests count and distinct using slaveOk. Also tests a scenario querying a set where only one // secondary is up. (function() { +'use strict'; var st = new ShardingTest({ name: "countSlaveOk", shards: 1, mongos: 1, - other: { rs : true, - rs0 : { nodes : 2 } } }); + other: { rs: true, + rs0: { nodes: 2 } } }); var rst = st._rs[0].test; // Insert data into replica set -var conn = new Mongo( st.s.host ) -conn.setLogLevel( 3 ) +var conn = new Mongo(st.s.host); +conn.setLogLevel(3); -var coll = conn.getCollection( "test.countSlaveOk" ) -coll.drop() +var coll = conn.getCollection('test.countSlaveOk'); +coll.drop(); var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < 300; i++ ){ @@ -36,7 +37,7 @@ var primary = rst.getPrimary() var sec = rst.getSecondary() // Data now inserted... stop the master, since only two in set, other will still be secondary -rst.stop(rst.getMaster()); +rst.stop(rst.getPrimary()); printjson( rst.status() ) // Wait for the mongos to recognize the slave diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js index 16d2a921125..b6eeacb8cd1 100644 --- a/jstests/sharding/create_idx_empty_primary.js +++ b/jstests/sharding/create_idx_empty_primary.js @@ -2,15 +2,14 @@ * Test to make sure that the createIndex command gets sent to all shards. 
*/ (function() { -"use strict"; +'use strict'; var st = new ShardingTest({ shards: 2 }); +assert.commandWorked(st.s.adminCommand({ enablesharding: 'test' })); +st.ensurePrimaryShard('test', 'shard0001'); var testDB = st.s.getDB('test'); - -testDB.adminCommand({ enablesharding: 'test' }); -var res = testDB.adminCommand({ movePrimary: 'test', to: 'shard0001' }); -testDB.adminCommand({ shardcollection: 'test.user', key: { _id: 1 }}); +assert.commandWorked(testDB.adminCommand({ shardcollection: 'test.user', key: { _id: 1 }})); // Move only chunk out of primary shard. assert.commandWorked(testDB.adminCommand({ movechunk: 'test.user', diff --git a/jstests/sharding/csrs_upgrade_during_migrate.js b/jstests/sharding/csrs_upgrade_during_migrate.js index 1f0da2dc34d..45177857ea7 100644 --- a/jstests/sharding/csrs_upgrade_during_migrate.js +++ b/jstests/sharding/csrs_upgrade_during_migrate.js @@ -34,10 +34,10 @@ var st; }; var addSlaveDelay = function(rst) { - var conf = rst.getMaster().getDB('local').system.replset.findOne(); + var conf = rst.getPrimary().getDB('local').system.replset.findOne(); conf.version++; var secondaryIndex = 0; - if (conf.members[secondaryIndex].host === rst.getMaster().host) { + if (conf.members[secondaryIndex].host === rst.getPrimary().host) { secondaryIndex = 1; } conf.members[secondaryIndex].priority = 0; diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js index 1ddff72d5d0..9b8500f01d1 100644 --- a/jstests/sharding/diffservers1.js +++ b/jstests/sharding/diffservers1.js @@ -3,8 +3,8 @@ var s = new ShardingTest({ name: "diffservers1", shards: 2 }); assert.eq( 2 , s.config.shards.count() , "server count wrong" ); -assert.eq( 0 , s._shardServers[0].getDB( "config" ).shards.count() , "shouldn't be here" ); -assert.eq( 0 , s._shardServers[1].getDB( "config" ).shards.count() , "shouldn't be here" ); +assert.eq( 0 , s._connections[0].getDB( "config" ).shards.count() , "shouldn't be here" ); +assert.eq( 0 , 
s._connections[1].getDB( "config" ).shards.count() , "shouldn't be here" ); test1 = s.getDB( "test1" ).foo; test1.save( { a : 1 } ); diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js index 8bf3937732d..84c857e644c 100644 --- a/jstests/sharding/features3.js +++ b/jstests/sharding/features3.js @@ -5,12 +5,14 @@ // - Inserts 10k documents and ensures they're evenly distributed // - Verifies a $where query can be killed on multiple DBs // - Tests fsync and fsync+lock permissions on sharded db +(function() { +'use strict'; var s = new ShardingTest({shards: 2, mongos: 1 }); +var dbForTest = s.getDB("test"); +dbForTest.foo.drop(); -var db = s.getDB("test"); // db variable name is required due to startParallelShell() var numDocs = 10000; -db.foo.drop(); // shard test.foo and add a split point s.adminCommand({enablesharding: "test"}); @@ -26,12 +28,12 @@ s.adminCommand({moveChunk: "test.foo", find: {_id: 3}, s.startBalancer(); // insert 10k small documents into the sharded collection -var bulk = db.foo.initializeUnorderedBulkOp(); +var bulk = dbForTest.foo.initializeUnorderedBulkOp(); for (i = 0; i < numDocs; i++) bulk.insert({ _id: i }); assert.writeOK(bulk.execute()); -var x = db.foo.stats(); +var x = dbForTest.foo.stats(); // verify the colleciton has been sharded and documents are evenly distributed assert.eq("test.foo", x.ns, "namespace mismatch"); @@ -42,8 +44,8 @@ assert.eq(numDocs / 2, x.shards.shard0001.count, "count on shard0001"); assert(x.totalIndexSize > 0); // insert one doc into a non-sharded collection -db.bar.insert({x: 1}); -var x = db.bar.stats(); +dbForTest.bar.insert({x: 1}); +var x = dbForTest.bar.stats(); assert.eq(1, x.count, "XXX1"); assert.eq("test.bar", x.ns, "XXX2"); assert(!x.sharded, "XXX3: " + tojson(x)); @@ -62,33 +64,35 @@ var parallelCommand = // fork a parallel shell, but do not wait for it to start print("about to fork new shell at: " + Date()); -var awaitShell = startParallelShell(parallelCommand); +var 
awaitShell = startParallelShell(parallelCommand, s.s.port); print("done forking shell at: " + Date()); // Get all current $where operations function getMine(printInprog) { - var inprog = db.currentOp().inprog; + var inprog = dbForTest.currentOp().inprog; if (printInprog) printjson(inprog); // Find all the where queries - var mine = []; - for (var x=0; x<inprog.length; x++) { + var myProcs = []; + for (var x = 0; x < inprog.length; x++) { if (inprog[x].query && inprog[x].query.filter && inprog[x].query.filter.$where) { - mine.push(inprog[x]); + myProcs.push(inprog[x]); } } - return mine; + return myProcs; } var curOpState = 0; // 0 = not found, 1 = killed var killTime = null; var i = 0; +var mine; assert.soon(function() { // Get all the current operations mine = getMine(true); // SERVER-8794: print all operations + // get curren tops, but only print out operations before we see a $where op has started // mine = getMine(curOpState == 0 && i > 20); i++; @@ -99,7 +103,7 @@ assert.soon(function() { curOpState = 1; // kill all $where mine.forEach(function(z) { - printjson(db.getSisterDB("admin").killOp(z.opid)); + printjson(dbForTest.getSisterDB("admin").killOp(z.opid)); }); killTime = new Date(); } @@ -130,20 +134,22 @@ var end = new Date(); print("elapsed: " + (end.getTime() - start.getTime())); // test fsync command on non-admin db -x = db.runCommand("fsync"); +x = dbForTest.runCommand("fsync"); assert(!x.ok , "fsync on non-admin namespace should fail : " + tojson(x)); assert(x.code == 13, "fsync on non-admin succeeded, but should have failed: " + tojson(x)); // test fsync on admin db -x = db._adminCommand("fsync"); +x = dbForTest._adminCommand("fsync"); assert(x.ok == 1, "fsync failed: " + tojson(x)); if ( x.all.shard0000 > 0 ) { assert(x.numFiles > 0, "fsync failed: " + tojson(x)); } // test fsync+lock on admin db -x = db._adminCommand({"fsync" :1, lock:true}); +x = dbForTest._adminCommand({"fsync" :1, lock:true}); assert(!x.ok, "lock should fail: " + tojson(x)); 
s.stop(); + +})(); diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js index a5c20f51ea5..f7fba0e0f88 100644 --- a/jstests/sharding/group_slaveok.js +++ b/jstests/sharding/group_slaveok.js @@ -1,63 +1,63 @@ // Tests group using slaveOk (function() { +'use strict'; var st = new ShardingTest({ name: "groupSlaveOk", shards: 1, mongos: 1, - other :{ rs : true, - rs0 : { nodes : 2 } } }); + other: { rs: true, + rs0: { nodes: 2 } } }); -var rst = st._rs[0].test +var rst = st._rs[0].test; // Insert data into replica set -var conn = new Mongo( st.s.host ) -conn.setLogLevel( 3 ) +var conn = new Mongo(st.s.host); +conn.setLogLevel(3); -var coll = conn.getCollection( "test.groupSlaveOk" ) -coll.drop() +var coll = conn.getCollection("test.groupSlaveOk"); +coll.drop(); var bulk = coll.initializeUnorderedBulkOp(); -for( var i = 0; i < 300; i++ ){ - bulk.insert( { i : i % 10 } ); +for(var i = 0; i < 300; i++) { + bulk.insert({ i: i % 10 }); } -assert.writeOK( bulk.execute() ); +assert.writeOK(bulk.execute()); -st.printShardingStatus() +st.printShardingStatus(); // Wait for client to update itself and replication to finish -rst.awaitReplication() +rst.awaitReplication(); -var primary = rst.getPrimary() -var sec = rst.getSecondary() +var primary = rst.getPrimary(); +var sec = rst.getSecondary(); // Data now inserted... 
stop the master, since only two in set, other will still be secondary -rst.stop(rst.getMaster()); -printjson( rst.status() ) +rst.stop(rst.getPrimary()); +printjson(rst.status()); // Wait for the mongos to recognize the slave -ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } ) +ReplSetTest.awaitRSClientHosts(conn, sec, { ok: true, secondary: true }); // Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when // master is down -conn.setSlaveOk() +conn.setSlaveOk(); // Should not throw exception, since slaveOk'd -assert.eq( 10, coll.group({ key : { i : true } , - reduce : function( obj, ctx ){ ctx.count += 1 } , - initial : { count : 0 } }).length ) - -try { - - conn.setSlaveOk( false ) - var res = coll.group({ key : { i : true } , - reduce : function( obj, ctx ){ ctx.count += 1 } , - initial : { count : 0 } }); - - print( "Should not reach here! Group result: " + tojson(res) ); - assert( false ); +assert.eq(10, coll.group({ key: { i: true } , + reduce: function(obj, ctx) { ctx.count += 1 }, + initial: { count: 0 } }).length) + +try { + conn.setSlaveOk(false); + var res = coll.group({ key: { i: true }, + reduce: function(obj, ctx) { ctx.count += 1 }, + initial: { count: 0 } }); + + print("Should not reach here! Group result: " + tojson(res)); + assert(false); } -catch( e ){ - print( "Non-slaveOk'd connection failed." + tojson(e) ) +catch(e){ + print("Non-slaveOk'd connection failed." 
+ tojson(e)); } st.stop(); diff --git a/jstests/sharding/mongos_validate_backoff.js b/jstests/sharding/mongos_validate_backoff.js index 877ab808dcc..4faff61698d 100644 --- a/jstests/sharding/mongos_validate_backoff.js +++ b/jstests/sharding/mongos_validate_backoff.js @@ -1,28 +1,27 @@ -// // Ensures that single mongos shard-key errors are fast, but slow down when many are triggered -// +(function() { +'use strict'; -var st = new ShardingTest({ shards : 1, mongos : 1 }) +var st = new ShardingTest({ shards : 1, mongos : 1 }); -var mongos = st.s0 -var admin = mongos.getDB( "admin" ) -var coll = mongos.getCollection( "foo.bar" ) +var mongos = st.s0; +var admin = mongos.getDB("admin"); +var coll = mongos.getCollection("foo.bar"); -printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) ) +assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" })); -coll.ensureIndex({ shardKey : 1 }) -printjson( admin.runCommand({ shardCollection : coll + "", key : { shardKey : 1 } }) ) +coll.ensureIndex({ shardKey : 1 }); +assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { shardKey : 1 } })); -var timeBadInsert = function(){ - - var start = new Date().getTime() +var timeBadInsert = function() { + var start = new Date().getTime(); // Bad insert, no shard key assert.writeError(coll.insert({ hello : "world" })); - var end = new Date().getTime() + var end = new Date().getTime(); - return end - start + return end - start; } // We need to work at least twice in order to check resetting the counter @@ -31,28 +30,31 @@ var success = 0; // Loop over this test a few times, to ensure that the error counters get reset if we don't have // bad inserts over a long enough time. 
-for( var test = 0; test < 5; test++ ){ - - var firstWait = timeBadInsert() - var lastWait = 0 - - for( var i = 0; i < 20; i++ ){ - printjson( lastWait = timeBadInsert() ) +for (var test = 0; test < 5; test++) { + var firstWait = timeBadInsert(); + var lastWait = 0; + + for(var i = 0; i < 20; i++) { + printjson(lastWait = timeBadInsert()); } - // Kind a heuristic test, we want to make sure that the error wait after sleeping is much less - // than the error wait after a lot of errors - if( lastWait > firstWait * 2 * 2 ) success++; // Success! - - if( success >= successNeeded ) break; + // As a heuristic test, we want to make sure that the error wait after sleeping is much less + // than the error wait after a lot of errors. + if (lastWait > firstWait * 2 * 2) { + success++; + } + if (success >= successNeeded) { + break; + } + // Abort if we've failed too many times - assert.lt( test, 4 ); - + assert.lt(test, 4); + // Sleeping for long enough to reset our exponential counter - sleep( 3000 ) + sleep(3000); } -jsTest.log( "DONE!" ) +st.stop(); -st.stop() +})(); diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js index fd143b72ded..fddccfb3fa0 100644 --- a/jstests/sharding/mr_shard_version.js +++ b/jstests/sharding/mr_shard_version.js @@ -36,12 +36,12 @@ var migrateOp = { op : "command", ns : "admin", command : { moveChunk : "" + col var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this ) } var ops = {} -for( var i = 0; i < st._shardServers.length; i++ ){ +for( var i = 0; i < st._connections.length; i++ ){ for( var j = 0; j < 2; j++ ){ ops[ "" + (i * 2 + j) ] = { op : "command", ns : "admin", command : { moveChunk : "" + coll, find : { _id : ( j == 0 ? 
0 : halfId ) }, - to : st._shardServers[i].shardName }, + to : st._connections[i].shardName }, check : checkMigrate }; } } diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js index ea98a8273ee..7b92eb0d1b4 100755 --- a/jstests/sharding/read_pref.js +++ b/jstests/sharding/read_pref.js @@ -17,7 +17,7 @@ var doTest = function(useDollarQuerySyntax) { rs0: { nodes: NODES, oplogSize: 10, useHostName: true } }}); var replTest = st.rs0; - var primaryNode = replTest.getMaster(); + var primaryNode = replTest.getPrimary(); // The $-prefixed query syntax is only legal for compatibility mode reads, not for the // find/getMore commands. diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js index c4efc5bd666..8862103d129 100644 --- a/jstests/sharding/recovering_slaveok.js +++ b/jstests/sharding/recovering_slaveok.js @@ -25,8 +25,8 @@ var collSOk = mongosSOK.getCollection( "" + coll ); var rsA = shardTest._rs[0].test; var rsB = shardTest._rs[1].test; -rsA.getMaster().getDB( "test_a" ).dummy.insert({ x : 1 }); -rsB.getMaster().getDB( "test_b" ).dummy.insert({ x : 1 }); +rsA.getPrimary().getDB( "test_a" ).dummy.insert({ x : 1 }); +rsB.getPrimary().getDB( "test_b" ).dummy.insert({ x : 1 }); rsA.awaitReplication(); rsB.awaitReplication(); diff --git a/jstests/sharding/replset_config/basic_sharding_params.js b/jstests/sharding/replset_config/basic_sharding_params.js index 37a87855711..80a617e9987 100644 --- a/jstests/sharding/replset_config/basic_sharding_params.js +++ b/jstests/sharding/replset_config/basic_sharding_params.js @@ -22,7 +22,7 @@ function shardingTestUsingObjects() { assert.eq( c0, st._configServers[0] ); var d0 = st.d0; - assert.eq( d0, st._shardServers[0] ); + assert.eq( d0, st._connections[0] ); var rs1 = st.rs1; assert.eq( rs1, st._rsObjects[1] ); @@ -57,10 +57,10 @@ function shardingTestUsingArrays() { assert.eq( c0, st._configServers[0] ); var d0 = st.d0; - assert.eq( d0, st._shardServers[0] ); + 
assert.eq( d0, st._connections[0] ); var d1 = st.d1; - assert.eq( d1, st._shardServers[1] ); + assert.eq( d1, st._connections[1] ); assert.contains( "-vvvvv", s0.commandLine ); assert.contains( "-vvvv", s1.commandLine ); diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js index c858933a449..0c8dc5208ba 100644 --- a/jstests/sharding/shard_insert_getlasterror_w2.js +++ b/jstests/sharding/shard_insert_getlasterror_w2.js @@ -29,7 +29,7 @@ var replSet1 = shardingTest.rs0; // Add data to it - var testDBReplSet1 = replSet1.getMaster().getDB(testDBName); + var testDBReplSet1 = replSet1.getPrimary().getDB(testDBName); var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp(); for (var i = 0; i < numDocs; i++) { bulk.insert({ x: i, text: textString }); diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js index 1dfa816c637..e3a2082ba8d 100644 --- a/jstests/sharding/sharding_rs2.js +++ b/jstests/sharding/sharding_rs2.js @@ -47,7 +47,7 @@ catch ( e ){ assert.soon( function(){ try { - printjson( rs.test.getMaster().getDB("admin").runCommand( "isMaster" ) ) + printjson( rs.test.getPrimary().getDB("admin").runCommand( "isMaster" ) ) s.config.shards.find().forEach( printjsononeline ); return countNodes() == 3; } @@ -80,7 +80,7 @@ rs.test.waitForState( rs.test.getSecondaries(), rs.test.SECONDARY, 180 * 1000 ) m = new Mongo( s.s.name ); ts = m.getDB( "test" ).foo -before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +before = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters for ( i=0; i<10; i++ ) assert.eq( 17 , ts.findOne().x , "B1" ) @@ -89,7 +89,7 @@ m.setSlaveOk() for ( i=0; i<10; i++ ) assert.eq( 17 , ts.findOne().x , "B2" ) -after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +after = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters printjson( before ) printjson( after ) @@ -141,14 +141,14 @@ 
assert.commandWorked(s.getDB('admin').runCommand({ moveChunk: "test.foo", _waitForDelete: true })); assert.eq( 100 , t.count() , "C3" ) -assert.eq( 50 , rs.test.getMaster().getDB( "test" ).foo.count() , "C4" ) +assert.eq( 50 , rs.test.getPrimary().getDB( "test" ).foo.count() , "C4" ) // by non-shard key m = new Mongo( s.s.name ); ts = m.getDB( "test" ).foo -before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +before = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters for ( i=0; i<10; i++ ) assert.eq( 17 , ts.findOne( { _id : 5 } ).x , "D1" ) @@ -157,7 +157,7 @@ m.setSlaveOk() for ( i=0; i<10; i++ ) assert.eq( 17 , ts.findOne( { _id : 5 } ).x , "D2" ) -after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +after = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters assert.lte( before.query + 10 , after.query , "D3" ) @@ -170,7 +170,7 @@ db.printShardingStatus() ts = m.getDB( "test" ).foo -before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +before = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters for ( i=0; i<10; i++ ) assert.eq( 57 , ts.findOne( { x : 57 } ).x , "E1" ) @@ -179,7 +179,7 @@ m.setSlaveOk() for ( i=0; i<10; i++ ) assert.eq( 57 , ts.findOne( { x : 57 } ).x , "E2" ) -after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +after = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters assert.lte( before.query + 10 , after.query , "E3" ) diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js index c18b92b93e4..a0cdcd61d67 100644 --- a/jstests/sharding/split_large_key.js +++ b/jstests/sharding/split_large_key.js @@ -1,5 +1,7 @@ // Test for splitting a chunk with a very large shard key value should not be allowed // and does not corrupt the config.chunks metadata. 
+(function() { +'use strict'; function verifyChunk(keys, expectFail) { // If split failed then there's only 1 chunk @@ -14,30 +16,6 @@ function verifyChunk(keys, expectFail) { } } -function runTest(test) { - var collName = "split_large_key_"+test.name; - var midKey = {}; - var chunkKeys = {min: {}, max: {}}; - for (var k in test.key) { - // new Array with join creates string length 1 less than size, so add 1 - midKey[k] = new Array(test.keyFieldSize+1).join('a'); - // min & max keys for each field in the index - chunkKeys.min[k] = MinKey; - chunkKeys.max[k] = MaxKey; - } - configDB.adminCommand({ shardCollection: "test."+collName, key: test.key}); - res = configDB.adminCommand({ split: "test."+collName, middle: midKey}); - if (test.expectFail) { - assert(!res.ok, "Split: "+collName); - assert(res.errmsg !== null, "Split errmsg: "+collName); - } else { - assert(res.ok, "Split: "+collName+" "+res.errmsg); - } - verifyChunk(chunkKeys, test.expectFail); - st.s0.getCollection("test."+collName).drop(); -} - - // Tests // - name: Name of test, used in collection name // - key: key to test @@ -55,11 +33,36 @@ var tests = [ var st = new ShardingTest({ shards: 1 }); var configDB = st.s.getDB('config'); -configDB.adminCommand({ enableSharding: 'test' }); +assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' })); tests.forEach(function(test){ - runTest(test); + var collName = "split_large_key_" + test.name; + var midKey = {}; + var chunkKeys = {min: {}, max: {}}; + for (var k in test.key) { + // new Array with join creates string length 1 less than size, so add 1 + midKey[k] = new Array(test.keyFieldSize+1).join('a'); + // min & max keys for each field in the index + chunkKeys.min[k] = MinKey; + chunkKeys.max[k] = MaxKey; + } + + assert.commandWorked( + configDB.adminCommand({ shardCollection: "test." 
+ collName, key: test.key })); + + var res = configDB.adminCommand({ split: "test."+collName, middle: midKey}); + if (test.expectFail) { + assert(!res.ok, "Split: " + collName); + assert(res.errmsg !== null, "Split errmsg: " + collName); + } else { + assert(res.ok, "Split: " + collName + " " + res.errmsg); + } + + verifyChunk(chunkKeys, test.expectFail); + + st.s0.getCollection("test." + collName).drop(); }); st.stop(); +})(); diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js index bb7db7585c6..d7ac493cc5a 100644 --- a/jstests/sharding/trace_missing_docs_test.js +++ b/jstests/sharding/trace_missing_docs_test.js @@ -1,49 +1,49 @@ - -// // Tests tracing where a document was inserted -// +load('jstests/libs/trace_missing_docs.js'); -load('jstests/libs/trace_missing_docs.js') +(function() { +'use strict'; -var testDocMissing = function( useReplicaSet ) { +var testDocMissing = function(useReplicaSet) { + var options = { rs: useReplicaSet, + shardOptions: { master: "", oplogSize: 10 }, + rsOptions: { nodes: 1, oplogSize: 10 } }; -var options = { rs : useReplicaSet, - shardOptions : { master : "", oplogSize : 10 }, - rsOptions : { nodes : 1, oplogSize : 10 } }; + var st = new ShardingTest({ shards: 2, mongos: 1, other: options }); -var st = new ShardingTest({ shards : 2, mongos : 1, other : options }); + var mongos = st.s0; + var coll = mongos.getCollection("foo.bar"); + var admin = mongos.getDB("admin"); + var shards = mongos.getCollection("config.shards").find().toArray(); -var mongos = st.s0; -var coll = mongos.getCollection( "foo.bar" ); -var admin = mongos.getDB( "admin" ); -var shards = mongos.getCollection( "config.shards" ).find().toArray(); + assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" })); + st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id); -assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok ); -printjson( admin.runCommand({ movePrimary : coll.getDB() + "", 
to : shards[0]._id }) ); -coll.ensureIndex({ sk : 1 }); -assert( admin.runCommand({ shardCollection : coll + "", key : { sk : 1 } }).ok ); + coll.ensureIndex({ sk: 1 }); + assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { sk: 1 } })); -assert.writeOK(coll.insert({ _id : 12345, sk : 67890, hello : "world" })); -assert.writeOK(coll.update({ _id : 12345 }, { $set : { baz : 'biz' } })); -assert.writeOK(coll.update({ sk : 67890 }, { $set : { baz : 'boz' } })); + assert.writeOK(coll.insert({ _id: 12345, sk: 67890, hello: "world" })); + assert.writeOK(coll.update({ _id: 12345 }, { $set: { baz: 'biz' } })); + assert.writeOK(coll.update({ sk: 67890 }, { $set: { baz: 'boz' } })); -assert( admin.runCommand({ moveChunk : coll + "", - find : { sk : 0 }, - to : shards[1]._id, - _waitForDelete : true }).ok ); + assert.commandWorked(admin.runCommand({ moveChunk: coll + "", + find: { sk: 0 }, + to: shards[1]._id, + _waitForDelete: true })); -st.printShardingStatus(); + st.printShardingStatus(); -var ops = traceMissingDoc( coll, { _id : 12345, sk : 67890 } ); + var ops = traceMissingDoc(coll, { _id: 12345, sk: 67890 }); -assert.eq( ops[0].op, 'i' ); -assert.eq( ops.length, 5 ); + assert.eq(ops[0].op, 'i'); + assert.eq(ops.length, 5); -jsTest.log( "DONE! " + ( useReplicaSet ? "(using rs)" : "(using master/slave)" ) ); + jsTest.log("DONE! " + (useReplicaSet ? 
"(using rs)": "(using master/slave)")); -st.stop(); + st.stop(); +}; -} +testDocMissing(true); +testDocMissing(false); -testDocMissing( true ); -testDocMissing( false ); +})(); diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js index 441b190de73..0bf8c5892b5 100644 --- a/jstests/sharding/version2.js +++ b/jstests/sharding/version2.js @@ -1,17 +1,17 @@ (function() { +'use strict'; var s = new ShardingTest({ name: "version2", shards: 1 }); -s.adminCommand( { enablesharding : "alleyinsider" } ); -s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } ); -s.adminCommand( { shardcollection : "alleyinsider.bar" , key : { num : 1 } } ); +assert.commandWorked(s.s0.adminCommand({ enablesharding: "alleyinsider" })); +assert.commandWorked(s.s0.adminCommand({ shardcollection: "alleyinsider.foo", key: { num: 1 } })); +assert.commandWorked(s.s0.adminCommand({ shardcollection: "alleyinsider.bar", key: { num: 1 } })); -a = s._connections[0].getDB( "admin" ); +var a = s._connections[0].getDB("admin"); -// setup from one client - -assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i, 0 ); -assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i, 0 ); +// Setup from one client +assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.i, 0); +assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.i, 0); var fooEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.foo' }).lastmodEpoch; assert.commandWorked( @@ -25,42 +25,41 @@ assert.commandWorked( shardHost: s.s.host, })); -printjson( s.config.chunks.findOne() ); - -assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.t, 1 ); -assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.t, 1 ); 
+printjson(s.config.chunks.findOne()); -// from another client +assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.t, 1); +assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.t, 1); -a2 = connect( s._connections[0].name + "/admin" ); +// From a different client +var a2 = connect(s._connections[0].name + "/admin"); -assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.t , 1 , "a2 global 1" ); -assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i , 0 , "a2 mine 1" ); +assert.eq(a2.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.t, 1, "a2 global 1"); +assert.eq(a2.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.i, 0, "a2 mine 1"); function simpleFindOne(){ - return a2.getMongo().getDB( "alleyinsider" ).foo.findOne(); + return a2.getMongo().getDB("alleyinsider").foo.findOne(); } var barEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.bar' }).lastmodEpoch; -assert.commandWorked( a2.runCommand({ setShardVersion: "alleyinsider.bar", +assert.commandWorked(a2.runCommand({ setShardVersion: "alleyinsider.bar", configdb: s._configDB, version: new Timestamp(1, 0), versionEpoch: barEpoch, shard: 'shard0000', authoritative: true }), - "setShardVersion bar temp" ); + "setShardVersion bar temp"); -assert.throws( simpleFindOne , [] , "should complain about not in sharded mode 1" ); +assert.throws(simpleFindOne, [], "should complain about not in sharded mode 1"); // the only way that setSharVersion passes is if the shard agrees with the version // the shard takes its version from config directly // TODO bump timestamps in config -// assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 , "setShardVersion a2-1"); +// assert(a2.runCommand({ "setShardVersion": 
"alleyinsider.foo", configdb: s._configDB, version: 2 }).ok == 1, "setShardVersion a2-1"); // simpleFindOne(); // now should run ok -// assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).ok == 1 , "setShardVersion a2-2"); +// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version: 3 }).ok == 1, "setShardVersion a2-2"); // simpleFindOne(); // newer version is ok diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js index 799c36cc3aa..dc8abc71597 100644 --- a/jstests/sharding/write_cmd_auto_split.js +++ b/jstests/sharding/write_cmd_auto_split.js @@ -1,13 +1,14 @@ /** * Tests the auto split will be triggered when using write commands. */ +(function() { +'use strict'; var st = new ShardingTest({ shards: 1, other: { chunkSize: 1 }}); -st.stopBalancer(); var configDB = st.s.getDB('config'); -configDB.adminCommand({ enableSharding: 'test' }); -configDB.adminCommand({ shardCollection: 'test.insert', key: { x: 1 }}); +assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' })); +assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.insert', key: { x: 1 }})); var doc1k = (new Array(1024)).join('x'); var testDB = st.s.getDB('test'); @@ -151,3 +152,4 @@ assert.eq(1, configDB.chunks.find().itcount()); st.stop(); +})(); diff --git a/jstests/sharding/write_commands_sharding_state.js b/jstests/sharding/write_commands_sharding_state.js index d3d1a4b788a..7d0991870eb 100644 --- a/jstests/sharding/write_commands_sharding_state.js +++ b/jstests/sharding/write_commands_sharding_state.js @@ -3,11 +3,9 @@ // @tags: [requires_persistence]
(function() {
-
'use strict';
var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2 });
-st.stopBalancer();
var dbTestName = 'WriteCommandsTestDB';
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js index 9a63268fac2..d10e81e2655 100644 --- a/jstests/sharding/zbigMapReduce.js +++ b/jstests/sharding/zbigMapReduce.js @@ -1,35 +1,31 @@ // This test is skipped on 32-bit platforms function setupTest() { - - s = new ShardingTest( { shards : 2, - verbose : 1, - mongos : 1, - other: { rs: true, - numReplicas: 2, - chunkSize: 1, - rsOptions: { oplogSize : 50 }, - enableBalancer : 1 - } } ); - - // reduce chunk size to split + var s = new ShardingTest({ shards: 2, + mongos: 1, + other: { rs: true, + numReplicas: 2, + chunkSize: 1, + rsOptions: { oplogSize: 50 }, + enableBalancer: 1 } }); + + // Reduce chunk size to split var config = s.getDB("config"); config.settings.save({_id: "chunksize", value: 1}); - s.adminCommand( { enablesharding : "test" } ) + assert.commandWorked(s.s0.adminCommand({ enablesharding: "test" })); s.ensurePrimaryShard('test', 'test-rs0'); - s.adminCommand( { shardcollection : "test.foo", key : { "_id" : 1 } } ) + assert.commandWorked(s.s0.adminCommand({ shardcollection: "test.foo", key: { "_id": 1 } })); return s; } - function runTest(s) { - jsTest.log( "Inserting a lot of documents into test.foo" ) - db = s.getDB( "test" ); + jsTest.log("Inserting a lot of documents into test.foo"); + db = s.getDB("test"); var idInc = 0; var valInc = 0; - var str="" + var str = ""; if (db.serverBuildInfo().bits == 32) { // Make data ~0.5MB for 32 bit builds @@ -42,36 +38,43 @@ function runTest(s) { var bulk = db.foo.initializeUnorderedBulkOp(); for (j=0; j<100; j++) { - for (i=0; i<512; i++){ + for (i=0; i<512; i++) { bulk.insert({ i: idInc++, val: valInc++, y:str }); } } assert.writeOK(bulk.execute()); - jsTest.log( "Documents inserted, waiting for error..." ) - jsTest.log( "Doing double-checks of insert..." 
) + jsTest.log("Documents inserted, doing double-checks of insert..."); // Collect some useful stats to figure out what happened - if( db.foo.find().itcount() != 51200 ){ - sleep( 1000 ) + if (db.foo.find().itcount() != 51200) { + sleep(1000); s.printShardingStatus(true); - print( "Shard 0: " + s.shard0.getCollection( db.foo + "" ).find().itcount() ) - print( "Shard 1: " + s.shard1.getCollection( db.foo + "" ).find().itcount() ) + print("Shard 0: " + s.shard0.getCollection(db.foo + "").find().itcount()); + print("Shard 1: " + s.shard1.getCollection(db.foo + "").find().itcount()); - for( var i = 0; i < 51200; i++ ){ - if( ! db.foo.findOne({ i : i }, { i : 1 }) ){ - print( "Could not find: " + i ) + for (var i = 0; i < 51200; i++) { + if(!db.foo.findOne({ i: i }, { i: 1 })) { + print("Could not find: " + i); } - if( i % 100 == 0 ) print( "Checked " + i ) + + if(i % 100 == 0) print("Checked " + i); } - print( "PROBABLY WILL ASSERT NOW" ) + print("PROBABLY WILL ASSERT NOW"); } - assert.soon( function(){ var c = db.foo.find().itcount(); print( "Count is " + c ); return c == 51200 } ) - //assert.eq( 51200, db.foo.find().itcount(), "Not all data was saved!" ) + assert.soon(function() { + var c = db.foo.find().itcount(); + if (c == 51200) { + return true; + } + + print("Count is " + c); + return false; + }); s.printChunks(); s.printChangeLog(); @@ -79,32 +82,29 @@ function runTest(s) { function map() { emit('count', 1); } function reduce(key, values) { return Array.sum(values) } - jsTest.log( "Test basic mapreduce..." ) + jsTest.log("Test basic mapreduce..."); // Test basic mapReduce - for ( iter=0; iter<5; iter++ ){ - - print( "Test #" + iter ) - + for (var iter = 0; iter < 5; iter++) { + print("Test #" + iter); out = db.foo.mapReduce(map, reduce,"big_out") } - print( "Testing output to different db..." 
) + print("Testing output to different db...") // test output to a different DB // do it multiple times so that primary shard changes for (iter = 0; iter < 5; iter++) { + print("Test #" + iter); - print( "Test #" + iter ) - - assert.eq( 51200, db.foo.find().itcount(), "Not all data was found!" ) + assert.eq(51200, db.foo.find().itcount(), "Not all data was found!"); outCollStr = "mr_replace_col_" + iter; outDbStr = "mr_db_" + iter; print("Testing mr replace into DB " + iter) - res = db.foo.mapReduce( map , reduce , { out : { replace: outCollStr, db: outDbStr } } ) + res = db.foo.mapReduce(map , reduce , { out: { replace: outCollStr, db: outDbStr } }) printjson(res); outDb = s.getDB(outDbStr); @@ -112,116 +112,115 @@ function runTest(s) { obj = outColl.convertToSingleObject("value"); - assert.eq( 51200 , obj.count , "Received wrong result " + obj.count ); + assert.eq(51200 , obj.count , "Received wrong result " + obj.count); print("checking result field"); assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection); assert.eq(res.result.db, outDbStr, "Wrong db " + res.result.db); } - jsTest.log( "Verifying nonatomic M/R throws..." 
) + jsTest.log("Verifying nonatomic M/R throws...") // check nonAtomic output - assert.throws(function() { db.foo.mapReduce(map, reduce,{out: {replace: "big_out", nonAtomic: true}})}); + assert.throws(function() { + db.foo.mapReduce(map, reduce, { out: {replace: "big_out", nonAtomic: true } }); + }); - jsTest.log( ) + jsTest.log(); - // add docs with dup "i" + // Add docs with dup "i" valInc = 0; - for (j=0; j<100; j++){ - print( "Inserted document: " + (j * 100) ); + for (j=0; j<100; j++) { + print("Inserted document: " + (j * 100)); bulk = db.foo.initializeUnorderedBulkOp(); - for (i=0; i<512; i++){ - bulk.insert({ i : idInc++, val: valInc++, y: str }); + for (i=0; i<512; i++) { + bulk.insert({ i: idInc++, val: valInc++, y: str }); } // wait for replication to catch up assert.writeOK(bulk.execute({ w: 2 })); } - jsTest.log( "No errors..." ); + jsTest.log("No errors..."); map2 = function() { emit(this.val, 1); } reduce2 = function(key, values) { return Array.sum(values); } - // test merge + // Test merge outcol = "big_out_merge"; - jsTestLog( "Test A" ) - - // mr quarter of the docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$lt: 25600}}, out: {merge: outcol}}); + // M/R quarter of the docs + jsTestLog("Test A"); + out = db.foo.mapReduce(map2, reduce2, { query: {i: {$lt: 25600} }, out: { merge: outcol } }); printjson(out); - assert.eq( 25600 , out.counts.emit , "Received wrong result" ); - assert.eq( 25600 , out.counts.output , "Received wrong result" ); + assert.eq(25600 , out.counts.emit , "Received wrong result"); + assert.eq(25600 , out.counts.output , "Received wrong result"); - jsTestLog( "Test B" ) - - // mr further docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$gte: 25600, $lt: 51200}}, out: {merge: outcol}}); + // M/R further docs + jsTestLog("Test B"); + out = db.foo.mapReduce( + map2, reduce2, { query: {i: {$gte: 25600, $lt: 51200} }, out: { merge: outcol } }); printjson(out); - assert.eq( 25600 , out.counts.emit , "Received 
wrong result" ); - assert.eq( 51200 , out.counts.output , "Received wrong result" ); - - jsTestLog( "Test C" ) + assert.eq(25600 , out.counts.emit , "Received wrong result"); + assert.eq(51200 , out.counts.output , "Received wrong result"); - // do 2nd half of docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$gte: 51200}}, out: {merge: outcol, nonAtomic: true}}); + // M/R do 2nd half of docs + jsTestLog("Test C"); + out = db.foo.mapReduce( + map2, reduce2, { query: {i: {$gte: 51200} }, out: { merge: outcol, nonAtomic: true } }); printjson(out); - assert.eq( 51200 , out.counts.emit , "Received wrong result" ); - assert.eq( 51200 , out.counts.output , "Received wrong result" ); - assert.eq( 1 , db[outcol].findOne().value , "Received wrong result" ); - - jsTestLog( "Test D" ) + assert.eq(51200 , out.counts.emit , "Received wrong result"); + assert.eq(51200 , out.counts.output , "Received wrong result"); + assert.eq(1 , db[outcol].findOne().value , "Received wrong result"); - // test reduce + // Test reduce + jsTestLog("Test D"); outcol = "big_out_reduce"; - // mr quarter of the docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$lt: 25600}}, out: {reduce: outcol}}); + // M/R quarter of the docs + out = db.foo.mapReduce(map2, reduce2,{ query: { i: { $lt: 25600 } }, out: { reduce: outcol } }); printjson(out); - assert.eq( 25600 , out.counts.emit , "Received wrong result" ); - assert.eq( 25600 , out.counts.output , "Received wrong result" ); + assert.eq(25600 , out.counts.emit , "Received wrong result"); + assert.eq(25600 , out.counts.output , "Received wrong result"); - jsTestLog( "Test E" ) - - // mr further docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$gte: 25600, $lt: 51200}}, out: {reduce: outcol}}); + // M/R further docs + jsTestLog("Test E"); + out = db.foo.mapReduce( + map2, reduce2, { query: { i: { $gte: 25600, $lt: 51200 } }, out: { reduce: outcol } }); printjson(out); - assert.eq( 25600 , out.counts.emit , "Received wrong 
result" ); - assert.eq( 51200 , out.counts.output , "Received wrong result" ); - - jsTestLog( "Test F" ) + assert.eq(25600 , out.counts.emit , "Received wrong result"); + assert.eq(51200 , out.counts.output , "Received wrong result"); - // do 2nd half of docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$gte: 51200}}, out: {reduce: outcol, nonAtomic: true}}); + // M/R do 2nd half of docs + jsTestLog("Test F"); + out = db.foo.mapReduce( + map2, reduce2, { query: { i: {$gte: 51200} }, out: { reduce: outcol, nonAtomic: true } }); printjson(out); - assert.eq( 51200 , out.counts.emit , "Received wrong result" ); - assert.eq( 51200 , out.counts.output , "Received wrong result" ); - assert.eq( 2 , db[outcol].findOne().value , "Received wrong result" ); + assert.eq(51200 , out.counts.emit , "Received wrong result"); + assert.eq(51200 , out.counts.output , "Received wrong result"); + assert.eq(2 , db[outcol].findOne().value , "Received wrong result"); - jsTestLog( "Test G" ) + // Verify that data is also on secondary + jsTestLog("Test G"); + var primary = s._rs[0].test.liveNodes.master; + var secondaries = s._rs[0].test.liveNodes.slaves; - // verify that data is also on secondary - var primary = s._rs[0].test.liveNodes.master - var secondaries = s._rs[0].test.liveNodes.slaves // Stop the balancer to prevent new writes from happening and make sure // that replication can keep up even on slow machines. 
s.stopBalancer(); s._rs[0].test.awaitReplication(300 * 1000); - assert.eq( 51200 , primary.getDB("test")[outcol].count() , "Wrong count" ); + assert.eq(51200 , primary.getDB("test")[outcol].count() , "Wrong count"); for (var i = 0; i < secondaries.length; ++i) { - assert.eq( 51200 , secondaries[i].getDB("test")[outcol].count() , "Wrong count" ); + assert.eq(51200 , secondaries[i].getDB("test")[outcol].count() , "Wrong count"); } - - jsTestLog( "DONE" ) - } var s = setupTest(); -if (s.getDB( "admin" ).runCommand( "buildInfo" ).bits < 64) { + +if (s.getDB("admin").runCommand("buildInfo").bits < 64) { print("Skipping test on 32-bit platforms"); } else { runTest(s); } -s.stop() + +s.stop(); diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js index 8bfd871450f..20fae7ac522 100644 --- a/jstests/sharding/zero_shard_version.js +++ b/jstests/sharding/zero_shard_version.js @@ -8,9 +8,9 @@ var st = new ShardingTest({ shards: 2, mongos: 4 }); var testDB_s0 = st.s.getDB('test'); -testDB_s0.adminCommand({ enableSharding: 'test' }); +assert.commandWorked(testDB_s0.adminCommand({ enableSharding: 'test' })); st.ensurePrimaryShard('test', 'shard0001'); -testDB_s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}); +assert.commandWorked(testDB_s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }})); var checkShardMajorVersion = function(conn, expectedVersion) { var shardVersionInfo = conn.adminCommand({ getShardVersion: 'test.user' }); diff --git a/jstests/slow1/replsets_priority1.js b/jstests/slow1/replsets_priority1.js index c9ec08fd78f..614c6b7cec7 100644 --- a/jstests/slow1/replsets_priority1.js +++ b/jstests/slow1/replsets_priority1.js @@ -9,7 +9,7 @@ var rs = new ReplSetTest( {name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}} var nodes = rs.startSet(); rs.initiate(); -var master = rs.getMaster(); +var master = rs.getPrimary(); var everyoneOkSoon = function() { var status; @@ -133,7 +133,7 @@ for (i=0; i<n; i++) { 
try { master.adminCommand({replSetReconfig : config}); - master = rs.getMaster(); + master = rs.getPrimary(); reconnect(master); version = master.getDB("local").system.replset.findOne().version; @@ -148,7 +148,7 @@ for (i=0; i<n; i++) { print("\nreplsets_priority1.js wait for 2 slaves"); assert.soon(function() { - rs.getMaster(); + rs.getPrimary(); return rs.liveNodes.slaves.length == 2; }, "2 slaves"); @@ -177,7 +177,7 @@ for (i=0; i<n; i++) { rs.stop(max._id); - var master = rs.getMaster(); + var master = rs.getPrimary(); print("\nkilled max primary. Checking statuses."); @@ -187,7 +187,7 @@ for (i=0; i<n; i++) { print("restart max " + max._id); rs.restart(max._id); - master = rs.getMaster(); + master = rs.getPrimary(); print("max restarted. Checking statuses."); checkPrimaryIs(max); diff --git a/jstests/slow2/mr_during_migrate.js b/jstests/slow2/mr_during_migrate.js index 78e48eb9db8..ae0ae7ce0fe 100644 --- a/jstests/slow2/mr_during_migrate.js +++ b/jstests/slow2/mr_during_migrate.js @@ -40,14 +40,14 @@ var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this // Creates a number of migrations of random chunks to diff shard servers var ops = [] -for(var i = 0; i < st._shardServers.length; i++) { +for(var i = 0; i < st._connections.length; i++) { ops.push({ op: "command", ns: "admin", command: { moveChunk: "" + coll, find: { _id: { "#RAND_INT" : [ 0, numDocs ] }}, - to: st._shardServers[i].shardName, + to: st._connections[i].shardName, _waitForDelete: true }, showResult: true diff --git a/jstests/slow2/replsets_killop.js b/jstests/slow2/replsets_killop.js index a5c708ae1c5..603e1f9c63e 100644 --- a/jstests/slow2/replsets_killop.js +++ b/jstests/slow2/replsets_killop.js @@ -7,7 +7,7 @@ numDocs = 1e5; replTest = new ReplSetTest( { name:'test', nodes:3 } ); nodes = replTest.startSet(); replTest.initiate(); -primary = replTest.getMaster(); +primary = replTest.getPrimary(); secondary = replTest.getSecondary(); db = primary.getDB( 'test' ); 
db.test.save( { a:0 } ); diff --git a/jstests/slow2/rollback4.js b/jstests/slow2/rollback4.js index 77f4dbd06c8..e77a8c59f1e 100644 --- a/jstests/slow2/rollback4.js +++ b/jstests/slow2/rollback4.js @@ -19,7 +19,7 @@ var r = replTest.initiate({ "_id": "unicomplex", replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // Make sure we have a master -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var b_conn = conns[1]; b_conn.setSlaveOk(); var B = b_conn.getDB("admin"); @@ -50,7 +50,7 @@ replTest.stop( 0 ); // after the node reports that it is primary via heartbeats, but before ismaster indicates that the // node will accept writes. replTest.waitForState(conns[1], replTest.PRIMARY, 5 * 60 * 1000); -master = replTest.getMaster(5 * 60 * 1000); +master = replTest.getPrimary(5 * 60 * 1000); // Save to new master, forcing rollback of old master master.getDB( 'db' ).c.save( big ); diff --git a/jstests/ssl/initial_sync1_x509.js b/jstests/ssl/initial_sync1_x509.js index f767dba0dde..9674056eba9 100644 --- a/jstests/ssl/initial_sync1_x509.js +++ b/jstests/ssl/initial_sync1_x509.js @@ -16,7 +16,7 @@ function runInitialSyncTest() { var conns = replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var foo = master.getDB("foo"); var admin = master.getDB("admin"); @@ -40,7 +40,7 @@ function runInitialSyncTest() { replTest.awaitReplication(); print("5. 
Insert some stuff"); - master = replTest.getMaster(); + master = replTest.getPrimary(); bulk = foo.bar.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" }); diff --git a/jstests/ssl/upgrade_to_ssl.js b/jstests/ssl/upgrade_to_ssl.js index 802e99d9eeb..0c4a2caf222 100644 --- a/jstests/ssl/upgrade_to_ssl.js +++ b/jstests/ssl/upgrade_to_ssl.js @@ -19,14 +19,14 @@ var rst = new ReplSetTest({ name: 'sslSet', nodes: 3, nodeOptions : opts }); rst.startSet(); rst.initiate(); -var rstConn1 = rst.getMaster(); +var rstConn1 = rst.getPrimary(); rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"}); assert.eq(1, rstConn1.getDB("test").a.count(), "Error interacting with replSet"); print("===== UPGRADE allowSSL -> preferSSL ====="); opts.sslMode = "preferSSL"; rst.upgradeSet(opts); -var rstConn2 = rst.getMaster(); +var rstConn2 = rst.getPrimary(); rstConn2.getDB("test").a.insert({a:2, str:"CHECKCHECK"}); assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet"); @@ -37,7 +37,7 @@ assert.eq(0, canConnectNoSSL, "non-SSL Connection attempt failed when it should print("===== UPGRADE preferSSL -> requireSSL ====="); opts.sslMode = "requireSSL"; rst.upgradeSet(opts); -var rstConn3 = rst.getMaster(); +var rstConn3 = rst.getPrimary(); rstConn3.getDB("test").a.insert({a:3, str:"GREENEGGSANDHAM"}); assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet"); diff --git a/jstests/ssl/upgrade_to_x509_ssl.js b/jstests/ssl/upgrade_to_x509_ssl.js index a7f8b571d29..2fef4e3c149 100644 --- a/jstests/ssl/upgrade_to_x509_ssl.js +++ b/jstests/ssl/upgrade_to_x509_ssl.js @@ -26,7 +26,7 @@ rst.startSet(); rst.initiate(); // Connect to master and do some basic operations -var rstConn1 = rst.getMaster(); +var rstConn1 = rst.getPrimary(); print("Performing basic operations on master."); rstConn1.getDB("admin").createUser({user:"root", pwd:"pwd", roles:["root"]}, {w: 
NUM_NODES}); rstConn1.getDB("admin").auth("root", "pwd"); @@ -43,7 +43,7 @@ rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT}, "root", "pwd"); // The upgradeSet call restarts the nodes so we need to reauthenticate. authAllNodes(); -var rstConn3 = rst.getMaster(); +var rstConn3 = rst.getPrimary(); rstConn3.getDB("test").a.insert({a:3, str:"TESTTESTTEST"}); assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet"); rst.awaitReplication(); @@ -57,6 +57,6 @@ rst.upgradeSet({sslMode:"requireSSL", sslPEMKeyFile: SERVER_CERT, clusterAuthMode:"x509", keyFile: KEYFILE, sslCAFile: CA_CERT}, "root", "pwd"); authAllNodes(); -var rstConn4 = rst.getMaster(); +var rstConn4 = rst.getPrimary(); rstConn4.getDB("test").a.insert({a:4, str:"TESTTESTTEST"}); assert.eq(4, rstConn4.getDB("test").a.count(), "Error interacting with replSet"); diff --git a/jstests/sslSpecial/upgrade_to_ssl_nossl.js b/jstests/sslSpecial/upgrade_to_ssl_nossl.js index 1f5f0002d4e..53f7bd77fb9 100644 --- a/jstests/sslSpecial/upgrade_to_ssl_nossl.js +++ b/jstests/sslSpecial/upgrade_to_ssl_nossl.js @@ -13,19 +13,19 @@ var rst = new ReplSetTest({ name: 'sslSet', nodes: 3, nodeOptions : {sslMode:"di rst.startSet(); rst.initiate(); -var rstConn1 = rst.getMaster(); +var rstConn1 = rst.getPrimary(); rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"}); assert.eq(1, rstConn1.getDB("test").a.count(), "Error interacting with replSet"); print("===== UPGRADE disabled -> allowSSL ====="); rst.upgradeSet({sslMode:"allowSSL", sslPEMKeyFile: SERVER_CERT, sslAllowInvalidCertificates:""}); -var rstConn2 = rst.getMaster(); +var rstConn2 = rst.getPrimary(); rstConn2.getDB("test").a.insert({a:2, str:"TESTTESTTEST"}); assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet"); print("===== UPGRADE allowSSL -> preferSSL ====="); rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT, sslAllowInvalidCertificates:""}); -var rstConn3 = 
rst.getMaster(); +var rstConn3 = rst.getPrimary(); rstConn3.getDB("test").a.insert({a:3, str:"TESTTESTTEST"}); assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet"); diff --git a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js index 72bcc9fc76d..74aae02a896 100644 --- a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js +++ b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js @@ -23,7 +23,7 @@ rst.startSet(); rst.initiate(); // Connect to master and do some basic operations -var rstConn1 = rst.getMaster(); +var rstConn1 = rst.getPrimary(); rstConn1.getDB("admin").createUser({user: "root", pwd: "pwd", roles: ["root"]}, {w: NUM_NODES}); rstConn1.getDB("admin").auth("root", "pwd"); rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"}); @@ -38,7 +38,7 @@ rst.upgradeSet({sslMode:"allowSSL", sslPEMKeyFile: SERVER_CERT, authAllNodes(); rst.awaitReplication(); -var rstConn2 = rst.getMaster(); +var rstConn2 = rst.getPrimary(); rstConn2.getDB("test").a.insert({a:2, str:"CHECKCHECKCHECK"}); assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet"); @@ -50,7 +50,7 @@ rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT, authAllNodes(); rst.awaitReplication(); -var rstConn3 = rst.getMaster(); +var rstConn3 = rst.getPrimary(); rstConn3.getDB("test").a.insert({a:3, str:"PEASandCARROTS"}); assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet"); @@ -67,7 +67,7 @@ rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT}, "root", "pwd"); authAllNodes(); rst.awaitReplication(); -var rstConn4 = rst.getMaster(); +var rstConn4 = rst.getPrimary(); rstConn4.getDB("test").a.insert({a:4, str:"BEEP BOOP"}); rst.awaitReplication(); assert.eq(4, rstConn4.getDB("test").a.count(), "Error interacting with replSet"); diff --git a/jstests/tool/dumprestore10.js b/jstests/tool/dumprestore10.js index 858032827a7..6cf3cbbbfa1 100644 --- 
a/jstests/tool/dumprestore10.js +++ b/jstests/tool/dumprestore10.js @@ -13,7 +13,7 @@ step(); var replTest = new ReplSetTest( {name: name, nodes: 2} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var total = 1000; { diff --git a/jstests/tool/dumprestore7.js b/jstests/tool/dumprestore7.js index 04414bf85a8..9a7d09665ef 100644 --- a/jstests/tool/dumprestore7.js +++ b/jstests/tool/dumprestore7.js @@ -11,7 +11,7 @@ step(); var replTest = new ReplSetTest( {name: name, nodes: 1} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); { step("first chunk of data"); @@ -24,7 +24,7 @@ var master = replTest.getMaster(); { step("wait"); replTest.awaitReplication(); - var time = replTest.getMaster().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next(); + var time = replTest.getPrimary().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next(); step(time.ts.t); } diff --git a/jstests/tool/dumpsecondary.js b/jstests/tool/dumpsecondary.js index 00f166dcf4c..7a641542498 100644 --- a/jstests/tool/dumpsecondary.js +++ b/jstests/tool/dumpsecondary.js @@ -3,7 +3,7 @@ var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); db = master.getDB("foo") db.foo.save({a: 1000}); replTest.awaitReplication(); diff --git a/jstests/tool/tool_replset.js b/jstests/tool/tool_replset.js index b5e8059045d..af5c7981482 100644 --- a/jstests/tool/tool_replset.js +++ b/jstests/tool/tool_replset.js @@ -23,7 +23,7 @@ config.members[0].priority = 3; config.members[1].priority = 0; replTest.initiate(config); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); assert.eq(nodes[0], master, "incorrect master elected"); for (var i = 0; i < 100; i++) 
{ assert.writeOK(master.getDB("foo").bar.insert({ a: i })); diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js index 8a1c8df7714..5695fe4c964 100644 --- a/src/mongo/shell/replsettest.js +++ b/src/mongo/shell/replsettest.js @@ -63,1132 +63,1186 @@ * numNodes {number} - number of nodes * nodes {Array.<Mongo>} - connection to replica set members */ -ReplSetTest = function(opts) { - this.name = opts.name || "testReplSet"; - this.useHostName = opts.useHostName == undefined ? true : opts.useHostName; - this.host = this.useHostName ? (opts.host || getHostName()) : 'localhost'; - this.oplogSize = opts.oplogSize || 40; - this.useSeedList = opts.useSeedList || false; - this.keyFile = opts.keyFile; - this.shardSvr = opts.shardSvr || false; - this.protocolVersion = opts.protocolVersion; - this.useBridge = opts.useBridge || false; - this.bridgeOptions = opts.bridgeOptions || {}; - this.configSettings = opts.settings || false; +var ReplSetTest = function(opts) { + 'use strict'; - this.nodeOptions = {}; + if (!(this instanceof ReplSetTest)) { + return new ReplSetTest(opts); + } - var i; - if (isObject(opts.nodes )) { - var len = 0; + // Capture the 'this' reference + var self = this; - for(i in opts.nodes) { - var options = this.nodeOptions[ "n" + len ] = Object.merge(opts.nodeOptions, - opts.nodes[i]); - if( i.startsWith( "a" ) ) { - options.arbiter = true; + // Replica set health states + var Health = { UP: 1, DOWN: 0 }; + + var _alldbpaths; + var _configSettings; + + // mongobridge related variables. Only available if the bridge option is selected. + var _useBridge; + var _bridgeOptions; + var _unbridgedPorts; + var _unbridgedNodes; + + // Publicly exposed variables + + /** + * Populates a reference to all reachable nodes. + */ + function _clearLiveNodes() { + self.liveNodes = { master: null, slaves: [] }; + } + + /** + * Invokes the 'ismaster' command on each individual node and returns whether the node is the + * current RS master. 
+ */ + function _callIsMaster() { + _clearLiveNodes(); + + self.nodes.forEach(function(node) { + try { + var n = node.getDB('admin').runCommand({ ismaster: 1 }); + if (n.ismaster == true) { + self.liveNodes.master = node; + } + else { + node.setSlaveOk(); + self.liveNodes.slaves.push(node); + } } - len++; - } + catch (err) { + print("ReplSetTest Could not call ismaster on node " + node + ": " + tojson(err)); + } + }); - this.numNodes = len; + return self.liveNodes.master || false; } - else if (Array.isArray(opts.nodes)) { - for(i = 0; i < opts.nodes.length; i++) { - this.nodeOptions[ "n" + i ] = Object.merge(opts.nodeOptions, opts.nodes[i]); + + /** + * Wait for a rs indicator to go to a particular state or states. + * + * @param node is a single node or list of nodes, by id or conn + * @param states is a single state or list of states + * @param ind is the indicator specified + * @param timeout how long to wait for the state to be reached + */ + function _waitForIndicator(node, states, ind, timeout) { + if (node.length) { + var nodes = node; + for(var i = 0; i < nodes.length; i++) { + if (states.length) + _waitForIndicator(nodes[i], states[i], ind, timeout); + else + _waitForIndicator(nodes[i], states, ind, timeout); + } + + return; } - this.numNodes = opts.nodes.length; - } - else { - for (i = 0; i < opts.nodes; i++) { - this.nodeOptions[ "n" + i ] = opts.nodeOptions; + timeout = timeout || 30000; + + if (!node.getDB) { + node = self.nodes[node]; } - this.numNodes = opts.nodes; - } + if (!states.length) { + states = [states]; + } - this.ports = allocatePorts(this.numNodes); - this.nodes = []; + print("ReplSetTest waitForIndicator " + ind + " on " + node); + printjson(states); + print("ReplSetTest waitForIndicator from node " + node); - if (this.useBridge) { - this._unbridgedPorts = allocatePorts(this.numNodes); - this._unbridgedNodes = []; - } + var lastTime = null; + var currTime = new Date().getTime(); + var status; - this.initLiveNodes(); + 
assert.soon(function() { + try { + var conn = _callIsMaster(); + if (!conn) { + conn = self.liveNodes.slaves[0]; + } - Object.extend( this, ReplSetTest.Health ); - Object.extend( this, ReplSetTest.State ); -}; + // Try again to load connection + if (!conn) return false; -// List of nodes as host:port strings. -ReplSetTest.prototype.nodeList = function() { - var list = []; - for(var i=0; i<this.ports.length; i++) { - list.push( this.host + ":" + this.ports[i]); - } + var getStatusFunc = function() { + status = conn.getDB('admin').runCommand({ replSetGetStatus: 1 }); + }; - return list; -}; + if (self.keyFile) { + // Authenticate connection used for running replSetGetStatus if needed + authutil.asCluster(conn, self.keyFile, getStatusFunc); + } else { + getStatusFunc(); + } + } + catch (ex) { + print("ReplSetTest waitForIndicator could not get status: " + tojson(ex)); + return false; + } -// Here we store a reference to all reachable nodes. -ReplSetTest.prototype.initLiveNodes = function() { - this.liveNodes = { master: null, slaves: [] }; -}; + var printStatus = false; + if (lastTime == null || (currTime = new Date().getTime()) - (1000 * 5) > lastTime) { + if (lastTime == null) { + print("ReplSetTest waitForIndicator Initial status (timeout : " + + timeout + ") :"); + } -ReplSetTest.prototype.getNodeId = function(node) { - - if( node.toFixed ) { - return parseInt( node ); + printjson(status); + lastTime = new Date().getTime(); + printStatus = true; + } + + if (typeof status.members == 'undefined') { + return false; + } + + for(var i = 0; i < status.members.length; i++) { + if (printStatus) { + print("Status for : " + status.members[i].name + ", checking " + + node.host + "/" + node.name); + } + + if (status.members[i].name == node.host || status.members[i].name == node.name) { + for(var j = 0; j < states.length; j++) { + if (printStatus) { + print("Status -- " + " current state: " + status.members[i][ind] + + ", target state : " + states[j]); + } + + if 
(typeof(states[j]) != "number") { + throw new Error("State was not an number -- type:" + + typeof(states[j]) + ", value:" + states[j]); + } + if (status.members[i][ind] == states[j]) { + return true; + } + } + } + } + + return false; + + }, "waiting for state indicator " + ind + " for " + timeout + "ms", timeout); + + print("ReplSetTest waitForIndicator final status:"); + printjson(status); } - - for( var i = 0; i < this.nodes.length; i++ ){ - if( this.nodes[i] == node ) { - return i; + + /** + * Wait for a health indicator to go to a particular state or states. + * + * @param node is a single node or list of nodes, by id or conn + * @param state is a single state or list of states. ReplSetTest.Health.DOWN can + * only be used in cases when there is a primary available or slave[0] can + * respond to the isMaster command. + */ + function _waitForHealth(node, state, timeout) { + _waitForIndicator(node, state, "health", timeout); + } + + /** + * Returns the optime for the specified host by issuing replSetGetStatus. + */ + function _getLastOpTime(conn) { + var replSetStatus = + assert.commandWorked(conn.getDB("admin").runCommand({ replSetGetStatus: 1 })); + var connStatus = replSetStatus.members.filter(m => m.self)[0]; + if (!connStatus.optime) { + // Must be an ARBITER + return undefined; } + + var myOpTime = connStatus.optime; + return myOpTime.ts ? myOpTime.ts : myOpTime; } - - if( node instanceof ObjectId ) { - for(i = 0; i < this.nodes.length; i++){ - if( this.nodes[i].runId == node ) { + + /** + * Returns list of nodes as host:port strings. 
+ */ + this.nodeList = function() { + var list = []; + for(var i=0; i<this.ports.length; i++) { + list.push(this.host + ":" + this.ports[i]); + } + + return list; + }; + + this.getNodeId = function(node) { + if (node.toFixed) { + return parseInt(node); + } + + for(var i = 0; i < this.nodes.length; i++) { + if (this.nodes[i] == node) { return i; } } - } - - if( node.nodeId != null ) { - return parseInt( node.nodeId ); - } - - return undefined; - -}; -ReplSetTest.prototype.getPort = function( n ){ - - n = this.getNodeId( n ); - - print( "ReplSetTest n: " + n + " ports: " + tojson( this.ports ) + "\t" + this.ports[n] + " " + typeof(n) ); - return this.ports[ n ]; -}; + if (node instanceof ObjectId) { + for(i = 0; i < this.nodes.length; i++) { + if (this.nodes[i].runId == node) { + return i; + } + } + } -ReplSetTest.prototype.getPath = function( n ){ - - if( n.host ) - n = this.getNodeId( n ); - - var p = MongoRunner.dataPath + this.name + "-"+n; - if ( ! this._alldbpaths ) - this._alldbpaths = [ p ]; - else - this._alldbpaths.push( p ); - return p; -}; + if (node.nodeId != null) { + return parseInt(node.nodeId); + } -ReplSetTest.prototype.getReplSetConfig = function() { - var cfg = {}; + return undefined; + }; - cfg._id = this.name; - if (this.protocolVersion !== undefined && this.protocolVersion !== null) { - cfg.protocolVersion = this.protocolVersion; - } + this.getPort = function(n) { + var n = this.getNodeId(n); + return this.ports[n]; + }; - cfg.members = []; + this.getPath = function(n) { + if (n.host) + n = this.getNodeId(n); - for (var i=0; i<this.ports.length; i++) { - member = {}; - member._id = i; + var p = MongoRunner.dataPath + this.name + "-" + n; + if (!_alldbpaths) + _alldbpaths = [p]; + else + _alldbpaths.push(p); - var port = this.ports[i]; + return p; + }; - member.host = this.host + ":" + port; - var nodeOpts = this.nodeOptions[ "n" + i ]; - if (nodeOpts) { - if (nodeOpts.arbiter) { - member.arbiterOnly = true; - } - if (nodeOpts.rsConfig) { - 
Object.extend(member, nodeOpts.rsConfig); - } + this.getReplSetConfig = function() { + var cfg = {}; + cfg._id = this.name; + + if (this.protocolVersion !== undefined && this.protocolVersion !== null) { + cfg.protocolVersion = this.protocolVersion; } - cfg.members.push(member); - } - if (jsTestOptions().useLegacyReplicationProtocol) { - cfg.protocolVersion = 0; - } + cfg.members = []; - if (this.configSettings) { - cfg.settings = this.configSettings; - } - return cfg; -}; + for (var i = 0; i < this.ports.length; i++) { + var member = {}; + member._id = i; -ReplSetTest.prototype.getURL = function(){ - var hosts = []; - - for(var i=0; i<this.ports.length; i++) { + var port = this.ports[i]; + member.host = this.host + ":" + port; - var port; - // Connect on the right port - port = this.ports[i]; - - var str = this.host + ":" + port; - hosts.push(str); - } - - return this.name + "/" + hosts.join(","); -}; + var nodeOpts = this.nodeOptions["n" + i]; + if (nodeOpts) { + if (nodeOpts.arbiter) { + member.arbiterOnly = true; + } -ReplSetTest.prototype.startSet = function( options ) { - var nodes = []; - print( "ReplSetTest Starting Set" ); + if (nodeOpts.rsConfig) { + Object.extend(member, nodeOpts.rsConfig); + } + } - for( var n = 0 ; n < this.ports.length; n++ ) { - node = this.start(n, options); - nodes.push(node); - } + cfg.members.push(member); + } - this.nodes = nodes; - return this.nodes; -}; + if (jsTestOptions().useLegacyReplicationProtocol) { + cfg.protocolVersion = 0; + } -ReplSetTest.prototype.callIsMaster = function() { - - var master = null; - this.initLiveNodes(); - - for(var i=0; i<this.nodes.length; i++) { - try { - var n = this.nodes[i].getDB('admin').runCommand({ismaster:1}); - - if(n.ismaster == true) { - master = this.nodes[i]; - this.liveNodes.master = master; - } - else { - this.nodes[i].setSlaveOk(); - this.liveNodes.slaves.push(this.nodes[i]); - } - } - catch (err) { - print("ReplSetTest Could not call ismaster on node " + i + ": " + tojson(err)); - 
} - } + if (_configSettings) { + cfg.settings = _configSettings; + } - return master || false; -}; + return cfg; + }; -ReplSetTest.awaitRSClientHosts = function( conn, host, hostOk, rs, timeout ) { - var hostCount = host.length; - if( hostCount ){ - for( var i = 0; i < hostCount; i++ ) { - ReplSetTest.awaitRSClientHosts( conn, host[i], hostOk, rs ); + this.getURL = function() { + var hosts = []; + + for(var i = 0; i < this.ports.length; i++) { + hosts.push(this.host + ":" + this.ports[i]); } - return; - } - - timeout = timeout || 60 * 1000; - - if( hostOk == undefined ) hostOk = { ok : true }; - if( host.host ) host = host.host; - if( rs && rs.getMaster ) rs = rs.name; - - print( "Awaiting " + host + " to be " + tojson( hostOk ) + " for " + conn + " (rs: " + rs + ")" ); - - var tests = 0; - assert.soon( function() { - var rsClientHosts = conn.getDB( "admin" ).runCommand( "connPoolStats" ).replicaSets; - if( tests++ % 10 == 0 ) - printjson( rsClientHosts ); - - for ( var rsName in rsClientHosts ){ - if( rs && rs != rsName ) continue; - for ( var i = 0; i < rsClientHosts[rsName].hosts.length; i++ ){ - var clientHost = rsClientHosts[rsName].hosts[ i ]; - if( clientHost.addr != host ) continue; - - // Check that *all* host properties are set correctly - var propOk = true; - for( var prop in hostOk ){ - if ( isObject( hostOk[prop] )) { - if ( !friendlyEqual( hostOk[prop], clientHost[prop] )){ - propOk = false; - break; - } - } - else if ( clientHost[prop] != hostOk[prop] ){ - propOk = false; - break; - } - } - - if( propOk ) return true; - } + return this.name + "/" + hosts.join(","); + }; + + this.startSet = function(options) { + print("ReplSetTest starting set"); + + var nodes = []; + for(var n = 0 ; n < this.ports.length; n++) { + nodes.push(this.start(n, options)); } - return false; - }, "timed out waiting for replica set client to recognize hosts", timeout ); - -}; -ReplSetTest.prototype.awaitSecondaryNodes = function( timeout ) { - this.getMaster(); // Wait for a 
primary to be selected. - var tmo = timeout || 60000; - var replTest = this; - assert.soon( - function() { - replTest.getMaster(); // Reload who the current slaves are. - var slaves = replTest.liveNodes.slaves; - var len = slaves.length; - var ready = true; - for(var i=0; i<len; i++) { - var isMaster = slaves[i].getDB("admin").runCommand({ismaster: 1}); - var arbiter = isMaster.arbiterOnly == undefined ? false : isMaster.arbiterOnly; - ready = ready && ( isMaster.secondary || arbiter ); - } - return ready; - }, "Awaiting secondaries", tmo); -}; + this.nodes = nodes; + return this.nodes; + }; -ReplSetTest.prototype.getMaster = function( timeout ) { - var tries = 0; - var sleepTime = 500; - var tmo = timeout || 60000; - var master = null; + /** + * Blocks until the secondary nodes have completed recovery and their roles are known. + */ + this.awaitSecondaryNodes = function(timeout) { + timeout = timeout || 60000; - try { - var self = this; - assert.soon(function() { - master = self.callIsMaster(); - return master; - }, "Finding master", tmo); - } - catch (err) { - print("ReplSetTest getMaster failed: " + tojson(err)); - printStackTrace(); - throw err; - } - return master; -}; + assert.soon(function() { + // Reload who the current slaves are + self.getPrimary(); -ReplSetTest.prototype.getPrimary = ReplSetTest.prototype.getMaster; + var slaves = self.liveNodes.slaves; + var len = slaves.length; + var ready = true; -ReplSetTest.prototype.awaitNoPrimary = function(msg, timeout) { - msg = msg || "Timed out waiting for there to be no primary in replset: " + this.name; - timeout = timeout || 30000; - var self = this; - assert.soon(function() { - return self.callIsMaster() == false; - }, msg, timeout); + for(var i = 0; i < len; i++) { + var isMaster = slaves[i].getDB("admin").runCommand({ ismaster: 1 }); + var arbiter = (isMaster.arbiterOnly == undefined ? 
false : isMaster.arbiterOnly); + ready = ready && (isMaster.secondary || arbiter); + } -}; + return ready; + }, "Awaiting secondaries", timeout); + }; + + /** + * Blocking call, which will wait for a primary to be elected for some pre-defined timeout and + * if primary is available will return a connection to it. Otherwise throws an exception. + */ + this.getPrimary = function(timeout) { + var tmo = timeout || 60000; + var master = null; -ReplSetTest.prototype.getSecondaries = function( timeout ){ - var master = this.getMaster( timeout ); - var secs = []; - for( var i = 0; i < this.nodes.length; i++ ){ - if( this.nodes[i] != master ){ - secs.push( this.nodes[i] ); + try { + assert.soon(function() { + master = _callIsMaster(); + return master; + }, "Finding master", tmo); + } + catch (err) { + print("ReplSetTest getPrimary failed: " + tojson(err)); + printStackTrace(); + throw err; } - } - return secs; -}; -ReplSetTest.prototype.getSecondary = function( timeout ){ - return this.getSecondaries( timeout )[0]; -}; + return master; + }; -ReplSetTest.prototype.status = function( timeout ){ - var master = this.callIsMaster(); - if( ! 
master ) master = this.liveNodes.slaves[0]; - return master.getDB("admin").runCommand({replSetGetStatus: 1}); -}; + this.awaitNoPrimary = function(msg, timeout) { + msg = msg || "Timed out waiting for there to be no primary in replset: " + this.name; + timeout = timeout || 30000; -// Add a node to the test set -ReplSetTest.prototype.add = function(config) { - var nextPort = allocatePort(); - print("ReplSetTest Next port: " + nextPort); + assert.soon(function() { + return _callIsMaster() == false; + }, msg, timeout); + }; - this.ports.push(nextPort); - printjson(this.ports); + this.getSecondaries = function(timeout) { + var master = this.getPrimary(timeout); + var secs = []; + for(var i = 0; i < this.nodes.length; i++) { + if (this.nodes[i] != master) { + secs.push(this.nodes[i]); + } + } - if (this.useBridge) { - this._unbridgedPorts.push(allocatePort()); - } + return secs; + }; - var nextId = this.nodes.length; - printjson(this.nodes); + this.getSecondary = function(timeout) { + return this.getSecondaries(timeout)[0]; + }; - print("ReplSetTest nextId: " + nextId); - return this.start(nextId, config); -}; + this.status = function(timeout) { + var master = _callIsMaster(); + if (!master) { + master = this.liveNodes.slaves[0]; + } -ReplSetTest.prototype.remove = function( nodeId ) { - nodeId = this.getNodeId( nodeId ); - this.nodes.splice( nodeId, 1 ); - this.ports.splice( nodeId, 1 ); + return master.getDB("admin").runCommand({ replSetGetStatus: 1 }); + }; - if (this.useBridge) { - this._unbridgedNodes.splice(nodeId, 1); - this._unbridgedPorts.splice(nodeId, 1); - } -}; + /** + * Adds a node to the replica set managed by this instance. 
+ */ + this.add = function(config) { + var nextPort = allocatePort(); + print("ReplSetTest Next port: " + nextPort); -ReplSetTest.prototype.initiate = function( cfg , initCmd , timeout ) { - var master = this.nodes[0].getDB("admin"); - var config = cfg || this.getReplSetConfig(); - var cmd = {}; - var cmdKey = initCmd || 'replSetInitiate'; - timeout = timeout || 60000; - if (jsTestOptions().useLegacyReplicationProtocol && !config.hasOwnProperty("protocolVersion")) { - config.protocolVersion = 0; - } - cmd[cmdKey] = config; - printjson(cmd); + this.ports.push(nextPort); + printjson(this.ports); + + if (_useBridge) { + _unbridgedPorts.push(allocatePort()); + } - assert.commandWorked(master.runCommand(cmd), tojson(cmd)); - this.awaitSecondaryNodes(timeout); + var nextId = this.nodes.length; + printjson(this.nodes); - // Setup authentication if running test with authentication - if ((jsTestOptions().keyFile) && cmdKey == 'replSetInitiate') { - master = this.getMaster(); - jsTest.authenticateNodes(this.nodes); - } -}; + print("ReplSetTest nextId: " + nextId); + return this.start(nextId, config); + }; -/** - * Gets the current replica set config from the primary. - * - * throws if any error occurs on the command. - */ -ReplSetTest.prototype.getConfigFromPrimary = function() { - var primary = this.getPrimary(90 * 1000 /* 90 sec timeout */); - return assert.commandWorked(primary.getDB("admin").adminCommand("replSetGetConfig")).config; -}; + this.remove = function(nodeId) { + nodeId = this.getNodeId(nodeId); + this.nodes.splice(nodeId, 1); + this.ports.splice(nodeId, 1); -// alias to match rs.conf* behavior in the shell. 
-ReplSetTest.prototype.conf = ReplSetTest.prototype.getConfigFromPrimary; -ReplSetTest.prototype.config = ReplSetTest.prototype.conf; + if (_useBridge) { + _unbridgedPorts.splice(nodeId, 1); + _unbridgedNodes.splice(nodeId, 1); + } + }; -ReplSetTest.prototype.reInitiate = function() { - "use strict"; + this.initiate = function(cfg, initCmd, timeout) { + var master = this.nodes[0].getDB("admin"); + var config = cfg || this.getReplSetConfig(); + var cmd = {}; + var cmdKey = initCmd || 'replSetInitiate'; + timeout = timeout || 60000; + if (jsTestOptions().useLegacyReplicationProtocol && !config.hasOwnProperty("protocolVersion")) { + config.protocolVersion = 0; + } + cmd[cmdKey] = config; + printjson(cmd); - var config = this.getReplSetConfig(); - var newVersion = this.getConfigFromPrimary().version + 1; - config.version = newVersion; + assert.commandWorked(master.runCommand(cmd), tojson(cmd)); + this.awaitSecondaryNodes(timeout); - if (jsTestOptions().useLegacyReplicationProtocol && !config.hasOwnProperty("protocolVersion")) { - config.protocolVersion = 0; - } - try { - assert.commandWorked(this.getPrimary().adminCommand({replSetReconfig: config})); - } - catch (e) { - if (tojson(e).indexOf("error doing query: failed") < 0) { - throw e; + // Setup authentication if running test with authentication + if ((jsTestOptions().keyFile) && cmdKey == 'replSetInitiate') { + master = this.getPrimary(); + jsTest.authenticateNodes(this.nodes); } - } -}; + }; -ReplSetTest.prototype.getLastOpTime = function(conn) { - var replStatus = conn.getDB("admin").runCommand("replSetGetStatus"); - var myOpTime = replStatus.members.filter(m=>m.self)[0].optime; - return myOpTime.ts ? myOpTime.ts : myOpTime; -}; + /** + * Gets the current replica set config from the primary. + * + * throws if any error occurs on the command. 
+ */ + this.getConfigFromPrimary = function() { + var primary = this.getPrimary(90 * 1000 /* 90 sec timeout */); + return assert.commandWorked(primary.adminCommand("replSetGetConfig")).config; + }; -ReplSetTest.prototype.getLastOpTimeWritten = function() { - var master = this.getMaster(); - var self = this; - assert.soon(function() { + // Aliases to match rs.conf* behavior in the shell. + this.conf = this.getConfigFromPrimary; + + this.reInitiate = function() { + var config = this.getReplSetConfig(); + var newVersion = this.getConfigFromPrimary().version + 1; + config.version = newVersion; + + if (jsTestOptions().useLegacyReplicationProtocol && !config.hasOwnProperty("protocolVersion")) { + config.protocolVersion = 0; + } try { - self.latest = self.getLastOpTime(master); + assert.commandWorked(this.getPrimary().adminCommand({replSetReconfig: config})); } - catch(e) { - print("ReplSetTest caught exception " + e); - return false; + catch (e) { + if (tojson(e).indexOf("error doing query: failed") < 0) { + throw e; + } } + }; - return true; - }, "awaiting oplog query", 30000); -}; + /** + * Waits for the last oplog entry on the primary to be visible in the committed snapshop view + * of the oplog on *all* secondaries. + */ + this.awaitLastOpCommitted = function() { + var rst = this; + var master = rst.getPrimary(); + var lastOp = master.getDB('local').oplog.rs.find().sort({ $natural: -1 }).limit(1).next(); + + var opTime; + var filter; + if (this.getReplSetConfig().protocolVersion === 1) { + opTime = {ts: lastOp.ts, t: lastOp.t}; + filter = opTime; + } else { + opTime = {ts: lastOp.ts, t: -1}; + filter = {ts: lastOp.ts}; + } -/** - * Waits for the last oplog entry on the primary to be visible in the committed snapshop view - * of the oplog on *all* secondaries. 
- */ -ReplSetTest.prototype.awaitLastOpCommitted = function() { - var rst = this; - var master = rst.getMaster(); - var lastOp = master.getDB('local').oplog.rs.find().sort({ $natural: -1 }).limit(1).next(); - - var opTime; - var filter; - if (this.getReplSetConfig().protocolVersion === 1) { - opTime = {ts: lastOp.ts, t: lastOp.t}; - filter = opTime; - } else { - opTime = {ts: lastOp.ts, t: -1}; - filter = {ts: lastOp.ts}; - } - print("Waiting for op with OpTime " + tojson(opTime) + " to be committed on all secondaries"); + print("Waiting for op with OpTime " + tojson(opTime) + " to be committed on all secondaries"); - var isLastOpCommitted = function() { - for (var i = 0; i < rst.nodes.length; i++) { - var node = rst.nodes[i]; + assert.soon(function() { + for (var i = 0; i < rst.nodes.length; i++) { + var node = rst.nodes[i]; - // Continue if we're connected to an arbiter - var res = node.getDB("admin").runCommand({replSetGetStatus: 1}); - assert.commandWorked(res); - if (res.myState == 7) { - continue; - } + // Continue if we're connected to an arbiter + var res = assert.commandWorked(node.adminCommand({ replSetGetStatus: 1 })); + if (res.myState == ReplSetTest.State.ARBITER) { + continue; + } - res = node.getDB('local').runCommand({find: 'oplog.rs', - filter: filter, - readConcern: {level: "majority", - afterOpTime: opTime}, - maxTimeMS: 1000}); - if (!res.ok) { - printjson(res); - return false; - } - var cursor = new DBCommandCursor(node, res); - if (!cursor.hasNext()) { - return false; + res = node.getDB('local').runCommand({find: 'oplog.rs', + filter: filter, + readConcern: {level: "majority", + afterOpTime: opTime}, + maxTimeMS: 1000}); + if (!res.ok) { + printjson(res); + return false; + } + + var cursor = new DBCommandCursor(node, res); + if (!cursor.hasNext()) { + return false; + } } - } - return true; + + return true; + }, "Op failed to become committed on all secondaries: " + tojson(lastOp)); }; - assert.soon(isLastOpCommitted, - "Op failed to become 
committed on all secondaries: " + tojson(lastOp)); -}; -ReplSetTest.prototype.awaitReplication = function(timeout) { - timeout = timeout || 30000; - - this.getLastOpTimeWritten(); - - // get the latest config version from master. if there is a problem, grab master and try again - var configVersion; - var masterOpTime; - var masterName; - var master; - try { - master = this.getMaster(); - configVersion = this.conf().version; - masterOpTime = this.getLastOpTime(master); - masterName = master.toString().substr(14); // strip "connection to " - } - catch (e) { - master = this.getMaster(); - configVersion = this.conf().version; - masterOpTime = this.getLastOpTime(master); - masterName = master.toString().substr(14); // strip "connection to " - } + this.awaitReplication = function(timeout) { + timeout = timeout || 30000; - print("ReplSetTest awaitReplication: starting: timestamp for primary, " + masterName + - ", is " + tojson(this.latest) + - ", last oplog entry is " + tojsononeline(masterOpTime)); + var masterLatestOpTime; - var self = this; - assert.soon(function() { - try { - print("ReplSetTest awaitReplication: checking secondaries against timestamp " + - tojson(self.latest)); - var secondaryCount = 0; - for (var i=0; i < self.liveNodes.slaves.length; i++) { - var slave = self.liveNodes.slaves[i]; - var slaveName = slave.toString().substr(14); // strip "connection to " - - var slaveConfigVersion = + // Blocking call, which will wait for the last optime written on the master to be available + var awaitLastOpTimeWrittenFn = function() { + var master = self.getPrimary(); + assert.soon(function() { + try { + masterLatestOpTime = _getLastOpTime(master); + } + catch(e) { + print("ReplSetTest caught exception " + e); + return false; + } + + return true; + }, "awaiting oplog query", 30000); + }; + + awaitLastOpTimeWrittenFn(); + + // get the latest config version from master. 
if there is a problem, grab master and try again + var configVersion; + var masterOpTime; + var masterName; + var master; + + try { + master = this.getPrimary(); + configVersion = this.conf().version; + masterOpTime = _getLastOpTime(master); + masterName = master.toString().substr(14); // strip "connection to " + } + catch (e) { + master = this.getPrimary(); + configVersion = this.conf().version; + masterOpTime = _getLastOpTime(master); + masterName = master.toString().substr(14); // strip "connection to " + } + + print("ReplSetTest awaitReplication: starting: timestamp for primary, " + masterName + + ", is " + tojson(masterLatestOpTime) + + ", last oplog entry is " + tojsononeline(masterOpTime)); + + assert.soon(function() { + try { + print("ReplSetTest awaitReplication: checking secondaries against timestamp " + + tojson(masterLatestOpTime)); + var secondaryCount = 0; + for (var i = 0; i < self.liveNodes.slaves.length; i++) { + var slave = self.liveNodes.slaves[i]; + var slaveName = slave.toString().substr(14); // strip "connection to " + + var slaveConfigVersion = slave.getDB("local")['system.replset'].findOne().version; - if (configVersion != slaveConfigVersion) { - print("ReplSetTest awaitReplication: secondary #" + secondaryCount + - ", " + slaveName + ", has config version #" + slaveConfigVersion + - ", but expected config version #" + configVersion); + if (configVersion != slaveConfigVersion) { + print("ReplSetTest awaitReplication: secondary #" + secondaryCount + + ", " + slaveName + ", has config version #" + slaveConfigVersion + + ", but expected config version #" + configVersion); - if (slaveConfigVersion > configVersion) { - master = this.getMaster(); - configVersion = master.getDB("local")['system.replset'].findOne().version; - masterOpTime = self.getLastOpTime(master); - masterName = master.toString().substr(14); // strip "connection to " + if (slaveConfigVersion > configVersion) { + master = this.getPrimary(); + configVersion = 
master.getDB("local")['system.replset'].findOne().version; + masterOpTime = _getLastOpTime(master); + masterName = master.toString().substr(14); // strip "connection to " - print("ReplSetTest awaitReplication: timestamp for primary, " + - masterName + ", is " + tojson(this.latest) + - ", last oplog entry is " + tojsononeline(masterOpTime)); + print("ReplSetTest awaitReplication: timestamp for primary, " + + masterName + ", is " + tojson(masterLatestOpTime) + + ", last oplog entry is " + tojsononeline(masterOpTime)); + } + + return false; } - return false; - } - - // Continue if we're connected to an arbiter - if (res = slave.getDB("admin").runCommand({replSetGetStatus: 1})) { - if (res.myState == self.ARBITER) { - continue; - } - } - - ++secondaryCount; - print("ReplSetTest awaitReplication: checking secondary #" + - secondaryCount + ": " + slaveName); - - slave.getDB("admin").getMongo().setSlaveOk(); - - var ts = self.getLastOpTime(slave); - if (self.latest.t < ts.t || - (self.latest.t == ts.t && self.latest.i < ts.i)) { - self.latest = self.getLastOpTime(master); - print("ReplSetTest awaitReplication: timestamp for " + slaveName + - " is newer, resetting latest to " + tojson(self.latest)); - return false; - } - - if (!friendlyEqual(self.latest, ts)) { - print("ReplSetTest awaitReplication: timestamp for secondary #" + - secondaryCount + ", " + slaveName + ", is " + tojson(ts) + - " but latest is " + tojson(self.latest)); - print("ReplSetTest awaitReplication: secondary #" + - secondaryCount + ", " + slaveName + ", is NOT synced"); - return false; - } - - print("ReplSetTest awaitReplication: secondary #" + - secondaryCount + ", " + slaveName + ", is synced"); - } - - print("ReplSetTest awaitReplication: finished: all " + secondaryCount + - " secondaries synced at timestamp " + tojson(self.latest)); - return true; - } - catch (e) { - print("ReplSetTest awaitReplication: caught exception: " + e); - - // we might have a new master now - self.getLastOpTimeWritten(); - 
print("ReplSetTest awaitReplication: resetting: timestamp for primary " + - self.liveNodes.master + " is " + tojson(self.latest)); - return false; - } - }, "awaiting replication", timeout); -}; + // Continue if we're connected to an arbiter + var res = assert.commandWorked(slave.adminCommand({ replSetGetStatus: 1 })); + if (res.myState == ReplSetTest.State.ARBITER) { + continue; + } -ReplSetTest.prototype.getHashes = function( db ){ - this.getMaster(); - var res = {}; - res.master = this.liveNodes.master.getDB( db ).runCommand( "dbhash" ); - res.slaves = this.liveNodes.slaves.map( function(z){ return z.getDB( db ).runCommand( "dbhash" ); } ); - return res; -}; + ++secondaryCount; + print("ReplSetTest awaitReplication: checking secondary #" + + secondaryCount + ": " + slaveName); -/** - * Starts up a server. Options are saved by default for subsequent starts. - * - * - * Options { remember : true } re-applies the saved options from a prior start. - * Options { noRemember : true } ignores the current properties. - * Options { appendOptions : true } appends the current options to those remembered. - * Options { startClean : true } clears the data directory before starting. - * - * @param {int|conn|[int|conn]} n array or single server number (0, 1, 2, ...) or conn - * @param {object} [options] - * @param {boolean} [restart] If false, the data directory will be cleared - * before the server starts. Default: false. 
- * - */ -ReplSetTest.prototype.start = function( n , options , restart , wait ) { - if( n.length ) { - var nodes = n; - var started = []; - - for( var i = 0; i < nodes.length; i++ ){ - if( this.start( nodes[i], Object.merge({}, options), restart, wait ) ){ - started.push( nodes[i] ); + slave.getDB("admin").getMongo().setSlaveOk(); + + var ts = _getLastOpTime(slave); + if (masterLatestOpTime.t < ts.t || (masterLatestOpTime.t == ts.t && masterLatestOpTime.i < ts.i)) { + masterLatestOpTime = _getLastOpTime(master); + print("ReplSetTest awaitReplication: timestamp for " + slaveName + + " is newer, resetting latest to " + tojson(masterLatestOpTime)); + return false; + } + + if (!friendlyEqual(masterLatestOpTime, ts)) { + print("ReplSetTest awaitReplication: timestamp for secondary #" + + secondaryCount + ", " + slaveName + ", is " + tojson(ts) + + " but latest is " + tojson(masterLatestOpTime)); + print("ReplSetTest awaitReplication: secondary #" + + secondaryCount + ", " + slaveName + ", is NOT synced"); + return false; + } + + print("ReplSetTest awaitReplication: secondary #" + + secondaryCount + ", " + slaveName + ", is synced"); + } + + print("ReplSetTest awaitReplication: finished: all " + secondaryCount + + " secondaries synced at timestamp " + tojson(masterLatestOpTime)); + return true; + } + catch (e) { + print("ReplSetTest awaitReplication: caught exception " + e + ';\n' + e.stack); + + // We might have a new master now + awaitLastOpTimeWrittenFn(); + + print("ReplSetTest awaitReplication: resetting: timestamp for primary " + + self.liveNodes.master + " is " + tojson(masterLatestOpTime)); + + return false; } + }, "awaiting replication", timeout); + }; + + this.getHashes = function(db) { + this.getPrimary(); + var res = {}; + res.master = this.liveNodes.master.getDB(db).runCommand("dbhash"); + res.slaves = this.liveNodes.slaves.map(function(z) { return z.getDB(db).runCommand("dbhash"); }); + return res; + }; + + /** + * Starts up a server. 
Options are saved by default for subsequent starts. + * + * + * Options { remember : true } re-applies the saved options from a prior start. + * Options { noRemember : true } ignores the current properties. + * Options { appendOptions : true } appends the current options to those remembered. + * Options { startClean : true } clears the data directory before starting. + * + * @param {int|conn|[int|conn]} n array or single server number (0, 1, 2, ...) or conn + * @param {object} [options] + * @param {boolean} [restart] If false, the data directory will be cleared + * before the server starts. Default: false. + * + */ + this.start = function(n, options, restart, wait) { + if (n.length) { + var nodes = n; + var started = []; + + for(var i = 0; i < nodes.length; i++) { + if (this.start(nodes[i], Object.merge({}, options), restart, wait)) { + started.push(nodes[i]); + } + } + + return started; } + + // TODO: should we do something special if we don't currently know about this node? + n = this.getNodeId(n); - return started; - } + print("ReplSetTest n is : " + n); + + var defaults = { useHostName : this.useHostName, + oplogSize : this.oplogSize, + keyFile : this.keyFile, + port : _useBridge ? _unbridgedPorts[n] : this.ports[n], + noprealloc : "", + smallfiles : "", + replSet : this.useSeedList ? this.getURL() : this.name, + dbpath : "$set-$node" }; + + // + // Note : this replaces the binVersion of the shared startSet() options the first time + // through, so the full set is guaranteed to have different versions if size > 1. If using + // start() independently, independent version choices will be made + // + if (options && options.binVersion) { + options.binVersion = + MongoRunner.versionIterator(options.binVersion); + } - // TODO: should we do something special if we don't currently know about this node? 
- n = this.getNodeId(n); - - print( "ReplSetTest n is : " + n ); - - defaults = { useHostName : this.useHostName, - oplogSize : this.oplogSize, - keyFile : this.keyFile, - port : this.useBridge ? this._unbridgedPorts[n] : this.ports[n], - noprealloc : "", - smallfiles : "", - replSet : this.useSeedList ? this.getURL() : this.name, - dbpath : "$set-$node" }; - - defaults = Object.merge( defaults, ReplSetTest.nodeOptions || {} ); - - // - // Note : this replaces the binVersion of the shared startSet() options the first time - // through, so the full set is guaranteed to have different versions if size > 1. If using - // start() independently, independent version choices will be made - // - if( options && options.binVersion ){ - options.binVersion = - MongoRunner.versionIterator( options.binVersion ); - } - - options = Object.merge( defaults, options ); - options = Object.merge( options, this.nodeOptions[ "n" + n ] ); - delete options.rsConfig; + options = Object.merge(defaults, options); + options = Object.merge(options, this.nodeOptions["n" + n]); + delete options.rsConfig; - options.restart = options.restart || restart; + options.restart = options.restart || restart; - var pathOpts = { node : n, set : this.name }; - options.pathOpts = Object.merge( options.pathOpts || {}, pathOpts ); - - if( tojson(options) != tojson({}) ) - printjson(options); - - // make sure to call getPath, otherwise folders wont be cleaned - this.getPath(n); - - print("ReplSetTest " + (restart ? "(Re)" : "") + "Starting...."); - - if (this.useBridge) { - var bridgeOptions = Object.merge(this.bridgeOptions, options.bridgeOptions || {}); - bridgeOptions = Object.merge(bridgeOptions, { - hostName: this.host, - port: this.ports[n], - // The mongod processes identify themselves to mongobridge as host:port, where the host - // is the actual hostname of the machine and not localhost. 
- dest: getHostName() + ":" + this._unbridgedPorts[n], - }); + var pathOpts = { node : n, set : this.name }; + options.pathOpts = Object.merge(options.pathOpts || {}, pathOpts); + + if (tojson(options) != tojson({})) + printjson(options); - this.nodes[n] = new MongoBridge(bridgeOptions); - } + // make sure to call getPath, otherwise folders wont be cleaned + this.getPath(n); - var conn = MongoRunner.runMongod(options); - if (!conn) { - throw new Error("Failed to start node " + n); - } + print("ReplSetTest " + (restart ? "(Re)" : "") + "Starting...."); - if (this.useBridge) { - this.nodes[n].connectToBridge(); - this._unbridgedNodes[n] = conn; - } else { - this.nodes[n] = conn; - } - - // Add replica set specific attributes. - this.nodes[n].nodeId = n; - - printjson( this.nodes ); + if (_useBridge) { + var bridgeOptions = Object.merge(_bridgeOptions, options.bridgeOptions || {}); + bridgeOptions = Object.merge(bridgeOptions, { + hostName: this.host, + port: this.ports[n], + // The mongod processes identify themselves to mongobridge as host:port, where the + // host is the actual hostname of the machine and not localhost. + dest: getHostName() + ":" + _unbridgedPorts[n], + }); + + this.nodes[n] = new MongoBridge(bridgeOptions); + } + + var conn = MongoRunner.runMongod(options); + if (!conn) { + throw new Error("Failed to start node " + n); + } + + if (_useBridge) { + this.nodes[n].connectToBridge(); + _unbridgedNodes[n] = conn; + } else { + this.nodes[n] = conn; + } - wait = wait || false; - if( ! wait.toFixed ){ - if( wait ) wait = 0; - else wait = -1; - } + // Add replica set specific attributes. + this.nodes[n].nodeId = n; + + printjson(this.nodes); + + wait = wait || false; + if (! wait.toFixed) { + if (wait) wait = 0; + else wait = -1; + } - if (wait >= 0) { - // Wait for node to start up. - this.waitForHealth(this.nodes[n], this.UP, wait); - } + if (wait >= 0) { + // Wait for node to start up. 
+ _waitForHealth(this.nodes[n], Health.UP, wait); + } - return this.nodes[n]; -}; + return this.nodes[n]; + }; + /** + * Restarts a db without clearing the data directory by default. If the server is not + * stopped first, this function will not work. + * + * Option { startClean : true } forces clearing the data directory. + * Option { auth : Object } object that contains the auth details for admin credentials. + * Should contain the fields 'user' and 'pwd' + * + * @param {int|conn|[int|conn]} n array or single server number (0, 1, 2, ...) or conn + */ + this.restart = function(n, options, signal, wait) { + // Can specify wait as third parameter, if using default signal + if (signal == true || signal == false) { + wait = signal; + signal = undefined; + } + + this.stop(n, signal, options); -/** - * Restarts a db without clearing the data directory by default. If the server is not - * stopped first, this function will not work. - * - * Option { startClean : true } forces clearing the data directory. - * Option { auth : Object } object that contains the auth details for admin credentials. - * Should contain the fields 'user' and 'pwd' - * - * @param {int|conn|[int|conn]} n array or single server number (0, 1, 2, ...) 
or conn - */ -ReplSetTest.prototype.restart = function( n , options, signal, wait ){ - // Can specify wait as third parameter, if using default signal - if( signal == true || signal == false ){ - wait = signal; - signal = undefined; - } - - this.stop(n, signal, options); - started = this.start( n , options , true, wait ); - - if (jsTestOptions().keyFile) { - if (started.length) { - // if n was an array of conns, start will return an array of connections - for (var i = 0; i < started.length; i++) { - jsTest.authenticate(started[i]); + var started = this.start(n, options, true, wait); + + if (jsTestOptions().keyFile) { + if (started.length) { + // if n was an array of conns, start will return an array of connections + for (var i = 0; i < started.length; i++) { + jsTest.authenticate(started[i]); + } + } else { + jsTest.authenticate(started); } - } else { - jsTest.authenticate(started); } - } - return started; -}; + return started; + }; -ReplSetTest.prototype.stopMaster = function(signal, opts) { - var master = this.getMaster(); - var master_id = this.getNodeId( master ); - return this.stop(master_id, signal, opts); -}; + this.stopMaster = function(signal, opts) { + var master = this.getPrimary(); + var master_id = this.getNodeId(master); + return this.stop(master_id, signal, opts); + }; -/** - * Stops a particular node or nodes, specified by conn or id - * - * @param {number|Mongo} n the index or connection object of the replica set member to stop. - * @param {number} signal the signal number to use for killing - * @param {Object} opts @see MongoRunner.stopMongod - */ -ReplSetTest.prototype.stop = function(n, signal, opts) { - - // Flatten array of nodes to stop - if( n.length ){ - nodes = n; + /** + * Stops a particular node or nodes, specified by conn or id + * + * @param {number|Mongo} n the index or connection object of the replica set member to stop. 
+ * @param {number} signal the signal number to use for killing + * @param {Object} opts @see MongoRunner.stopMongod + */ + this.stop = function(n, signal, opts) { + // Flatten array of nodes to stop + if (n.length) { + var nodes = n; + + var stopped = []; + for(var i = 0; i < nodes.length; i++) { + if (this.stop(nodes[i], signal, opts)) + stopped.push(nodes[i]); + } + + return stopped; + } - var stopped = []; - for( var i = 0; i < nodes.length; i++ ){ - if (this.stop(nodes[i], signal, opts)) - stopped.push( nodes[i] ); + // Can specify wait as second parameter, if using default signal + if (signal == true || signal == false) { + signal = undefined; } - return stopped; - } - - // Can specify wait as second parameter, if using default signal - if( signal == true || signal == false ){ - signal = undefined; - } - - n = this.getNodeId(n); - var port = this.useBridge ? this._unbridgedPorts[n] : this.ports[n]; - print('ReplSetTest stop *** Shutting down mongod in port ' + port + ' ***'); - var ret = MongoRunner.stopMongod( port , signal, opts ); + n = this.getNodeId(n); - print('ReplSetTest stop *** Mongod in port ' + port + - ' shutdown with code (' + ret + ') ***'); + var port = _useBridge ? _unbridgedPorts[n] : this.ports[n]; + print('ReplSetTest stop *** Shutting down mongod in port ' + port + ' ***'); + var ret = MongoRunner.stopMongod(port, signal, opts); - if (this.useBridge) { - this.nodes[n].stop(); - } + print('ReplSetTest stop *** Mongod in port ' + port + + ' shutdown with code (' + ret + ') ***'); - return ret; -}; + if (_useBridge) { + this.nodes[n].stop(); + } -/** - * Kill all members of this replica set. 
- * - * @param {number} signal The signal number to use for killing the members - * @param {boolean} forRestart will not cleanup data directory - * @param {Object} opts @see MongoRunner.stopMongod - */ -ReplSetTest.prototype.stopSet = function( signal , forRestart, opts ) { - for(var i=0; i < this.ports.length; i++) { - this.stop(i, signal, opts); - } - if ( forRestart ) { return; } - if ( this._alldbpaths ){ - print("ReplSetTest stopSet deleting all dbpaths"); - for(i=0; i<this._alldbpaths.length; i++) { - resetDbpath( this._alldbpaths[i] ); + return ret; + }; + + /** + * Kill all members of this replica set. + * + * @param {number} signal The signal number to use for killing the members + * @param {boolean} forRestart will not cleanup data directory + * @param {Object} opts @see MongoRunner.stopMongod + */ + this.stopSet = function(signal, forRestart, opts) { + for(var i=0; i < this.ports.length; i++) { + this.stop(i, signal, opts); } - } - _forgetReplSet(this.name); - print('ReplSetTest stopSet *** Shut down repl set - test worked ****' ); -}; + if (forRestart) { return; } -/** - * Walks all oplogs and ensures matching entries. - */ -ReplSetTest.prototype.ensureOplogsMatch = function() { - "use strict"; - var OplogReader = function(mongo) { - this.next = function() { - if (!this.cursor) - throw Error("reader is not open!"); - - var nextDoc = this.cursor.next(); - if (nextDoc) - this.lastDoc = nextDoc; - return nextDoc; - }; - - this.getLastDoc = function() { - if (this.lastDoc) - return this.lastDoc; - return this.next(); - }; - - this.hasNext = function() { - if (!this.cursor) - throw Error("reader is not open!"); - return this.cursor.hasNext(); - }; - - this.query = function(ts) { - var coll = this.getOplogColl(); - var query = {"ts": {"$gte": ts ? 
ts : new Timestamp()}}; - this.cursor = coll.find(query).sort({$natural:1}); - this.cursor.addOption(DBQuery.Option.oplogReplay); - }; - - this.getFirstDoc = function(){ - return this.getOplogColl().find().sort({$natural:1}).limit(-1).next(); - }; - - this.getOplogColl = function () { - return this.mongo.getDB("local")["oplog.rs"]; - }; - - this.lastDoc = null; - this.cursor = null; - this.mongo = mongo; + if (_alldbpaths) { + print("ReplSetTest stopSet deleting all dbpaths"); + for(var i = 0; i < _alldbpaths.length; i++) { + resetDbpath(_alldbpaths[i]); + } + } + + _forgetReplSet(this.name); + + print('ReplSetTest stopSet *** Shut down repl set - test worked ****'); }; - - if (this.nodes.length && this.nodes.length > 1) { - var readers = []; - var largestTS = null; - var nodes = this.nodes; - var rsSize = nodes.length; - for (var i = 0; i < rsSize; i++) { - readers[i] = new OplogReader(nodes[i]); - var currTS = readers[i].getFirstDoc().ts; - if (currTS.t > largestTS.t || (currTS.t == largestTS.t && currTS.i > largestTS.i) ) { - largestTS = currTS; + + /** + * Walks all oplogs and ensures matching entries. + */ + this.ensureOplogsMatch = function() { + var OplogReader = function(mongo) { + this.next = function() { + if (!this.cursor) + throw Error("reader is not open!"); + + var nextDoc = this.cursor.next(); + if (nextDoc) + this.lastDoc = nextDoc; + return nextDoc; + }; + + this.getLastDoc = function() { + if (this.lastDoc) + return this.lastDoc; + return this.next(); + }; + + this.hasNext = function() { + if (!this.cursor) + throw Error("reader is not open!"); + return this.cursor.hasNext(); + }; + + this.query = function(ts) { + var coll = this.getOplogColl(); + var query = {"ts": {"$gte": ts ? 
ts : new Timestamp()}}; + this.cursor = coll.find(query).sort({$natural:1}); + this.cursor.addOption(DBQuery.Option.oplogReplay); + }; + + this.getFirstDoc = function() { + return this.getOplogColl().find().sort({$natural:1}).limit(-1).next(); + }; + + this.getOplogColl = function () { + return this.mongo.getDB("local")["oplog.rs"]; + }; + + this.lastDoc = null; + this.cursor = null; + this.mongo = mongo; + }; + + if (this.nodes.length && this.nodes.length > 1) { + var readers = []; + var largestTS = null; + var nodes = this.nodes; + var rsSize = nodes.length; + for (var i = 0; i < rsSize; i++) { + readers[i] = new OplogReader(nodes[i]); + var currTS = readers[i].getFirstDoc().ts; + if (currTS.t > largestTS.t || (currTS.t == largestTS.t && currTS.i > largestTS.i)) { + largestTS = currTS; + } + } + + // start all oplogReaders at the same place. + for (i = 0; i < rsSize; i++) { + readers[i].query(largestTS); + } + + var firstReader = readers[0]; + while (firstReader.hasNext()) { + var ts = firstReader.next().ts; + for(i = 1; i < rsSize; i++) { + assert.eq(ts, + readers[i].next().ts, + " non-matching ts for node: " + readers[i].mongo); + } + } + + // ensure no other node has more oplog + for (i = 1; i < rsSize; i++) { + assert.eq(false, + readers[i].hasNext(), + "" + readers[i] + " shouldn't have more oplog."); } } - - // start all oplogReaders at the same place. - for (i = 0; i < rsSize; i++) { - readers[i].query(largestTS); + }; + + /** + * Wait for a state indicator to go to a particular state or states. + * + * @param node is a single node or list of nodes, by id or conn + * @param state is a single state or list of states + * + */ + this.waitForState = function(node, state, timeout) { + _waitForIndicator(node, state, "state", timeout); + }; + + /** + * Waits until there is a master node. 
+ */ + this.waitForMaster = function(timeout) { + var master; + assert.soon(function() { + return (master = self.getPrimary()); + }, "waiting for master", timeout); + + return master; + }; + + /** + * Overflows a replica set secondary or secondaries, specified by id or conn. + */ + this.overflow = function(secondaries) { + // Create a new collection to overflow, allow secondaries to replicate + var master = this.getPrimary(); + var overflowColl = master.getCollection("_overflow.coll"); + overflowColl.insert({ replicated : "value" }); + this.awaitReplication(); + + this.stop(secondaries); + + var count = master.getDB("local").oplog.rs.count(); + var prevCount = -1; + + // Insert batches of documents until we exceed the capped size for the oplog and truncate it. + + while (count > prevCount) { + print("ReplSetTest overflow inserting 10000"); + var bulk = overflowColl.initializeUnorderedBulkOp(); + for (var i = 0; i < 10000; i++) { + bulk.insert({ overflow : "Insert some large overflow value to eat oplog space faster." }); + } + assert.writeOK(bulk.execute()); + + prevCount = count; + this.awaitReplication(); + + count = master.getDB("local").oplog.rs.count(); + + print("ReplSetTest overflow count : " + count + " prev : " + prevCount); } - var firstReader = readers[0]; - while (firstReader.hasNext()) { - var ts = firstReader.next().ts; - for(i = 1; i < rsSize; i++) { - assert.eq(ts, - readers[i].next().ts, - " non-matching ts for node: " + readers[i].mongo); + // Do one writeConcern:2 write in order to ensure that all of the oplog gets propagated to + // the secondary which is online. 
+ assert.writeOK(overflowColl.insert({ overflow: "Last overflow value" }, + { writeConcern: { w: 2 } })); + + // Restart all our secondaries and wait for recovery state + this.start(secondaries, { remember : true }, true, true); + this.waitForState(secondaries, ReplSetTest.State.RECOVERING, 5 * 60 * 1000); + }; + + // + // ReplSetTest initialization + // + + this.name = opts.name || "testReplSet"; + this.useHostName = opts.useHostName == undefined ? true : opts.useHostName; + this.host = this.useHostName ? (opts.host || getHostName()) : 'localhost'; + this.oplogSize = opts.oplogSize || 40; + this.useSeedList = opts.useSeedList || false; + this.keyFile = opts.keyFile; + this.shardSvr = opts.shardSvr || false; + this.protocolVersion = opts.protocolVersion; + + _useBridge = opts.useBridge || false; + _bridgeOptions = opts.bridgeOptions || {}; + + _configSettings = opts.settings || false; + + this.nodeOptions = {}; + + if (isObject(opts.nodes)) { + var len = 0; + for(var i in opts.nodes) { + var options = this.nodeOptions["n" + len] = Object.merge(opts.nodeOptions, + opts.nodes[i]); + if (i.startsWith("a")) { + options.arbiter = true; } + + len++; } - - // ensure no other node has more oplog - for (i = 1; i < rsSize; i++) { - assert.eq(false, - readers[i].hasNext(), - "" + readers[i] + " shouldn't have more oplog."); + + this.numNodes = len; + } + else if (Array.isArray(opts.nodes)) { + for(var i = 0; i < opts.nodes.length; i++) { + this.nodeOptions["n" + i] = Object.merge(opts.nodeOptions, opts.nodes[i]); } + + this.numNodes = opts.nodes.length; } -}; -/** - * Waits until there is a master node - */ -ReplSetTest.prototype.waitForMaster = function( timeout ){ - - var master; - - var self = this; - assert.soon(function() { - return ( master = self.getMaster() ); - }, "waiting for master", timeout); - - return master; -}; + else { + for (var i = 0; i < opts.nodes; i++) { + this.nodeOptions["n" + i] = opts.nodeOptions; + } + this.numNodes = opts.nodes; + } -/** - * Wait 
for a health indicator to go to a particular state or states. - * - * @param node is a single node or list of nodes, by id or conn - * @param state is a single state or list of states. ReplSetTest.Health.DOWN can - * only be used in cases when there is a primary available or slave[0] can - * respond to the isMaster command. - */ -ReplSetTest.prototype.waitForHealth = function( node, state, timeout ){ - this.waitForIndicator( node, state, "health", timeout ); + this.ports = allocatePorts(this.numNodes); + this.nodes = []; + + if (_useBridge) { + _unbridgedPorts = allocatePorts(this.numNodes); + _unbridgedNodes = []; + } + + _clearLiveNodes(); + + Object.extend(this, ReplSetTest.State); }; /** - * Wait for a state indicator to go to a particular state or states. - * - * @param node is a single node or list of nodes, by id or conn - * @param state is a single state or list of states - * + * Set of states that the replica set can be in. Used for the wait functions. */ -ReplSetTest.prototype.waitForState = function( node, state, timeout ){ - this.waitForIndicator( node, state, "state", timeout ); +ReplSetTest.State = { + PRIMARY: 1, + SECONDARY: 2, + RECOVERING: 3, + // Note there is no state 4 + STARTUP_2: 5, + UNKNOWN: 6, + ARBITER: 7, + DOWN: 8, + ROLLBACK: 9, + REMOVED: 10, }; /** - * Wait for a rs indicator to go to a particular state or states. - * - * @param node is a single node or list of nodes, by id or conn - * @param states is a single state or list of states - * @param ind is the indicator specified - * + * Waits for the specified hosts to enter a certain state. 
*/ -ReplSetTest.prototype.waitForIndicator = function( node, states, ind, timeout ){ - - if( node.length ){ - - var nodes = node; - for( var i = 0; i < nodes.length; i++ ){ - if( states.length ) - this.waitForIndicator( nodes[i], states[i], ind, timeout ); - else - this.waitForIndicator( nodes[i], states, ind, timeout ); +ReplSetTest.awaitRSClientHosts = function(conn, host, hostOk, rs, timeout) { + var hostCount = host.length; + if (hostCount) { + for(var i = 0; i < hostCount; i++) { + ReplSetTest.awaitRSClientHosts(conn, host[i], hostOk, rs); } - + return; - } - - timeout = timeout || 30000; - - if( ! node.getDB ){ - node = this.nodes[node]; } - - if( ! states.length ) states = [ states ]; - - print( "ReplSetTest waitForIndicator " + ind + " on " + node ); - printjson( states ); - print( "ReplSetTest waitForIndicator from node " + node ); - - var lastTime = null; - var currTime = new Date().getTime(); - var status; - var self = this; - assert.soon(function() { - try { - var conn = self.callIsMaster(); - if (!conn) conn = self.liveNodes.slaves[0]; - if (!conn) return false; // Try again to load connection - - var getStatusFunc = function() { - status = conn.getDB('admin').runCommand({replSetGetStatus: 1}); - }; - if (self.keyFile) { - // Authenticate connection used for running replSetGetStatus if needed. 
- authutil.asCluster(conn, self.keyFile, getStatusFunc); - } else { - getStatusFunc(); - } - } - catch ( ex ) { - print( "ReplSetTest waitForIndicator could not get status: " + tojson( ex ) ); - return false; - } + timeout = timeout || 60000; - var printStatus = false; - if( lastTime == null || ( currTime = new Date().getTime() ) - (1000 * 5) > lastTime ) { - if( lastTime == null ) { - print( "ReplSetTest waitForIndicator Initial status ( timeout : " + - timeout + " ) :" ); - } - printjson( status ); - lastTime = new Date().getTime(); - printStatus = true; - } + if (hostOk == undefined) hostOk = { ok: true }; + if (host.host) host = host.host; + if (rs) rs = rs.name; - if (typeof status.members == 'undefined') { - return false; + print("Awaiting " + host + " to be " + tojson(hostOk) + " for " + conn + " (rs: " + rs + ")"); + + var tests = 0; + + assert.soon(function() { + var rsClientHosts = conn.adminCommand('connPoolStats').replicaSets; + if (tests++ % 10 == 0) { + printjson(rsClientHosts); } - for( var i = 0; i < status.members.length; i++ ) { - if( printStatus ) { - print( "Status for : " + status.members[i].name + ", checking " + - node.host + "/" + node.name ); - } - if( status.members[i].name == node.host || status.members[i].name == node.name ) { - for( var j = 0; j < states.length; j++ ) { - if( printStatus ) { - print( "Status -- " + " current state: " + status.members[i][ind] + - ", target state : " + states[j] ); - } + for (var rsName in rsClientHosts) { + if (rs && rs != rsName) continue; + + for (var i = 0; i < rsClientHosts[rsName].hosts.length; i++) { + var clientHost = rsClientHosts[rsName].hosts[i]; + if (clientHost.addr != host) continue; - if (typeof(states[j]) != "number") { - throw new Error("State was not an number -- type:" + - typeof(states[j]) + ", value:" + states[j]); + // Check that *all* host properties are set correctly + var propOk = true; + for(var prop in hostOk) { + if (isObject(hostOk[prop])) { + if (!friendlyEqual(hostOk[prop], 
clientHost[prop])) { + propOk = false; + break; + } } - if( status.members[i][ind] == states[j] ) { - return true; + else if (clientHost[prop] != hostOk[prop]) { + propOk = false; + break; } } + + if (propOk) { + return true; + } } } return false; - - }, "waiting for state indicator " + ind + " for " + timeout + "ms", timeout); - - print( "ReplSetTest waitForIndicator final status:" ); - printjson( status ); -}; - -ReplSetTest.Health = {}; -ReplSetTest.Health.UP = 1; -ReplSetTest.Health.DOWN = 0; - -ReplSetTest.State = {}; -ReplSetTest.State.PRIMARY = 1; -ReplSetTest.State.SECONDARY = 2; -ReplSetTest.State.RECOVERING = 3; -// Note there is no state 4. -ReplSetTest.State.STARTUP_2 = 5; -ReplSetTest.State.UNKNOWN = 6; -ReplSetTest.State.ARBITER = 7; -ReplSetTest.State.DOWN = 8; -ReplSetTest.State.ROLLBACK = 9; -ReplSetTest.State.REMOVED = 10; - -/** - * Overflows a replica set secondary or secondaries, specified by id or conn. - */ -ReplSetTest.prototype.overflow = function( secondaries ) { - // Create a new collection to overflow, allow secondaries to replicate - var master = this.getMaster(); - var overflowColl = master.getCollection( "_overflow.coll" ); - overflowColl.insert({ replicated : "value" }); - this.awaitReplication(); - - this.stop(secondaries); - - var count = master.getDB("local").oplog.rs.count(); - var prevCount = -1; - - // Insert batches of documents until we exceed the capped size for the oplog and truncate it. - - while (count > prevCount) { - print("ReplSetTest overflow inserting 10000"); - var bulk = overflowColl.initializeUnorderedBulkOp(); - for (var i = 0; i < 10000; i++) { - bulk.insert({ overflow : "Insert some large overflow value to eat oplog space faster." 
}); - } - assert.writeOK(bulk.execute()); - - prevCount = count; - this.awaitReplication(); - - count = master.getDB("local").oplog.rs.count(); - - print( "ReplSetTest overflow count : " + count + " prev : " + prevCount ); - } - - // Do one writeConcern:2 write in order to ensure that all of the oplog gets propagated to the - // secondary which is online - assert.writeOK( - overflowColl.insert({ overflow: "Last overflow value" }, { writeConcern: { w: 2 } })); - - // Restart all our secondaries and wait for recovery state - this.start( secondaries, { remember : true }, true, true ); - this.waitForState( secondaries, this.RECOVERING, 5 * 60 * 1000 ); + }, + 'timed out waiting for replica set client to recognize hosts', + timeout); }; diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js index 985663d25c7..e06a43f9176 100644 --- a/src/mongo/shell/shardingtest.js +++ b/src/mongo/shell/shardingtest.js @@ -94,6 +94,22 @@ */ var ShardingTest = function(params) { + if (!(this instanceof ShardingTest)) { + return new ShardingTest(params); + } + + // Capture the 'this' reference + var self = this; + + // Used for counting the test duration + var _startTime = new Date(); + + // Populated with the paths of all shard hosts (config servers + hosts) and is used for + // cleaning up the data files on shutdown + var _alldbpaths = []; + + // Publicly exposed variables + /** * Attempts to open a connection to the specified connection string or throws if unable to * connect. @@ -120,12 +136,34 @@ var ShardingTest = function(params) { return tojsononeline(r.min) + " -> " + tojsononeline(r.max); } - // Used for counting the test duration - var _startTime = new Date(); + /** + * Checks whether the specified collection is sharded by consulting the config metadata. 
+ */ + function _isSharded(collName) { + var collName = "" + collName; + var dbName; - // Populated with the paths of all shard hosts (config servers + hosts) and is used for - // cleaning up the data files on shutdown - var _alldbpaths = []; + if (typeof collName.getCollectionNames == 'function') { + dbName = "" + collName; + collName = undefined; + } + + if (dbName) { + var x = self.config.databases.findOne({ _id : dbname }); + if (x) + return x.partitioned; + else + return false; + } + + if (collName) { + var x = self.config.collections.findOne({ _id : collName }); + if (x) + return true; + else + return false; + } + } // ShardingTest API @@ -155,7 +193,7 @@ var ShardingTest = function(params) { throw Error("couldn't find dbname: " + dbname + " total: " + this.config.databases.count()); } - return this.config.shards.find({ _id : { $ne : x.primary } }).map(function(z) { return z._id; }) + return this.config.shards.find({ _id: { $ne: x.primary } }).map(z => z._id); }; this.getConnNames = function() { @@ -175,12 +213,12 @@ var ShardingTest = function(params) { var rsName = null; if (name.indexOf("/") > 0) - rsName = name.substring(0 , name.indexOf("/")); + rsName = name.substring(0, name.indexOf("/")); for (var i=0; i<this._connections.length; i++) { var c = this._connections[i]; - if (connectionURLTheSame(name , c.name) || - connectionURLTheSame(rsName , c.name)) + if (connectionURLTheSame(name, c.name) || + connectionURLTheSame(rsName, c.name)) return c; } @@ -195,26 +233,32 @@ var ShardingTest = function(params) { }; this.getOther = function(one) { - if (this._connections.length < 2) + if (this._connections.length < 2) { throw Error("getOther only works with 2 servers"); + } + + if (one._mongo) { + one = one._mongo; + } - if (one._mongo) - one = one._mongo - for(var i = 0; i < this._connections.length; i++) { - if (this._connections[i] != one) return this._connections[i] + if (this._connections[i] != one) { + return this._connections[i]; + } } - + return null; 
}; this.getAnother = function(one) { - if (this._connections.length < 2) + if (this._connections.length < 2) { throw Error("getAnother() only works with multiple servers"); - - if (one._mongo) - one = one._mongo - + } + + if (one._mongo) { + one = one._mongo; + } + for(var i = 0; i < this._connections.length; i++) { if (this._connections[i] == one) return this._connections[(i + 1) % this._connections.length]; @@ -222,10 +266,12 @@ var ShardingTest = function(params) { }; this.getFirstOther = function(one) { - for (var i=0; i<this._connections.length; i++) { - if (this._connections[i] != one) - return this._connections[i]; + for (var i = 0; i < this._connections.length; i++) { + if (this._connections[i] != one) { + return this._connections[i]; + } } + throw Error("impossible"); }; @@ -257,7 +303,8 @@ var ShardingTest = function(params) { var timeMillis = new Date().getTime() - _startTime.getTime(); - print('*** ShardingTest ' + this._testName + " completed successfully in " + (timeMillis / 1000) + " seconds ***"); + print('*** ShardingTest ' + this._testName + " completed successfully in " + + (timeMillis / 1000) + " seconds ***"); }; this.adminCommand = function(cmd) { @@ -290,17 +337,17 @@ var ShardingTest = function(params) { }; this.getChunksString = function(ns) { - var q = {} - if (ns) + var q = {}; + if (ns) { q.ns = ns; + } var s = ""; - this.config.chunks.find(q).sort({ ns : 1 , min : 1 }).forEach( - function(z) { - s += " " + z._id + "\t" + z.lastmod.t + "|" + z.lastmod.i + "\t" + tojson(z.min) + " -> " + tojson(z.max) + " " + z.shard + " " + z.ns + "\n"; - } - ); - + this.config.chunks.find(q).sort({ ns : 1, min : 1 }).forEach(function(z) { + s += " " + z._id + "\t" + z.lastmod.t + "|" + z.lastmod.i + "\t" + + tojson(z.min) + " -> " + tojson(z.max) + " " + z.shard + " " + z.ns + "\n"; + }); + return s; }; @@ -312,20 +359,25 @@ var ShardingTest = function(params) { printShardingStatus(this.config); }; - this.printCollectionInfo = function(ns , msg) { + 
this.printCollectionInfo = function(ns, msg) { var out = ""; - if (msg) + if (msg) { out += msg + "\n"; + } out += "sharding collection info: " + ns + "\n"; - for (var i=0; i<this._connections.length; i++) { + + for (var i = 0; i<this._connections.length; i++) { var c = this._connections[i]; - out += " mongod " + c + " " + tojson(c.getCollection(ns).getShardVersion() , " " , true) + "\n"; + out += " mongod " + c + " " + + tojson(c.getCollection(ns).getShardVersion(), " ", true) + "\n"; } - for (var i=0; i<this._mongos.length; i++) { + + for (var i = 0; i < this._mongos.length; i++) { var c = this._mongos[i]; - out += " mongos " + c + " " + tojson(c.getCollection(ns).getShardVersion() , " " , true) + "\n"; + out += " mongos " + c + " " + + tojson(c.getCollection(ns).getShardVersion(), " ", true) + "\n"; } - + out += this.getChunksString(ns); print("ShardingTest " + out); @@ -335,49 +387,57 @@ var ShardingTest = function(params) { this.adminCommand("connpoolsync"); }; - this.onNumShards = function(collName , dbName) { - this.sync(); // we should sync since we're going directly to mongod here + this.onNumShards = function(collName, dbName) { dbName = dbName || "test"; - var num=0; - for (var i=0; i<this._connections.length; i++) - if (this._connections[i].getDB(dbName).getCollection(collName).count() > 0) + + // We should sync since we're going directly to mongod here + this.sync(); + + var num = 0; + for (var i = 0; i < this._connections.length; i++) { + if (this._connections[i].getDB(dbName).getCollection(collName).count() > 0) { num++; + } + } + return num; }; - this.shardCounts = function(collName , dbName) { - this.sync(); // we should sync since we're going directly to mongod here + this.shardCounts = function(collName, dbName) { dbName = dbName || "test"; - var counts = {} - for (var i=0; i<this._connections.length; i++) + + // We should sync since we're going directly to mongod here + this.sync(); + + var counts = {}; + for (var i = 0; i < 
this._connections.length; i++) { counts[i] = this._connections[i].getDB(dbName).getCollection(collName).count(); + } + return counts; }; - this.chunkCounts = function(collName , dbName) { + this.chunkCounts = function(collName, dbName) { dbName = dbName || "test"; - var x = {} - this.config.shards.find().forEach( - function(z) { - x[z._id] = 0; - } - ); - - this.config.chunks.find({ ns : dbName + "." + collName }).forEach( - function(z) { - if (x[z.shard]) - x[z.shard]++ - else - x[z.shard] = 1; - } - ); + var x = {}; + this.config.shards.find().forEach(function(z) { + x[z._id] = 0; + }); + + this.config.chunks.find({ ns : dbName + "." + collName }).forEach(function(z) { + if (x[z.shard]) + x[z.shard]++; + else + x[z.shard] = 1; + }); return x; }; - this.chunkDiff = function(collName , dbName) { - var c = this.chunkCounts(collName , dbName); + this.chunkDiff = function(collName, dbName) { + var c = this.chunkCounts(collName, dbName); + var min = 100000000; var max = 0; for (var s in c) { @@ -386,21 +446,24 @@ var ShardingTest = function(params) { if (c[s] > max) max = c[s]; } - print("ShardingTest input: " + tojson(c) + " min: " + min + " max: " + max ); + + print("ShardingTest input: " + tojson(c) + " min: " + min + " max: " + max); return max - min; }; - // Waits up to one minute for the difference in chunks between the most loaded shard and least - // loaded shard to be 0 or 1, indicating that the collection is well balanced. - // This should only be called after creating a big enough chunk difference to trigger balancing. - this.awaitBalance = function(collName , dbName , timeToWait) { + /** + * Waits up to one minute for the difference in chunks between the most loaded shard and least + * loaded shard to be 0 or 1, indicating that the collection is well balanced. This should only + * be called after creating a big enough chunk difference to trigger balancing. 
+ */ + this.awaitBalance = function(collName, dbName, timeToWait) { timeToWait = timeToWait || 60000; - var shardingTest = this; + assert.soon(function() { - var x = shardingTest.chunkDiff(collName , dbName); + var x = self.chunkDiff(collName, dbName); print("chunk diff: " + x); return x < 2; - } , "no balance happened", 60000); + }, "no balance happened", 60000); }; this.getShardNames = function() { @@ -412,20 +475,21 @@ var ShardingTest = function(params) { }; this.getShard = function(coll, query, includeEmpty) { - var shards = this.getShardsForQuery(coll, query, includeEmpty) - assert.eq(shards.length, 1) - return shards[0] + var shards = this.getShardsForQuery(coll, query, includeEmpty); + assert.eq(shards.length, 1); + return shards[0]; }; /** * Returns the shards on which documents matching a particular query reside. */ this.getShardsForQuery = function(coll, query, includeEmpty) { - if (! coll.getDB) - coll = this.s.getCollection(coll) + if (!coll.getDB) { + coll = this.s.getCollection(coll); + } - var explain = coll.find(query).explain("executionStats") - var shards = [] + var explain = coll.find(query).explain("executionStats"); + var shards = []; var execStages = explain.executionStats.executionStages; var plannerShards = explain.queryPlanner.winningPlan.shards; @@ -442,8 +506,8 @@ var ShardingTest = function(params) { for(var i = 0; i < shards.length; i++) { for(var j = 0; j < this._connections.length; j++) { - if (connectionURLTheSame( this._connections[j] , shards[i])) { - shards[i] = this._connections[j] + if (connectionURLTheSame( this._connections[j], shards[i])) { + shards[i] = this._connections[j]; break; } } @@ -452,66 +516,54 @@ var ShardingTest = function(params) { return shards; }; - this.isSharded = function(collName) { - var collName = "" + collName - var dbName = undefined - - if (typeof collName.getCollectionNames == 'function') { - dbName = "" + collName - collName = undefined - } - - if (dbName) { - var x = 
this.config.databases.findOne({ _id : dbname }) - if (x) return x.partitioned - else return false - } - - if (collName) { - var x = this.config.collections.findOne({ _id : collName }) - if (x) return true - else return false - } - }; - - this.shardColl = function(collName , key , split , move , dbName, waitForDelete) { - split = (split != false ? (split || key) : split) - move = (split != false && move != false ? (move || split) : false) + this.shardColl = function(collName, key, split, move, dbName, waitForDelete) { + split = (split != false ? (split || key) : split); + move = (split != false && move != false ? (move || split) : false); if (collName.getDB) - dbName = "" + collName.getDB() + dbName = "" + collName.getDB(); else dbName = dbName || "test"; var c = dbName + "." + collName; - if (collName.getDB) - c = "" + collName + if (collName.getDB) { + c = "" + collName; + } var isEmpty = (this.s.getCollection(c).count() == 0); - if (! this.isSharded(dbName)) - this.s.adminCommand({ enableSharding : dbName }) + if (!_isSharded(dbName)) { + this.s.adminCommand({ enableSharding : dbName }); + } - var result = this.s.adminCommand({ shardcollection : c , key : key }) - if (! result.ok) { - printjson(result) - assert(false) + var result = this.s.adminCommand({ shardcollection : c, key : key }); + if (!result.ok) { + printjson(result); + assert(false); } - if (split == false) return; + if (split == false) { + return; + } - result = this.s.adminCommand({ split : c , middle : split }); + result = this.s.adminCommand({ split : c, middle : split }); if (! 
result.ok) { - printjson(result) - assert(false) + printjson(result); + assert(false); } - if (move == false) return; + if (move == false) { + return; + } - var result = null + var result; for(var i = 0; i < 5; i++) { - result = this.s.adminCommand({ movechunk : c , find : move , to : this.getOther(this.getServer(dbName)).name, _waitForDelete: waitForDelete }); + result = this.s.adminCommand({ movechunk: c, + find: move, + to: this.getOther(this.getServer(dbName)).name, + _waitForDelete: waitForDelete }); if (result.ok) break; + sleep(5 * 1000); } @@ -520,7 +572,10 @@ var ShardingTest = function(params) { }; this.stopBalancer = function(timeout, interval) { - if (typeof db == "undefined") db = undefined; + if (typeof db == "undefined") { + db = undefined; + } + var oldDB = db; db = this.config; @@ -534,7 +589,10 @@ var ShardingTest = function(params) { }; this.startBalancer = function(timeout, interval) { - if (typeof db == "undefined") db = undefined; + if (typeof db == "undefined") { + db = undefined; + } + var oldDB = db; db = this.config; @@ -594,7 +652,7 @@ var ShardingTest = function(params) { } else { MongoRunner.stopMongod(this._configServers[n]); } - } + }; /** * Stops and restarts a mongos process. @@ -753,7 +811,6 @@ var ShardingTest = function(params) { this["c" + n] = this._configServers[n]; }; - /** * Helper method for setting primary shard of a database and making sure that it was successful. * Note: first mongos needs to be up. @@ -776,11 +833,13 @@ var ShardingTest = function(params) { var numMongos = otherParams.hasOwnProperty('mongos') ? otherParams.mongos : 1; var numConfigs = otherParams.hasOwnProperty('config') ? 
otherParams.config : 3; - // Allow specifying options like : - // { mongos : [ { noprealloc : "" } ], config : [ { smallfiles : "" } ], shards : { rs : true, d : true } } + // Allow specifying mixed-type options like this: + // { mongos : [ { noprealloc : "" } ], + // config : [ { smallfiles : "" } ], + // shards : { rs : true, d : true } } if (Array.isArray(numShards)) { for(var i = 0; i < numShards.length; i++) { - otherParams[ "d" + i ] = numShards[i]; + otherParams["d" + i] = numShards[i]; } numShards = numShards.length; @@ -788,7 +847,7 @@ var ShardingTest = function(params) { else if (isObject(numShards)) { var tempCount = 0; for(var i in numShards) { - otherParams[ i ] = numShards[i]; + otherParams[i] = numShards[i]; tempCount++; } @@ -797,7 +856,7 @@ var ShardingTest = function(params) { if (Array.isArray(numMongos)) { for(var i = 0; i < numMongos.length; i++) { - otherParams[ "s" + i ] = numMongos[i]; + otherParams["s" + i] = numMongos[i]; } numMongos = numMongos.length; @@ -805,7 +864,7 @@ var ShardingTest = function(params) { else if (isObject(numMongos)) { var tempCount = 0; for(var i in numMongos) { - otherParams[ i ] = numMongos[i]; + otherParams[i] = numMongos[i]; tempCount++; } @@ -814,15 +873,15 @@ var ShardingTest = function(params) { if (Array.isArray(numConfigs)) { for(var i = 0; i < numConfigs.length; i++) { - otherParams[ "c" + i ] = numConfigs[i]; + otherParams["c" + i] = numConfigs[i]; } - numConfigs = numConfigs.length + numConfigs = numConfigs.length; } else if (isObject(numConfigs)) { var tempCount = 0; for(var i in numConfigs) { - otherParams[ i ] = numConfigs[i]; + otherParams[i] = numConfigs[i]; tempCount++; } @@ -834,26 +893,24 @@ var ShardingTest = function(params) { true : otherParams.useHostname; otherParams.useBridge = otherParams.useBridge || false; otherParams.bridgeOptions = otherParams.bridgeOptions || {}; - var keyFile = otherParams.keyFile || otherParams.extraOptions.keyFile + + var keyFile = otherParams.keyFile || 
otherParams.extraOptions.keyFile; var hostName = getHostName(); - this._testName = testName - this._otherParams = otherParams - + this._testName = testName; + this._otherParams = otherParams; + var pathOpts = { testName: testName }; - var hasRS = false for(var k in otherParams) { if (k.startsWith("rs") && otherParams[k] != undefined) { - hasRS = true - break + break; } } - this._connections = [] - this._shardServers = this._connections - this._rs = [] - this._rsObjects = [] + this._connections = []; + this._rs = []; + this._rsObjects = []; if (otherParams.useBridge) { var unbridgedConnections = []; @@ -866,26 +923,24 @@ var ShardingTest = function(params) { if (otherParams.rs || otherParams["rs" + i]) { var setName = testName + "-rs" + i; - rsDefaults = { useHostname : otherParams.useHostname, - noJournalPrealloc : otherParams.nopreallocj, - oplogSize : 16, - pathOpts : Object.merge(pathOpts, { shard : i })} + var rsDefaults = { useHostname : otherParams.useHostname, + noJournalPrealloc : otherParams.nopreallocj, + oplogSize : 16, + pathOpts : Object.merge(pathOpts, { shard : i })}; - rsDefaults = Object.merge(rsDefaults, ShardingTest.rsOptions || {}) - rsDefaults = Object.merge(rsDefaults, otherParams.rs) - rsDefaults = Object.merge(rsDefaults, otherParams.rsOptions) - rsDefaults = Object.merge(rsDefaults, otherParams["rs" + i]) - rsDefaults.nodes = rsDefaults.nodes || otherParams.numReplicas + rsDefaults = Object.merge(rsDefaults, otherParams.rs); + rsDefaults = Object.merge(rsDefaults, otherParams.rsOptions); + rsDefaults = Object.merge(rsDefaults, otherParams["rs" + i]); + rsDefaults.nodes = rsDefaults.nodes || otherParams.numReplicas; var numReplicas = rsDefaults.nodes || 3; delete rsDefaults.nodes; + var protocolVersion = rsDefaults.protocolVersion; delete rsDefaults.protocolVersion; var initiateTimeout = rsDefaults.initiateTimeout; delete rsDefaults.initiateTimeout; - print("Replica set test!") - var rs = new ReplSetTest({ name : setName, nodes : 
numReplicas, useHostName : otherParams.useHostname, @@ -901,9 +956,9 @@ var ShardingTest = function(params) { url : rs.getURL() }; rs.initiate(null, null, initiateTimeout); - this["rs" + i] = rs - this._rsObjects[i] = rs + this["rs" + i] = rs; + this._rsObjects[i] = rs; _alldbpaths.push(null); this._connections.push(null); @@ -923,11 +978,11 @@ var ShardingTest = function(params) { if (otherParams.shardOptions && otherParams.shardOptions.binVersion) { otherParams.shardOptions.binVersion = - MongoRunner.versionIterator(otherParams.shardOptions.binVersion) + MongoRunner.versionIterator(otherParams.shardOptions.binVersion); } - options = Object.merge(options, otherParams.shardOptions) - options = Object.merge(options, otherParams["d" + i]) + options = Object.merge(options, otherParams.shardOptions); + options = Object.merge(options, otherParams["d" + i]); options.port = options.port || allocatePort(); @@ -952,7 +1007,7 @@ var ShardingTest = function(params) { if (otherParams.useBridge) { bridge.connectToBridge(); this._connections.push(bridge); - unbridgedConnections.push(conn) + unbridgedConnections.push(conn); } else { this._connections.push(conn); } @@ -973,18 +1028,20 @@ var ShardingTest = function(params) { } var rs = this._rs[i].test; - - rs.getMaster().getDB("admin").foo.save({ x : 1 }) + rs.getPrimary().getDB("admin").foo.save({ x : 1 }); + if (keyFile) { authutil.asCluster(rs.nodes, keyFile, function() { rs.awaitReplication(); }); } + rs.awaitSecondaryNodes(); - + var rsConn = new Mongo(rs.getURL()); rsConn.name = rs.getURL(); - this._connections[i] = rsConn - this["shard" + i] = rsConn - rsConn.rs = rs + + this._connections[i] = rsConn; + this["shard" + i] = rsConn; + rsConn.rs = rs; } // Default to using 3-node legacy config servers if jsTestOptions().useLegacyOptions is true @@ -1014,15 +1071,13 @@ var ShardingTest = function(params) { journal : "", configsvr : "" }; - options = Object.merge(options, ShardingTest.configOptions || {}) - if 
(otherParams.configOptions && otherParams.configOptions.binVersion) { otherParams.configOptions.binVersion = - MongoRunner.versionIterator(otherParams.configOptions.binVersion) + MongoRunner.versionIterator(otherParams.configOptions.binVersion); } - options = Object.merge(options, otherParams.configOptions) - options = Object.merge(options, otherParams["c" + i]) + options = Object.merge(options, otherParams.configOptions); + options = Object.merge(options, otherParams["c" + i]); options.port = options.port || allocatePort(); @@ -1079,19 +1134,19 @@ var ShardingTest = function(params) { storageEngine : "wiredTiger", }; - startOptions = Object.merge(startOptions, ShardingTest.configOptions || {}) - if (otherParams.configOptions && otherParams.configOptions.binVersion) { otherParams.configOptions.binVersion = - MongoRunner.versionIterator(otherParams.configOptions.binVersion) + MongoRunner.versionIterator(otherParams.configOptions.binVersion); } - startOptions = Object.merge(startOptions, otherParams.configOptions) + startOptions = Object.merge(startOptions, otherParams.configOptions); + var nodeOptions = []; for (var i = 0; i < numConfigs; ++i) { nodeOptions.push(otherParams["c" + i] || {}); } - rstOptions["nodes"] = nodeOptions; + + rstOptions.nodes = nodeOptions; this.configRS = new ReplSetTest(rstOptions); this.configRS.startSet(startOptions); @@ -1102,7 +1157,7 @@ var ShardingTest = function(params) { var initiateTimeout = otherParams.rsOptions && otherParams.rsOptions.initiateTimeout; this.configRS.initiate(config, null, initiateTimeout); - this.configRS.getMaster(); // Wait for master to be elected before starting mongos + this.configRS.getPrimary(); // Wait for master to be elected before starting mongos this._configDB = this.configRS.getURL(); this._configServers = this.configRS.nodes; @@ -1115,20 +1170,23 @@ var ShardingTest = function(params) { printjson("config servers: " + this._configDB); - this._configConnection = _connectWithRetry(this._configDB); + 
var configConnection = _connectWithRetry(this._configDB); - print("ShardingTest " + this._testName + " :\n" + tojson({ config : this._configDB, shards : this._connections })); + print("ShardingTest " + this._testName + " :\n" + + tojson({ config : this._configDB, shards : this._connections })); if (numMongos == 0 && !otherParams.noChunkSize) { if (keyFile) { throw Error("Cannot set chunk size without any mongos when using auth"); } else { - this._configConnection.getDB("config").settings.insert( - { _id : "chunksize" , value : otherParams.chunksize || otherParams.chunkSize || 50 }); + configConnection.getDB("config").settings.insert({ + _id : "chunksize", + value : otherParams.chunksize || otherParams.chunkSize || 50 + }); } } - this._mongos = [] + this._mongos = []; // Start the MongoS servers for (var i = 0; i < ((numMongos == 0 ? -1 : numMongos) || 1); i++) { @@ -1137,23 +1195,21 @@ var ShardingTest = function(params) { pathOpts: Object.merge(pathOpts, {mongos: i}), configdb: this._configDB, verbose: verboseLevel || 0, - keyFile: keyFile + keyFile: keyFile, }; if (!otherParams.noChunkSize) { options.chunkSize = otherParams.chunksize || otherParams.chunkSize || 50; } - options = Object.merge(options, ShardingTest.mongosOptions || {}) - if (otherParams.mongosOptions && otherParams.mongosOptions.binVersion) { otherParams.mongosOptions.binVersion = MongoRunner.versionIterator(otherParams.mongosOptions.binVersion); } - options = Object.merge(options, otherParams.mongosOptions) - options = Object.merge(options, otherParams.extraOptions) - options = Object.merge(options, otherParams["s" + i]) + options = Object.merge(options, otherParams.mongosOptions); + options = Object.merge(options, otherParams.extraOptions); + options = Object.merge(options, otherParams["s" + i]); options.port = options.port || allocatePort(); @@ -1242,7 +1298,7 @@ var ShardingTest = function(params) { } if (jsTestOptions().keyFile) { - jsTest.authenticate(this._configConnection); + 
jsTest.authenticate(configConnection); jsTest.authenticateNodes(this._configServers); jsTest.authenticateNodes(this._mongos); } |