| author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2015-11-25 11:20:43 -0500 |
| --- | --- | --- |
| committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2015-12-08 13:05:00 -0500 |
| commit | 3ed6635a5fb26c354046d275a1217c4526b2fe02 | |
| tree | f40aa20b5e62996843ce3df0f47b82042dd683a7 | /jstests |
| parent | 4f24dc58f48cb087db8a4832421d298e9e2633a0 | |
| download | mongo-3ed6635a5fb26c354046d275a1217c4526b2fe02.tar.gz | |
SERVER-21050 Cleanup ReplSetTest
This is just cleanup work to hide some of the private state of ReplSetTest so
that it is easier to encapsulate and add new logic. It also enables strict
mode.
Diffstat (limited to 'jstests')
135 files changed, 541 insertions, 532 deletions
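The change is mechanical throughout the jstests suite: every call to the deprecated `ReplSetTest.getMaster()` becomes `getPrimary()`, and a few comments are updated to match. The sketch below is illustrative only, not a file from this patch; the set name, database, and collection are hypothetical, but it shows the typical test pattern the diff is touching and the strict-mode wrapper the cleanup standardizes on.

```javascript
// Minimal sketch (hypothetical test, not part of this commit): the usual
// ReplSetTest flow, using getPrimary() where older tests called getMaster().
(function() {
    "use strict";  // the cleanup also enables strict mode in ReplSetTest itself

    var replTest = new ReplSetTest({name: "example_set", nodes: 3});
    replTest.startSet();
    replTest.initiate();

    // Before this change: var master = replTest.getMaster();
    var primary = replTest.getPrimary();  // same node, clearer name

    // Write with a write concern so all members are caught up before asserting.
    assert.writeOK(primary.getDB("test").coll.insert(
        {x: 1}, {writeConcern: {w: 3, wtimeout: 60000}}));

    replTest.awaitReplication();
    replTest.stopSet();
}());
```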
diff --git a/jstests/auth/copyauth.js b/jstests/auth/copyauth.js index 2d3ee534666..a0e0b92c51e 100644 --- a/jstests/auth/copyauth.js +++ b/jstests/auth/copyauth.js @@ -60,7 +60,7 @@ function ClusterSpawnHelper(clusterType, startWithAuth) { else { replSetTest.awaitReplication(); } - this.conn = replSetTest.getMaster(); + this.conn = replSetTest.getPrimary(); this.connString = replSetTest.getURL(); } else { diff --git a/jstests/auth/user_defined_roles_on_secondaries.js b/jstests/auth/user_defined_roles_on_secondaries.js index 474ee7ad5d0..4ca2d14f651 100644 --- a/jstests/auth/user_defined_roles_on_secondaries.js +++ b/jstests/auth/user_defined_roles_on_secondaries.js @@ -95,7 +95,7 @@ m0.getDB("db1").createRole({ rstest.add(); rstest.reInitiate(); -rstest.getMaster().getDB("db1").createRole({ +rstest.getPrimary().getDB("db1").createRole({ role: "r3", roles: [ "r1", "r2" ], privileges: [ @@ -116,8 +116,8 @@ rstest.nodes.forEach(function (node) { }); // Verify that updating roles propagates. -rstest.getMaster().getDB("db1").revokeRolesFromRole("r1", [ "read" ], { w: 2 }); -rstest.getMaster().getDB("db1").grantRolesToRole("r1", [ "dbAdmin" ], { w: 2 }); +rstest.getPrimary().getDB("db1").revokeRolesFromRole("r1", [ "read" ], { w: 2 }); +rstest.getPrimary().getDB("db1").grantRolesToRole("r1", [ "dbAdmin" ], { w: 2 }); rstest.nodes.forEach(function (node) { var role = node.getDB("db1").getRole("r1"); assert.eq(1, role.roles.length, node); @@ -125,7 +125,7 @@ rstest.nodes.forEach(function (node) { }); // Verify that dropping roles propagates. -rstest.getMaster().getDB("db1").dropRole("r2", { w: 2}); +rstest.getPrimary().getDB("db1").dropRole("r2", { w: 2}); rstest.nodes.forEach(function (node) { assert.eq(null, node.getDB("db1").getRole("r2")); var role = node.getDB("db1").getRole("r3"); @@ -137,8 +137,8 @@ rstest.nodes.forEach(function (node) { }); // Verify that dropping the admin database propagates. -assert.commandWorked(rstest.getMaster().getDB("admin").dropDatabase()); -assert.commandWorked(rstest.getMaster().getDB("admin").getLastErrorObj(2)); +assert.commandWorked(rstest.getPrimary().getDB("admin").dropDatabase()); +assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2)); rstest.nodes.forEach(function (node) { var roles = node.getDB("db1").getRoles(); assert.eq(0, roles.length, node); @@ -146,7 +146,7 @@ rstest.nodes.forEach(function (node) { // Verify that applyOps commands propagate. // NOTE: This section of the test depends on the oplog and roles schemas. 
-assert.commandWorked(rstest.getMaster().getDB("admin").runCommand({ applyOps: [ +assert.commandWorked(rstest.getPrimary().getDB("admin").runCommand({ applyOps: [ { op: "c", ns: "admin.$cmd", @@ -214,7 +214,7 @@ assert.commandWorked(rstest.getMaster().getDB("admin").runCommand({ applyOps: [ } ] })); -assert.commandWorked(rstest.getMaster().getDB("admin").getLastErrorObj(2)); +assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2)); rstest.nodes.forEach(function (node) { var role = node.getDB("db1").getRole("t1"); assert.eq(1, role.roles.length, node); diff --git a/jstests/gle/get_last_error.js b/jstests/gle/get_last_error.js index 8d0b3d940f3..3b5d6368c61 100644 --- a/jstests/gle/get_last_error.js +++ b/jstests/gle/get_last_error.js @@ -5,7 +5,7 @@ var replTest = new ReplSetTest({name: name, oplogSize: 1, nodes: 3, settings: {chainingAllowed: false}}); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var mdb = master.getDB("test"); // synchronize replication @@ -49,7 +49,7 @@ assert.eq(gle.wtimeout, null); // take a node down and GLE for more nodes than are up replTest.stop(2); -master = replTest.getMaster(); +master = replTest.getPrimary(); mdb = master.getDB("test"); // do w:2 write so secondary is caught up before calling {gle w:3}. assert.writeOK(mdb.foo.insert({_id: "3"}, {writeConcern: {w: 2, wtimeout:30000}})); diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js index bd948327310..cb5fb085a94 100644 --- a/jstests/libs/parallelTester.js +++ b/jstests/libs/parallelTester.js @@ -1,22 +1,18 @@ /** * The ParallelTester class is used to test more than one test concurrently */ - - -if ( typeof _threadInject != "undefined" ){ - //print( "fork() available!" 
); - +if (typeof _threadInject != "undefined") { Thread = function(){ this.init.apply( this, arguments ); } _threadInject( Thread.prototype ); - + ScopedThread = function() { this.init.apply( this, arguments ); } ScopedThread.prototype = new Thread( function() {} ); _scopedThreadInject( ScopedThread.prototype ); - + fork = function() { var t = new Thread( function() {} ); Thread.apply( t, arguments ); @@ -29,7 +25,7 @@ if ( typeof _threadInject != "undefined" ){ if (host == undefined) host = db.getMongo().host; this.events = new Array( me, collectionName, host ); } - + EventGenerator.prototype._add = function( action ) { this.events.push( [ Random.genExp( this.mean ), action ] ); } diff --git a/jstests/noPassthrough/minvalid.js b/jstests/noPassthrough/minvalid.js index 056b36f3f5a..cbaf26e1b71 100644 --- a/jstests/noPassthrough/minvalid.js +++ b/jstests/noPassthrough/minvalid.js @@ -9,7 +9,7 @@ var host = getHostName(); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var mdb = master.getDB("foo"); print("1: initial insert"); diff --git a/jstests/noPassthrough/minvalid2.js b/jstests/noPassthrough/minvalid2.js index d34c93120d4..72a8c9a4d80 100644 --- a/jstests/noPassthrough/minvalid2.js +++ b/jstests/noPassthrough/minvalid2.js @@ -30,7 +30,7 @@ replTest.initiate({_id : name, members : [ {_id : 2, host : host+":"+replTest.ports[2], arbiterOnly : true} ]}); var slaves = replTest.liveNodes.slaves; -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var masterId = replTest.getNodeId(master); var slave = slaves[0]; var slaveId = replTest.getNodeId(slave); @@ -62,7 +62,7 @@ print("6: start up slave"); replTest.restart(slaveId); print("7: writes on former slave") -master = replTest.getMaster(); +master = replTest.getPrimary(); mdb1 = master.getDB("foo"); mdb1.foo.save({a:1002}); diff --git a/jstests/noPassthrough/wt_nojournal_repl.js b/jstests/noPassthrough/wt_nojournal_repl.js index 01bd23b10da..71cf78c5afc 100644 --- a/jstests/noPassthrough/wt_nojournal_repl.js +++ b/jstests/noPassthrough/wt_nojournal_repl.js @@ -40,7 +40,7 @@ else { config.members[0].priority = 1; replTest.initiate(config); - var masterDB = replTest.getMaster().getDB("test"); + var masterDB = replTest.getPrimary().getDB("test"); var secondary1 = replTest.liveNodes.slaves[0]; jsTestLog("add some data to collection foo"); diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js index 59dc0cf181a..d929665df66 100644 --- a/jstests/noPassthroughWithMongod/indexbg_drop.js +++ b/jstests/noPassthroughWithMongod/indexbg_drop.js @@ -27,7 +27,7 @@ replTest.initiate({"_id" : "bgIndex", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var masterId = replTest.getNodeId(master); diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js index 7289de6f25b..24a04775746 100644 --- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js +++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js @@ -44,7 +44,7 @@ replTest.initiate({"_id" : "bgIndex", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var masterDB = master.getDB(dbname); 
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js index bb71ef6aa2f..80379b64844 100644 --- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js +++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js @@ -27,7 +27,7 @@ replTest.initiate({"_id" : "bgIndex", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var secondId = replTest.getNodeId(second); diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js index f971fc1ba73..ee25b5874b5 100644 --- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js +++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js @@ -53,7 +53,7 @@ {"_id" : 1, "host" : nodenames[1]}, {"_id" : 2, "host" : nodenames[2], arbiterOnly: true}]}); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var second = replTest.getSecondary(); var secondId = replTest.getNodeId(second); diff --git a/jstests/noPassthroughWithMongod/moveprimary-replset.js b/jstests/noPassthroughWithMongod/moveprimary-replset.js index 964dd1afd26..50fe756463b 100755 --- a/jstests/noPassthroughWithMongod/moveprimary-replset.js +++ b/jstests/noPassthroughWithMongod/moveprimary-replset.js @@ -26,7 +26,7 @@ var replSet1 = shardingTest.rs0; var replSet2 = shardingTest.rs1; jsTest.log("Adding data to our first replica set"); -var repset1DB = replSet1.getMaster().getDB(testDBName); +var repset1DB = replSet1.getPrimary().getDB(testDBName); for (var i = 1; i <= numDocs; i++) { repset1DB[testCollName].insert({ x : i }); } @@ -48,13 +48,13 @@ jsTest.log("Adding replSet2 as second shard"); mongosConn.adminCommand({ addshard : replSet2.getURL() }); mongosConn.getDB('admin').printShardingStatus(); -printjson(replSet2.getMaster().getDBs()); +printjson(replSet2.getPrimary().getDBs()); jsTest.log("Moving test db from replSet1 to replSet2"); assert.commandWorked(mongosConn.getDB('admin').runCommand({ moveprimary: testDBName, to: replSet2.getURL() })); mongosConn.getDB('admin').printShardingStatus(); -printjson(replSet2.getMaster().getDBs()); +printjson(replSet2.getPrimary().getDBs()); assert.eq(testDB.getSiblingDB("config").databases.findOne({ "_id" : testDBName }).primary, replSet2.name, "Failed to change primary shard for unsharded database."); diff --git a/jstests/noPassthroughWithMongod/replica_set_shard_version.js b/jstests/noPassthroughWithMongod/replica_set_shard_version.js index 23328bf2378..400c49a3a4c 100644 --- a/jstests/noPassthroughWithMongod/replica_set_shard_version.js +++ b/jstests/noPassthroughWithMongod/replica_set_shard_version.js @@ -21,7 +21,7 @@ coll.findOne() var sadmin = shard.getDB( "admin" ) assert.throws(function() { sadmin.runCommand({ replSetStepDown : 3000, force : true }); }); -st.rs0.getMaster(); +st.rs0.getPrimary(); mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : true }) diff --git a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js index 4c36ff4f05d..30570c261c5 100644 --- a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js +++ b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js @@ -12,7 +12,7 @@ replTest.initiate({_id : name, members : replTest.awaitReplication(); 
-var master = replTest.getMaster(); +var master = replTest.getPrimary(); var db = master.getDB( "test" ); printjson( rs.status() ); diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js index f56134f5008..431154cb033 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl.js +++ b/jstests/noPassthroughWithMongod/ttl_repl.js @@ -14,7 +14,7 @@ var rt = new ReplSetTest( { name : "ttl_repl" , nodes: 2 } ); // setup set var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); rt.awaitSecondaryNodes(); var slave1 = rt.liveNodes.slaves[0]; diff --git a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js index f1c33f448f7..bf9317aad95 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js +++ b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js @@ -6,7 +6,7 @@ var rt = new ReplSetTest( { name : "ttl_repl" , nodes: 2 } ); // setup set var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); rt.awaitSecondaryNodes(); var slave1 = rt.getSecondary(); diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js index 7786be9cc4f..8abbb693fe0 100644 --- a/jstests/replsets/apply_batch_only_goes_forward.js +++ b/jstests/replsets/apply_batch_only_goes_forward.js @@ -25,7 +25,7 @@ var nodes = replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var mTest = master.getDB("test"); var mLocal = master.getDB("local"); var mMinvalid = mLocal["replset.minvalid"]; @@ -66,7 +66,7 @@ replTest.waitForState(master, replTest.RECOVERING, 90000); // Slave is now master... so do a write to get a minvalid entry on the secondary. 
- assert.writeOK(replTest.getMaster().getDB("test").foo.save({}, {writeConcern: {w: 3}})); + assert.writeOK(replTest.getPrimary().getDB("test").foo.save({}, {writeConcern: {w: 3}})); assert.soon(function() { var mv; diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js index 939b1d90e22..fc0eb463fbf 100644 --- a/jstests/replsets/auth1.js +++ b/jstests/replsets/auth1.js @@ -59,7 +59,7 @@ result = m.getDB("admin").runCommand({replSetInitiate : rs.getReplSetConfig()}); assert.eq(result.ok, 1, "couldn't initiate: "+tojson(result)); m.getDB('admin').logout(); // In case this node doesn't become primary, make sure its not auth'd -var master = rs.getMaster(); +var master = rs.getPrimary(); rs.awaitSecondaryNodes(); var mId = rs.getNodeId(master); var slave = rs.liveNodes.slaves[0]; @@ -107,7 +107,7 @@ assert.writeOK(bulk.execute({ w: 3, wtimeout: 60000 })); print("fail over"); rs.stop(mId); -master = rs.getMaster(); +master = rs.getPrimary(); print("add some more data 1"); master.getDB("test").auth("bar", "baz"); @@ -119,7 +119,7 @@ assert.writeOK(bulk.execute({ w: 2 })); print("resync"); rs.restart(mId, {"keyFile" : key1_600}); -master = rs.getMaster(); +master = rs.getPrimary(); print("add some more data 2"); bulk = master.getDB("test").foo.initializeUnorderedBulkOp(); @@ -146,7 +146,7 @@ try { catch (e) { print("error: "+e); } -master = rs.getMaster(); +master = rs.getPrimary(); master.getDB("admin").auth("foo", "bar"); diff --git a/jstests/replsets/auth2.js b/jstests/replsets/auth2.js index 8899162d04e..62179c1c7af 100644 --- a/jstests/replsets/auth2.js +++ b/jstests/replsets/auth2.js @@ -39,7 +39,7 @@ rs.initiate({ "_id" : name, {"_id" : 2, "host" : hostnames[2], priority: 0} ]}); -var master = rs.getMaster(); +var master = rs.getPrimary(); print("add an admin user"); master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}, diff --git a/jstests/replsets/auth3.js b/jstests/replsets/auth3.js index d940c5e3e37..504bfeffe9c 100644 --- a/jstests/replsets/auth3.js +++ b/jstests/replsets/auth3.js @@ -19,13 +19,13 @@ rs.startSet(); rs.initiate(); - master = rs.getMaster(); + master = rs.getPrimary(); jsTest.log("adding user"); master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}, {w: 2, wtimeout: 30000}); var safeInsert = function() { - master = rs.getMaster(); + master = rs.getPrimary(); master.getDB("admin").auth("foo", "bar"); assert.writeOK(master.getDB("foo").bar.insert({ x: 1 })); }; @@ -44,7 +44,7 @@ jsTest.log("write stuff to 0&2"); rs.stop(1); - master = rs.getMaster(); + master = rs.getPrimary(); master.getDB("admin").auth("foo", "bar"); master.getDB("foo").bar.drop(); jsTest.log("last op: " + diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js index 71c976586d7..59eefa52f52 100644 --- a/jstests/replsets/auth_no_pri.js +++ b/jstests/replsets/auth_no_pri.js @@ -6,7 +6,7 @@ var nodes = rs.startSet(); rs.initiate(); // Add user -var master = rs.getMaster(); +var master = rs.getPrimary(); master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT}); // Can authenticate replset connection when whole set is up. 
diff --git a/jstests/replsets/buildindexes.js b/jstests/replsets/buildindexes.js index 5ec65765e27..a114011c3a0 100644 --- a/jstests/replsets/buildindexes.js +++ b/jstests/replsets/buildindexes.js @@ -15,7 +15,7 @@ replTest.initiate(config); - var master = replTest.getMaster().getDB(name); + var master = replTest.getPrimary().getDB(name); var slaveConns = replTest.liveNodes.slaves; var slave = []; for (var i in slaveConns) { diff --git a/jstests/replsets/capped_id.js b/jstests/replsets/capped_id.js index cd866fb0234..83942f6405f 100644 --- a/jstests/replsets/capped_id.js +++ b/jstests/replsets/capped_id.js @@ -18,9 +18,9 @@ var nodes = replTest.startSet(); // This will wait for initiation replTest.initiate(); -// Call getMaster to return a reference to the node that's been +// Call getPrimary to return a reference to the node that's been // elected master -var master = replTest.getMaster(); +var master = replTest.getPrimary(); // wait for secondaries to be up, since we'll be reading from them replTest.awaitSecondaryNodes(); @@ -28,7 +28,7 @@ replTest.awaitSecondaryNodes(); var slave1 = replTest.liveNodes.slaves[0]; var slave2 = replTest.liveNodes.slaves[1]; -// Calling getMaster made available the liveNodes structure, +// Calling getPrimary made available the liveNodes structure, // which looks like this: // liveNodes = {master: masterNode, slaves: [slave1, slave2] } printjson( replTest.liveNodes ); diff --git a/jstests/replsets/capped_insert_order.js b/jstests/replsets/capped_insert_order.js index b55972afc16..9b39021732c 100644 --- a/jstests/replsets/capped_insert_order.js +++ b/jstests/replsets/capped_insert_order.js @@ -8,7 +8,7 @@ replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var slave = replTest.liveNodes.slaves[0]; var dbName = "db"; diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js index 54b50d9c63f..ca53d370f4a 100644 --- a/jstests/replsets/cloneDb.js +++ b/jstests/replsets/cloneDb.js @@ -21,7 +21,7 @@ if (jsTest.options().keyFile) { var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var secondary = replTest.liveNodes.slaves[0]; var masterDB = master.getDB(replsetDBName); masterDB.dropDatabase(); diff --git a/jstests/replsets/config_server_checks.js b/jstests/replsets/config_server_checks.js index b7627923d8d..41579aceb89 100644 --- a/jstests/replsets/config_server_checks.js +++ b/jstests/replsets/config_server_checks.js @@ -51,7 +51,7 @@ var conf = rst.getReplSetConfig(); conf.configsvr = true; assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); rst.stopSet(); })(); @@ -71,10 +71,10 @@ var conf = rst.getReplSetConfig(); conf.configsvr = true; assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); -var conf = rst.getMaster().getDB('local').system.replset.findOne(); +var conf = rst.getPrimary().getDB('local').system.replset.findOne(); assert(conf.configsvr, tojson(conf)); rst.stopSet(); @@ -93,7 +93,7 @@ var rst = new ReplSetTest({name: "configrs6", rst.startSet(); assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: 1})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); rst.stopSet(); })(); @@ -111,7 +111,7 @@ rst.startSet(); var conf = rst.getReplSetConfig(); 
assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); assert.throws(function() { rst.restart(0, {configsvr: ""}); @@ -135,7 +135,7 @@ var conf = rst.getReplSetConfig(); conf.configsvr = true; assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); -rst.getMaster(); +rst.getPrimary(); expectState(rst, rst.PRIMARY); var node = rst.nodes[0]; diff --git a/jstests/replsets/copydb.js b/jstests/replsets/copydb.js index d3df7de08c2..59730f70084 100644 --- a/jstests/replsets/copydb.js +++ b/jstests/replsets/copydb.js @@ -8,7 +8,7 @@ replTest.startSet(); replTest.initiate(); - var primary = replTest.getMaster(); + var primary = replTest.getPrimary(); var secondary = replTest.liveNodes.slaves[0]; var sourceDBName = 'copydb-repl-test-source'; diff --git a/jstests/replsets/drop_oplog.js b/jstests/replsets/drop_oplog.js index 90c920e1b27..8a84bb2050e 100644 --- a/jstests/replsets/drop_oplog.js +++ b/jstests/replsets/drop_oplog.js @@ -6,7 +6,7 @@ var nodes = rt.startSet(); rt.initiate(); - var master = rt.getMaster(); + var master = rt.getPrimary(); var ml = master.getDB( 'local' ); var threw = false; diff --git a/jstests/replsets/election_not_blocked.js b/jstests/replsets/election_not_blocked.js index 20c2ff7cc59..c3523200b0b 100644 --- a/jstests/replsets/election_not_blocked.js +++ b/jstests/replsets/election_not_blocked.js @@ -24,7 +24,7 @@ // so it cannot vote while fsync locked in PV1. Use PV0 explicitly here. protocolVersion: 0}); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // do a write assert.writeOK(master.getDB("foo").bar.insert({x:1}, {writeConcern: {w: 3}})); diff --git a/jstests/replsets/explain_slaveok.js b/jstests/replsets/explain_slaveok.js index 0714c5074d1..93069e6ac01 100644 --- a/jstests/replsets/explain_slaveok.js +++ b/jstests/replsets/explain_slaveok.js @@ -12,7 +12,7 @@ print("Start replica set with two nodes"); var replTest = new ReplSetTest({name: name, nodes: 2}); var nodes = replTest.startSet(); replTest.initiate(); -var primary = replTest.getMaster(); +var primary = replTest.getPrimary(); // Insert a document and let it sync to the secondary. 
print("Initial sync"); diff --git a/jstests/replsets/fsync_lock_read_secondaries.js b/jstests/replsets/fsync_lock_read_secondaries.js index 3f55bc8ef8b..81f6a0be8ae 100644 --- a/jstests/replsets/fsync_lock_read_secondaries.js +++ b/jstests/replsets/fsync_lock_read_secondaries.js @@ -31,7 +31,7 @@ var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5}); var nodes = replTest.startSet(); // This will wait for initiation replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var ret = master.getDB("admin").fsyncLock(); if (!ret.ok) { @@ -48,7 +48,7 @@ for(var i=0; i<docNum; i++) { waitForAllMembers(master.getDB("foo")); replTest.awaitReplication(); -// Calling getMaster also makes available the liveNodes structure, which looks like this: +// Calling getPrimary also makes available the liveNodes structure, which looks like this: // liveNodes = {master: masterNode, slaves: [slave1, slave2] } var slaves = replTest.liveNodes.slaves; slaves[0].setSlaveOk(); diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js index a60ce82f5f8..1aff25a18eb 100644 --- a/jstests/replsets/groupAndMapReduce.js +++ b/jstests/replsets/groupAndMapReduce.js @@ -18,9 +18,9 @@ doTest = function( signal ) { // This will wait for initiation replTest.initiate(); - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // save some records var len = 100 diff --git a/jstests/replsets/index_delete.js b/jstests/replsets/index_delete.js index c2e871fe6f1..8747eacc412 100644 --- a/jstests/replsets/index_delete.js +++ b/jstests/replsets/index_delete.js @@ -39,7 +39,7 @@ replTest.initiate({"_id" : "fgIndex", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var masterDB = master.getDB('fgIndexSec'); var secondDB = second.getDB('fgIndexSec'); diff --git a/jstests/replsets/index_restart_secondary.js b/jstests/replsets/index_restart_secondary.js index d792839c580..7308de83271 100644 --- a/jstests/replsets/index_restart_secondary.js +++ b/jstests/replsets/index_restart_secondary.js @@ -24,7 +24,7 @@ if (conns[0].getDB('test').serverBuildInfo().bits !== 32) { {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var second = replTest.getSecondary(); var secondId = replTest.getNodeId(second); diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js index a3ecaf5db68..8b673117fd1 100644 --- a/jstests/replsets/initial_sync1.js +++ b/jstests/replsets/initial_sync1.js @@ -26,7 +26,7 @@ var replTest = new ReplSetTest({name: basename, var conns = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var foo = master.getDB("foo"); var admin = master.getDB("admin"); @@ -98,7 +98,7 @@ reconnect(slave1); replTest.waitForState(slave1, [replTest.PRIMARY, replTest.SECONDARY], 60 * 1000); print("10. 
Insert some stuff"); -master = replTest.getMaster(); +master = replTest.getPrimary(); bulk = foo.bar.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" }); diff --git a/jstests/replsets/initial_sync2.js b/jstests/replsets/initial_sync2.js index 7888f7d3354..9a913aeafc5 100644 --- a/jstests/replsets/initial_sync2.js +++ b/jstests/replsets/initial_sync2.js @@ -25,7 +25,7 @@ var replTest = new ReplSetTest( {name: basename, nodes: 2} ); var conns = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var origMaster = master; var foo = master.getDB("foo"); var admin = master.getDB("admin"); diff --git a/jstests/replsets/initial_sync3.js b/jstests/replsets/initial_sync3.js index b7446813004..4456cfbd498 100644 --- a/jstests/replsets/initial_sync3.js +++ b/jstests/replsets/initial_sync3.js @@ -27,7 +27,7 @@ replTest.initiate({ ] }); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); print("Initial sync"); master.getDB("foo").bar.baz.insert({x:1}); @@ -48,7 +48,7 @@ assert(!result.secondary, tojson(result)); print("bring 0 back up"); replTest.restart(0); print("0 should become primary"); -master = replTest.getMaster(); +master = replTest.getPrimary(); print("now 1 should be able to initial sync"); assert.soon(function() { diff --git a/jstests/replsets/initial_sync4.js b/jstests/replsets/initial_sync4.js index da49839ef0c..c7c23e65497 100644 --- a/jstests/replsets/initial_sync4.js +++ b/jstests/replsets/initial_sync4.js @@ -8,7 +8,7 @@ replTest = new ReplSetTest( {name: basename, nodes: 1} ); replTest.startSet(); replTest.initiate(); -m = replTest.getMaster(); +m = replTest.getPrimary(); md = m.getDB("d"); mc = m.getDB("d")["c"]; diff --git a/jstests/replsets/ismaster1.js b/jstests/replsets/ismaster1.js index 76252a069cb..1904145c882 100644 --- a/jstests/replsets/ismaster1.js +++ b/jstests/replsets/ismaster1.js @@ -116,7 +116,7 @@ var agreeOnPrimaryAndSetVersion = function( setVersion ) { return true; } -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert.soon( function() { return agreeOnPrimaryAndSetVersion( 1 ); }, "Nodes did not initiate in less than a minute", 60000 ); @@ -199,7 +199,7 @@ catch(e) { print(e); } -master = replTest.getMaster(); +master = replTest.getPrimary(); assert.soon( function() { return agreeOnPrimaryAndSetVersion( 2 ); }, "Nodes did not sync in less than a minute", 60000 ); diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js index 34c0e83993b..5ecc15456dc 100644 --- a/jstests/replsets/maintenance.js +++ b/jstests/replsets/maintenance.js @@ -8,7 +8,7 @@ replTest.initiate(config); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60000); // Make sure we have a master -var master = replTest.getMaster(); +var master = replTest.getPrimary(); for (i = 0; i < 20; i++) { master.getDB("bar").foo.insert({x:1,y:i,abc:123,str:"foo bar baz"}); diff --git a/jstests/replsets/maintenance2.js b/jstests/replsets/maintenance2.js index 9b2793a3bd9..401bfeb8701 100644 --- a/jstests/replsets/maintenance2.js +++ b/jstests/replsets/maintenance2.js @@ -15,9 +15,9 @@ // This will wait for initiation replTest.initiate(); - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. 
- var master = replTest.getMaster(); + var master = replTest.getPrimary(); // save some records var len = 100 diff --git a/jstests/replsets/maxSyncSourceLagSecs.js b/jstests/replsets/maxSyncSourceLagSecs.js index 087db2edaab..8d44dd5ddb2 100644 --- a/jstests/replsets/maxSyncSourceLagSecs.js +++ b/jstests/replsets/maxSyncSourceLagSecs.js @@ -18,7 +18,7 @@ { "_id": 2, "host": nodes[2], priority: 0 }], }); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); master.getDB("foo").bar.save({a: 1}); replTest.awaitReplication(); var slaves = replTest.liveNodes.slaves; diff --git a/jstests/replsets/no_chaining.js b/jstests/replsets/no_chaining.js index ebedae9fa5d..97acc61875a 100644 --- a/jstests/replsets/no_chaining.js +++ b/jstests/replsets/no_chaining.js @@ -20,13 +20,13 @@ replTest.initiate( } ); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); replTest.awaitReplication(); var breakNetwork = function() { nodes[0].disconnect(nodes[2]); - master = replTest.getMaster(); + master = replTest.getPrimary(); }; var checkNoChaining = function() { diff --git a/jstests/replsets/oplog_format.js b/jstests/replsets/oplog_format.js index c07d4f66cef..c7cc18a5908 100644 --- a/jstests/replsets/oplog_format.js +++ b/jstests/replsets/oplog_format.js @@ -9,7 +9,7 @@ var replTest = new ReplSetTest( { nodes: 1, oplogSize:2, nodeOptions: {smallfiles:""}} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var coll = master.getDB("o").fake; var cdb = coll.getDB(); diff --git a/jstests/replsets/oplog_truncated_on_recovery.js b/jstests/replsets/oplog_truncated_on_recovery.js index 8142b8df7ee..c9e2fcaeae2 100644 --- a/jstests/replsets/oplog_truncated_on_recovery.js +++ b/jstests/replsets/oplog_truncated_on_recovery.js @@ -36,7 +36,7 @@ var nodes = replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var testDB = master.getDB("test"); var localDB = master.getDB("local"); var minvalidColl = localDB["replset.minvalid"]; diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js index 647864245dc..5d64719fe8c 100644 --- a/jstests/replsets/optime.js +++ b/jstests/replsets/optime.js @@ -32,7 +32,7 @@ var replTest = new ReplSetTest( { name : "replStatus" , nodes: 3, oplogSize: 1 } replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); replTest.awaitReplication(); replTest.awaitSecondaryNodes(); diff --git a/jstests/replsets/pipelineout.js b/jstests/replsets/pipelineout.js index 5e0c1d7f45c..97accba2eec 100644 --- a/jstests/replsets/pipelineout.js +++ b/jstests/replsets/pipelineout.js @@ -10,7 +10,7 @@ replTest.initiate({"_id" : name, {"_id" : 1, "host" : nodes[1]} ]}); -var primary = replTest.getMaster().getDB(name); +var primary = replTest.getPrimary().getDB(name); var secondary = replTest.liveNodes.slaves[0].getDB(name); // populate the collection diff --git a/jstests/replsets/plan_cache_slaveok.js b/jstests/replsets/plan_cache_slaveok.js index 311efef86bb..a63be51fae1 100644 --- a/jstests/replsets/plan_cache_slaveok.js +++ b/jstests/replsets/plan_cache_slaveok.js @@ -81,7 +81,7 @@ print("Start replica set with two nodes"); var replTest = new ReplSetTest({name: name, nodes: 2}); var nodes = replTest.startSet(); replTest.initiate(); -var primary = replTest.getMaster(); +var primary = replTest.getPrimary(); // Insert a document and let it sync to the secondary. 
print("Initial sync"); diff --git a/jstests/replsets/protocol_version_upgrade_downgrade.js b/jstests/replsets/protocol_version_upgrade_downgrade.js index a5906d2e88d..ebb0a740d7c 100644 --- a/jstests/replsets/protocol_version_upgrade_downgrade.js +++ b/jstests/replsets/protocol_version_upgrade_downgrade.js @@ -18,7 +18,7 @@ conf.members[2].priority = 0; rst.initiate(conf); rst.awaitSecondaryNodes(); -var primary = rst.getMaster(); +var primary = rst.getPrimary(); var primaryColl = primary.getDB("test").coll; // Set verbosity for replication on all nodes. diff --git a/jstests/replsets/reindex_secondary.js b/jstests/replsets/reindex_secondary.js index af08dd4cec8..d3c0991a5d3 100644 --- a/jstests/replsets/reindex_secondary.js +++ b/jstests/replsets/reindex_secondary.js @@ -4,7 +4,7 @@ var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); replTest.awaitSecondaryNodes() var slaves = replTest.liveNodes.slaves; diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js index 794b9dd9000..e15b793434b 100644 --- a/jstests/replsets/remove1.js +++ b/jstests/replsets/remove1.js @@ -16,7 +16,7 @@ print("Start set with two nodes"); var replTest = new ReplSetTest( {name: name, nodes: 2} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var secondary = replTest.getSecondary(); print("Initial sync"); @@ -79,7 +79,7 @@ assert.soon(function() { try { } catch (e) { return false; } }); -master = replTest.getMaster(); +master = replTest.getPrimary(); printjson(master.getDB("admin").runCommand({replSetGetStatus:1})); var newConfig = master.getDB("local").system.replset.findOne(); print("newConfig: " + tojson(newConfig)); diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js index de0fb30df4f..36bc2de74d1 100644 --- a/jstests/replsets/replset1.js +++ b/jstests/replsets/replset1.js @@ -25,9 +25,9 @@ var doTest = function( signal ) { // This will wait for initiation replTest.initiate(); - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var isPV1 = (replTest.getConfigFromPrimary().protocolVersion == 1); if (isPV1) { @@ -36,7 +36,7 @@ var doTest = function( signal ) { assert.eq("new primary", oplog_entry["o"]["msg"]); assert.eq("n", oplog_entry["op"]); } - // Calling getMaster also makes available the liveNodes structure, + // Calling getPrimary also makes available the liveNodes structure, // which looks like this: // liveNodes = {master: masterNode, // slaves: [slave1, slave2] @@ -68,7 +68,7 @@ var doTest = function( signal ) { replTest.stop( master_id ); // Now let's see who the new master is: - var new_master = replTest.getMaster(); + var new_master = replTest.getPrimary(); // Is the new master the same as the old master? 
var new_master_id = replTest.getNodeId( new_master ); @@ -104,7 +104,7 @@ var doTest = function( signal ) { }); // And that both slave nodes have all the updates - new_master = replTest.getMaster(); + new_master = replTest.getPrimary(); assert.eq( 1000 , new_master.getDB( "bar" ).runCommand( { count:"bar"} ).n , "assumption 2"); replTest.awaitSecondaryNodes(); replTest.awaitReplication(); @@ -119,7 +119,7 @@ var doTest = function( signal ) { }); // last error - master = replTest.getMaster(); + master = replTest.getPrimary(); slaves = replTest.liveNodes.slaves; printjson(replTest.liveNodes); diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js index 20364381dd0..656b727ba81 100644 --- a/jstests/replsets/replset2.js +++ b/jstests/replsets/replset2.js @@ -18,9 +18,9 @@ doTest = function (signal) { var testDB = "repl-test"; - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // Wait for replication to a single node master.getDB(testDB).bar.insert({ n: 1 }); diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js index 5731e269ce9..6bb29a196ec 100644 --- a/jstests/replsets/replset3.js +++ b/jstests/replsets/replset3.js @@ -15,7 +15,7 @@ var doTest = function (signal) { replTest.initiate(); // Get master node - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // Write some data to master // NOTE: this test fails unless we write some data. @@ -35,7 +35,7 @@ var doTest = function (signal) { print(phase++); try { - var new_master = replTest.getMaster(); + var new_master = replTest.getPrimary(); } catch (err) { throw ("Could not elect new master before timeout."); diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js index 95fa3dbd543..605284e01da 100644 --- a/jstests/replsets/replset4.js +++ b/jstests/replsets/replset4.js @@ -6,7 +6,7 @@ doTest = function (signal) { replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // Kill both slaves, simulating a network partition var slaves = replTest.liveNodes.slaves; diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js index 3f239499e1b..717a0c8153b 100644 --- a/jstests/replsets/replset5.js +++ b/jstests/replsets/replset5.js @@ -19,7 +19,7 @@ load("jstests/replsets/rslib.js"); replTest.initiate(config); // - var master = replTest.getMaster(); + var master = replTest.getPrimary(); replTest.awaitSecondaryNodes(); var testDB = "foo"; diff --git a/jstests/replsets/replset6.js b/jstests/replsets/replset6.js index f9111e28dbd..1c772cc7c28 100644 --- a/jstests/replsets/replset6.js +++ b/jstests/replsets/replset6.js @@ -6,7 +6,7 @@ baseName = "jstests_replsets_replset6"; var rt = new ReplSetTest({ name : "replset6tests" , nodes: 2 }); var nodes = rt.startSet(); rt.initiate(); -var m = rt.getMaster(); +var m = rt.getPrimary(); rt.awaitSecondaryNodes(); var slaves = rt.liveNodes.slaves; s = slaves[0]; diff --git a/jstests/replsets/replset7.js b/jstests/replsets/replset7.js index 0714a23e962..1c63fd8f35f 100644 --- a/jstests/replsets/replset7.js +++ b/jstests/replsets/replset7.js @@ -5,7 +5,7 @@ var rt = new ReplSetTest( { name : "replset7tests" , nodes: 1 } ); var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); var md = master.getDB( 'd' ); var mdc = md[ 'c' ]; diff --git 
a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js index 51cae86670a..ead9c50f066 100644 --- a/jstests/replsets/replset8.js +++ b/jstests/replsets/replset8.js @@ -5,7 +5,7 @@ var rt = new ReplSetTest( { name : "replset8tests" , nodes: 1 } ); var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); var bigstring = "a"; var md = master.getDB( 'd' ); var mdc = md[ 'c' ]; diff --git a/jstests/replsets/replset9.js b/jstests/replsets/replset9.js index 382ddd3c3e6..8ae46863087 100644 --- a/jstests/replsets/replset9.js +++ b/jstests/replsets/replset9.js @@ -4,7 +4,7 @@ var rt = new ReplSetTest( { name : "replset9tests" , nodes: 1, oplogSize: 300 } var nodes = rt.startSet(); rt.initiate(); -var master = rt.getMaster(); +var master = rt.getPrimary(); var bigstring = Array(5000).toString(); var md = master.getDB( 'd' ); var mdc = md[ 'c' ]; @@ -57,7 +57,7 @@ var slave = rt.add(); print ("initiation complete!"); var sc = slave.getDB( 'd' )[ 'c' ]; slave.setSlaveOk(); -master = rt.getMaster(); +master = rt.getPrimary(); print ("updating and deleting documents"); bulk = master.getDB('d')['c'].initializeUnorderedBulkOp(); diff --git a/jstests/replsets/replsetadd_profile.js b/jstests/replsets/replsetadd_profile.js index 45267f9ed4f..cc36f4c1a57 100644 --- a/jstests/replsets/replsetadd_profile.js +++ b/jstests/replsets/replsetadd_profile.js @@ -12,7 +12,7 @@ var replTest = new ReplSetTest({name: 'ReplSetAddProfileTestSet', nodes: [{profile: 2}]}); replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var masterCollection = master.getDB('test').getCollection(collectionName); masterCollection.save({a: 1}); diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js index 75c53008c54..16388c8b92b 100644 --- a/jstests/replsets/replsetarb2.js +++ b/jstests/replsets/replsetarb2.js @@ -15,7 +15,7 @@ ]}); // Make sure we have a master - var master = replTest.getMaster(); + var master = replTest.getPrimary(); // Make sure we have an arbiter assert.soon(function() { @@ -37,7 +37,7 @@ replTest.stop(mId); // And make sure that the slave is promoted - var new_master = replTest.getMaster(); + var new_master = replTest.getPrimary(); var newMasterId = replTest.getNodeId(new_master); assert.neq(newMasterId, mId, "Secondary wasn't promoted to new primary"); diff --git a/jstests/replsets/replsetfreeze.js b/jstests/replsets/replsetfreeze.js index 03e9f724f8a..67c08740e98 100644 --- a/jstests/replsets/replsetfreeze.js +++ b/jstests/replsets/replsetfreeze.js @@ -48,7 +48,7 @@ var config = {"_id" : "unicomplex", "members" : [ {"_id" : 1, "host" : nodes[1] }, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}; var r = replTest.initiate(config); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var secondary = replTest.getSecondary(); replTest.awaitSecondaryNodes(); @@ -79,7 +79,7 @@ while ((new Date()).getTime() - start < (28 * 1000) ) { // we need less 30 since print("5: check for new master"); -master = replTest.getMaster(); +master = replTest.getPrimary(); print("6: step down new master"); @@ -102,7 +102,7 @@ master.getDB("admin").runCommand({replSetFreeze : 0}); print("9: check we get a new master within 30 seconds"); -master = replTest.getMaster(); +master = replTest.getPrimary(); replTest.stopSet( 15 ); diff --git a/jstests/replsets/replsethostnametrim.js b/jstests/replsets/replsethostnametrim.js index 44f4c7b2320..c303ecdea0d 100644 --- 
a/jstests/replsets/replsethostnametrim.js +++ b/jstests/replsets/replsethostnametrim.js @@ -4,7 +4,7 @@ var replTest = new ReplSetTest({ name: 'testSet', nodes: 1 }); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var config = master.getDB("local").system.replset.findOne(); config.version++; var origHost = config.members[0].host; diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js index 04a3fb1f237..c0d6cb48a01 100644 --- a/jstests/replsets/replsetprio1.js +++ b/jstests/replsets/replsetprio1.js @@ -22,7 +22,7 @@ replTest.waitForState(nodes[1], replTest.PRIMARY, 60000); // do some writes on 1 - var master = replTest.getMaster(); + var master = replTest.getPrimary(); for (var i=0; i<1000; i++) { master.getDB("foo").bar.insert({i:i}); } @@ -36,7 +36,7 @@ replTest.waitForState(nodes[2], replTest.PRIMARY, 60000); // make sure nothing was rolled back - master = replTest.getMaster(); + master = replTest.getPrimary(); for (i=0; i<1000; i++) { assert(master.getDB("foo").bar.findOne({i:i}) != null, 'checking '+i); assert(master.getDB("bar").baz.findOne({i:i}) != null, 'checking '+i); diff --git a/jstests/replsets/replsetrestart1.js b/jstests/replsets/replsetrestart1.js index 0224d0f47c7..9ff3773a24f 100644 --- a/jstests/replsets/replsetrestart1.js +++ b/jstests/replsets/replsetrestart1.js @@ -35,9 +35,9 @@ // DOWN, later. replTest.awaitSecondaryNodes(); - // Call getMaster to return a reference to the node that's been + // Call getPrimary to return a reference to the node that's been // elected master. - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var config1 = master.getDB("local").system.replset.findOne(); // Now we're going to shut down all nodes @@ -60,7 +60,7 @@ replTest.restart( s2Id ); // Make sure that a new master comes up - master = replTest.getMaster(); + master = replTest.getPrimary(); replTest.awaitSecondaryNodes(); var config2 = master.getDB("local").system.replset.findOne(); compare_configs(config1, config2); diff --git a/jstests/replsets/restore_term.js b/jstests/replsets/restore_term.js index 00fa68ed0d0..0a0af27c08f 100644 --- a/jstests/replsets/restore_term.js +++ b/jstests/replsets/restore_term.js @@ -30,7 +30,7 @@ conf.protocolVersion = 1; rst.initiate(conf); rst.awaitSecondaryNodes(); -var primary = rst.getMaster(); +var primary = rst.getPrimary(); var primaryColl = primary.getDB("test").coll; // Current term may be greater than 1 if election race happens. @@ -53,13 +53,13 @@ try { rst.awaitSecondaryNodes(); // The secondary became the new primary now with a higher term. // Since there's only one secondary who may run for election, the new term is higher by 1. -assert.eq(getCurrentTerm(rst.getMaster()), firstSuccessfulTerm + 1); +assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1); // Restart the replset and verify the term is the same. rst.stopSet(null /* signal */, true /* forRestart */); rst.startSet({restart: true}); rst.awaitSecondaryNodes(); -primary = rst.getMaster(); +primary = rst.getPrimary(); assert.eq(primary.getDB("test").coll.find().itcount(), 1); // After restart, the new primary stands up with the newer term. 
diff --git a/jstests/replsets/resync_with_write_load.js b/jstests/replsets/resync_with_write_load.js index c67f1f67d59..f96c9a8e152 100644 --- a/jstests/replsets/resync_with_write_load.js +++ b/jstests/replsets/resync_with_write_load.js @@ -19,7 +19,7 @@ var config = { "_id": testName, var r = replTest.initiate(config); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // Make sure we have a master -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var a_conn = conns[0]; var b_conn = conns[1]; a_conn.setSlaveOk(); diff --git a/jstests/replsets/rollback.js b/jstests/replsets/rollback.js index 4e886b487a4..e66357b1fc5 100644 --- a/jstests/replsets/rollback.js +++ b/jstests/replsets/rollback.js @@ -45,7 +45,7 @@ load("jstests/replsets/rslib.js"); // Make sure we have a master replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var a_conn = conns[0]; var A = a_conn.getDB("admin"); var b_conn = conns[1]; diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js index 1b91c8803f3..000346b17d3 100644 --- a/jstests/replsets/rollback2.js +++ b/jstests/replsets/rollback2.js @@ -41,7 +41,7 @@ load("jstests/replsets/rslib.js"); // Make sure we have a master and that that master is node A replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var a_conn = conns[0]; a_conn.setSlaveOk(); var A = a_conn.getDB("admin"); diff --git a/jstests/replsets/rollback3.js b/jstests/replsets/rollback3.js index 1ba4d14d255..b5bfcd655c5 100755 --- a/jstests/replsets/rollback3.js +++ b/jstests/replsets/rollback3.js @@ -46,7 +46,7 @@ load("jstests/replsets/rslib.js"); // Make sure we have a master and that that master is node A replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var a_conn = conns[0]; a_conn.setSlaveOk(); var A = a_conn.getDB("admin"); diff --git a/jstests/replsets/rollback5.js b/jstests/replsets/rollback5.js index 7159edead5c..c85e86bbe7c 100644 --- a/jstests/replsets/rollback5.js +++ b/jstests/replsets/rollback5.js @@ -22,7 +22,7 @@ var r = replTest.initiate({ "_id": "rollback5", // Make sure we have a master replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var a_conn = conns[0]; var b_conn = conns[1]; a_conn.setSlaveOk(); @@ -46,13 +46,13 @@ var options = { writeConcern: { w: 2, wtimeout: 60000 }, upsert: true }; assert.writeOK(A.foo.update({ key: 'value1' }, { $set: { req: 'req' }}, options)); replTest.stop(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host == master.host); options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true }; assert.writeOK(B.foo.update({key:'value1'}, {$set: {res: 'res'}}, options)); replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host == master.host); options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true }; assert.writeOK(A.foo.update({ key: 'value2' }, { $set: { req: 'req' }}, options)); diff --git a/jstests/replsets/rollback_auth.js b/jstests/replsets/rollback_auth.js index 589f755aaed..4e32e51b49a 100644 --- a/jstests/replsets/rollback_auth.js +++ b/jstests/replsets/rollback_auth.js @@ -40,7 +40,7 @@ // Make sure 
we have a master replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var a_conn = conns[0]; var b_conn = conns[1]; a_conn.setSlaveOk(); diff --git a/jstests/replsets/rollback_cmd_unrollbackable.js b/jstests/replsets/rollback_cmd_unrollbackable.js index 8cfe12e6523..13cac4e8459 100644 --- a/jstests/replsets/rollback_cmd_unrollbackable.js +++ b/jstests/replsets/rollback_cmd_unrollbackable.js @@ -25,7 +25,7 @@ var BID = replTest.getNodeId(b_conn); // get master and do an initial write replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -35,7 +35,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with a non-rollbackworthy command -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -49,7 +49,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js index 2032f9a584b..0500e5f28e3 100644 --- a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js +++ b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js @@ -25,7 +25,7 @@ replTest.initiate({"_id": name, { "_id": 2, "host": nodes[2], arbiterOnly: true}] }); // Get master and do an initial write. -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var a_conn = master; var slaves = replTest.liveNodes.slaves; var b_conn = slaves[0]; @@ -63,7 +63,7 @@ assert.eq(getOptions(a_conn), {flags: 2, // Shut down A and fail over to B. 
replTest.stop(AID); replTest.restart(BID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert.eq(b_conn.host, master.host, "b_conn assumed to be master"); b_conn = master; diff --git a/jstests/replsets/rollback_collMod_fatal.js b/jstests/replsets/rollback_collMod_fatal.js index 62cb22ac20a..03cde3e904c 100644 --- a/jstests/replsets/rollback_collMod_fatal.js +++ b/jstests/replsets/rollback_collMod_fatal.js @@ -26,7 +26,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -37,7 +37,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // do a collMod altering TTL which should cause FATAL when rolled back -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); assert.commandWorked(b_conn.getDB(name).runCommand({collMod: "foo", index: {keyPattern: {x:1}, @@ -46,7 +46,7 @@ assert.commandWorked(b_conn.getDB(name).runCommand({collMod: "foo", // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_different_h.js b/jstests/replsets/rollback_different_h.js index a6350a325da..a9ca7d1aac3 100644 --- a/jstests/replsets/rollback_different_h.js +++ b/jstests/replsets/rollback_different_h.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // change the h value of the most recent entry on B -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0]; @@ -62,7 +62,7 @@ assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_dropdb.js b/jstests/replsets/rollback_dropdb.js index 368dbb6ee95..aba4264c561 100644 --- a/jstests/replsets/rollback_dropdb.js +++ b/jstests/replsets/rollback_dropdb.js @@ -26,7 +26,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], 
"conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -36,7 +36,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // drop database which should cause FATAL when rolled back -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); b_conn.getDB(name).dropDatabase(); assert.eq(0, b_conn.getDB(name).foo.count(), "dropping database failed"); @@ -44,7 +44,7 @@ assert.eq(0, b_conn.getDB(name).foo.count(), "dropping database failed"); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_empty_ns.js b/jstests/replsets/rollback_empty_ns.js index a5c6a92c1be..fea10564b43 100644 --- a/jstests/replsets/rollback_empty_ns.js +++ b/jstests/replsets/rollback_empty_ns.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with an empty ns -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -60,7 +60,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_empty_o.js b/jstests/replsets/rollback_empty_o.js index 81a84db6eac..73a887ee2d9 100644 --- a/jstests/replsets/rollback_empty_o.js +++ b/jstests/replsets/rollback_empty_o.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with an empty o -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -60,7 +60,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); 
-master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_empty_o2.js b/jstests/replsets/rollback_empty_o2.js index 7e4a22be06d..d705aa999b6 100644 --- a/jstests/replsets/rollback_empty_o2.js +++ b/jstests/replsets/rollback_empty_o2.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with an empty o2 -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -61,7 +61,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_fake_cmd.js b/jstests/replsets/rollback_fake_cmd.js index 03ae1345790..f4d54a1f146 100644 --- a/jstests/replsets/rollback_fake_cmd.js +++ b/jstests/replsets/rollback_fake_cmd.js @@ -37,7 +37,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -47,7 +47,7 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); replTest.stop(AID); // insert a fake oplog entry with a nonexistent command -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -61,7 +61,7 @@ assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // do a write so that B will have to roll back diff --git a/jstests/replsets/rollback_index.js b/jstests/replsets/rollback_index.js index 83e8df5b9a6..67360624a03 100644 --- a/jstests/replsets/rollback_index.js +++ b/jstests/replsets/rollback_index.js @@ -39,7 +39,7 @@ var BID = replTest.getNodeId(b_conn); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // get master and do an initial write -var master = replTest.getMaster(); +var master = replTest.getPrimary(); assert(master === conns[0], "conns[0] assumed to be master"); assert(a_conn.host === master.host, "a_conn assumed to be 
master"); var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true}; @@ -50,7 +50,7 @@ replTest.stop(AID); // Create a unique index that, if not dropped during rollback, would // cause errors when applying operations from the primary. -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(b_conn.host === master.host, "b_conn assumed to be master"); options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true}; // another insert to set minvalid ahead @@ -61,7 +61,7 @@ assert.writeError(b_conn.getDB(name).foo.insert({x: 123})); // shut down B and bring back the original master replTest.stop(BID); replTest.restart(AID); -master = replTest.getMaster(); +master = replTest.getPrimary(); assert(a_conn.host === master.host, "a_conn assumed to be master"); // Insert a document with the same value for 'x' that should be diff --git a/jstests/replsets/rollback_too_new.js b/jstests/replsets/rollback_too_new.js index c3ff41368ca..6365a3eceeb 100644 --- a/jstests/replsets/rollback_too_new.js +++ b/jstests/replsets/rollback_too_new.js @@ -27,7 +27,7 @@ var CID = replTest.getNodeId(c_conn); // get master and do an initial write - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var options = {writeConcern: {w: 2, wtimeout: 60000}}; assert.writeOK(master.getDB(name).foo.insert({x: 1}, options)); diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js index 7111063f38e..d4cea8a51eb 100644 --- a/jstests/replsets/rslib.js +++ b/jstests/replsets/rslib.js @@ -102,7 +102,7 @@ waitForAllMembers = function(master, timeout) { reconfig = function(rs, config, force) { "use strict"; - var admin = rs.getMaster().getDB("admin"); + var admin = rs.getPrimary().getDB("admin"); var e; var master; try { @@ -114,7 +114,7 @@ reconfig = function(rs, config, force) { } } - var master = rs.getMaster().getDB("admin"); + var master = rs.getPrimary().getDB("admin"); waitForAllMembers(master); return master; diff --git a/jstests/replsets/server8070.js b/jstests/replsets/server8070.js index 8d0be219ab3..74f4d43cafa 100644 --- a/jstests/replsets/server8070.js +++ b/jstests/replsets/server8070.js @@ -38,7 +38,7 @@ replSet.initiate( ); // set up common points of access -var master = replSet.getMaster(); +var master = replSet.getPrimary(); var primary = master.getDB("foo"); replSet.nodes[1].setSlaveOk(); replSet.nodes[2].setSlaveOk(); diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js index f8b6dd9510b..c2b26bb09a3 100644 --- a/jstests/replsets/slavedelay1.js +++ b/jstests/replsets/slavedelay1.js @@ -16,7 +16,7 @@ doTest = function( signal ) { replTest.initiate(config); - var master = replTest.getMaster().getDB(name); + var master = replTest.getPrimary().getDB(name); var slaveConns = replTest.liveNodes.slaves; var slaves = []; for (var i in slaveConns) { @@ -88,7 +88,7 @@ doTest = function( signal ) { config.members[3].slaveDelay = 15; reconfig(replTest, config); - master = replTest.getMaster().getDB(name); + master = replTest.getPrimary().getDB(name); assert.soon(function() { return conn.getDB("local").system.replset.findOne().version == config.version; }); diff --git a/jstests/replsets/slavedelay3.js b/jstests/replsets/slavedelay3.js index 920fad2a354..5a19027a4ad 100644 --- a/jstests/replsets/slavedelay3.js +++ b/jstests/replsets/slavedelay3.js @@ -11,7 +11,7 @@ config.members[1].slaveDelay = 5; config.members[2].priority = 0; replTest.initiate(config); -var master = replTest.getMaster().getDB(name); +var master = 
replTest.getPrimary().getDB(name); replTest.awaitReplication(); var slaveConns = replTest.liveNodes.slaves; diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js index 77dd76462df..502f0cf8c4c 100644 --- a/jstests/replsets/stepdown.js +++ b/jstests/replsets/stepdown.js @@ -36,7 +36,7 @@ var replTest = new ReplSetTest({ var nodes = replTest.startSet(); replTest.initiate(); replTest.waitForState(nodes[0], replTest.PRIMARY, 60 * 1000); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); // do a write print("\ndo a write"); @@ -80,7 +80,7 @@ replTest.liveNodes.slaves.forEach(function(slave) { print("\nreset stepped down time"); assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze:0})); -master = replTest.getMaster(); +master = replTest.getPrimary(); print("\nawait"); replTest.awaitSecondaryNodes(90000); @@ -97,7 +97,7 @@ assert.soon(function() { } }, "wait for n0 to be primary", 60000); -master = replTest.getMaster(); +master = replTest.getPrimary(); var firstMaster = master; print("\nmaster is now "+firstMaster); @@ -113,10 +113,10 @@ catch (e) { } print("\nget a master"); -replTest.getMaster(); +replTest.getPrimary(); assert.soon(function() { - var secondMaster = replTest.getMaster(); + var secondMaster = replTest.getPrimary(); return firstMaster.host !== secondMaster.host; }, "making sure " + firstMaster.host + " isn't still master", 60000); @@ -135,7 +135,7 @@ catch (e) { } -master = replTest.getMaster(); +master = replTest.getPrimary(); assert.soon(function() { try { var result = master.getDB("admin").runCommand({replSetGetStatus:1}); @@ -149,7 +149,7 @@ assert.soon(function() { } catch (e) { print("error getting status from master: " + e); - master = replTest.getMaster(); + master = replTest.getPrimary(); return false; } }, 'make sure master knows that slave is down before proceeding'); @@ -166,7 +166,7 @@ assert.gte((new Date()) - now, 2750); print("\nsend shutdown command"); -var currentMaster = replTest.getMaster(); +var currentMaster = replTest.getPrimary(); try { printjson(currentMaster.getDB("admin").runCommand({shutdown : 1, force : true})); } diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js index b4464f12c22..9c3cf3d9544 100644 --- a/jstests/replsets/stepdown3.js +++ b/jstests/replsets/stepdown3.js @@ -6,7 +6,7 @@ var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 }); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); // do a write to allow stepping down of the primary; // otherwise, the primary will refuse to step down diff --git a/jstests/replsets/stepdown_wrt_electable.js b/jstests/replsets/stepdown_wrt_electable.js index cb0d2446fe3..365c35f6643 100644 --- a/jstests/replsets/stepdown_wrt_electable.js +++ b/jstests/replsets/stepdown_wrt_electable.js @@ -8,7 +8,7 @@ var c = replTest.getReplSetConfig(); c.members[1].priority = 0; // not electable replTest.initiate(c); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var testDB = master.getDB('test'); var firstPrimary = testDB.isMaster().primary diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js index 878600c9a94..d847127cae3 100644 --- a/jstests/replsets/sync2.js +++ b/jstests/replsets/sync2.js @@ -10,7 +10,7 @@ replTest.initiate({"_id": "sync2", {"_id": 4, host: nodes[4]}] }); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); jsTestLog("Replica set test initialized"); // initial sync @@ -27,7 +27,7 @@ 
conns[4].disconnect(conns[1]); conns[4].disconnect(conns[3]); assert.soon(function() { - master = replTest.getMaster(); + master = replTest.getPrimary(); return master === conns[0]; }, 60 * 1000, "node 0 did not become primary quickly enough"); diff --git a/jstests/replsets/sync_passive.js b/jstests/replsets/sync_passive.js index f73dad062c9..fef16253896 100644 --- a/jstests/replsets/sync_passive.js +++ b/jstests/replsets/sync_passive.js @@ -31,7 +31,7 @@ config.members[2].priority = 0; replTest.initiate(config); replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); -var master = replTest.getMaster().getDB("test"); +var master = replTest.getPrimary().getDB("test"); var server0 = master; var server1 = replTest.liveNodes.slaves[0]; @@ -67,7 +67,7 @@ replTest.awaitReplication(60 * 1000); print("add data"); reconnect(server1); -master = replTest.getMaster().getDB("test"); +master = replTest.getPrimary().getDB("test"); for (var i=0;i<1000;i++) { master.bar.insert({x:i}); } diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js index 62ca15e82d8..e4d4ccd50e8 100644 --- a/jstests/replsets/tags2.js +++ b/jstests/replsets/tags2.js @@ -24,7 +24,7 @@ replTest.initiate( conf ); replTest.awaitReplication(); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }})); @@ -33,7 +33,7 @@ conf.settings.getLastErrorModes.backedUp.backup = 3; master.getDB("admin").runCommand( {replSetReconfig: conf} ); replTest.awaitReplication(); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); assert.writeOK(db.foo.insert({ x: 2 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }})); @@ -42,7 +42,7 @@ conf.members[0].priorty = 3; conf.members[2].priorty = 0; master.getDB("admin").runCommand( {replSetReconfig: conf} ); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); assert.writeOK(db.foo.insert({ x: 3 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }})); diff --git a/jstests/replsets/tags_with_reconfig.js b/jstests/replsets/tags_with_reconfig.js index 22b2404e009..8f1e01ce176 100644 --- a/jstests/replsets/tags_with_reconfig.js +++ b/jstests/replsets/tags_with_reconfig.js @@ -24,7 +24,7 @@ replTest.initiate( conf ); replTest.awaitReplication(); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); // Insert a document with write concern : anydc @@ -54,7 +54,7 @@ var config = master.getDB("local").system.replset.findOne(); printjson(config); -master = replTest.getMaster(); +master = replTest.getPrimary(); var db = master.getDB("test"); // Insert a document with write concern : anydc diff --git a/jstests/replsets/temp_namespace.js b/jstests/replsets/temp_namespace.js index 88a256db87f..3e75ff400e3 100644 --- a/jstests/replsets/temp_namespace.js +++ b/jstests/replsets/temp_namespace.js @@ -13,7 +13,7 @@ replTest.initiate({"_id" : "testSet", {"_id" : 1, "host" : nodes[1]}, {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var second = replTest.getSecondary(); var masterId = replTest.getNodeId(master); diff --git a/jstests/replsets/toostale.js b/jstests/replsets/toostale.js index 9225921daf2..82e6b062d5d 100644 --- a/jstests/replsets/toostale.js +++ b/jstests/replsets/toostale.js @@ -58,7 +58,7 @@ replTest.initiate({_id : name, members : [ {_id : 1, host : 
host+":"+replTest.ports[1], arbiterOnly : true}, {_id : 2, host : host+":"+replTest.ports[2], priority: 0} ]}); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var mdb = master.getDB("foo"); diff --git a/jstests/replsets/two_initsync.js b/jstests/replsets/two_initsync.js index 08e440406cb..603625df975 100755 --- a/jstests/replsets/two_initsync.js +++ b/jstests/replsets/two_initsync.js @@ -54,7 +54,7 @@ doTest = function (signal) { return result['ok'] == 1; }); - var a = replTest.getMaster().getDB("two"); + var a = replTest.getPrimary().getDB("two"); for (var i = 0; i < 20000; i++) a.coll.insert({ i: i, s: "a b" }); diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js index 2ec634cae9a..2bd57cf1da4 100644 --- a/jstests/sharding/addshard2.js +++ b/jstests/sharding/addshard2.js @@ -13,12 +13,12 @@ var conn2 = MongoRunner.runMongod({useHostname: true}); var rs1 = new ReplSetTest( { "name" : "add_shard2_rs1", nodes : 3 } ); rs1.startSet(); rs1.initiate(); -var master1 = rs1.getMaster(); +var master1 = rs1.getPrimary(); var rs2 = new ReplSetTest( { "name" : "add_shard2_rs2", nodes : 3 } ); rs2.startSet(); rs2.initiate(); -var master2 = rs2.getMaster(); +var master2 = rs2.getPrimary(); // replica set with set name = 'config' var rs3 = new ReplSetTest({ 'name': 'config', nodes: 3 }); @@ -95,7 +95,7 @@ assert.commandFailed(s.admin.runCommand({ addshard: 'dummy:12345' })); // // SERVER-17231 Adding replica set w/ set name = 'config' // -var configReplURI = 'config/' + getHostName() + ':' + rs3.getMaster().port; +var configReplURI = 'config/' + getHostName() + ':' + rs3.getPrimary().port; assert(!s.admin.runCommand({ 'addshard': configReplURI }).ok, 'accepted replica set shard with set name "config"'); @@ -109,7 +109,7 @@ assert(shard, 'shard with name "not_config" not found'); // // SERVER-17232 Try inserting into shard with name 'admin' // -assert(s.admin.runCommand({ 'addshard': 'admin/' + getHostName() + ':' + rs4.getMaster().port}).ok, +assert(s.admin.runCommand({ 'addshard': 'admin/' + getHostName() + ':' + rs4.getPrimary().port}).ok, 'adding replica set with name "admin" should work'); var wRes = s.getDB('test').foo.insert({ x: 1 }); assert(!wRes.hasWriteError() && wRes.nInserted === 1, diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js index aa4ccbfed19..2a66cbc74fe 100644 --- a/jstests/sharding/addshard4.js +++ b/jstests/sharding/addshard4.js @@ -17,7 +17,7 @@ r.initiate(config); //to pre-allocate files on slow systems r.awaitReplication(); -var master = r.getMaster(); +var master = r.getPrimary(); var members = config.members.map(function(elem) { return elem.host; }); var shardName = "addshard4/"+members.join(","); @@ -46,7 +46,7 @@ r.initiate(config); // to pre-allocate files on slow systems r.awaitReplication(); -master = r.getMaster(); +master = r.getPrimary(); print("adding shard addshard42"); diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js index e16cd8cf5aa..494c37ec601 100644 --- a/jstests/sharding/auth.js +++ b/jstests/sharding/auth.js @@ -38,7 +38,7 @@ function logout(userObj, thingToUse) { } function getShardName(rsTest) { - var master = rsTest.getMaster(); + var master = rsTest.getPrimary(); var config = master.getDB("local").system.replset.findOne(); var members = config.members.map(function(elem) { return elem.host; }); return config._id+"/"+members.join(","); @@ -118,7 +118,7 @@ d1.stopSet(); d1.startSet({keyFile : "jstests/libs/key1" }); d1.initiate(); -var master = d1.getMaster(); +var 
master = d1.getPrimary(); print("adding shard w/auth " + shardName); @@ -257,11 +257,11 @@ authutil.asCluster(d1.nodes, "jstests/libs/key1", function() { d1.awaitReplicati authutil.asCluster(d2.nodes, "jstests/libs/key1", function() { d2.awaitReplication(120000); }); // add admin on shard itself, hack to prevent localhost auth bypass -d1.getMaster().getDB(adminUser.db).createUser({user: adminUser.username, +d1.getPrimary().getDB(adminUser.db).createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles}, {w: 3, wtimeout: 60000}); -d2.getMaster().getDB(adminUser.db).createUser({user: adminUser.username, +d2.getPrimary().getDB(adminUser.db).createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles}, {w: 3, wtimeout: 60000}); diff --git a/jstests/sharding/copydb_from_mongos.js b/jstests/sharding/copydb_from_mongos.js index 4ec392f5789..aa6ac16b465 100644 --- a/jstests/sharding/copydb_from_mongos.js +++ b/jstests/sharding/copydb_from_mongos.js @@ -1,3 +1,5 @@ +(function() { + var st = new ShardingTest({ shards: 1 }); var testDB = st.s.getDB('test'); @@ -20,3 +22,5 @@ assert.commandFailed(testDB.adminCommand({ copydb: 1, todb: 'test/copy' })); st.stop(); + +})(); diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js index 470174890c4..410e2e4a4f3 100644 --- a/jstests/sharding/count_slaveok.js +++ b/jstests/sharding/count_slaveok.js @@ -1,21 +1,22 @@ // Tests count and distinct using slaveOk. Also tests a scenario querying a set where only one // secondary is up. (function() { +'use strict'; var st = new ShardingTest({ name: "countSlaveOk", shards: 1, mongos: 1, - other: { rs : true, - rs0 : { nodes : 2 } } }); + other: { rs: true, + rs0: { nodes: 2 } } }); var rst = st._rs[0].test; // Insert data into replica set -var conn = new Mongo( st.s.host ) -conn.setLogLevel( 3 ) +var conn = new Mongo(st.s.host); +conn.setLogLevel(3); -var coll = conn.getCollection( "test.countSlaveOk" ) -coll.drop() +var coll = conn.getCollection('test.countSlaveOk'); +coll.drop(); var bulk = coll.initializeUnorderedBulkOp(); for( var i = 0; i < 300; i++ ){ @@ -36,7 +37,7 @@ var primary = rst.getPrimary() var sec = rst.getSecondary() // Data now inserted... stop the master, since only two in set, other will still be secondary -rst.stop(rst.getMaster()); +rst.stop(rst.getPrimary()); printjson( rst.status() ) // Wait for the mongos to recognize the slave diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js index 16d2a921125..b6eeacb8cd1 100644 --- a/jstests/sharding/create_idx_empty_primary.js +++ b/jstests/sharding/create_idx_empty_primary.js @@ -2,15 +2,14 @@ * Test to make sure that the createIndex command gets sent to all shards. */ (function() { -"use strict"; +'use strict'; var st = new ShardingTest({ shards: 2 }); +assert.commandWorked(st.s.adminCommand({ enablesharding: 'test' })); +st.ensurePrimaryShard('test', 'shard0001'); var testDB = st.s.getDB('test'); - -testDB.adminCommand({ enablesharding: 'test' }); -var res = testDB.adminCommand({ movePrimary: 'test', to: 'shard0001' }); -testDB.adminCommand({ shardcollection: 'test.user', key: { _id: 1 }}); +assert.commandWorked(testDB.adminCommand({ shardcollection: 'test.user', key: { _id: 1 }})); // Move only chunk out of primary shard. 
assert.commandWorked(testDB.adminCommand({ movechunk: 'test.user', diff --git a/jstests/sharding/csrs_upgrade_during_migrate.js b/jstests/sharding/csrs_upgrade_during_migrate.js index 1f0da2dc34d..45177857ea7 100644 --- a/jstests/sharding/csrs_upgrade_during_migrate.js +++ b/jstests/sharding/csrs_upgrade_during_migrate.js @@ -34,10 +34,10 @@ var st; }; var addSlaveDelay = function(rst) { - var conf = rst.getMaster().getDB('local').system.replset.findOne(); + var conf = rst.getPrimary().getDB('local').system.replset.findOne(); conf.version++; var secondaryIndex = 0; - if (conf.members[secondaryIndex].host === rst.getMaster().host) { + if (conf.members[secondaryIndex].host === rst.getPrimary().host) { secondaryIndex = 1; } conf.members[secondaryIndex].priority = 0; diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js index 1ddff72d5d0..9b8500f01d1 100644 --- a/jstests/sharding/diffservers1.js +++ b/jstests/sharding/diffservers1.js @@ -3,8 +3,8 @@ var s = new ShardingTest({ name: "diffservers1", shards: 2 }); assert.eq( 2 , s.config.shards.count() , "server count wrong" ); -assert.eq( 0 , s._shardServers[0].getDB( "config" ).shards.count() , "shouldn't be here" ); -assert.eq( 0 , s._shardServers[1].getDB( "config" ).shards.count() , "shouldn't be here" ); +assert.eq( 0 , s._connections[0].getDB( "config" ).shards.count() , "shouldn't be here" ); +assert.eq( 0 , s._connections[1].getDB( "config" ).shards.count() , "shouldn't be here" ); test1 = s.getDB( "test1" ).foo; test1.save( { a : 1 } ); diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js index 8bf3937732d..84c857e644c 100644 --- a/jstests/sharding/features3.js +++ b/jstests/sharding/features3.js @@ -5,12 +5,14 @@ // - Inserts 10k documents and ensures they're evenly distributed // - Verifies a $where query can be killed on multiple DBs // - Tests fsync and fsync+lock permissions on sharded db +(function() { +'use strict'; var s = new ShardingTest({shards: 2, mongos: 1 }); +var dbForTest = s.getDB("test"); +dbForTest.foo.drop(); -var db = s.getDB("test"); // db variable name is required due to startParallelShell() var numDocs = 10000; -db.foo.drop(); // shard test.foo and add a split point s.adminCommand({enablesharding: "test"}); @@ -26,12 +28,12 @@ s.adminCommand({moveChunk: "test.foo", find: {_id: 3}, s.startBalancer(); // insert 10k small documents into the sharded collection -var bulk = db.foo.initializeUnorderedBulkOp(); +var bulk = dbForTest.foo.initializeUnorderedBulkOp(); for (i = 0; i < numDocs; i++) bulk.insert({ _id: i }); assert.writeOK(bulk.execute()); -var x = db.foo.stats(); +var x = dbForTest.foo.stats(); // verify the colleciton has been sharded and documents are evenly distributed assert.eq("test.foo", x.ns, "namespace mismatch"); @@ -42,8 +44,8 @@ assert.eq(numDocs / 2, x.shards.shard0001.count, "count on shard0001"); assert(x.totalIndexSize > 0); // insert one doc into a non-sharded collection -db.bar.insert({x: 1}); -var x = db.bar.stats(); +dbForTest.bar.insert({x: 1}); +var x = dbForTest.bar.stats(); assert.eq(1, x.count, "XXX1"); assert.eq("test.bar", x.ns, "XXX2"); assert(!x.sharded, "XXX3: " + tojson(x)); @@ -62,33 +64,35 @@ var parallelCommand = // fork a parallel shell, but do not wait for it to start print("about to fork new shell at: " + Date()); -var awaitShell = startParallelShell(parallelCommand); +var awaitShell = startParallelShell(parallelCommand, s.s.port); print("done forking shell at: " + Date()); // Get all current $where operations function 
getMine(printInprog) { - var inprog = db.currentOp().inprog; + var inprog = dbForTest.currentOp().inprog; if (printInprog) printjson(inprog); // Find all the where queries - var mine = []; - for (var x=0; x<inprog.length; x++) { + var myProcs = []; + for (var x = 0; x < inprog.length; x++) { if (inprog[x].query && inprog[x].query.filter && inprog[x].query.filter.$where) { - mine.push(inprog[x]); + myProcs.push(inprog[x]); } } - return mine; + return myProcs; } var curOpState = 0; // 0 = not found, 1 = killed var killTime = null; var i = 0; +var mine; assert.soon(function() { // Get all the current operations mine = getMine(true); // SERVER-8794: print all operations + // get curren tops, but only print out operations before we see a $where op has started // mine = getMine(curOpState == 0 && i > 20); i++; @@ -99,7 +103,7 @@ assert.soon(function() { curOpState = 1; // kill all $where mine.forEach(function(z) { - printjson(db.getSisterDB("admin").killOp(z.opid)); + printjson(dbForTest.getSisterDB("admin").killOp(z.opid)); }); killTime = new Date(); } @@ -130,20 +134,22 @@ var end = new Date(); print("elapsed: " + (end.getTime() - start.getTime())); // test fsync command on non-admin db -x = db.runCommand("fsync"); +x = dbForTest.runCommand("fsync"); assert(!x.ok , "fsync on non-admin namespace should fail : " + tojson(x)); assert(x.code == 13, "fsync on non-admin succeeded, but should have failed: " + tojson(x)); // test fsync on admin db -x = db._adminCommand("fsync"); +x = dbForTest._adminCommand("fsync"); assert(x.ok == 1, "fsync failed: " + tojson(x)); if ( x.all.shard0000 > 0 ) { assert(x.numFiles > 0, "fsync failed: " + tojson(x)); } // test fsync+lock on admin db -x = db._adminCommand({"fsync" :1, lock:true}); +x = dbForTest._adminCommand({"fsync" :1, lock:true}); assert(!x.ok, "lock should fail: " + tojson(x)); s.stop(); + +})(); diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js index a5c20f51ea5..f7fba0e0f88 100644 --- a/jstests/sharding/group_slaveok.js +++ b/jstests/sharding/group_slaveok.js @@ -1,63 +1,63 @@ // Tests group using slaveOk (function() { +'use strict'; var st = new ShardingTest({ name: "groupSlaveOk", shards: 1, mongos: 1, - other :{ rs : true, - rs0 : { nodes : 2 } } }); + other: { rs: true, + rs0: { nodes: 2 } } }); -var rst = st._rs[0].test +var rst = st._rs[0].test; // Insert data into replica set -var conn = new Mongo( st.s.host ) -conn.setLogLevel( 3 ) +var conn = new Mongo(st.s.host); +conn.setLogLevel(3); -var coll = conn.getCollection( "test.groupSlaveOk" ) -coll.drop() +var coll = conn.getCollection("test.groupSlaveOk"); +coll.drop(); var bulk = coll.initializeUnorderedBulkOp(); -for( var i = 0; i < 300; i++ ){ - bulk.insert( { i : i % 10 } ); +for(var i = 0; i < 300; i++) { + bulk.insert({ i: i % 10 }); } -assert.writeOK( bulk.execute() ); +assert.writeOK(bulk.execute()); -st.printShardingStatus() +st.printShardingStatus(); // Wait for client to update itself and replication to finish -rst.awaitReplication() +rst.awaitReplication(); -var primary = rst.getPrimary() -var sec = rst.getSecondary() +var primary = rst.getPrimary(); +var sec = rst.getSecondary(); // Data now inserted... 
stop the master, since only two in set, other will still be secondary -rst.stop(rst.getMaster()); -printjson( rst.status() ) +rst.stop(rst.getPrimary()); +printjson(rst.status()); // Wait for the mongos to recognize the slave -ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } ) +ReplSetTest.awaitRSClientHosts(conn, sec, { ok: true, secondary: true }); // Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when // master is down -conn.setSlaveOk() +conn.setSlaveOk(); // Should not throw exception, since slaveOk'd -assert.eq( 10, coll.group({ key : { i : true } , - reduce : function( obj, ctx ){ ctx.count += 1 } , - initial : { count : 0 } }).length ) - -try { - - conn.setSlaveOk( false ) - var res = coll.group({ key : { i : true } , - reduce : function( obj, ctx ){ ctx.count += 1 } , - initial : { count : 0 } }); - - print( "Should not reach here! Group result: " + tojson(res) ); - assert( false ); +assert.eq(10, coll.group({ key: { i: true } , + reduce: function(obj, ctx) { ctx.count += 1 }, + initial: { count: 0 } }).length) + +try { + conn.setSlaveOk(false); + var res = coll.group({ key: { i: true }, + reduce: function(obj, ctx) { ctx.count += 1 }, + initial: { count: 0 } }); + + print("Should not reach here! Group result: " + tojson(res)); + assert(false); } -catch( e ){ - print( "Non-slaveOk'd connection failed." + tojson(e) ) +catch(e){ + print("Non-slaveOk'd connection failed." + tojson(e)); } st.stop(); diff --git a/jstests/sharding/mongos_validate_backoff.js b/jstests/sharding/mongos_validate_backoff.js index 877ab808dcc..4faff61698d 100644 --- a/jstests/sharding/mongos_validate_backoff.js +++ b/jstests/sharding/mongos_validate_backoff.js @@ -1,28 +1,27 @@ -// // Ensures that single mongos shard-key errors are fast, but slow down when many are triggered -// +(function() { +'use strict'; -var st = new ShardingTest({ shards : 1, mongos : 1 }) +var st = new ShardingTest({ shards : 1, mongos : 1 }); -var mongos = st.s0 -var admin = mongos.getDB( "admin" ) -var coll = mongos.getCollection( "foo.bar" ) +var mongos = st.s0; +var admin = mongos.getDB("admin"); +var coll = mongos.getCollection("foo.bar"); -printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) ) +assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" })); -coll.ensureIndex({ shardKey : 1 }) -printjson( admin.runCommand({ shardCollection : coll + "", key : { shardKey : 1 } }) ) +coll.ensureIndex({ shardKey : 1 }); +assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { shardKey : 1 } })); -var timeBadInsert = function(){ - - var start = new Date().getTime() +var timeBadInsert = function() { + var start = new Date().getTime(); // Bad insert, no shard key assert.writeError(coll.insert({ hello : "world" })); - var end = new Date().getTime() + var end = new Date().getTime(); - return end - start + return end - start; } // We need to work at least twice in order to check resetting the counter @@ -31,28 +30,31 @@ var success = 0; // Loop over this test a few times, to ensure that the error counters get reset if we don't have // bad inserts over a long enough time. 
-for( var test = 0; test < 5; test++ ){ - - var firstWait = timeBadInsert() - var lastWait = 0 - - for( var i = 0; i < 20; i++ ){ - printjson( lastWait = timeBadInsert() ) +for (var test = 0; test < 5; test++) { + var firstWait = timeBadInsert(); + var lastWait = 0; + + for(var i = 0; i < 20; i++) { + printjson(lastWait = timeBadInsert()); } - // Kind a heuristic test, we want to make sure that the error wait after sleeping is much less - // than the error wait after a lot of errors - if( lastWait > firstWait * 2 * 2 ) success++; // Success! - - if( success >= successNeeded ) break; + // As a heuristic test, we want to make sure that the error wait after sleeping is much less + // than the error wait after a lot of errors. + if (lastWait > firstWait * 2 * 2) { + success++; + } + if (success >= successNeeded) { + break; + } + // Abort if we've failed too many times - assert.lt( test, 4 ); - + assert.lt(test, 4); + // Sleeping for long enough to reset our exponential counter - sleep( 3000 ) + sleep(3000); } -jsTest.log( "DONE!" ) +st.stop(); -st.stop() +})(); diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js index fd143b72ded..fddccfb3fa0 100644 --- a/jstests/sharding/mr_shard_version.js +++ b/jstests/sharding/mr_shard_version.js @@ -36,12 +36,12 @@ var migrateOp = { op : "command", ns : "admin", command : { moveChunk : "" + col var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this ) } var ops = {} -for( var i = 0; i < st._shardServers.length; i++ ){ +for( var i = 0; i < st._connections.length; i++ ){ for( var j = 0; j < 2; j++ ){ ops[ "" + (i * 2 + j) ] = { op : "command", ns : "admin", command : { moveChunk : "" + coll, find : { _id : ( j == 0 ? 0 : halfId ) }, - to : st._shardServers[i].shardName }, + to : st._connections[i].shardName }, check : checkMigrate }; } } diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js index ea98a8273ee..7b92eb0d1b4 100755 --- a/jstests/sharding/read_pref.js +++ b/jstests/sharding/read_pref.js @@ -17,7 +17,7 @@ var doTest = function(useDollarQuerySyntax) { rs0: { nodes: NODES, oplogSize: 10, useHostName: true } }}); var replTest = st.rs0; - var primaryNode = replTest.getMaster(); + var primaryNode = replTest.getPrimary(); // The $-prefixed query syntax is only legal for compatibility mode reads, not for the // find/getMore commands. 
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js index c4efc5bd666..8862103d129 100644 --- a/jstests/sharding/recovering_slaveok.js +++ b/jstests/sharding/recovering_slaveok.js @@ -25,8 +25,8 @@ var collSOk = mongosSOK.getCollection( "" + coll ); var rsA = shardTest._rs[0].test; var rsB = shardTest._rs[1].test; -rsA.getMaster().getDB( "test_a" ).dummy.insert({ x : 1 }); -rsB.getMaster().getDB( "test_b" ).dummy.insert({ x : 1 }); +rsA.getPrimary().getDB( "test_a" ).dummy.insert({ x : 1 }); +rsB.getPrimary().getDB( "test_b" ).dummy.insert({ x : 1 }); rsA.awaitReplication(); rsB.awaitReplication(); diff --git a/jstests/sharding/replset_config/basic_sharding_params.js b/jstests/sharding/replset_config/basic_sharding_params.js index 37a87855711..80a617e9987 100644 --- a/jstests/sharding/replset_config/basic_sharding_params.js +++ b/jstests/sharding/replset_config/basic_sharding_params.js @@ -22,7 +22,7 @@ function shardingTestUsingObjects() { assert.eq( c0, st._configServers[0] ); var d0 = st.d0; - assert.eq( d0, st._shardServers[0] ); + assert.eq( d0, st._connections[0] ); var rs1 = st.rs1; assert.eq( rs1, st._rsObjects[1] ); @@ -57,10 +57,10 @@ function shardingTestUsingArrays() { assert.eq( c0, st._configServers[0] ); var d0 = st.d0; - assert.eq( d0, st._shardServers[0] ); + assert.eq( d0, st._connections[0] ); var d1 = st.d1; - assert.eq( d1, st._shardServers[1] ); + assert.eq( d1, st._connections[1] ); assert.contains( "-vvvvv", s0.commandLine ); assert.contains( "-vvvv", s1.commandLine ); diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js index c858933a449..0c8dc5208ba 100644 --- a/jstests/sharding/shard_insert_getlasterror_w2.js +++ b/jstests/sharding/shard_insert_getlasterror_w2.js @@ -29,7 +29,7 @@ var replSet1 = shardingTest.rs0; // Add data to it - var testDBReplSet1 = replSet1.getMaster().getDB(testDBName); + var testDBReplSet1 = replSet1.getPrimary().getDB(testDBName); var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp(); for (var i = 0; i < numDocs; i++) { bulk.insert({ x: i, text: textString }); diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js index 1dfa816c637..e3a2082ba8d 100644 --- a/jstests/sharding/sharding_rs2.js +++ b/jstests/sharding/sharding_rs2.js @@ -47,7 +47,7 @@ catch ( e ){ assert.soon( function(){ try { - printjson( rs.test.getMaster().getDB("admin").runCommand( "isMaster" ) ) + printjson( rs.test.getPrimary().getDB("admin").runCommand( "isMaster" ) ) s.config.shards.find().forEach( printjsononeline ); return countNodes() == 3; } @@ -80,7 +80,7 @@ rs.test.waitForState( rs.test.getSecondaries(), rs.test.SECONDARY, 180 * 1000 ) m = new Mongo( s.s.name ); ts = m.getDB( "test" ).foo -before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +before = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters for ( i=0; i<10; i++ ) assert.eq( 17 , ts.findOne().x , "B1" ) @@ -89,7 +89,7 @@ m.setSlaveOk() for ( i=0; i<10; i++ ) assert.eq( 17 , ts.findOne().x , "B2" ) -after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +after = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters printjson( before ) printjson( after ) @@ -141,14 +141,14 @@ assert.commandWorked(s.getDB('admin').runCommand({ moveChunk: "test.foo", _waitForDelete: true })); assert.eq( 100 , t.count() , "C3" ) -assert.eq( 50 , rs.test.getMaster().getDB( "test" ).foo.count() , "C4" ) +assert.eq( 50 , 
rs.test.getPrimary().getDB( "test" ).foo.count() , "C4" ) // by non-shard key m = new Mongo( s.s.name ); ts = m.getDB( "test" ).foo -before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +before = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters for ( i=0; i<10; i++ ) assert.eq( 17 , ts.findOne( { _id : 5 } ).x , "D1" ) @@ -157,7 +157,7 @@ m.setSlaveOk() for ( i=0; i<10; i++ ) assert.eq( 17 , ts.findOne( { _id : 5 } ).x , "D2" ) -after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +after = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters assert.lte( before.query + 10 , after.query , "D3" ) @@ -170,7 +170,7 @@ db.printShardingStatus() ts = m.getDB( "test" ).foo -before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +before = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters for ( i=0; i<10; i++ ) assert.eq( 57 , ts.findOne( { x : 57 } ).x , "E1" ) @@ -179,7 +179,7 @@ m.setSlaveOk() for ( i=0; i<10; i++ ) assert.eq( 57 , ts.findOne( { x : 57 } ).x , "E2" ) -after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters +after = rs.test.getPrimary().adminCommand( "serverStatus" ).opcounters assert.lte( before.query + 10 , after.query , "E3" ) diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js index c18b92b93e4..a0cdcd61d67 100644 --- a/jstests/sharding/split_large_key.js +++ b/jstests/sharding/split_large_key.js @@ -1,5 +1,7 @@ // Test for splitting a chunk with a very large shard key value should not be allowed // and does not corrupt the config.chunks metadata. +(function() { +'use strict'; function verifyChunk(keys, expectFail) { // If split failed then there's only 1 chunk @@ -14,30 +16,6 @@ function verifyChunk(keys, expectFail) { } } -function runTest(test) { - var collName = "split_large_key_"+test.name; - var midKey = {}; - var chunkKeys = {min: {}, max: {}}; - for (var k in test.key) { - // new Array with join creates string length 1 less than size, so add 1 - midKey[k] = new Array(test.keyFieldSize+1).join('a'); - // min & max keys for each field in the index - chunkKeys.min[k] = MinKey; - chunkKeys.max[k] = MaxKey; - } - configDB.adminCommand({ shardCollection: "test."+collName, key: test.key}); - res = configDB.adminCommand({ split: "test."+collName, middle: midKey}); - if (test.expectFail) { - assert(!res.ok, "Split: "+collName); - assert(res.errmsg !== null, "Split errmsg: "+collName); - } else { - assert(res.ok, "Split: "+collName+" "+res.errmsg); - } - verifyChunk(chunkKeys, test.expectFail); - st.s0.getCollection("test."+collName).drop(); -} - - // Tests // - name: Name of test, used in collection name // - key: key to test @@ -55,11 +33,36 @@ var tests = [ var st = new ShardingTest({ shards: 1 }); var configDB = st.s.getDB('config'); -configDB.adminCommand({ enableSharding: 'test' }); +assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' })); tests.forEach(function(test){ - runTest(test); + var collName = "split_large_key_" + test.name; + var midKey = {}; + var chunkKeys = {min: {}, max: {}}; + for (var k in test.key) { + // new Array with join creates string length 1 less than size, so add 1 + midKey[k] = new Array(test.keyFieldSize+1).join('a'); + // min & max keys for each field in the index + chunkKeys.min[k] = MinKey; + chunkKeys.max[k] = MaxKey; + } + + assert.commandWorked( + configDB.adminCommand({ shardCollection: "test." 
+ collName, key: test.key })); + + var res = configDB.adminCommand({ split: "test."+collName, middle: midKey}); + if (test.expectFail) { + assert(!res.ok, "Split: " + collName); + assert(res.errmsg !== null, "Split errmsg: " + collName); + } else { + assert(res.ok, "Split: " + collName + " " + res.errmsg); + } + + verifyChunk(chunkKeys, test.expectFail); + + st.s0.getCollection("test." + collName).drop(); }); st.stop(); +})(); diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js index bb7db7585c6..d7ac493cc5a 100644 --- a/jstests/sharding/trace_missing_docs_test.js +++ b/jstests/sharding/trace_missing_docs_test.js @@ -1,49 +1,49 @@ - -// // Tests tracing where a document was inserted -// +load('jstests/libs/trace_missing_docs.js'); -load('jstests/libs/trace_missing_docs.js') +(function() { +'use strict'; -var testDocMissing = function( useReplicaSet ) { +var testDocMissing = function(useReplicaSet) { + var options = { rs: useReplicaSet, + shardOptions: { master: "", oplogSize: 10 }, + rsOptions: { nodes: 1, oplogSize: 10 } }; -var options = { rs : useReplicaSet, - shardOptions : { master : "", oplogSize : 10 }, - rsOptions : { nodes : 1, oplogSize : 10 } }; + var st = new ShardingTest({ shards: 2, mongos: 1, other: options }); -var st = new ShardingTest({ shards : 2, mongos : 1, other : options }); + var mongos = st.s0; + var coll = mongos.getCollection("foo.bar"); + var admin = mongos.getDB("admin"); + var shards = mongos.getCollection("config.shards").find().toArray(); -var mongos = st.s0; -var coll = mongos.getCollection( "foo.bar" ); -var admin = mongos.getDB( "admin" ); -var shards = mongos.getCollection( "config.shards" ).find().toArray(); + assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" })); + st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id); -assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok ); -printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) ); -coll.ensureIndex({ sk : 1 }); -assert( admin.runCommand({ shardCollection : coll + "", key : { sk : 1 } }).ok ); + coll.ensureIndex({ sk: 1 }); + assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { sk: 1 } })); -assert.writeOK(coll.insert({ _id : 12345, sk : 67890, hello : "world" })); -assert.writeOK(coll.update({ _id : 12345 }, { $set : { baz : 'biz' } })); -assert.writeOK(coll.update({ sk : 67890 }, { $set : { baz : 'boz' } })); + assert.writeOK(coll.insert({ _id: 12345, sk: 67890, hello: "world" })); + assert.writeOK(coll.update({ _id: 12345 }, { $set: { baz: 'biz' } })); + assert.writeOK(coll.update({ sk: 67890 }, { $set: { baz: 'boz' } })); -assert( admin.runCommand({ moveChunk : coll + "", - find : { sk : 0 }, - to : shards[1]._id, - _waitForDelete : true }).ok ); + assert.commandWorked(admin.runCommand({ moveChunk: coll + "", + find: { sk: 0 }, + to: shards[1]._id, + _waitForDelete: true })); -st.printShardingStatus(); + st.printShardingStatus(); -var ops = traceMissingDoc( coll, { _id : 12345, sk : 67890 } ); + var ops = traceMissingDoc(coll, { _id: 12345, sk: 67890 }); -assert.eq( ops[0].op, 'i' ); -assert.eq( ops.length, 5 ); + assert.eq(ops[0].op, 'i'); + assert.eq(ops.length, 5); -jsTest.log( "DONE! " + ( useReplicaSet ? "(using rs)" : "(using master/slave)" ) ); + jsTest.log("DONE! " + (useReplicaSet ? 
"(using rs)": "(using master/slave)")); -st.stop(); + st.stop(); +}; -} +testDocMissing(true); +testDocMissing(false); -testDocMissing( true ); -testDocMissing( false ); +})(); diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js index 441b190de73..0bf8c5892b5 100644 --- a/jstests/sharding/version2.js +++ b/jstests/sharding/version2.js @@ -1,17 +1,17 @@ (function() { +'use strict'; var s = new ShardingTest({ name: "version2", shards: 1 }); -s.adminCommand( { enablesharding : "alleyinsider" } ); -s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } ); -s.adminCommand( { shardcollection : "alleyinsider.bar" , key : { num : 1 } } ); +assert.commandWorked(s.s0.adminCommand({ enablesharding: "alleyinsider" })); +assert.commandWorked(s.s0.adminCommand({ shardcollection: "alleyinsider.foo", key: { num: 1 } })); +assert.commandWorked(s.s0.adminCommand({ shardcollection: "alleyinsider.bar", key: { num: 1 } })); -a = s._connections[0].getDB( "admin" ); +var a = s._connections[0].getDB("admin"); -// setup from one client - -assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i, 0 ); -assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i, 0 ); +// Setup from one client +assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.i, 0); +assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.i, 0); var fooEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.foo' }).lastmodEpoch; assert.commandWorked( @@ -25,42 +25,41 @@ assert.commandWorked( shardHost: s.s.host, })); -printjson( s.config.chunks.findOne() ); - -assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.t, 1 ); -assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.t, 1 ); +printjson(s.config.chunks.findOne()); -// from another client +assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.t, 1); +assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.t, 1); -a2 = connect( s._connections[0].name + "/admin" ); +// From a different client +var a2 = connect(s._connections[0].name + "/admin"); -assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.t , 1 , "a2 global 1" ); -assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i , 0 , "a2 mine 1" ); +assert.eq(a2.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.t, 1, "a2 global 1"); +assert.eq(a2.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.i, 0, "a2 mine 1"); function simpleFindOne(){ - return a2.getMongo().getDB( "alleyinsider" ).foo.findOne(); + return a2.getMongo().getDB("alleyinsider").foo.findOne(); } var barEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.bar' }).lastmodEpoch; -assert.commandWorked( a2.runCommand({ setShardVersion: "alleyinsider.bar", +assert.commandWorked(a2.runCommand({ setShardVersion: "alleyinsider.bar", configdb: s._configDB, version: new Timestamp(1, 0), versionEpoch: barEpoch, shard: 'shard0000', authoritative: true }), - "setShardVersion bar temp" ); + "setShardVersion bar temp"); -assert.throws( simpleFindOne , [] , "should complain about not in sharded mode 1" ); +assert.throws(simpleFindOne, [], 
"should complain about not in sharded mode 1"); // the only way that setSharVersion passes is if the shard agrees with the version // the shard takes its version from config directly // TODO bump timestamps in config -// assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 , "setShardVersion a2-1"); +// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version: 2 }).ok == 1, "setShardVersion a2-1"); // simpleFindOne(); // now should run ok -// assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).ok == 1 , "setShardVersion a2-2"); +// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version: 3 }).ok == 1, "setShardVersion a2-2"); // simpleFindOne(); // newer version is ok diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js index 799c36cc3aa..dc8abc71597 100644 --- a/jstests/sharding/write_cmd_auto_split.js +++ b/jstests/sharding/write_cmd_auto_split.js @@ -1,13 +1,14 @@ /** * Tests the auto split will be triggered when using write commands. */ +(function() { +'use strict'; var st = new ShardingTest({ shards: 1, other: { chunkSize: 1 }}); -st.stopBalancer(); var configDB = st.s.getDB('config'); -configDB.adminCommand({ enableSharding: 'test' }); -configDB.adminCommand({ shardCollection: 'test.insert', key: { x: 1 }}); +assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' })); +assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.insert', key: { x: 1 }})); var doc1k = (new Array(1024)).join('x'); var testDB = st.s.getDB('test'); @@ -151,3 +152,4 @@ assert.eq(1, configDB.chunks.find().itcount()); st.stop(); +})(); diff --git a/jstests/sharding/write_commands_sharding_state.js b/jstests/sharding/write_commands_sharding_state.js index d3d1a4b788a..7d0991870eb 100644 --- a/jstests/sharding/write_commands_sharding_state.js +++ b/jstests/sharding/write_commands_sharding_state.js @@ -3,11 +3,9 @@ // @tags: [requires_persistence]
(function() {
-
'use strict';
var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2 });
-st.stopBalancer();
var dbTestName = 'WriteCommandsTestDB';
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js index 9a63268fac2..d10e81e2655 100644 --- a/jstests/sharding/zbigMapReduce.js +++ b/jstests/sharding/zbigMapReduce.js @@ -1,35 +1,31 @@ // This test is skipped on 32-bit platforms function setupTest() { - - s = new ShardingTest( { shards : 2, - verbose : 1, - mongos : 1, - other: { rs: true, - numReplicas: 2, - chunkSize: 1, - rsOptions: { oplogSize : 50 }, - enableBalancer : 1 - } } ); - - // reduce chunk size to split + var s = new ShardingTest({ shards: 2, + mongos: 1, + other: { rs: true, + numReplicas: 2, + chunkSize: 1, + rsOptions: { oplogSize: 50 }, + enableBalancer: 1 } }); + + // Reduce chunk size to split var config = s.getDB("config"); config.settings.save({_id: "chunksize", value: 1}); - s.adminCommand( { enablesharding : "test" } ) + assert.commandWorked(s.s0.adminCommand({ enablesharding: "test" })); s.ensurePrimaryShard('test', 'test-rs0'); - s.adminCommand( { shardcollection : "test.foo", key : { "_id" : 1 } } ) + assert.commandWorked(s.s0.adminCommand({ shardcollection: "test.foo", key: { "_id": 1 } })); return s; } - function runTest(s) { - jsTest.log( "Inserting a lot of documents into test.foo" ) - db = s.getDB( "test" ); + jsTest.log("Inserting a lot of documents into test.foo"); + db = s.getDB("test"); var idInc = 0; var valInc = 0; - var str="" + var str = ""; if (db.serverBuildInfo().bits == 32) { // Make data ~0.5MB for 32 bit builds @@ -42,36 +38,43 @@ function runTest(s) { var bulk = db.foo.initializeUnorderedBulkOp(); for (j=0; j<100; j++) { - for (i=0; i<512; i++){ + for (i=0; i<512; i++) { bulk.insert({ i: idInc++, val: valInc++, y:str }); } } assert.writeOK(bulk.execute()); - jsTest.log( "Documents inserted, waiting for error..." ) - jsTest.log( "Doing double-checks of insert..." ) + jsTest.log("Documents inserted, doing double-checks of insert..."); // Collect some useful stats to figure out what happened - if( db.foo.find().itcount() != 51200 ){ - sleep( 1000 ) + if (db.foo.find().itcount() != 51200) { + sleep(1000); s.printShardingStatus(true); - print( "Shard 0: " + s.shard0.getCollection( db.foo + "" ).find().itcount() ) - print( "Shard 1: " + s.shard1.getCollection( db.foo + "" ).find().itcount() ) + print("Shard 0: " + s.shard0.getCollection(db.foo + "").find().itcount()); + print("Shard 1: " + s.shard1.getCollection(db.foo + "").find().itcount()); - for( var i = 0; i < 51200; i++ ){ - if( ! db.foo.findOne({ i : i }, { i : 1 }) ){ - print( "Could not find: " + i ) + for (var i = 0; i < 51200; i++) { + if(!db.foo.findOne({ i: i }, { i: 1 })) { + print("Could not find: " + i); } - if( i % 100 == 0 ) print( "Checked " + i ) + + if(i % 100 == 0) print("Checked " + i); } - print( "PROBABLY WILL ASSERT NOW" ) + print("PROBABLY WILL ASSERT NOW"); } - assert.soon( function(){ var c = db.foo.find().itcount(); print( "Count is " + c ); return c == 51200 } ) - //assert.eq( 51200, db.foo.find().itcount(), "Not all data was saved!" ) + assert.soon(function() { + var c = db.foo.find().itcount(); + if (c == 51200) { + return true; + } + + print("Count is " + c); + return false; + }); s.printChunks(); s.printChangeLog(); @@ -79,32 +82,29 @@ function runTest(s) { function map() { emit('count', 1); } function reduce(key, values) { return Array.sum(values) } - jsTest.log( "Test basic mapreduce..." 
) + jsTest.log("Test basic mapreduce..."); // Test basic mapReduce - for ( iter=0; iter<5; iter++ ){ - - print( "Test #" + iter ) - + for (var iter = 0; iter < 5; iter++) { + print("Test #" + iter); out = db.foo.mapReduce(map, reduce,"big_out") } - print( "Testing output to different db..." ) + print("Testing output to different db...") // test output to a different DB // do it multiple times so that primary shard changes for (iter = 0; iter < 5; iter++) { + print("Test #" + iter); - print( "Test #" + iter ) - - assert.eq( 51200, db.foo.find().itcount(), "Not all data was found!" ) + assert.eq(51200, db.foo.find().itcount(), "Not all data was found!"); outCollStr = "mr_replace_col_" + iter; outDbStr = "mr_db_" + iter; print("Testing mr replace into DB " + iter) - res = db.foo.mapReduce( map , reduce , { out : { replace: outCollStr, db: outDbStr } } ) + res = db.foo.mapReduce(map , reduce , { out: { replace: outCollStr, db: outDbStr } }) printjson(res); outDb = s.getDB(outDbStr); @@ -112,116 +112,115 @@ function runTest(s) { obj = outColl.convertToSingleObject("value"); - assert.eq( 51200 , obj.count , "Received wrong result " + obj.count ); + assert.eq(51200 , obj.count , "Received wrong result " + obj.count); print("checking result field"); assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection); assert.eq(res.result.db, outDbStr, "Wrong db " + res.result.db); } - jsTest.log( "Verifying nonatomic M/R throws..." ) + jsTest.log("Verifying nonatomic M/R throws...") // check nonAtomic output - assert.throws(function() { db.foo.mapReduce(map, reduce,{out: {replace: "big_out", nonAtomic: true}})}); + assert.throws(function() { + db.foo.mapReduce(map, reduce, { out: {replace: "big_out", nonAtomic: true } }); + }); - jsTest.log( ) + jsTest.log(); - // add docs with dup "i" + // Add docs with dup "i" valInc = 0; - for (j=0; j<100; j++){ - print( "Inserted document: " + (j * 100) ); + for (j=0; j<100; j++) { + print("Inserted document: " + (j * 100)); bulk = db.foo.initializeUnorderedBulkOp(); - for (i=0; i<512; i++){ - bulk.insert({ i : idInc++, val: valInc++, y: str }); + for (i=0; i<512; i++) { + bulk.insert({ i: idInc++, val: valInc++, y: str }); } // wait for replication to catch up assert.writeOK(bulk.execute({ w: 2 })); } - jsTest.log( "No errors..." 
); + jsTest.log("No errors..."); map2 = function() { emit(this.val, 1); } reduce2 = function(key, values) { return Array.sum(values); } - // test merge + // Test merge outcol = "big_out_merge"; - jsTestLog( "Test A" ) - - // mr quarter of the docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$lt: 25600}}, out: {merge: outcol}}); + // M/R quarter of the docs + jsTestLog("Test A"); + out = db.foo.mapReduce(map2, reduce2, { query: {i: {$lt: 25600} }, out: { merge: outcol } }); printjson(out); - assert.eq( 25600 , out.counts.emit , "Received wrong result" ); - assert.eq( 25600 , out.counts.output , "Received wrong result" ); + assert.eq(25600 , out.counts.emit , "Received wrong result"); + assert.eq(25600 , out.counts.output , "Received wrong result"); - jsTestLog( "Test B" ) - - // mr further docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$gte: 25600, $lt: 51200}}, out: {merge: outcol}}); + // M/R further docs + jsTestLog("Test B"); + out = db.foo.mapReduce( + map2, reduce2, { query: {i: {$gte: 25600, $lt: 51200} }, out: { merge: outcol } }); printjson(out); - assert.eq( 25600 , out.counts.emit , "Received wrong result" ); - assert.eq( 51200 , out.counts.output , "Received wrong result" ); - - jsTestLog( "Test C" ) + assert.eq(25600 , out.counts.emit , "Received wrong result"); + assert.eq(51200 , out.counts.output , "Received wrong result"); - // do 2nd half of docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$gte: 51200}}, out: {merge: outcol, nonAtomic: true}}); + // M/R do 2nd half of docs + jsTestLog("Test C"); + out = db.foo.mapReduce( + map2, reduce2, { query: {i: {$gte: 51200} }, out: { merge: outcol, nonAtomic: true } }); printjson(out); - assert.eq( 51200 , out.counts.emit , "Received wrong result" ); - assert.eq( 51200 , out.counts.output , "Received wrong result" ); - assert.eq( 1 , db[outcol].findOne().value , "Received wrong result" ); - - jsTestLog( "Test D" ) + assert.eq(51200 , out.counts.emit , "Received wrong result"); + assert.eq(51200 , out.counts.output , "Received wrong result"); + assert.eq(1 , db[outcol].findOne().value , "Received wrong result"); - // test reduce + // Test reduce + jsTestLog("Test D"); outcol = "big_out_reduce"; - // mr quarter of the docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$lt: 25600}}, out: {reduce: outcol}}); + // M/R quarter of the docs + out = db.foo.mapReduce(map2, reduce2,{ query: { i: { $lt: 25600 } }, out: { reduce: outcol } }); printjson(out); - assert.eq( 25600 , out.counts.emit , "Received wrong result" ); - assert.eq( 25600 , out.counts.output , "Received wrong result" ); + assert.eq(25600 , out.counts.emit , "Received wrong result"); + assert.eq(25600 , out.counts.output , "Received wrong result"); - jsTestLog( "Test E" ) - - // mr further docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$gte: 25600, $lt: 51200}}, out: {reduce: outcol}}); + // M/R further docs + jsTestLog("Test E"); + out = db.foo.mapReduce( + map2, reduce2, { query: { i: { $gte: 25600, $lt: 51200 } }, out: { reduce: outcol } }); printjson(out); - assert.eq( 25600 , out.counts.emit , "Received wrong result" ); - assert.eq( 51200 , out.counts.output , "Received wrong result" ); - - jsTestLog( "Test F" ) + assert.eq(25600 , out.counts.emit , "Received wrong result"); + assert.eq(51200 , out.counts.output , "Received wrong result"); - // do 2nd half of docs - out = db.foo.mapReduce(map2, reduce2,{ query: {i : {$gte: 51200}}, out: {reduce: outcol, nonAtomic: true}}); + // M/R do 2nd half of docs + jsTestLog("Test 
F"); + out = db.foo.mapReduce( + map2, reduce2, { query: { i: {$gte: 51200} }, out: { reduce: outcol, nonAtomic: true } }); printjson(out); - assert.eq( 51200 , out.counts.emit , "Received wrong result" ); - assert.eq( 51200 , out.counts.output , "Received wrong result" ); - assert.eq( 2 , db[outcol].findOne().value , "Received wrong result" ); + assert.eq(51200 , out.counts.emit , "Received wrong result"); + assert.eq(51200 , out.counts.output , "Received wrong result"); + assert.eq(2 , db[outcol].findOne().value , "Received wrong result"); - jsTestLog( "Test G" ) + // Verify that data is also on secondary + jsTestLog("Test G"); + var primary = s._rs[0].test.liveNodes.master; + var secondaries = s._rs[0].test.liveNodes.slaves; - // verify that data is also on secondary - var primary = s._rs[0].test.liveNodes.master - var secondaries = s._rs[0].test.liveNodes.slaves // Stop the balancer to prevent new writes from happening and make sure // that replication can keep up even on slow machines. s.stopBalancer(); s._rs[0].test.awaitReplication(300 * 1000); - assert.eq( 51200 , primary.getDB("test")[outcol].count() , "Wrong count" ); + assert.eq(51200 , primary.getDB("test")[outcol].count() , "Wrong count"); for (var i = 0; i < secondaries.length; ++i) { - assert.eq( 51200 , secondaries[i].getDB("test")[outcol].count() , "Wrong count" ); + assert.eq(51200 , secondaries[i].getDB("test")[outcol].count() , "Wrong count"); } - - jsTestLog( "DONE" ) - } var s = setupTest(); -if (s.getDB( "admin" ).runCommand( "buildInfo" ).bits < 64) { + +if (s.getDB("admin").runCommand("buildInfo").bits < 64) { print("Skipping test on 32-bit platforms"); } else { runTest(s); } -s.stop() + +s.stop(); diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js index 8bfd871450f..20fae7ac522 100644 --- a/jstests/sharding/zero_shard_version.js +++ b/jstests/sharding/zero_shard_version.js @@ -8,9 +8,9 @@ var st = new ShardingTest({ shards: 2, mongos: 4 }); var testDB_s0 = st.s.getDB('test'); -testDB_s0.adminCommand({ enableSharding: 'test' }); +assert.commandWorked(testDB_s0.adminCommand({ enableSharding: 'test' })); st.ensurePrimaryShard('test', 'shard0001'); -testDB_s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}); +assert.commandWorked(testDB_s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }})); var checkShardMajorVersion = function(conn, expectedVersion) { var shardVersionInfo = conn.adminCommand({ getShardVersion: 'test.user' }); diff --git a/jstests/slow1/replsets_priority1.js b/jstests/slow1/replsets_priority1.js index c9ec08fd78f..614c6b7cec7 100644 --- a/jstests/slow1/replsets_priority1.js +++ b/jstests/slow1/replsets_priority1.js @@ -9,7 +9,7 @@ var rs = new ReplSetTest( {name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}} var nodes = rs.startSet(); rs.initiate(); -var master = rs.getMaster(); +var master = rs.getPrimary(); var everyoneOkSoon = function() { var status; @@ -133,7 +133,7 @@ for (i=0; i<n; i++) { try { master.adminCommand({replSetReconfig : config}); - master = rs.getMaster(); + master = rs.getPrimary(); reconnect(master); version = master.getDB("local").system.replset.findOne().version; @@ -148,7 +148,7 @@ for (i=0; i<n; i++) { print("\nreplsets_priority1.js wait for 2 slaves"); assert.soon(function() { - rs.getMaster(); + rs.getPrimary(); return rs.liveNodes.slaves.length == 2; }, "2 slaves"); @@ -177,7 +177,7 @@ for (i=0; i<n; i++) { rs.stop(max._id); - var master = rs.getMaster(); + var master = rs.getPrimary(); print("\nkilled max 
primary. Checking statuses."); @@ -187,7 +187,7 @@ for (i=0; i<n; i++) { print("restart max " + max._id); rs.restart(max._id); - master = rs.getMaster(); + master = rs.getPrimary(); print("max restarted. Checking statuses."); checkPrimaryIs(max); diff --git a/jstests/slow2/mr_during_migrate.js b/jstests/slow2/mr_during_migrate.js index 78e48eb9db8..ae0ae7ce0fe 100644 --- a/jstests/slow2/mr_during_migrate.js +++ b/jstests/slow2/mr_during_migrate.js @@ -40,14 +40,14 @@ var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this // Creates a number of migrations of random chunks to diff shard servers var ops = [] -for(var i = 0; i < st._shardServers.length; i++) { +for(var i = 0; i < st._connections.length; i++) { ops.push({ op: "command", ns: "admin", command: { moveChunk: "" + coll, find: { _id: { "#RAND_INT" : [ 0, numDocs ] }}, - to: st._shardServers[i].shardName, + to: st._connections[i].shardName, _waitForDelete: true }, showResult: true diff --git a/jstests/slow2/replsets_killop.js b/jstests/slow2/replsets_killop.js index a5c708ae1c5..603e1f9c63e 100644 --- a/jstests/slow2/replsets_killop.js +++ b/jstests/slow2/replsets_killop.js @@ -7,7 +7,7 @@ numDocs = 1e5; replTest = new ReplSetTest( { name:'test', nodes:3 } ); nodes = replTest.startSet(); replTest.initiate(); -primary = replTest.getMaster(); +primary = replTest.getPrimary(); secondary = replTest.getSecondary(); db = primary.getDB( 'test' ); db.test.save( { a:0 } ); diff --git a/jstests/slow2/rollback4.js b/jstests/slow2/rollback4.js index 77f4dbd06c8..e77a8c59f1e 100644 --- a/jstests/slow2/rollback4.js +++ b/jstests/slow2/rollback4.js @@ -19,7 +19,7 @@ var r = replTest.initiate({ "_id": "unicomplex", replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000); // Make sure we have a master -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var b_conn = conns[1]; b_conn.setSlaveOk(); var B = b_conn.getDB("admin"); @@ -50,7 +50,7 @@ replTest.stop( 0 ); // after the node reports that it is primary via heartbeats, but before ismaster indicates that the // node will accept writes. replTest.waitForState(conns[1], replTest.PRIMARY, 5 * 60 * 1000); -master = replTest.getMaster(5 * 60 * 1000); +master = replTest.getPrimary(5 * 60 * 1000); // Save to new master, forcing rollback of old master master.getDB( 'db' ).c.save( big ); diff --git a/jstests/ssl/initial_sync1_x509.js b/jstests/ssl/initial_sync1_x509.js index f767dba0dde..9674056eba9 100644 --- a/jstests/ssl/initial_sync1_x509.js +++ b/jstests/ssl/initial_sync1_x509.js @@ -16,7 +16,7 @@ function runInitialSyncTest() { var conns = replTest.startSet(); replTest.initiate(); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); var foo = master.getDB("foo"); var admin = master.getDB("admin"); @@ -40,7 +40,7 @@ function runInitialSyncTest() { replTest.awaitReplication(); print("5. 
Insert some stuff"); - master = replTest.getMaster(); + master = replTest.getPrimary(); bulk = foo.bar.initializeUnorderedBulkOp(); for (var i = 0; i < 100; i++) { bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" }); diff --git a/jstests/ssl/upgrade_to_ssl.js b/jstests/ssl/upgrade_to_ssl.js index 802e99d9eeb..0c4a2caf222 100644 --- a/jstests/ssl/upgrade_to_ssl.js +++ b/jstests/ssl/upgrade_to_ssl.js @@ -19,14 +19,14 @@ var rst = new ReplSetTest({ name: 'sslSet', nodes: 3, nodeOptions : opts }); rst.startSet(); rst.initiate(); -var rstConn1 = rst.getMaster(); +var rstConn1 = rst.getPrimary(); rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"}); assert.eq(1, rstConn1.getDB("test").a.count(), "Error interacting with replSet"); print("===== UPGRADE allowSSL -> preferSSL ====="); opts.sslMode = "preferSSL"; rst.upgradeSet(opts); -var rstConn2 = rst.getMaster(); +var rstConn2 = rst.getPrimary(); rstConn2.getDB("test").a.insert({a:2, str:"CHECKCHECK"}); assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet"); @@ -37,7 +37,7 @@ assert.eq(0, canConnectNoSSL, "non-SSL Connection attempt failed when it should print("===== UPGRADE preferSSL -> requireSSL ====="); opts.sslMode = "requireSSL"; rst.upgradeSet(opts); -var rstConn3 = rst.getMaster(); +var rstConn3 = rst.getPrimary(); rstConn3.getDB("test").a.insert({a:3, str:"GREENEGGSANDHAM"}); assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet"); diff --git a/jstests/ssl/upgrade_to_x509_ssl.js b/jstests/ssl/upgrade_to_x509_ssl.js index a7f8b571d29..2fef4e3c149 100644 --- a/jstests/ssl/upgrade_to_x509_ssl.js +++ b/jstests/ssl/upgrade_to_x509_ssl.js @@ -26,7 +26,7 @@ rst.startSet(); rst.initiate(); // Connect to master and do some basic operations -var rstConn1 = rst.getMaster(); +var rstConn1 = rst.getPrimary(); print("Performing basic operations on master."); rstConn1.getDB("admin").createUser({user:"root", pwd:"pwd", roles:["root"]}, {w: NUM_NODES}); rstConn1.getDB("admin").auth("root", "pwd"); @@ -43,7 +43,7 @@ rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT}, "root", "pwd"); // The upgradeSet call restarts the nodes so we need to reauthenticate. 
authAllNodes(); -var rstConn3 = rst.getMaster(); +var rstConn3 = rst.getPrimary(); rstConn3.getDB("test").a.insert({a:3, str:"TESTTESTTEST"}); assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet"); rst.awaitReplication(); @@ -57,6 +57,6 @@ rst.upgradeSet({sslMode:"requireSSL", sslPEMKeyFile: SERVER_CERT, clusterAuthMode:"x509", keyFile: KEYFILE, sslCAFile: CA_CERT}, "root", "pwd"); authAllNodes(); -var rstConn4 = rst.getMaster(); +var rstConn4 = rst.getPrimary(); rstConn4.getDB("test").a.insert({a:4, str:"TESTTESTTEST"}); assert.eq(4, rstConn4.getDB("test").a.count(), "Error interacting with replSet"); diff --git a/jstests/sslSpecial/upgrade_to_ssl_nossl.js b/jstests/sslSpecial/upgrade_to_ssl_nossl.js index 1f5f0002d4e..53f7bd77fb9 100644 --- a/jstests/sslSpecial/upgrade_to_ssl_nossl.js +++ b/jstests/sslSpecial/upgrade_to_ssl_nossl.js @@ -13,19 +13,19 @@ var rst = new ReplSetTest({ name: 'sslSet', nodes: 3, nodeOptions : {sslMode:"di rst.startSet(); rst.initiate(); -var rstConn1 = rst.getMaster(); +var rstConn1 = rst.getPrimary(); rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"}); assert.eq(1, rstConn1.getDB("test").a.count(), "Error interacting with replSet"); print("===== UPGRADE disabled -> allowSSL ====="); rst.upgradeSet({sslMode:"allowSSL", sslPEMKeyFile: SERVER_CERT, sslAllowInvalidCertificates:""}); -var rstConn2 = rst.getMaster(); +var rstConn2 = rst.getPrimary(); rstConn2.getDB("test").a.insert({a:2, str:"TESTTESTTEST"}); assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet"); print("===== UPGRADE allowSSL -> preferSSL ====="); rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT, sslAllowInvalidCertificates:""}); -var rstConn3 = rst.getMaster(); +var rstConn3 = rst.getPrimary(); rstConn3.getDB("test").a.insert({a:3, str:"TESTTESTTEST"}); assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet"); diff --git a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js index 72bcc9fc76d..74aae02a896 100644 --- a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js +++ b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js @@ -23,7 +23,7 @@ rst.startSet(); rst.initiate(); // Connect to master and do some basic operations -var rstConn1 = rst.getMaster(); +var rstConn1 = rst.getPrimary(); rstConn1.getDB("admin").createUser({user: "root", pwd: "pwd", roles: ["root"]}, {w: NUM_NODES}); rstConn1.getDB("admin").auth("root", "pwd"); rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"}); @@ -38,7 +38,7 @@ rst.upgradeSet({sslMode:"allowSSL", sslPEMKeyFile: SERVER_CERT, authAllNodes(); rst.awaitReplication(); -var rstConn2 = rst.getMaster(); +var rstConn2 = rst.getPrimary(); rstConn2.getDB("test").a.insert({a:2, str:"CHECKCHECKCHECK"}); assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet"); @@ -50,7 +50,7 @@ rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT, authAllNodes(); rst.awaitReplication(); -var rstConn3 = rst.getMaster(); +var rstConn3 = rst.getPrimary(); rstConn3.getDB("test").a.insert({a:3, str:"PEASandCARROTS"}); assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet"); @@ -67,7 +67,7 @@ rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT}, "root", "pwd"); authAllNodes(); rst.awaitReplication(); -var rstConn4 = rst.getMaster(); +var rstConn4 = rst.getPrimary(); rstConn4.getDB("test").a.insert({a:4, str:"BEEP BOOP"}); rst.awaitReplication(); assert.eq(4, 
rstConn4.getDB("test").a.count(), "Error interacting with replSet"); diff --git a/jstests/tool/dumprestore10.js b/jstests/tool/dumprestore10.js index 858032827a7..6cf3cbbbfa1 100644 --- a/jstests/tool/dumprestore10.js +++ b/jstests/tool/dumprestore10.js @@ -13,7 +13,7 @@ step(); var replTest = new ReplSetTest( {name: name, nodes: 2} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); var total = 1000; { diff --git a/jstests/tool/dumprestore7.js b/jstests/tool/dumprestore7.js index 04414bf85a8..9a7d09665ef 100644 --- a/jstests/tool/dumprestore7.js +++ b/jstests/tool/dumprestore7.js @@ -11,7 +11,7 @@ step(); var replTest = new ReplSetTest( {name: name, nodes: 1} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); { step("first chunk of data"); @@ -24,7 +24,7 @@ var master = replTest.getMaster(); { step("wait"); replTest.awaitReplication(); - var time = replTest.getMaster().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next(); + var time = replTest.getPrimary().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next(); step(time.ts.t); } diff --git a/jstests/tool/dumpsecondary.js b/jstests/tool/dumpsecondary.js index 00f166dcf4c..7a641542498 100644 --- a/jstests/tool/dumpsecondary.js +++ b/jstests/tool/dumpsecondary.js @@ -3,7 +3,7 @@ var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} ); var nodes = replTest.startSet(); replTest.initiate(); -var master = replTest.getMaster(); +var master = replTest.getPrimary(); db = master.getDB("foo") db.foo.save({a: 1000}); replTest.awaitReplication(); diff --git a/jstests/tool/tool_replset.js b/jstests/tool/tool_replset.js index b5e8059045d..af5c7981482 100644 --- a/jstests/tool/tool_replset.js +++ b/jstests/tool/tool_replset.js @@ -23,7 +23,7 @@ config.members[0].priority = 3; config.members[1].priority = 0; replTest.initiate(config); - var master = replTest.getMaster(); + var master = replTest.getPrimary(); assert.eq(nodes[0], master, "incorrect master elected"); for (var i = 0; i < 100; i++) { assert.writeOK(master.getDB("foo").bar.insert({ a: i })); |
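The hunks above apply one pattern throughout the test suite: callers obtain the primary through ReplSetTest.getPrimary() rather than the old getMaster() name, and admin commands are wrapped in assert.commandWorked() so a failed command fails the test immediately. The sketch below is illustrative only and is not a file from this commit; it assumes the standard mongo shell test helpers (ReplSetTest, assert) that these jstests already use, and the collection and database names in it are made up for the example.

// Illustrative sketch only -- not part of this commit. Assumes the shell
// test helpers (ReplSetTest, assert) used by the jstests above.
var rst = new ReplSetTest({name: 'exampleSet', nodes: 3});
rst.startSet();
rst.initiate();

// getPrimary() is the call used throughout this change in place of getMaster().
var primary = rst.getPrimary();
var testDB = primary.getDB('test');

// Wrap admin commands so an unexpected failure surfaces immediately.
assert.commandWorked(testDB.adminCommand({ping: 1}));

// Write with an explicit write concern, then wait for the secondaries.
assert.writeOK(testDB.foo.insert({_id: 1}, {writeConcern: {w: 3, wtimeout: 30000}}));
rst.awaitReplication();

rst.stopSet();

The same shape repeats in each file touched here, differing only in the test-specific databases, collections, and write concerns.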